class DataNotAvailableLayer(InternalLayer):
"""
This is a dummy layer that is created when the output template is flagged "not available for inference".
The output template should be passed to the constructor to correctly forward the information
in case any dependent output is exported with "register_as_extern_data".

See :func:`returnn.tf.network._create_layer`.
"""
def __init__(self, layer_class, layer_desc, **kwargs):
'\n :param type[LayerBase] layer_class:\n :param dict[str] layer_desc:\n '
super(DataNotAvailableLayer, self).__init__(**kwargs)
self.layer_class_ = layer_class
self.layer_desc = layer_desc
def get_sub_layer(self, layer_name):
"\n :param str layer_name: name of the sub_layer (right part of '/' separated path)\n :rtype: LayerBase|None\n "
cls = self.layer_class_
assert issubclass(cls, LayerBase)
res = cls.get_sub_layer_out_data_from_opts(layer_name=layer_name, parent_layer_kwargs=self.layer_desc)
if (not res):
return None
(out, sub_layer_class, opts) = res
assert isinstance(out, Data)
assert issubclass(sub_layer_class, LayerBase)
return DataNotAvailableLayer(name=('%s/%s' % (self.name, layer_name)), network=self.network, output=out, layer_class=sub_layer_class, layer_desc=opts)
class WrappedInternalLayer(InternalLayer):
"""
This is not supposed to be used by the user. Like :class:`InternalLayer`, only intended for internal usage.
This layer is supposed to logically wrap another layer.
"""
def __init__(self, base_layer, sources=None, **kwargs):
'\n :param LayerBase base_layer: the layer which we are wrapping\n :param list[LayerBase]|None sources: by default [base_layer]. overwrite to explicitly specify the layer deps\n '
if (sources is None):
sources = [base_layer]
super(WrappedInternalLayer, self).__init__(sources=sources, **kwargs)
self.base_layer = base_layer
self.params.update(base_layer.params)
def __repr__(self):
return ('<%s%s%s %s%r out_type=%s>' % (self.__class__.__name__, f'({self.base_layer.__class__.__name__})', (f'({self.debug_type_name})' if self.debug_type_name else ''), self.network.get_absolute_name_prefix(), self.name, (self.output.get_description(with_name=False) if self.output else None)))
def get_base_absolute_name_scope_prefix(self):
'\n :rtype: str\n '
return self.base_layer.get_base_absolute_name_scope_prefix()
def get_absolute_name_scope_prefix(self):
'\n :rtype: str\n '
return self.base_layer.get_absolute_name_scope_prefix()
class ReuseParams():
"""
This is for parameter sharing, i.e. reusing existing `tf.Variable` objects in a new layer,
instead of creating new variables.
:func:`ReuseParams.from_config_dict` will be called via :func:`LayerBase.transform_config_dict`.
"""
@classmethod
def from_config_dict(cls, opts, network, get_layer):
"""
This will be called via :func:`LayerBase.transform_config_dict` on the layer option `"reuse_params"`.

:param str|dict[str]|None opts:
  If None, we will return None.
  If str, it will be interpreted as a layer name.
  If dict, you can specify:
    "reuse_layer": layer name
    "map": dict where the keys are parameter names, and the values can be:
      A str, interpreted as a layer name.
      None, interpreted as the option `auto_create_missing`.
      A dict, specifying :func:`ReuseParams.__init__` options.
        The option reuse_layer would then be specified as a str, representing a layer name.
:param returnn.tf.network.TFNetwork network:
:param ((str) -> LayerBase) get_layer: function to get or construct another layer
:rtype: ReuseParams|None
"""
if (not opts):
return None
def optional_get_layer(layer_name):
'\n :param str layer_name:\n :rtype: LayerBase|ReuseParams.LazyLayerResolver\n '
from returnn.tf.network import NetworkConstructionDependencyLoopException, LayerNotFound
try:
return get_layer(layer_name)
except (NetworkConstructionDependencyLoopException, LayerNotFound):
return ReuseParams.LazyLayerResolver(layer_name=layer_name, network=network, get_layer=get_layer)
if isinstance(opts, str):
return ReuseParams(reuse_layer=optional_get_layer(opts))
assert isinstance(opts, dict)
opts = opts.copy()
if ('reuse_layer' in opts):
opts['reuse_layer'] = optional_get_layer(opts['reuse_layer'])
if ('map' in opts):
assert isinstance(opts['map'], dict), ("reuse_params['map'] should be a dict but is %s" % (type(opts['map']),))
opts['map'] = opts['map'].copy()
for (key, value) in sorted(opts['map'].items()):
if isinstance(value, str):
value = {'reuse_layer': optional_get_layer(value)}
elif (value is None):
value = {'auto_create_missing': True}
else:
assert isinstance(value, dict)
value = value.copy()
if value.get('reuse_layer', None):
value['reuse_layer'] = optional_get_layer(value['reuse_layer'])
if value.get('layer_output', None):
value['layer_output'] = get_layer(value['layer_output'])
opts['map'][key] = ReuseParams(**value)
return ReuseParams(**opts)
class LazyLayerResolver():
"""
Unfortunately this is a bit tricky and difficult to do right.
We want to support it because it can happen that e.g. in training, this layer resolving is not needed,
and then in search, it is needed, due to different dependencies.
See :func:`test_reuse_params_map_custom_dep_loop` for an example.
The params depend on a layer which is not constructed yet and cannot be constructed yet
because of a dependency loop.
Thus, here we again try to create it, and if we still get the dependency loop,
we create the reused-params-layer based on dummy inputs, such that the variables/parameters get created
and can be used now. Then, later, we are going to recreate the reused-params-layer.
"""
def __init__(self, layer_name, network, get_layer):
'\n :param str layer_name:\n :param returnn.tf.network.TFNetwork network:\n :param ((str) -> LayerBase) get_layer:\n '
self.layer_name = layer_name
self.network = network
self.get_layer_func = get_layer
self.var_scope = tf_compat.v1.get_variable_scope()
def __repr__(self):
return ('<%s layer %r, net %r>' % (self.__class__.__name__, self.layer_name, self.network))
def get_layer(self):
'\n :rtype: LayerBase\n '
from returnn.tf.network import NetworkConstructionDependencyLoopException, LayerNotFound
from returnn.tf.util.basic import reuse_name_scope
with reuse_name_scope(self.var_scope):
try:
return self.get_layer_func(self.layer_name)
except (NetworkConstructionDependencyLoopException, LayerNotFound):
return self.create_dummy_layer()
def create_dummy_layer(self):
'\n :rtype: LayerBase\n '
from .basic import get_layer_class
print((('ReuseParams: layer %r does not exist yet and there is a dependency loop, ' + 'thus creating it on dummy inputs now') % self.layer_name), file=log.v4)
layer_name = self.layer_name
network = self.network
with_time_dim = False
while (layer_name.startswith('base:') and network.parent_net):
if (network.parent_layer and network.parent_layer.output.have_time_axis()):
with_time_dim = True
layer_name = layer_name[len('base:'):]
network = network.parent_net
def get_dummy_input_layer(layer_name):
'\n :param str layer_name:\n :rtype: LayerBase\n '
if (layer_name in network.layers):
return network.layers[layer_name]
output = None
net = network
def opt_get_layer(layer_name):
'\n :param str layer_name:\n :rtype: LayerBase\n '
if (layer_name in net.layers):
return net.layers[layer_name]
print(('ReuseParams: non-existing layer %r in %r, ignoring...' % (layer_name, net)), file=log.v4)
return InternalLayer(name=layer_name, network=net, output=Data(name=('LazyLayerResolver_dummy_output_%s' % layer_name), shape=((None, 1) if with_time_dim else ())))
if ((self.network.parent_net is network) and self.network.parent_layer):
if layer_name.startswith((self.network.parent_layer.name + '/')):
net = self.network
layer_name = layer_name[(len(net.parent_layer.name) + 1):]
if (layer_name in net.layers):
output = net.layers[layer_name].output.copy_template()
if ((not output.have_time_axis()) and with_time_dim):
output = output.copy_template_adding_time_dim().copy_template_set_ctx(network.get_control_flow_ctx())
if (not output):
layer_desc_ = net.layers_desc[layer_name].copy()
class_name_ = layer_desc_.pop('class')
layer_class_ = get_layer_class(class_name_)
layer_desc_['_network'] = net
layer_desc_['_name'] = layer_name
layer_class_.transform_config_dict(layer_desc_, network=net, get_layer=opt_get_layer)
layer_desc_ = net._create_layer_layer_desc(name=layer_name, layer_desc=layer_desc_)
output = layer_class_.get_out_data_from_opts(**layer_desc_).copy()
output.beam = None
output.placeholder = tf.zeros([(d or 1) for d in output.batch_shape], dtype=output.dtype, name=('%s_dummy' % output.name))
if (not output.size_placeholder):
output.size_placeholder = {}
for (i, dim) in enumerate(output.shape):
if ((dim is None) and (i not in output.size_placeholder)):
output.size_placeholder[i] = tf.ones([1], dtype=tf.int32, name='dummy_reuse_params_size')
output.sanity_check()
print(('ReuseParams: creating dummy input %r with %r' % (layer_name, output)), file=log.v4)
return InternalLayer(name=layer_name, network=network, output=output)
layer_desc = network.layers_desc[layer_name].copy()
class_name = layer_desc.pop('class')
layer_class = get_layer_class(class_name)
layer_desc['_network'] = network
layer_desc['_name'] = layer_name
layer_class.transform_config_dict(layer_desc, network=network, get_layer=get_dummy_input_layer)
with reuse_name_scope(network.get_absolute_name_scope_prefix()[:(- 1)], absolute=True):
return network._create_layer(name=layer_name, layer_class=layer_class, **layer_desc)
def __init__(self, reuse_layer=None, map=None, custom=None, auto_create_missing=False, layer_output=None, shape=None):
"""
:param LayerBase|ReuseParams.LazyLayerResolver|None reuse_layer:
:param dict[str,ReuseParams]|None map:
:param (**kwargs)->(tf.Tensor|tf.Variable) custom: see :func:`self.variable_custom_getter`
:param bool auto_create_missing:
:param LayerBase|None layer_output:
:param tuple[Dim]|None shape:
"""
assert (isinstance(reuse_layer, (LayerBase, ReuseParams.LazyLayerResolver)) or (not reuse_layer))
self._reuse_layer = reuse_layer
self.param_map = map
self.custom_func = custom
self.auto_create_missing = auto_create_missing
self.layer_output = layer_output
self.shape = shape
def __repr__(self):
return ('<%s reuse_layer %r, map %r>' % (self.__class__.__name__, self._reuse_layer, self.param_map))
@property
def reuse_layer(self):
'\n :rtype: LayerBase|None\n '
if self._reuse_layer:
if isinstance(self._reuse_layer, ReuseParams.LazyLayerResolver):
self._reuse_layer = self._reuse_layer.get_layer()
assert isinstance(self._reuse_layer, LayerBase)
return self._reuse_layer
return None
def get_variable_scope(self, base_layer, **kwargs):
'\n :param LayerBase base_layer:\n :param kwargs: passed to tf.compat.v1.variable_scope\n :rtype: tf.compat.v1.VariableScope\n '
def _variable_custom_getter(**kwargs_):
return self.variable_custom_getter(base_layer=base_layer, **kwargs_)
with tf_compat.v1.variable_scope(tf_compat.v1.get_variable_scope(), custom_getter=_variable_custom_getter, **kwargs) as scope:
return scope
def variable_custom_getter(self, base_layer, name, shape, dtype, getter, **kwargs):
"""
By TF docs, from :func:`_VariableStore.get_variable`:
Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
    return getter(name + '_suffix', *args, **kwargs)
```
In addition, we get the argument `base_scope_name`, via :func:`self.get_variable_scope`.

:param LayerBase base_layer: we expect that this is the prefix of ``name``
:param str name: absolute param name
:param tuple[int]|list[int] shape:
:param tensorflow.DType dtype:
:param (...)->tf.Variable getter:
:rtype: tf.Variable|tf.Tensor
"""
if (self.shape is not None):
assert (tuple(shape) == tuple((d.dimension for d in self.shape))), ('%s: unexpected shape %r for param %r, expected %r' % (self, shape, name, self.shape))
abs_scope_prefix = base_layer.get_absolute_name_scope_prefix()
assert ((not abs_scope_prefix) or abs_scope_prefix.endswith('/'))
assert name.startswith(abs_scope_prefix)
param_name = name[len(abs_scope_prefix):]
if self.custom_func:
return self.custom_func(base_layer=base_layer, reuse_layer=self.reuse_layer, full_name=name, name=param_name, shape=shape, dtype=dtype, getter=getter, **kwargs)
if (self.param_map is not None):
if (not self.auto_create_missing):
assert (param_name in self.param_map)
if (param_name in self.param_map):
return self.param_map[param_name].variable_custom_getter(base_layer=base_layer, name=name, shape=shape, dtype=dtype, getter=getter, **kwargs)
if self.reuse_layer:
if (not self.auto_create_missing):
assert (param_name in self.reuse_layer.params)
if (param_name in self.reuse_layer.params):
return self.reuse_layer.params[param_name]
if self.layer_output:
if (self.shape is not None):
out = self.layer_output.output.copy_compatible_to_dims(self.shape)
return out.placeholder
assert (tuple(shape) == self.layer_output.output.batch_shape)
return self.layer_output.output.placeholder
assert self.auto_create_missing
return getter(name=name, shape=shape, dtype=dtype, **kwargs)
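# Illustrative only (not part of this module): a hedged sketch of the "reuse_params"
# layer option in the dict form accepted by ReuseParams.from_config_dict above.
# The layer names ("target_embed", "output_prob", "decoder") and the transpose-based
# weight tying are hypothetical examples, not something this excerpt prescribes.
_example_reuse_params_layer_config = {
    "output_prob": {
        "class": "softmax",
        "from": "decoder",
        "reuse_params": {
            "map": {
                # reuse the embedding matrix of another layer, transposed
                "W": {
                    "reuse_layer": "target_embed",
                    "custom": (lambda reuse_layer, **kwargs: tf.transpose(reuse_layer.params["W"])),
                },
                # None means auto_create_missing, i.e. create a fresh bias here
                "b": None,
            }
        },
    }
}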
class SearchChoices(object):
"""
In beam search, after expanding the beam and then selecting the N best (beam) (see :class:`ChoiceLayer`),
when doing this multiple times, we need to keep a reference to where each beam came from,
and what the current score is, etc.
Also, we could have multiple different such expansions & prunes via different :class:`ChoiceLayer`s.
This is what we keep track of here.
"""
def __init__(self, owner, beam_size, is_decided=False, keep_raw=False):
'\n :param LayerBase owner:\n :param int beam_size:\n :param bool is_decided: by :class:`DecideLayer`\n :param bool keep_raw: by :class:`DecideKeepBeamLayer`\n '
assert (beam_size is not None)
self.owner = owner
self._done_src_layer = False
self._src_layer = None
self.src_beams = None
self.beam_size = beam_size
self.beam_scores = None
self.is_decided = is_decided
self.keep_raw = keep_raw
if (not owner.output.beam):
assert (beam_size == 1), ('owner %s has output %s without beam' % (owner, owner.output))
else:
assert (owner.output.beam.beam_size == beam_size)
owner.network.register_search_choices_for_beam(beam=owner.output.beam, search_choices=self)
def __repr__(self):
def short(v):
'\n :param LayerBase|tf.Tensor|None v:\n :return: short repr\n :rtype: str\n '
if isinstance(v, LayerBase):
return repr(v.name)
if isinstance(v, tf.Tensor):
if (v.get_shape().ndims is not None):
return ('shaped:(%s)' % ','.join(map(str, v.get_shape().as_list())))
return 'unknown-ndim'
return repr(v)
s = (' beam_size=%r' % self.beam_size)
if self._done_src_layer:
s += (' src_layer=%s' % short(self._src_layer))
s += (' beam_scores=%s' % short(self.beam_scores))
if self.is_decided:
s += ' is_decided'
if self.keep_raw:
s += ' keep_raw'
return ('<SearchChoices owner=%s%s>' % (short(self.owner), s))
@property
def src_layer(self):
'\n :return: The layer where we had the last search choices.\n :rtype: LayerBase\n '
if (not self._done_src_layer):
self._src_layer = self.owner.network.get_search_choices(base_search_choice=self.owner)
self._done_src_layer = True
return self._src_layer
def set_beam_from_own_rec(self):
'\n Assumes we have set self.owner, and uses those rec vars to set the beam scores.\n '
self.set_beam_from_rec(self.owner.rec_vars_outputs)
def set_beam_from_rec(self, rev_vars_outputs):
'\n :param dict[str,tf.Tensor] rev_vars_outputs: e.g. via :class:`ChoiceLayer`\n '
assert ((rev_vars_outputs.get('choice_scores', None) is not None) and (rev_vars_outputs.get('choice_src_beams', None) is not None))
self.beam_scores = rev_vars_outputs['choice_scores']
self.src_beams = rev_vars_outputs['choice_src_beams']
self.beam_scores.set_shape(self.src_beams.get_shape())
def set_src_beams(self, src_beam_idxs):
'\n :param tf.Tensor src_beam_idxs: source beam index, (batch, beam)\n '
if isinstance(self.beam_size, int):
src_beam_idxs.set_shape((None, self.beam_size))
self.src_beams = src_beam_idxs
self.owner.rec_vars_outputs['choice_src_beams'] = src_beam_idxs
def set_beam_scores(self, scores):
'\n :param tf.Tensor scores: (batch, beam) -> log score\n '
if isinstance(self.beam_size, int):
scores.set_shape((None, self.beam_size))
self.beam_scores = scores
self.owner.rec_vars_outputs['choice_scores'] = scores
def get_src_choices_seq(self):
'\n :return: all SearchChoices we depend on up to the root, including and starting with self\n :rtype: list[SearchChoices]\n '
sources = [self]
choice = self
while True:
src_layer = choice.src_layer
if (not src_layer):
break
assert isinstance(src_layer.search_choices, SearchChoices)
choice = src_layer.search_choices
if (choice in sources):
break
sources.append(choice)
return sources
def get_beam_info(self):
'\n :rtype: returnn.tf.util.data.SearchBeam|None\n '
if (self.owner.output.beam is None):
assert (self.beam_size == 1)
return None
assert (self.owner.output.beam.beam_size == self.beam_size)
return self.owner.output.beam
def __eq__(self, other):
return (self is other)
def __ne__(self, other):
return (self is not other)
@staticmethod
def compare(self, other):
'\n Also see :func:`TFNetwork.get_search_choices.compare_layer`, which is basically the same.\n\n :param SearchChoices|None self:\n :param SearchChoices|None other:\n :return: 0 if equal, -1 if we are smaller, else 1\n :rtype: int\n '
if (self is other):
return 0
if (self is None):
return (- 1)
if (other is None):
return 1
if (self.keep_raw or other.keep_raw):
return 0
self_norm_layer = self.owner.get_normalized_layer()
other_norm_layer = other.owner.get_normalized_layer()
if ((self_norm_layer != self.owner) and (other_norm_layer != other.owner)):
assert (self_norm_layer.search_choices and other_norm_layer.search_choices)
return SearchChoices.compare(self=self_norm_layer.search_choices, other=other_norm_layer.search_choices)
self_src_choices = self.get_src_choices_seq()
other_src_choices = other.get_src_choices_seq()
if ((self in other_src_choices) and (other not in self_src_choices)):
return (- 1)
if ((other in self_src_choices) and (self not in other_src_choices)):
return 1
from pprint import pformat
raise Exception(('Cannot compare search choices\n %r,\n %r\nwhich have traces:\n%s,\n%s' % (self, other, pformat(self_src_choices), pformat(other_src_choices))))
def __cmp__(self, other):
return self.compare(self, other)
def __lt__(self, other):
return (self.__cmp__(other) < 0)
def __gt__(self, other):
return (self.__cmp__(other) > 0)
def translate_to_this_search_beam(self, sources):
'\n :param LayerBase|list[LayerBase]|dict[str,LayerBase|object]|tuple[LayerBase|object]|T sources:\n :return: sources but all layers transformed when needed\n :rtype: T\n '
from .basic import SelectSearchSourcesLayer
d = sources
if isinstance(d, dict):
return {k: self.translate_to_this_search_beam(v) for (k, v) in d.items()}
if isinstance(d, (tuple, list)):
from returnn.util.basic import make_seq_of_type
return make_seq_of_type(type(d), [self.translate_to_this_search_beam(v) for v in d])
if isinstance(d, LayerBase):
return SelectSearchSourcesLayer.select_if_needed(d, search_choices=self)
return d
@classmethod
def translate_to_common_search_beam(cls, layer_desc):
'\n :param list[LayerBase]|dict[str,LayerBase|object] layer_desc:\n :return: sources but all layers transformed when needed\n :rtype: list[LayerBase]|dict[str,LayerBase|object]\n '
assert ('_src_common_search_choices' not in layer_desc)
from tensorflow.python.util import nest
layers_flat = [v for v in nest.flatten(layer_desc) if isinstance(v, LayerBase)]
if (len(layers_flat) <= 1):
return layer_desc
search_choicess = []
for layer in layers_flat:
if (not layer.output.beam):
continue
if layer.network.is_extra_internal_template_construction():
continue
search_choices = layer.get_search_choices()
from pprint import pformat
assert search_choices, ('layer %r has beam %r but no search choices; from layer desc\n%s' % (layer, layer.output.beam, pformat(layer_desc)))
search_choicess.append(search_choices)
if (not search_choicess):
return layer_desc
from functools import cmp_to_key
common_choices = max(search_choicess, key=cmp_to_key(cls.compare))
layer_desc = layer_desc.copy()
layer_desc['_src_common_search_choices'] = common_choices
return common_choices.translate_to_this_search_beam(layer_desc)
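# Illustrative only: the partial order defined by SearchChoices.compare above is used
# to pick the most specific (deepest) search choices among several layers, mirroring
# translate_to_common_search_beam:
#
#   from functools import cmp_to_key
#   common = max(all_search_choices, key=cmp_to_key(SearchChoices.compare))
#
# where all_search_choices would be a list[SearchChoices] collected from the relevant
# layers (hypothetical variable name).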
class Loss(object):
'\n Base class for all losses.\n '
class_name = None
recurrent = False
need_target = True
_check_output_before_softmax = True
def __init__(self, base_network, use_flatten_frames=True, use_normalized_loss=False, custom_norm_factor=None, custom_inv_norm_factor=None, scale=1.0, _check_output_before_softmax=None):
"""
:param returnn.tf.network.TFNetwork base_network:
:param bool use_flatten_frames: will use :func:`returnn.tf.util.basic.flatten_with_seq_len_mask`
:param bool use_normalized_loss: the loss used in optimization will be normalized
:param float|function|None custom_norm_factor:
  The standard norm factor is 1/sum(target_seq_len) if the target has a time-axis,
  or 1/sum(output_seq_len) if there is no target and the output has a time-axis,
  or 1 otherwise. (See :func:`Loss.init` for details.)
  This is used for proper normalization of accumulated loss/error per epoch
  and also proper normalization per batch for reporting,
  no matter if use_normalized_loss is True or False.
  If you want to change this norm factor, you can set this.
  As a function, it takes (self=self, output=output, layer=layer) and returns a float scalar.
:param LayerBase|None custom_inv_norm_factor: inverse of custom_norm_factor.
  Here we allow to pass a layer.
  Here we also allow to pass any shape, and it will automatically be reduced via sum.
  So you could simply pass target_seq_len directly here.
  Basically, for all reporting, it uses sum(loss) * sum(custom_inv_norm_factor).
:param float scale: additional scale factor for the loss
:param bool|None _check_output_before_softmax:
"""
self.base_network = base_network
self.use_flatten_frames = use_flatten_frames
self.layer = None
self.output = None
self.output_with_activation = None
self.output_seq_lens = None
self.target = None
self.target_seq_lens = None
self.output_flat = None
self.output_before_softmax_flat = None
if (_check_output_before_softmax is not None):
self._check_output_before_softmax = _check_output_before_softmax
self.target_flat = None
self.loss_norm_factor = None
self.use_normalized_loss = use_normalized_loss
self.custom_norm_factor = custom_norm_factor
self.custom_inv_norm_factor = custom_inv_norm_factor
if custom_inv_norm_factor:
assert (custom_norm_factor is None), ('%s: do not provide both custom_norm_factor and custom_inv_norm_factor' % self)
self.scale = scale
def __repr__(self):
return ('<%s %r>' % (self.__class__.__name__, (self.layer or self.output)))
def _reduce_batch_time(self):
'\n :return: In self.reduce_func, whether to expect that the loss is of shape (batch*time|time*batch,).\n :rtype: bool\n '
if self.use_flatten_frames:
return False
if self.recurrent:
return False
if (not self.output.have_time_axis()):
return False
return True
def _reduce_to_batch_time_with_mask(self, loss, normalize=False):
'\n :param tf.Tensor loss: (batch*time,...) or (time*batch,...) depending if self.output is batch/time major\n :param bool normalize: for remaining dims. False -> use tf.reduce_sum, True -> use tf.reduce_mean\n :return: (batch*time,) or (time*batch,)\n :rtype: tf.Tensor\n '
assert ({self.output.batch_dim_axis, self.output.time_dim_axis} == {0, 1})
if (loss.get_shape().ndims > 1):
reduce_func = (tf.reduce_mean if normalize else tf.reduce_sum)
loss = reduce_func(loss, axis=list(range(1, loss.get_shape().ndims)))
mask = self.output.get_sequence_mask()
mask = tf.reshape(mask, [tf.shape(loss)[0]])
loss = tf.where(mask, loss, tf.zeros_like(loss), 'loss_masked')
return loss
def reduce_func(self, loss):
'\n Reduces the frames.\n Currently the sum, and we do averaging later.\n We might change this logic at some point.\n Also, some code overwrites this function externally,\n e.g. with returnn.tf.util.basic.identity, to not do reducing.\n\n :param tf.Tensor loss: e.g. (batch*time,), or (time_flat,), or (batch*time,dim), etc\n :return: by default just a scalar. but this can be overwritten, to not reduce\n :rtype: tf.Tensor\n '
if self._reduce_batch_time():
loss = self._reduce_to_batch_time_with_mask(loss)
return tf.reduce_sum(loss)
def reduce_to_batch(self, loss, normalize):
'\n :param tf.Tensor loss: e.g. (batch*time,), or (time_flat,), or (batch*time,dim), etc\n :param bool normalize: reduce mean instead of reduce sum\n :return: (batch,)\n :rtype: tf.Tensor\n '
if ((not self.recurrent) and self.output.have_time_axis()):
assert (not self.use_flatten_frames)
assert self._reduce_batch_time()
loss = self._reduce_to_batch_time_with_mask(loss, normalize=normalize)
loss.set_shape((None,))
loss = tf.reshape(loss, tf.shape(self.output.placeholder)[:2])
loss = tf.reduce_sum(loss, axis=self.output.time_dim_axis)
if normalize:
loss /= tf.cast(self.output.get_sequence_lengths(), tf.float32)
elif (loss.get_shape().ndims > 1):
reduce_func = (tf.reduce_mean if normalize else tf.reduce_sum)
loss = reduce_func(loss, axis=list(range(1, loss.get_shape().ndims)))
return loss
@classmethod
def transform_config_dict(cls, d, network, get_layer):
'\n :param dict[str] d: will modify inplace, the loss_opts\n :param returnn.tf.network.TFNetwork network:\n :param ((str) -> LayerBase) get_layer: function to get or construct another layer\n\n Will modify `d` such that it becomes the kwargs for `self.__init__()`.\n Mostly leaves `d` as-is.\n This is used by `LayerBase.transform_config_dict`.\n '
if (d.get('custom_inv_norm_factor', None) is not None):
d['custom_inv_norm_factor'] = get_layer(d['custom_inv_norm_factor'])
def init_by_layer(self, layer, layer_output_template=None):
'\n :param LayerBase|None layer:\n :param Data|None layer_output_template: maybe alternative template\n '
if (layer_output_template and layer_output_template.have_time_axis() and (not layer.output.have_time_axis())):
layer_output = layer_output_template
else:
layer_output = layer.output
if ((layer is self.layer) and (self.output is layer_output)):
return
self.init(output=layer_output, output_with_activation=layer.output_before_activation, target=layer._get_target_value(), layer=layer)
def _flatten_or_merge(self, data):
"\n :param Data data: (B,T,...) or (T,B,...)\n :return: (B*T|T*B|B',...)\n :rtype: tf.Tensor\n "
x = data.placeholder
if self.use_flatten_frames:
return tf_util.flatten_with_seq_len_mask(x, data.get_sequence_lengths(), time_major=data.is_time_major)
x_shape = tf_util.get_shape(x)
if (data.is_time_major != self.output.is_time_major):
x = tf_util.swapaxes(x, 0, 1)
return tf.reshape(x, ([(x_shape[0] * x_shape[1])] + x_shape[2:]), name='merge_batch_time')
def init(self, output, output_with_activation=None, target=None, layer=None):
'\n :param Data output: generated output\n :param OutputWithActivation|None output_with_activation:\n :param Data target: reference target from dataset\n :param LayerBase|None layer:\n '
if (not self._check_output_before_softmax):
output_with_activation = None
with tf.name_scope('loss_init'):
self.layer = layer
if target:
if output.beam:
if (target.beam != output.beam):
target = target.copy_extend_with_beam(output.beam)
else:
assert (not target.beam)
if ((output.feature_dim_axis is not None) and (output.feature_dim_axis != (output.batch_ndim - 1))):
if output_with_activation:
from returnn.tf.util.basic import move_axis
output_with_activation = OutputWithActivation(x=move_axis(output_with_activation.x, output.feature_dim_axis, (- 1)), act_func=output_with_activation.act_func)
output = output.copy_with_feature_dim_axis((- 1))
self.output = output
self.output_with_activation = output_with_activation
self.target = target
self.output_flat = None
self.output_before_softmax_flat = None
self.target_flat = None
self.output_seq_lens = None
self.target_seq_lens = None
self.loss_norm_factor = 1.0
out_dyn_axes = [axis for (axis, dim) in enumerate(self.output.batch_shape) if (dim is None)]
if (self.output.have_time_axis() and self.output.have_batch_axis()):
self.output_seq_lens = output.get_sequence_lengths()
time_and_batch_dims = (self.output.time_dim_axis, self.output.batch_dim_axis)
assert (time_and_batch_dims in [(0, 1), (1, 0)]), ('output time-batch-dim unexpected: %r (target %r)' % (self.output, self.target))
if (output_with_activation and (output_with_activation.act_func is tf.nn.softmax)):
out_before_act = output.copy(name=('%s_before_softmax' % output.name))
out_before_act.placeholder = output_with_activation.x
self.output_before_softmax_flat = self._flatten_or_merge(out_before_act)
else:
self.output_flat = self._flatten_or_merge(output)
self.output_flat.set_shape(tf.TensorShape(((None,) + output.shape[1:])))
if target:
assert target.have_time_axis()
self.target_seq_lens = target.get_sequence_lengths()
self.target_flat = self._flatten_or_merge(target)
self.loss_norm_factor = (1.0 / tf.cast(tf.reduce_sum(self.target_seq_lens), tf.float32))
else:
self.loss_norm_factor = (1.0 / tf.cast(tf.reduce_sum(self.output_seq_lens), tf.float32))
elif set(out_dyn_axes).difference([self.output.batch_dim_axis]):
assert (len(out_dyn_axes) == 1), f'Loss flattening not implemented for {self.output}'
self.output_flat = output.placeholder
num_el = rf.num_elements_of_shape([self.output.dims[a] for a in out_dyn_axes])
if isinstance(num_el, Data):
num_el = num_el.raw_tensor
self.loss_norm_factor = (1.0 / tf.cast(num_el, tf.float32))
else:
if (output_with_activation and (output_with_activation.act_func is tf.nn.softmax)):
self.output_before_softmax_flat = output_with_activation.x
else:
self.output_flat = output.placeholder
if self.output.have_batch_axis():
self.loss_norm_factor = (1.0 / tf.cast(tf.shape(self.output.placeholder)[self.output.batch_dim_axis], tf.float32))
else:
self.loss_norm_factor = 1.0
if target:
assert (not self.target.have_time_axis())
self.target_flat = target.placeholder
if (self.custom_norm_factor is not None):
if callable(self.custom_norm_factor):
self.loss_norm_factor = self.custom_norm_factor(self=self, output=output, layer=layer)
else:
assert isinstance(self.custom_norm_factor, float)
self.loss_norm_factor = self.custom_norm_factor
if self.custom_inv_norm_factor:
self.loss_norm_factor = (1.0 / tf.cast(tf.reduce_sum(self.custom_inv_norm_factor.output.placeholder), tf.float32))
self._check_init()
def _check_init(self):
"\n Does some checks on self.target and self.output, e.g. if the dense shapes matches.\n You can overwrite this if those checks don't make sense for your derived loss class.\n "
if (not self.target):
assert (not self.need_target), ('%s: did not get target' % self)
return
assert (self.target.placeholder is not None)
if ((self.output_before_softmax_flat is not None) or (self.output_flat is not None)):
assert (self.target_flat is not None), ('%s: have flat output (%r) but not flat targets (%r)' % (self, self.output, self.target))
assert (self.target.ndim_dense == self.output.ndim_dense), ('Number of dimensions mismatch. Target: %s, output: %s' % (self.target, self.output))
expected_output_dim = self.get_auto_output_layer_dim(self.target.feature_dim_or_sparse_dim)
assert (expected_output_dim.dimension == self.output.dim), (('Expected output dim is %r but the output has dim %r. ' % (expected_output_dim, self.output.feature_dim_or_sparse_dim)) + ('Target: %s, output: %s' % (self.target, self.output)))
if self.base_network.get_config().bool('debug_runtime_sanity_checks', False):
with tf.name_scope('Loss_debug_runtime_sanity_checks'):
checks = [self.output.get_runtime_sanity_check_op(), self.target.get_runtime_sanity_check_op()]
out_shape = tf.shape(self.output.placeholder)
target_shape = tf.shape(self.target.placeholder)
if (self.output.have_batch_axis() and self.target.have_batch_axis()):
out_batch_dim = out_shape[self.output.batch_dim_axis]
target_batch_dim = target_shape[self.target.batch_dim_axis]
checks += [tf.Assert(tf.equal(out_batch_dim, target_batch_dim), ['Loss_debug_runtime_sanity_checks', 'batch dim mismatch', 'output:', str(self.output), 'shape', out_shape, 'target:', str(self.target), 'shape', target_shape])]
if (not self.recurrent):
if (self.output.have_time_axis() and self.target.have_time_axis()):
out_time_dim = out_shape[self.output.time_dim_axis]
target_time_dim = target_shape[self.target.time_dim_axis]
checks += [tf.Assert(tf.equal(out_time_dim, target_time_dim), ['Loss_debug_runtime_sanity_checks', 'time dim mismatch', 'output:', str(self.output), 'shape', out_shape, 'target:', str(self.target), 'shape', target_shape])]
if self.output.has_dynamic_size(self.output.time_dim_axis):
assert self.target.has_dynamic_size(self.target.time_dim_axis)
out_sizes = self.output.get_dynamic_size(self.output.time_dim_axis)
target_sizes = self.target.get_dynamic_size(self.target.time_dim_axis)
checks += [tf.Assert(tf.reduce_all(tf.equal(out_sizes, target_sizes)), ['Loss_debug_runtime_sanity_checks', 'dyn seq len mismatch', 'output:', str(self.output), 'shape', out_shape, 'sizes', out_sizes, 'target:', str(self.target), 'shape', target_shape, 'sizes', target_sizes], summarize=20)]
with tf.control_dependencies(checks):
if (self.target_flat is not None):
self.target_flat = tf.identity(self.target_flat)
else:
self.target = self.target.copy()
self.target.placeholder = tf.identity(self.target.placeholder)
def get_error(self):
'\n :return: frame error rate as a scalar value with the default self.reduce_func (see also self.get_value)\n :rtype: tf.Tensor\n '
with tf.name_scope('loss_frame_error'):
assert (self.output.ndim_dense == self.target.ndim_dense)
from returnn.tf.util.basic import check_input_ndim, check_shape_equal
output_flat = self.output_before_softmax_flat
if (output_flat is None):
output_flat = self.output_flat
output_flat = check_input_ndim(output_flat, ndim=2)
last_dim = (tf.rank(output_flat) - 1)
if self.target.sparse:
target_label = check_input_ndim(self.target_flat, ndim=1)
else:
target_flat = check_shape_equal(self.target_flat, output_flat)
target_label = tf.cast(tf.argmax(target_flat, axis=last_dim), tf.int32)
output_label = tf.cast(tf.argmax(output_flat, axis=last_dim), target_label.dtype)
not_equal = tf.not_equal(output_label, target_label)
return self.reduce_func(tf.cast(not_equal, tf.float32))
def get_value(self):
"""
:return: self.reduce_func(loss), which is usually a scalar with the default reduce_func (tf.reduce_sum).
  float32 value. It should *not* be normalized over frames,
  as this will be calculated in :func:`TFEngine.Runner._collect_eval_info`.
:rtype: tf.Tensor|None
"""
raise NotImplementedError
def get_normalization_factor(self):
'\n :return: factor as a float scalar, usually 1.0 / num_frames. see self.reduce_func.\n :rtype: tf.Tensor\n '
assert (self.loss_norm_factor is not None), 'init not called?'
return tf.convert_to_tensor(self.loss_norm_factor)
@classmethod
def get_auto_output_layer_dim(cls, target_dim):
'\n :param returnn.tensor.Dim target_dim:\n :return: normally just the same as target_dim. e.g. for CTC, we would add 1 for the blank label\n :rtype: returnn.tensor.Dim\n '
return target_dim
@classmethod
def get_default_target(cls, extern_data):
'\n :param TFNetwork.ExternData extern_data:\n :return: default target name, or None if this loss does not have a target\n :rtype: str|None\n '
if (not cls.need_target):
return None
return extern_data.default_target
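# Illustrative only (not part of this module): a minimal sketch of a Loss subclass,
# assuming the contract shown above (Loss.init sets self.output_flat / self.target_flat,
# and get_value returns self.reduce_func(...) unnormalized). Registration of the
# "class_name" with the loss registry happens elsewhere and is not shown here.
class _ExampleL1Loss(Loss):
    class_name = "example_l1"

    def get_value(self):
        with tf.name_scope("loss_example_l1"):
            assert self.output_flat is not None and self.target_flat is not None
            # sum of absolute errors; normalization is applied later via loss_norm_factor
            diff = self.output_flat - tf.cast(self.target_flat, self.output_flat.dtype)
            return self.reduce_func(tf.abs(diff))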
class VariableLayer(LayerBase):
"""
Represents a variable. Can add batch/time dimension if wanted. Can be trainable.
See defaults.
"""
layer_class = 'variable'
def __init__(self, shape, dtype='float32', add_batch_axis=False, add_time_axis=False, trainable=True, saveable=True, non_critical_for_restore=False, init=None, init_by_layer=None, param_name=None, **kwargs):
"""
:param tuple[int|Dim]|list[int|Dim] shape:
:param str dtype:
:param bool add_batch_axis:
:param bool add_time_axis:
:param bool trainable: whether it is updated by grad descent
:param bool saveable: whether it is stored in the checkpoint
:param bool non_critical_for_restore: if True, and it cannot be found in a checkpoint, it will not be an error
:param str|float|int|None init: see :func:`returnn.tf.util.basic.get_initializer`. 0 by default.
  Alternatively, you can also use the option `init_by_layer`.
:param LayerBase|None init_by_layer:
:param str|None param_name: self.name (layer name) by default
"""
shape  # noqa  # unused here; the shape argument is handled in get_out_data_from_opts
super(VariableLayer, self).__init__(trainable=trainable, **kwargs)
assert (not self.sources), ('%s: does not expect any sources' % self)
self.init_by_layer = init_by_layer
dim_tags = list(self.output.dim_tags)
if add_batch_axis:
assert dim_tags[0].is_batch_dim()
dim_tags = dim_tags[1:]
if add_time_axis:
assert (dim_tags[0].dimension == 1)
dim_tags = dim_tags[1:]
shape_ = [d.dimension for d in dim_tags]
assert all(shape_), self.output
with self.var_creation_scope():
if (init_by_layer is None):
if (init is None):
init = 0
initializer = tf_util.get_initializer(init, dtype=dtype, seed=self.network.random.randint((2 ** 31)), eval_local_ns={'layer': self})
else:
assert (init_by_layer is not None)
out_data_base = Tensor(name=self.output.name, dim_tags=dim_tags, dtype=dtype)
initializer = init_by_layer.output.copy_compatible_to(out_data_base).placeholder
shape_ = None
self.var = self.add_param(tf_compat.v1.get_variable(name=(param_name or self.name), shape=shape_, dtype=dtype, initializer=initializer, trainable=trainable), axes_split_info=[d.axis_split_info() for d in dim_tags], trainable=trainable, saveable=saveable, non_critical_for_restore=non_critical_for_restore)
out = self.var
if add_time_axis:
out = tf.expand_dims(out, axis=0)
if add_batch_axis:
batch_dim = self.output.get_batch_dim()
out = tf_util.expand_dims_unbroadcast(out, axis=0, dim=batch_dim)
self.output.placeholder = out
def get_dep_layers(self):
'\n :rtype: list[LayerBase]\n '
deps = super(VariableLayer, self).get_dep_layers()
if self.init_by_layer:
deps.append(self.init_by_layer)
return deps
@classmethod
def transform_config_dict(cls, d, network, get_layer):
'\n :param dict[str] d: will modify inplace\n :param returnn.tf.network.TFNetwork network:\n :param ((str) -> LayerBase) get_layer: function to get or construct another layer\n '
d.setdefault('from', [])
super(VariableLayer, cls).transform_config_dict(d, network=network, get_layer=get_layer)
if d.get('init_by_layer', None):
d['init_by_layer'] = get_layer(d['init_by_layer'])
@classmethod
def get_out_data_from_opts(cls, name, network, shape, dtype='float32', add_batch_axis=False, add_time_axis=False, **kwargs):
'\n :param str name:\n :param returnn.tf.network.TFNetwork network:\n :param tuple[int|Dim]|list[int|Dim] shape:\n :param str dtype:\n :param bool add_batch_axis:\n :param bool add_time_axis:\n :rtype: Tensor\n '
assert isinstance(shape, (list, tuple))
assert ((len(shape) == 0) or all(shape))
dim_tags = []
for (i, d) in enumerate(shape):
if isinstance(d, Dim):
assert (d.dimension is not None), ('%r: need static dims but got %r' % (name, d))
elif isinstance(d, int):
d = Dim(kind=(Dim.Types.Spatial if (i < (len(shape) - 1)) else Dim.Types.Feature), description=('%s:static:%i' % (name, i)), auto_generated=True, dimension=d)
else:
raise TypeError(('Layer %r: invalid type %s in shape %r' % (name, type(d), shape)))
dim_tags.append(d)
if add_time_axis:
dim_tags.insert(0, Dim(kind=Dim.Types.Time, description=('%s:dummy-time' % name), dimension=1, auto_generated=True))
if add_batch_axis:
from returnn.tensor.dim import batch_dim
dim_tags.insert(0, batch_dim)
return Tensor(name=('%s_output' % name), dim_tags=dim_tags, dtype=dtype, batch=(network.get_global_batch_info() if add_batch_axis else None))
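# Illustrative only: a hedged example of a "variable" layer config dict as documented
# in VariableLayer above. The layer name and dims are hypothetical.
_example_variable_layer_config = {
    "my_bias": {
        "class": "variable",
        "shape": (128,),              # static dims only (int or static Dim)
        "dtype": "float32",
        "init": 0,                    # see returnn.tf.util.basic.get_initializer
        "add_batch_axis": False,
        "trainable": True,
    }
}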
class VariableAssignLayer(LayerBase):
'\n Assigns a new value to a variable.\n '
layer_class = 'variable_assign'
def __init__(self, var: LayerBase, value: LayerBase, control_dependencies: Optional[Sequence[LayerBase]]=None, op: str='assign', **kwargs):
'\n :param var:\n :param value:\n :param control_dependencies:\n :param op: "assign" or "add"\n '
super().__init__(**kwargs)
self.var = var
self.value = value
self.control_dependencies = (list(control_dependencies) if control_dependencies else [])
deps = [src.output.placeholder.op for src in self.control_dependencies]
while (not isinstance(var, VariableLayer)):
if isinstance(var, VariableAssignLayer):
deps.append(var.output.placeholder.op)
var = var.var
elif isinstance(var, VariableReadLayer):
deps.append(var.output.placeholder.op)
var = var.var
else:
raise TypeError(f'{self}: invalid var {var!r}')
assert isinstance(var, VariableLayer), f'{self}: var must be a VariableLayer, got {var}'
self.tf_var: tf.Variable = var.var
assert isinstance(self.tf_var, tf.Variable), f'{self}: var must be a tf.Variable, got {self.tf_var}'
value_data = value.output.copy_compatible_to(self.var.output)
with (tf.control_dependencies(deps) if deps else contextlib.nullcontext()):
if (op == 'assign'):
op_ = self.tf_var.assign(value_data.placeholder, read_value=False)
elif (op == 'add'):
op_ = self.tf_var.assign_add(value_data.placeholder, read_value=False)
else:
raise ValueError(f'{self}: invalid op {op!r}')
with (tf.control_dependencies([op_]) if (op_ is not None) else contextlib.nullcontext()):
self.output.placeholder = tf.zeros((), dtype='int32')
def get_dep_layers(self) -> List[LayerBase]:
'deps'
return ((super().get_dep_layers() + [self.var, self.value]) + self.control_dependencies)
@classmethod
def transform_config_dict(cls, d, network, get_layer):
'transform'
d.setdefault('from', [])
super().transform_config_dict(d, network=network, get_layer=get_layer)
d['var'] = get_layer(d['var'])
d['value'] = get_layer(d['value'])
if d.get('control_dependencies'):
d['control_dependencies'] = [get_layer(layer) for layer in d['control_dependencies']]
@classmethod
def get_out_data_from_opts(cls, name: str, var: LayerBase, **kwargs):
'out'
return Tensor(name, dims=(), dtype='int32')
class VariableReadLayer(LayerBase):
'\n Read a variable (currently expected from VariableLayer).\n Supports control dependencies to exactly specify when it should be read.\n '
layer_class = 'variable_read'
def __init__(self, var: LayerBase, control_dependencies: Optional[Sequence[LayerBase]]=None, **kwargs):
'\n :param var: e.g. VariableLayer\n :param control_dependencies: to control what ops must run before the var is read (e.g. assign ops)\n '
super().__init__(**kwargs)
self.var = var
self.control_dependencies = (list(control_dependencies) if control_dependencies else [])
deps = [src.output.placeholder.op for src in self.control_dependencies]
while (not isinstance(var, VariableLayer)):
if isinstance(var, VariableAssignLayer):
deps.append(var.output.placeholder.op)
var = var.var
elif isinstance(var, VariableReadLayer):
deps.append(var.output.placeholder.op)
var = var.var
else:
raise TypeError(f'{self}: invalid var {var!r}')
assert isinstance(var, VariableLayer), f'{self}: var must be a VariableLayer, got {var}'
self.tf_var: tf.Variable = var.var
assert isinstance(self.tf_var, tf.Variable), f'{self}: var must be a tf.Variable, got {self.tf_var}'
with (tf.control_dependencies(deps) if deps else contextlib.nullcontext()):
self.output.placeholder = self.tf_var.read_value()
def get_dep_layers(self) -> List[LayerBase]:
'deps'
return ((super().get_dep_layers() + [self.var]) + self.control_dependencies)
@classmethod
def transform_config_dict(cls, d, network, get_layer):
'transform'
d.setdefault('from', [])
super().transform_config_dict(d, network=network, get_layer=get_layer)
d['var'] = get_layer(d['var'])
if d.get('control_dependencies'):
d['control_dependencies'] = [get_layer(layer) for layer in d['control_dependencies']]
@classmethod
def get_out_data_from_opts(cls, name: str, var: LayerBase, **kwargs):
'out'
return var.output.copy_template(name=('%s_output' % name))
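# Illustrative only: a hedged sketch of combining "variable", "variable_assign" and
# "variable_read" as a step counter. Layer names are hypothetical, and whether the
# explicit control_dependencies are needed depends on the surrounding network.
_example_counter_net = {
    "counter": {"class": "variable", "shape": (), "dtype": "int32", "init": 0, "trainable": False, "saveable": False},
    "one": {"class": "constant", "value": 1},
    "counter_inc": {"class": "variable_assign", "var": "counter", "value": "one", "op": "add"},
    # read the variable only after the assign op has run
    "counter_read": {"class": "variable_read", "var": "counter", "control_dependencies": ["counter_inc"]},
}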
class DataNotFound(Exception):
'\n When accessing non-existing data key in :class:`ExternData` (e.g. extern_data).\n '
class ExternData(TensorDict):
"""
This holds :class:`Data` instances for every data-key of external data from the dataset,
i.e. the description such as shape and sparsity, etc.

It is usually defined by a user config. See :func:`init_from_config`.
"""
def __init__(self, data=None, default_input='data', default_target='classes'):
'\n :param None|dict[str,dict[str]] data: optional init kwargs for Data\n '
super().__init__()
self._config = None
self._batch_info = None
self.default_input = default_input
self.default_target = default_target
self.extra_added_keys = set()
if data:
for (key, value) in data.items():
self.data[key] = Tensor(name=key, auto_create_placeholders=True, **value)
self.init_batch_info()
def __repr__(self):
return ('<ExternData data=%r>' % self.data)
def init_from_config(self, config, auto_create_placeholders=False, reset_batch=True):
'\n It reads ``extern_data`` from the config,\n which defines the :class:`Data` instance options to be created.\n\n :param returnn.config.Config config:\n :param bool auto_create_placeholders:\n :param bool reset_batch:\n '
from returnn.tensor.dim import batch_dim
if reset_batch:
batch_dim.batch = None
self._config = config
data_dims = _extern_data_types_from_config(config)
for (key, init_args) in data_dims.items():
if reset_batch:
init_args = init_args.copy()
if (init_args.get('dim_tags') or init_args.get('dims')):
for tag in (init_args.get('dim_tags') or init_args.get('dims')):
assert isinstance(tag, Dim), f'invalid Tensor opts {init_args!r} for extern_data key {key!r}'
tag.reset_batch_ctx()
self.data[key] = Tensor(name=key, auto_create_placeholders=auto_create_placeholders, **init_args)
self.default_input = config.value('default_input', 'data')
self.default_target = config.value('target', 'classes')
any_available_for_inference = any((data.available_for_inference for data in self.data.values()))
if (not any_available_for_inference):
for (key, data) in self.data.items():
if (key != self.default_target):
data.available_for_inference = True
self.init_batch_info()
def init_from_dataset(self, dataset, auto_create_placeholders=True):
'\n :param returnn.datasets.Dataset dataset:\n :param bool auto_create_placeholders:\n '
target_keys = list(dataset.get_target_list())
if target_keys:
if ('classes' in target_keys):
self.default_target = 'classes'
else:
self.default_target = target_keys[0]
data_keys = list(dataset.get_data_keys())
input_keys = [key for key in data_keys if (key not in target_keys)]
if input_keys:
if ('data' in input_keys):
self.default_input = 'data'
else:
self.default_input = input_keys[0]
for key in data_keys:
self.data[key] = Tensor(name=key, auto_create_placeholders=auto_create_placeholders, **_data_kwargs_from_dataset_key(dataset=dataset, key=key))
self.init_batch_info()
def init_batch_info(self):
'\n Initializes and sets the batch info on the extern data,\n i.e. sets ``Data.batch``.\n See :class:`BatchInfo`.\n '
from returnn.tf.util.data import BatchInfo
from returnn.tf.util.data import batch_dim as global_batch_dim_tag
batch_info = self._batch_info
if (not batch_info):
for (key, data) in self.get_sorted_data_items():
assert isinstance(data, Tensor)
if (data.available_for_inference and data.batch and data.batch.is_global_batch()):
batch_info = data.batch
break
if ((not batch_info) or (batch_info.static_dim == (- 1))):
batch_dim_value = None
for (key, data) in self.get_sorted_data_items():
assert isinstance(data, Tensor)
if (not data.available_for_inference):
continue
if (not data.have_batch_axis()):
continue
if (data.placeholder is None):
continue
if data.beam:
continue
batch_dim = data.get_batch_dim_tag()
if ((batch_dim.dimension is not None) and (batch_dim.dimension > 0)):
batch_dim_value = batch_dim.dimension
break
with tf_util.reuse_name_scope_of_tensor(data.placeholder):
for dim in data.dims:
if (dim.dyn_size_ext and (global_batch_dim_tag in dim.dyn_size_ext.dims)):
if (dim.dyn_size_ext.raw_tensor is not None):
batch_dim_value = tf_util.get_shape_dim(dim.dyn_size_ext.raw_tensor, dim.dyn_size_ext.dims.index(global_batch_dim_tag), name='batch_dim')
break
if (batch_dim_value is None):
batch_dim_value = tf_util.get_shape_dim(data.placeholder, data.batch_dim_axis, name='batch_dim')
break
with reuse_name_scope('extern_data/placeholders', absolute=True):
if (batch_dim_value is None):
batch_dim_value = tf_compat.v1.placeholder(tf.int32, shape=(), name='batch_dim')
elif isinstance(batch_dim_value, int):
pass
else:
batch_dim_value = tf.identity(batch_dim_value, name='batch_dim')
if (not batch_info):
batch_info = BatchInfo.make_global_batch_info(batch_dim=batch_dim_value)
else:
batch_info.dim = batch_dim_value
self._batch_info = batch_info
global_batch_dim_tag.batch = batch_info
for data in self.data.values():
if (data.beam or (data.batch and (not data.batch.is_global_batch()))):
continue
for tag in (data.dim_tags + tuple((tag.get_same_base() for tag in data.dim_tags))):
tag._validate_in_current_graph()
tag._maybe_update()
if (tag.dyn_size_ext and (tag.dyn_size_ext.placeholder is not None) and (not tag.batch) and (not tag.dyn_size_ext.batch) and (global_batch_dim_tag in tag.dyn_size_ext.dims)):
tag.dyn_size_ext.batch = batch_info
tag.batch = batch_info
tag._maybe_update()
data.batch = batch_info
data.sanity_check()
def check_matched_dataset(self, dataset, used_data_keys=None):
'\n :param Dataset.Dataset dataset:\n :param set[str]|list[str] used_data_keys:\n :return: nothing, will assert the check\n '
if (used_data_keys is None):
used_data_keys = dataset.get_data_keys()
base_err_msg = ('%r num_outputs %r vs %r' % (dataset, dataset.num_outputs, self))
for key in sorted(used_data_keys):
if (key in ['seq_idx', 'seq_tag']):
continue
if (key in self.extra_added_keys):
continue
data = self.data[key]
data_sparse = dataset.is_data_sparse(key)
assert ((data.sparse == data_sparse) or (data.dim is None)), ('key %r sparse mismatch. %s' % (key, base_err_msg))
data_dtype = dataset.get_data_dtype(key)
assert (data.dtype == data_dtype), ('key %r dtype mismatch. %s' % (key, base_err_msg))
data_dim = dataset.get_data_dim(key)
assert ((data.dim == data_dim) or (data.dim is None)), ('key %r dim mismatch. %s' % (key, base_err_msg))
data_shape = tuple(dataset.get_data_shape(key))
assert (data.shape[1:] == data_shape), ('key %r shape mismatch. %s' % (key, base_err_msg))
def register_data(self, data):
'\n :param Data data: will use data.name as the key\n '
assert (data.name not in self.data)
self.data[data.name] = data
self.init_batch_info()
def has_data(self, name):
'\n :param str name:\n :rtype: bool\n '
return (name in self.data)
def get_data(self, name: str) -> Tensor:
'\n :param name: key\n '
try:
return self.data[name]
except KeyError:
config_extern_data = '<unknown>'
if (self._config and self._config.has('extern_data')):
config_extern_data = self._config.opt_typed_value('extern_data')
raise DataNotFound(('ExternData: unknown key %r. available keys: %s. config: %s' % (name, list(self.data.keys()), config_extern_data)))
def get_default_input_data(self) -> Tensor:
'\n :return: default input data\n '
return self.data[self.default_input]
def get_default_target_data(self) -> Tensor:
'\n :return: default target data\n '
return self.data[self.default_target]
def get_data_description(self):
'\n :return: str describing the data\n :rtype: str\n '
return ', '.join([('%s: %s' % (name, self.data[name].get_description(with_name=False))) for name in self.data.keys()])
def get_sorted_data_items(self):
'\n :rtype: list[(str,Data)]\n '
keys = sorted(self.data.keys())
if (self.default_input in self.data):
keys.remove(self.default_input)
keys.insert(0, self.default_input)
return [(key, self.data[key]) for key in keys]
def get_all_dimension_tags(self, allow_same_feature_dim=False):
'\n :param bool allow_same_feature_dim:\n :rtype: list[Dim]\n '
(tags, _) = Dim.get_all_dimension_tags([data for (_, data) in self.get_sorted_data_items()], dict(allow_same_feature_dim=allow_same_feature_dim))
return tags
def set_batch_info(self, batch_info, *, init_batch_info: bool=True):
'\n :param returnn.tf.util.data.BatchInfo batch_info:\n :param init_batch_info: calls :func:`init_batch_info`, which might further initialize/modify the batch info\n '
from returnn.tf.util.data import BatchInfo
assert isinstance(batch_info, BatchInfo)
self._batch_info = batch_info
if init_batch_info:
self.init_batch_info()
def get_batch_info(self, allow_none=False):
'\n :param bool allow_none:\n :rtype: returnn.tf.util.data.BatchInfo|None\n '
if self._batch_info:
return self._batch_info
for (key, data) in self.get_sorted_data_items():
assert isinstance(data, Tensor)
if (data.available_for_inference and data.have_batch_axis()):
assert data.batch
return data.batch.get_global_base()
if allow_none:
return None
raise Exception('We cannot tell the batch dim.')
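# Illustrative only: a hedged example of the "extern_data" config entry consumed by
# ExternData.init_from_config above (via _extern_data_types_from_config below).
# The keys, dims and shapes are hypothetical.
_example_extern_data_config = {
    "extern_data": {
        # dense input features, [batch, time, 40]
        "data": {"dim": 40, "shape": (None, 40), "available_for_inference": True},
        # sparse targets (class indices), [batch, time]
        "classes": {"dim": 10025, "shape": (None,), "sparse": True, "available_for_inference": False},
    }
}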
def _extern_data_types_from_config(config):
'\n :param returnn.config.Config config:\n :return: dict data_key -> kwargs of Data\n :rtype: dict[str,dict[str]]\n '
input_data_key = config.value('default_input', 'data')
if config.has('extern_data'):
data_dims = config.typed_dict['extern_data']
if isinstance(data_dims, TensorDict):
res = {}
for (k, v) in data_dims.data.items():
kwargs = v.get_kwargs()
kwargs.pop('name')
res[k] = kwargs
return res
assert isinstance(data_dims, dict), 'extern_data in config must be a dict'
if (config.has('num_inputs') or config.has('num_outputs')):
print('Warning: Using extern_data and will ignore num_inputs/num_outputs in config.', file=log.v2)
else:
log.print_deprecation_warning('Using num_inputs/num_outputs instead of extern_data is deprecated and might be removed in future versions')
(num_inputs, num_outputs) = _num_inputs_outputs_from_config(config)
data_dims = num_outputs.copy()
sparse_input = config.bool('sparse_input', False)
data_dims.setdefault(input_data_key, (num_inputs, (1 if sparse_input else 2)))
data = {}
for (key, data_type) in data_dims.items():
if isinstance(data_type, dict):
data[key] = data_type.copy()
continue
assert isinstance(data_type, (list, tuple))
(dim, ndim) = data_type
init_args = {'dim': dim}
if (ndim == 1):
init_args['shape'] = (None,)
init_args['sparse'] = True
elif (ndim == 2):
init_args['shape'] = (None, dim)
else:
assert (ndim >= 3)
init_args['shape'] = (((None,) * (ndim - 1)) + (dim,))
data[key] = init_args
for (key, v) in data.items():
if (key == input_data_key):
v.setdefault('available_for_inference', True)
else:
v.setdefault('available_for_inference', False)
return data
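# Illustrative only: the legacy (dim, ndim) tuple form handled above translates to
# Data kwargs roughly like this (values hypothetical):
#   {"classes": (10025, 1)}  ->  {"dim": 10025, "shape": (None,), "sparse": True}
#   {"data": (40, 2)}        ->  {"dim": 40, "shape": (None, 40)}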
def _num_inputs_outputs_from_config(config):
"""
:type config: returnn.config.Config
:returns (num_inputs, num_outputs),
  where num_inputs is like num_outputs["data"][0],
  and num_outputs is a dict of data_key -> (dim, ndim),
  where data_key is e.g. "classes" or "data",
  dim is the feature dimension or the number of classes,
  and ndim is the ndim counted without batch-dim,
  i.e. ndim=1 usually means sparse data and ndim=2 means dense data.
:rtype: (int,dict[str,(int,int)])
"""
num_inputs = config.int('num_inputs', 0)
target = config.value('target', 'classes')
if config.is_typed('num_outputs'):
num_outputs = config.typed_value('num_outputs')
if (not isinstance(num_outputs, dict)):
num_outputs = {target: num_outputs}
num_outputs = num_outputs.copy()
from returnn.datasets.basic import convert_data_dims
num_outputs = convert_data_dims(num_outputs, leave_dict_as_is=True)
if ('data' in num_outputs):
num_inputs = num_outputs['data']
if isinstance(num_inputs, (list, tuple)):
num_inputs = num_inputs[0]
elif isinstance(num_inputs, dict):
if ('dim' in num_inputs):
num_inputs = num_inputs['dim']
else:
num_inputs = num_inputs['shape'][(- 1)]
else:
raise TypeError(('data key %r' % num_inputs))
elif config.has('num_outputs'):
num_outputs = {target: [config.int('num_outputs', 0), 1]}
else:
num_outputs = None
dataset = None
if (config.list('train') and (':' not in config.value('train', ''))):
dataset = config.list('train')[0]
if ((not config.is_typed('num_outputs')) and dataset and isinstance(dataset, str)):
from returnn.util.basic import hdf5_dimension, hdf5_group, hdf5_shape
try:
_num_inputs = (hdf5_dimension(dataset, 'inputCodeSize') * config.int('window', 1))
except Exception:
_num_inputs = (hdf5_dimension(dataset, 'inputPattSize') * config.int('window', 1))
try:
_num_outputs = {target: [hdf5_dimension(dataset, 'numLabels'), 1]}
except Exception:
_num_outputs = hdf5_group(dataset, 'targets/size')
for k in _num_outputs:
_num_outputs[k] = [_num_outputs[k], len(hdf5_shape(dataset, ('targets/data/' + k)))]
if num_inputs:
assert (num_inputs == _num_inputs)
if num_outputs:
assert (num_outputs == _num_outputs)
num_inputs = _num_inputs
num_outputs = _num_outputs
assert (num_inputs and num_outputs), 'provide extern_data in the config'
return (num_inputs, num_outputs)
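# Brief example (assumed values): "num_outputs" in the config may be a plain int or a dict and is
# normalized above to a dict data_key -> (dim, ndim), e.g.
#   num_outputs = {"classes": (1000, 1)}   # 1000 classes, sparse (ndim 1)
# while a plain (non-typed) int entry becomes {target: [num_outputs, 1]}.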
|
def _data_kwargs_from_dataset_key(dataset, key):
'\n :param returnn.datasets.basic.Dataset dataset:\n :param str key:\n :rtype: dict[str]\n '
if (key in dataset.get_target_list()):
available_for_inference = False
else:
available_for_inference = True
dim = dataset.get_data_dim(key)
shape = ([None] + list(dataset.get_data_shape(key)))
sparse = dataset.is_data_sparse(key)
dtype = dataset.get_data_dtype(key)
if ((not sparse) and (shape[(- 1)] is None)):
dim = None
return dict(batch_dim_axis=0, time_dim_axis=1, shape=shape, dim=dim, sparse=sparse, dtype=dtype, available_for_inference=available_for_inference)
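# Illustrative return value (key name, feature dim and dtype are assumptions): for a dense "data"
# stream with 40-dim features this would give
#   {"batch_dim_axis": 0, "time_dim_axis": 1, "shape": [None, 40], "dim": 40,
#    "sparse": False, "dtype": "float32", "available_for_inference": True}
# whereas for a key listed in dataset.get_target_list(), available_for_inference would be False.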
|
class _NetworkConstructionStack():
'\n Used to keep the recursive construction state of :func:`TFNetwork.construct_layer`.\n '
def __init__(self):
self.layers = []
self.in_flat_construct_count = 0
def append(self, layer_name):
'\n :param str layer_name:\n '
assert (layer_name not in self.layers)
self.layers.append(layer_name)
def remove(self, layer_name):
'\n :param str layer_name:\n '
self.layers.remove(layer_name)
def flat_construct(self, initial):
'\n :param _DelayedConstructionException initial:\n '
self.in_flat_construct_count += 1
queue = [initial]
try:
while queue:
try:
res = queue[(- 1)].delayed_construction()
if (queue[(- 1)] is initial):
return res
queue.pop((- 1))
except _DelayedConstructionException as delayed_exc:
queue.append(delayed_exc)
finally:
self.in_flat_construct_count -= 1
assert False, 'we should not get here'
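# Note on the control flow above (annotation): this is a trampoline. With flat construction enabled,
# construct_layer raises _DelayedConstructionException instead of recursing; the exception is appended
# to the queue here, and the loop keeps retrying the most recently added item until the initial one
# finally constructs, which keeps the Python stack depth bounded.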
|
class TFNetwork(object):
'\n The main neural network, i.e. collection of interconnected layers, i.e. computation graph with trainable params.\n '
def __init__(self, config=None, extern_data=None, rnd_seed=None, train_flag=None, eval_flag=None, search_flag=None, parent_layer=None, parent_net=None, extra_parent_net=None, extra_name_prefix=None, inside_rec_time_dim=None, over_rec_time_dim=None, over_rec_time_dim_subs=None, control_flow_ctx=None, absolute_name_prefix=None, name=''):
'\n :param returnn.config.Config config: only needed to init extern_data if not specified explicitly\n :param ExternData|None extern_data:\n :param int|None rnd_seed:\n :param bool|tf.Tensor train_flag: True if we want to use this model in training, False if in eval, or dynamic\n :param bool eval_flag: whether to calculate losses. if train_flag is not False, this will be set to True\n :param bool search_flag: whether we perform a beam-search. see usage\n :param returnn.tf.layers.base.LayerBase|None parent_layer:\n :param TFNetwork|None parent_net:\n :param TFNetwork|None extra_parent_net: we are on the same level (not really a child),\n but an "extra" net of extra_parent_net\n :param str|None extra_name_prefix:\n :param Dim|None inside_rec_time_dim: dim tag of outer rec layer, when run inside the loop (not optimized)\n :param Dim|None over_rec_time_dim: dim tag of outer rec layer, when optimized out of the loop\n :param set[Dim]|None over_rec_time_dim_subs: outer rec layer, out of loop, potential shorter\n :param returnn.tf.util.data.ControlFlowContext control_flow_ctx:\n :param str|None absolute_name_prefix: this is for representation\n :param str name: only for debugging\n '
self.name = name
if absolute_name_prefix:
assert absolute_name_prefix.endswith('/')
self._absolute_name_prefix = absolute_name_prefix
if ((not parent_layer) and extra_parent_net):
parent_layer = extra_parent_net.parent_layer
if ((not parent_net) and extra_parent_net):
parent_net = extra_parent_net.parent_net
if ((not parent_net) and parent_layer):
parent_net = parent_layer.network
base_net = (parent_net or extra_parent_net)
if (not base_net):
LayerBase.get_global_layer_list()[:] = []
if config:
from returnn.config import set_global_config
set_global_config(config)
if ((not config) and base_net):
config = base_net._config
if (extern_data is None):
if extra_parent_net:
extern_data = extra_parent_net.extern_data
elif parent_net:
extern_data = ExternData()
else:
extern_data = ExternData()
if (not config):
from returnn.config import get_global_config
config = get_global_config()
extern_data.init_from_config(config, auto_create_placeholders=True)
self.extern_data = extern_data
self._config = config
self.used_data_keys = set()
if (rnd_seed is None):
rnd_seed = (base_net.random.randint((2 ** 31)) if base_net else 42)
self.rnd_seed = rnd_seed
self.random = numpy.random.RandomState(rnd_seed)
if (train_flag is None):
train_flag = (base_net.train_flag if base_net else False)
assert isinstance(train_flag, (bool, tf.Tensor))
self.train_flag = train_flag
if (eval_flag is None):
eval_flag = (base_net.eval_flag if base_net else False)
assert isinstance(eval_flag, bool)
if (train_flag is not False):
eval_flag = True
self.eval_flag = eval_flag
if (search_flag is None):
search_flag = (base_net.search_flag if base_net else False)
self.search_flag = search_flag
self.parent_layer = parent_layer
self.parent_net = parent_net
self._inside_rec_time_dim = inside_rec_time_dim
self._over_rec_time_dim = over_rec_time_dim
self._over_rec_time_dim_subs = over_rec_time_dim_subs
self.control_flow_ctx = control_flow_ctx
self.extra_parent_net = extra_parent_net
self.extra_name_prefix = extra_name_prefix
self.extra_deps_in_extra = False
self.extra_only_template = False
self.is_root_in_ctx = (not parent_net)
self.extra_nets = {}
self.subnets = {}
self._selected_train_layers = None
self._construction_stack = _NetworkConstructionStack()
self.layers_desc = {}
self.layers = {}
self.losses_dict = {}
self.total_loss = None
self.total_constraints = None
self.total_objective = None
self._global_train_step = None
self._global_train_step_var = None
self.epoch_step = None
self.saver = None
self.extra_vars_to_save = []
self.recurrent = False
self._assigner_cache = {}
self.concat_sources_dropout_cache = {}
self._merge_all_summaries = None
self._graph_reset_callbacks = []
self._run_opts = {}
self._run_finished_callbacks = []
self._map_search_beam_to_search_choices = {}
def __repr__(self):
s = ('TFNetwork %r' % self.name)
if self.parent_layer:
s += (' parent_layer=%r' % self.parent_layer)
elif self.parent_net:
s += (' parent_net=%r' % self.parent_net)
if self.extra_nets:
s += (' extra_nets=%r' % self.extra_nets)
if (self.train_flag is True):
s += ' train'
elif (self.train_flag is not None):
s += (' train=%r' % self.train_flag)
if self.search_flag:
s += ' search'
return ('<%s>' % s)
def get_network_hierarchy(self):
'\n :return: list of all networks in the hierarchy, including self.\n '
net = self
ret = []
while net:
ret.append(net)
while net.extra_parent_net:
net = net.extra_parent_net
net = net.parent_net
ret.reverse()
return ret
def get_root_network(self):
'\n :rtype: TFNetwork\n '
if self.parent_net:
return self.parent_net.get_root_network()
if self.extra_parent_net:
return self.extra_parent_net.get_root_network()
return self
def get_root_ctx_network(self):
'\n :return: in contrast to :func:`get_root_network`, stop where we have ``is_root_in_ctx`` set,\n and return that network, together with the prefix\n :rtype: (TFNetwork, str)\n '
path = []
net = self
while True:
if net.is_root_in_ctx:
break
if net.extra_parent_net:
path.append((net.extra_name_prefix + ':'))
net = net.extra_parent_net
continue
if net.parent_net:
if net.parent_layer:
path.append((net.parent_layer.name + '/'))
net = net.parent_net
continue
break
return (net, ''.join(reversed(path)))
def get_control_flow_ctx(self):
'\n :rtype: returnn.tf.util.data.ControlFlowContext|None\n '
net = self
while net:
if net.control_flow_ctx:
return net.control_flow_ctx
net = net.parent_net
return None
def is_extra_internal_template_construction(self):
'\n :return: whether this is a template-only construction in an extra net\n :rtype: bool\n '
(net, _) = self.get_root_ctx_network()
return bool(net.extra_parent_net and net.extra_only_template)
def get_absolute_name_scope_prefix(self):
'\n :return: TF scope name, always with "/" at the end, or ""\n :rtype: str\n '
if self.parent_layer:
return self.parent_layer.get_absolute_name_scope_prefix()
if self.parent_net:
return self.parent_net.get_absolute_name_scope_prefix()
if self.extra_parent_net:
return self.extra_parent_net.get_absolute_name_scope_prefix()
return ''
def get_absolute_name_prefix(self):
'\n :return: name, always with "/" at the end, or "". This is for representation.\n See also :func:`get_absolute_name_scope_prefix`.\n :rtype: str\n '
if (self._absolute_name_prefix is not None):
return self._absolute_name_prefix
if self.parent_layer:
return (self.parent_layer.get_absolute_name() + '/')
if self.parent_net:
return self.parent_net.get_absolute_name_prefix()
if self.extra_parent_net:
prefixes = {net: prefix for (prefix, net) in self.extra_parent_net.extra_nets.items()}
my_prefix = (('%s:' % prefixes[self]) if (self in prefixes) else '')
return (self.extra_parent_net.get_absolute_name_prefix() + my_prefix)
return ''
def construct_from_dict(self, net_dict, get_layer=None):
'\n :param dict[str,dict[str]] net_dict:\n :param GetLayer|((str)->LayerBase)|None get_layer:\n '
self.layers_desc.update(net_dict)
def ignore_layer(name_, layer_desc_):
'\n :param str name_:\n :param dict layer_desc_:\n :rtype: bool\n '
assert isinstance(name_, str)
if name_.startswith('#'):
return True
assert isinstance(layer_desc_, dict)
if (layer_desc_.get('only_on_search') and (not self.search_flag)):
return True
if (layer_desc_.get('only_on_eval') and (not self.eval_flag)):
return True
return False
for (name, layer_desc) in sorted(net_dict.items()):
if ignore_layer(name, layer_desc):
continue
if layer_desc.get('register_as_extern_data'):
self.construct_layer(net_dict, name, get_layer=get_layer)
for (name, layer_desc) in sorted(net_dict.items()):
if ignore_layer(name, layer_desc):
continue
if ((name == 'output') or name.endswith(':output') or layer_desc.get('loss', None) or layer_desc.get('is_output_layer', False)):
self.construct_layer(net_dict, name, get_layer=get_layer)
for (name, subnet) in sorted(self.subnets.items()):
assert isinstance(subnet, Subnetwork)
subnet.complete_construction_parent_subnet_layer(parent_get_layer=get_layer)
self.layers.update({('%s/%s' % (subnet.name_in_parent, sub_name)): sub_layer for (sub_name, sub_layer) in subnet.net.layers.items()})
assert (not self._construction_stack.layers)
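# Example net_dict (layer names and options are illustrative assumptions) showing which entries are
# constructed explicitly here: "features" in the first pass (register_as_extern_data), "output" and
# "aux_loss" in the second pass (name "output" / has a loss), while "hidden" is only built on demand
# as a dependency of "output":
#   {"hidden":   {"class": "linear", "activation": "relu", "n_out": 512, "from": "data"},
#    "output":   {"class": "softmax", "loss": "ce", "from": "hidden"},
#    "aux_loss": {"class": "copy", "from": "hidden", "loss": "mse"},
#    "features": {"class": "copy", "from": "data", "register_as_extern_data": "feat"}}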
_extra_layer_name_prefix_pattern = re.compile('^(extra(\\.[A-Za-z0-9_.()]+)?):')
def _get_extra_net(self, search_flag=None, net_name=None, prefix_name=None, auto_create=True, boundary=False):
'\n See :func:`construct_extra_net` and :func:`make_extra_net`.\n\n :param bool|None search_flag:\n :param str|None net_name:\n :param str|None prefix_name: e.g. "extra.search" or "extra.WhateverYouWant" or just "extra"\n :param bool auto_create:\n :param bool boundary: implies that other extra / non-extra networks cannot directly access this,\n and also that this is never shared\n :return: (net, prefix_name)\n :rtype: (TFNetwork|None,str)\n '
if ((search_flag is None) and prefix_name):
search_flag = ('.search' in prefix_name)
if (not prefix_name):
assert (search_flag is not None)
prefix_name = ('extra.search' if search_flag else 'extra')
if (prefix_name and (not net_name)):
net_name = ('%s(%s)' % (self.name, prefix_name))
assert (not self.extra_parent_net)
if (prefix_name not in self.extra_nets):
if (not auto_create):
return (None, prefix_name)
extra_net = TFNetwork(config=self._config, extern_data=self.extern_data, name=net_name, rnd_seed=self.random.randint((2 ** 31)), train_flag=self.train_flag, eval_flag=self.eval_flag, search_flag=(search_flag if (search_flag is not None) else self.search_flag), extra_parent_net=self, extra_name_prefix=prefix_name)
if boundary:
extra_net.is_root_in_ctx = True
else:
self.extra_nets[prefix_name] = extra_net
else:
assert (not boundary)
extra_net = self.extra_nets[prefix_name]
assert (extra_net.extra_parent_net is self)
if (search_flag is not None):
assert (extra_net.search_flag == search_flag)
return (extra_net, prefix_name)
def make_extra_net(self, prefix_name, net_name=None, only_template=False, boundary=False):
'\n See :func:`construct_extra_net`.\n\n With boundary=False, it is accessible from outside via the "extra...:" layer name prefix,\n and registered in main_net.extra_nets.\n With boundary=True, it is not accessible from outside,\n and not registered in main_net.extra_nets.\n\n :param str prefix_name: "extra.Whatever"\n :param str|None net_name:\n :param bool only_template:\n :param bool boundary:\n :rtype: TFNetwork\n '
assert self._extra_layer_name_prefix_pattern.match((prefix_name + ':'))
base_net = (self.extra_parent_net or self)
(net, _) = base_net._get_extra_net(search_flag=self.search_flag, prefix_name=prefix_name, net_name=net_name, boundary=boundary)
if only_template:
assert boundary
net.extra_only_template = True
return net
def construct_extra_net(self, net_dict, layer_list, search_flag=None, dep_layers_in_extra=False, check_existing=False, net_name=None, prefix_name=None, base_get_layer=None, base_add_layer=None):
'\n The purpose is to create another net like `self` but with different flags,\n e.g. with `search_flag = True`.\n That `extra_net` can have different losses, which will be added.\n Layers in ``layer_list`` will be explicitly re-created in the extra net.\n Other layers are taken from ``self``.\n An extra net is like an overlay over the main net.\n\n The creation of the extra net and layers in the extra net can be triggered explicitly\n by referring to another layer as e.g. ``"extra.search:layer"``.\n When done this way, all the dependencies of it are created in self again;\n unless you explicitly have called another layer like ``"extra.search:dep"``.\n See :func:`test_extra_search` for an example.\n\n :param dict[str,dict[str]] net_dict:\n :param list[str] layer_list:\n :param bool|None search_flag:\n :param bool dep_layers_in_extra: layers not in layer_list, but which are not yet created,\n will be part of the extra net, not self.\n :param bool check_existing:\n :param str|None net_name:\n :param str|None prefix_name: e.g. "extra.search", such that layers would be called like "extra.search:layer"\n :param base_get_layer: like in construct_layer\n :param base_add_layer: like in construct_layer\n :return: the layers created via layer_list (all in extra net)\n :rtype: list[LayerBase]\n '
assert (not self.extra_parent_net)
(extra_net, prefix_name) = self._get_extra_net(search_flag=search_flag, net_name=net_name, prefix_name=prefix_name)
extra_net.layers_desc.update(net_dict)
if dep_layers_in_extra:
extra_net.extra_deps_in_extra = True
if (not base_get_layer):
def base_get_layer(src_name):
'\n :param str src_name:\n :rtype: LayerBase\n '
return self.construct_layer(net_dict=net_dict, name=src_name, get_layer=get_layer, add_layer=base_add_layer)
if (not base_add_layer):
base_add_layer = self.add_layer
def get_layer(src_name):
'\n :param str src_name:\n :rtype: LayerBase\n '
if self._extra_layer_name_prefix_pattern.match(src_name):
return base_get_layer(src_name)
explicit_extra_layer_name = ('%s:%s' % (prefix_name, src_name))
if (explicit_extra_layer_name.split('/', 1)[0] in net_dict):
return base_get_layer(explicit_extra_layer_name)
if dep_layers_in_extra:
return base_get_layer(explicit_extra_layer_name)
return base_get_layer(src_name)
created_layers = []
for layer_name in layer_list:
if (not self._extra_layer_name_prefix_pattern.match(layer_name)):
layer_name = ('%s:%s' % (prefix_name, layer_name))
created_layers.append(extra_net.construct_layer(net_dict=net_dict, name=layer_name, check_existing=check_existing, get_layer=get_layer, add_layer=base_add_layer))
if extra_net.recurrent:
self.recurrent = True
self.used_data_keys.update(extra_net.used_data_keys)
return created_layers
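# Usage sketch (layer names are assumptions): a layer can list "extra.search:decoder" in its "from".
# That triggers this method for the overlay net with search_flag=True; "decoder" (and only those of
# its dependencies that are also written with the "extra.search:" prefix, unless dep_layers_in_extra
# is set) is re-created in the extra net, while all other dependencies are shared from the main net.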
def _flat_construction_enabled(self):
'\n :return: whether to use flat construction algorithm in :func:`construct_layer`.\n Use this if you get stack overflow errors, such as:\n ``Fatal Python error: Cannot recover from stack overflow``\n or\n ``RuntimeError: maximum recursion depth exceeded``.\n :rtype: bool\n '
return self.get_config().bool('flat_net_construction', False)
def construct_layer(self, net_dict, name, get_layer=None, add_layer=None, check_existing=True):
'\n This triggers the construction of the layer `name` if it is not constructed yet.\n Every construction trigger corresponds to ``add_layer`` call (which by default does the actual construction).\n This can recursively also get/construct other layers (via ``get_layer``).\n\n :param dict[str,dict[str]] net_dict:\n :param str name: layer name\n :param GetLayer|((str)->LayerBase)|None get_layer: optional, for source layers, for transform_config_dict.\n By default, this wraps self.construct_layer().\n I.e. the name might be misleading, as this should return an existing layer,\n or construct it if it does not exist yet.\n\n Note on custom nested/wrapped get_layer:\n This is tricky. When an outer get_layer calls an inner get_layer,\n then the inner get_layer might construct the layer,\n and this construction never can get back to the outer get_layer again.\n This is fine when this is anyway not allowed\n (e.g. to "base:...", where the base net is not allowed to access this parent net).\n But otherwise, this is not an option!\n\n :param ((str, LayerBase, dict) -> LayerBase) | None add_layer: by default self.add_layer\n :param bool check_existing: check self.get_layer. (self.layers will be checked in any case)\n :rtype: LayerBase\n '
if (name in self.layers):
return self.layers[name]
if (check_existing and (name != 'data') and (not name.startswith('data:'))):
try:
return self.get_layer(name)
except (LayerNotFound, DataNotFound):
pass
if (not get_layer):
get_layer = GetLayer(network=self, add_layer_func=add_layer, net_dict=net_dict)
full_name = name
sub_layer_name = None
if ('/' in name):
(name, sub_layer_name) = name.split('/', 1)
layer_desc = None
extra_prefix = None
if self._extra_layer_name_prefix_pattern.match(name):
(extra_prefix, name_) = name.split(':', 1)
if self.extra_parent_net:
(extra_net, _) = self.extra_parent_net._get_extra_net(prefix_name=extra_prefix, auto_create=False)
if (extra_net is not self):
return self.extra_parent_net.construct_extra_net(net_dict=net_dict, layer_list=[full_name], prefix_name=extra_prefix, check_existing=check_existing, base_get_layer=get_layer, base_add_layer=add_layer)[0]
if (name in net_dict):
layer_desc = net_dict[name]
name = name_
else:
return self.construct_extra_net(net_dict=net_dict, layer_list=[full_name], prefix_name=extra_prefix, check_existing=check_existing, base_get_layer=get_layer, base_add_layer=add_layer)[0]
elif self.extra_parent_net:
extra_prefix = self.extra_name_prefix
explicit_extra_layer_name = ('%s:%s' % (self.extra_name_prefix, name))
if (explicit_extra_layer_name in net_dict):
layer_desc = net_dict[explicit_extra_layer_name]
else:
return self.extra_parent_net.construct_layer(self.extra_parent_net.layers_desc, name=full_name, get_layer=get_layer, add_layer=add_layer, check_existing=check_existing)
if (not layer_desc):
if (name not in net_dict):
if (name == 'data'):
layer_desc = {'class': 'source'}
elif name.startswith('data:'):
layer_desc = {'class': 'source', 'data_key': name[len('data:'):]}
elif (name == ':i'):
layer_desc = {'class': ':i'}
else:
layer_desc = net_dict[name]
if (not layer_desc):
raise LayerNotFound(('layer %r not found in %r' % (name, self)), layer_name=full_name, network=self, net_dict=net_dict)
if (not add_layer):
add_layer = self.add_layer
net = self
class_name = layer_desc['class']
layer_class = get_layer_class(class_name)
base_name = name
while True:
subnet = layer_class.cls_get_sub_network(name=base_name, network=net, layer_desc=layer_desc)
if ((not subnet) and (base_name in net.subnets)):
subnet = net.subnets[base_name]
if subnet:
if (not sub_layer_name):
break
if ('/' in sub_layer_name):
(base_name, sub_layer_name) = sub_layer_name.split('/', 1)
else:
(base_name, sub_layer_name) = (sub_layer_name, None)
get_layer = subnet.get_sub_layer_func(get_layer)
net = subnet.net
name = ((name + '/') + base_name)
if (not subnet.have_layer(base_name)):
raise LayerNotFound(('sub-layer %r not found in %r' % (base_name, net)), layer_name=full_name, network=self)
layer_desc = subnet.get_layer_desc(base_name)
layer_class = subnet.get_layer_class(base_name)
continue
if (not sub_layer_name):
break
root_layer = get_layer(base_name)
sub_layer = root_layer.get_sub_layer(sub_layer_name)
if (not sub_layer):
raise LayerNotFound(('sub-layer %r not found in %r' % (sub_layer_name, root_layer)), layer_name=full_name, network=self)
return sub_layer
if self._flat_construction_enabled():
delayed_exc = _DelayedConstructionException(network=self, layer_name=name, other_kwargs=dict(net_dict=net_dict, get_layer=get_layer, add_layer=add_layer, check_existing=check_existing))
if (not self._construction_stack.in_flat_construct_count):
return self._construction_stack.flat_construct(delayed_exc)
if self._construction_stack.layers:
raise delayed_exc
layer_desc = layer_desc.copy()
layer_desc.pop('class')
layer_desc['_network'] = net
layer_desc['_name'] = base_name
name_with_prefix = (('%s:%s' % (extra_prefix, name)) if extra_prefix else name)
if (name_with_prefix in self._construction_stack.layers):
raise NetworkConstructionDependencyLoopException(layer_name=name_with_prefix, constructing_layers=self._construction_stack.layers, net_dict=net_dict, network=self)
self._construction_stack.append(name_with_prefix)
try:
layer_class.transform_config_dict(layer_desc, network=net, get_layer=get_layer)
finally:
self._construction_stack.remove(name_with_prefix)
return add_layer(name=name_with_prefix, layer_class=layer_class, **layer_desc)
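# Layer-name forms resolved by construct_layer above (summary annotation):
#   "layer"              -> entry in net_dict, constructed on demand (dependency cycles are detected
#                           via the construction stack)
#   "data" / "data:key"  -> implicit source layer for extern data, even without a net_dict entry
#   "layer/sub"          -> sub-layer, resolved via subnetworks or LayerBase.get_sub_layer
#   "extra.<name>:layer" -> same layer, but constructed in (or forwarded to) the extra overlay net
#   ":i"                 -> implicit ":i" layer (rec step index inside a rec layer)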
def _create_layer_layer_desc(self, name, layer_desc, template=True):
'\n This is called *after* :func:`LayerBase.transform_config_dict`\n and *before* :func:`LayerBase.get_out_data_from_opts`.\n\n :param str name: layer name\n :param dict[str] layer_desc: opts\n :param bool template: for template inference, we do not need the full logic\n :rtype: dict[str]\n '
from returnn.tf.layers.basic import SearchChoices
if (not template):
layer_desc = SearchChoices.translate_to_common_search_beam(layer_desc)
layer_desc = layer_desc.copy()
assert ('name' not in layer_desc)
assert ('network' not in layer_desc)
layer_desc['name'] = name
layer_desc['network'] = self
return layer_desc
def _create_layer(self, name, layer_class, **layer_desc):
'\n This will create the layer given the layer_desc arguments.\n\n :param str name:\n :param (()->LayerBase)|LayerBase|type[LayerBase] layer_class:\n :param layer_desc: contains the kwargs for the layer class.\n the args should have been transformed via layer_class.transform_config_dict before (see construct_layer).\n must not contain "name" and "network", which will be automatically added here.\n should not contain "output", which will be initialized to layer_class.get_out_data_from_opts.\n the layer_class will usually then define the layer.output and its placeholder.\n there is one notable exception: the InternalLayer, where you predefine the output.\n :rtype: LayerBase\n '
from pprint import pprint
from returnn.util.basic import help_on_type_error_wrong_args
from returnn.tf.util.basic import py_print
layer_desc = self._create_layer_layer_desc(name=name, layer_desc=layer_desc, template=False)
debug_print_layer_output_template = self.get_config().bool('debug_print_layer_output_template', False)
debug_print_layer_output_shape = self.get_config().bool('debug_print_layer_output_shape', False)
debug_print_layer_output = util.CollectionReadCheckCovered.from_bool_or_dict((layer_desc['debug_print_layer_output'] if ('debug_print_layer_output' in layer_desc) else self.get_config().bool_or_other('debug_print_layer_output', False)))
debug_print_layer_output.collection.setdefault('summarize', 10)
debug_add_check_numerics_on_output = self.get_config().bool('debug_add_check_numerics_on_output', False)
debug_runtime_sanity_checks = self.get_config().bool('debug_runtime_sanity_checks', False)
with self.layer_creation_scope(layer_class=layer_class, **layer_desc):
try:
if ('output' not in layer_desc):
layer_desc['output'] = layer_class.get_out_data_from_opts(**layer_desc)
output_template = layer_desc['output']
assert isinstance(output_template, Data), ("%s %r layer_desc %r ['output'] is not a Data instance" % (layer_class.__name__, name, layer_desc))
output_template = layer_class.fixup_out_data(**layer_desc)
layer_desc['output'] = output_template
out_print_parts = [('[%s]' % ','.join(output_template.get_batch_axes_short_description())), output_template.dtype]
if output_template.sparse_dim:
out_print_parts.append(('sparse_dim=%s' % output_template.sparse_dim))
print(('layer %s/%r: %s' % (self.name, name, ' '.join(out_print_parts))), file=(log.v1 if debug_print_layer_output_template else log.v3))
output_template.sanity_check(ignore_placeholder=True)
output_template_special_axes = output_template.get_special_axes_dict()
if ((not output_template.available_for_inference) and (not self.eval_flag)):
from returnn.tf.layers.base import DataNotAvailableLayer
layer = DataNotAvailableLayer(name=layer_desc['name'], network=layer_desc['network'], output=output_template, layer_class=layer_class, layer_desc=layer_desc, register_as_extern_data=layer_desc.get('register_as_extern_data'))
else:
layer = layer_class(**layer_desc)
layer.post_init(layer_desc)
layer.output.sanity_check()
output_special_axes = layer.output.get_special_axes_dict()
assert (output_template_special_axes == output_special_axes), ('%s %r: not equal: %r == %r, from data %r -> %r' % (layer_class.__name__, name, output_template_special_axes, output_special_axes, output_template, layer.output))
except TypeError:
help_on_type_error_wrong_args(cls=layer_class, kwargs=list(layer_desc.keys()))
print(('TypeError creating layer %s/%r of class %s with opts:' % (self.name, name, layer_class.__name__)))
pprint(layer_desc)
raise
except Exception:
print(('Exception creating layer %s/%r of class %s with opts:' % (self.name, name, layer_class.__name__)))
pprint(layer_desc)
raise
if ((layer.output.placeholder is not None) and debug_print_layer_output_shape):
layer.output.placeholder = py_print(layer.output.placeholder, [layer.get_absolute_name(), 'shape:', str(layer.output), tf.shape(layer.output.placeholder)], summarize=10, name='debug_print_layer_output_shape')
if ((layer.output.placeholder is not None) and debug_print_layer_output.truth_value):
layer.output.placeholder = py_print(layer.output.placeholder, [layer.get_absolute_name(), layer.output.placeholder], name='debug_print_layer_output', **debug_print_layer_output.collection)
if ((layer.output.placeholder is not None) and debug_runtime_sanity_checks):
layer.output.placeholder = layer.output.get_placeholder_with_runtime_sanity_checks()
if ((layer.output.placeholder is not None) and debug_add_check_numerics_on_output and layer.output.dtype.startswith('float') and (not layer.allow_inf_in_output)):
print(('debug_add_check_numerics_on_output: add for layer %r: %r' % (name, layer.output.placeholder)))
from returnn.tf.util.basic import identity_with_check_numerics
layer.output.placeholder = identity_with_check_numerics(layer.output.placeholder, name=('%s_identity_with_check_numerics_output' % layer.tf_scope_name))
assert layer.output
if (layer.output.placeholder is not None):
layer.output.placeholder.set_shape(layer.output.batch_shape)
return layer
@contextlib.contextmanager
def layer_creation_scope(self, layer_class=LayerBase, **kwargs):
'\n :param (()->LayerBase)|LayerBase|type[LayerBase] layer_class:\n :yield: ctx\n '
expected_name_scope = self.get_absolute_name_scope_prefix()[:(- 1)]
if (tf_util.get_current_name_scope() != expected_name_scope):
with reuse_name_scope(expected_name_scope, absolute=True):
assert (tf_util.get_current_name_scope() == expected_name_scope)
with self.layer_creation_scope(layer_class=layer_class, **kwargs):
(yield)
return
with self.register_network_scope():
with layer_class.cls_setup_scope(**kwargs):
(yield)
def add_layer(self, name, layer_class, **layer_desc):
'\n This will construct the layer given the layer_desc arguments,\n and add it to the network.\n\n :param str name:\n :param (()->LayerBase)|LayerBase layer_class:\n :param layer_desc: contains the kwargs for the layer class.\n the args should have been transformed via layer_class.transform_config_dict before (see construct_layer).\n must not contain "name" and "network", which will be automatically added here.\n should not contain "output", which will be initialized to layer_class.get_out_data_from_opts.\n the layer_class will usually then define the layer.output and its placeholder.\n there is one notable exception: the InternalLayer, where you predefine the output.\n '
if (self._extra_layer_name_prefix_pattern.match(name) and self.extra_parent_net):
if name.startswith(self.extra_name_prefix):
(prefix, name) = name.split(':', 1)
if self._extra_layer_name_prefix_pattern.match(name):
(prefix, name_) = name.split(':', 1)
(extra_net, _) = (self.extra_parent_net or self)._get_extra_net(prefix_name=prefix)
layer = extra_net.add_layer(name=name_, layer_class=layer_class, **layer_desc)
else:
(root_name, sub_name) = (name.split('/', 1) if ('/' in name) else (name, None))
if (sub_name and (root_name in self.subnets)):
subnet = self.subnets[root_name]
with subnet.net.layer_creation_scope(**subnet.layer.kwargs):
layer = subnet.net.add_layer(name=sub_name, layer_class=layer_class, **layer_desc)
else:
layer = self._create_layer(name=name, layer_class=layer_class, **layer_desc)
assert (name not in self.layers)
self.layers[name] = layer
if layer.recurrent:
self.recurrent = True
return layer
def get_extern_data(self, key, mark_data_key_as_used=True):
'\n Returns the Data object and adds the key to self.used_data_keys if mark_data_key_as_used.\n :param str key: e.g. "data" or "classes"\n :param bool mark_data_key_as_used:\n :rtype: Data\n '
if ((key in {'seq_idx', 'seq_tag'}) and self.parent_net):
return self.parent_net.get_extern_data(key, mark_data_key_as_used=mark_data_key_as_used)
if mark_data_key_as_used:
self.used_data_keys.add(key)
if ((key == 'seq_idx') and (key not in self.extern_data.data)):
self.extern_data.data[key] = Data(name='seq_idx', shape=(), dtype='int32', sparse=False, auto_create_placeholders=True)
if ((key == 'seq_tag') and (key not in self.extern_data.data)):
self.extern_data.data[key] = Data(name='seq_tag', shape=(), dtype='string', auto_create_placeholders=True)
return self.extern_data.get_data(key)
def get_used_data_keys(self, exclude_extra_added=True):
'\n :param bool exclude_extra_added:\n :rtype: set[str]\n '
used_data_keys = self.used_data_keys
if exclude_extra_added:
used_data_keys = used_data_keys.difference(self.extern_data.extra_added_keys)
return used_data_keys
def get_seq_tags(self, mark_data_key_as_used=True, beam=None):
'\n :param bool mark_data_key_as_used: for extern_data\n :param returnn.tf.util.data.SearchBeam|None beam:\n :return: tensor of shape (batch,) of dtype string, via extern_data\n :rtype: tf.Tensor\n '
data = self.get_extern_data(key='seq_tag', mark_data_key_as_used=mark_data_key_as_used)
if beam:
data = data.copy_extend_with_beam(beam)
return data.placeholder
def make_subnet(self, name, opts):
'\n :param str name:\n :param dict[str] opts:\n :rtype: Subnetwork\n '
if (name in self.subnets):
subnet = self.subnets[name]
else:
subnet = Subnetwork(parent_net=self, name=name, opts=opts)
if (not subnet.template):
self.subnets[name] = subnet
return subnet
def get_losses_initialized(self, reduce_func=None, with_total=False):
'\n :param ((tf.Tensor)->tf.Tensor)|None reduce_func: as in get_losses. e.g. TFUtil.identity\n :param bool with_total: whether to return total loss / constraints\n :return: loss name (e.g. "output" or "rec_layer/output" or so) -> LossHolder (initialized, i.e. layer set),\n and optionally total loss and total constraints (if with_total)\n :rtype: (dict[str,LossHolder], tf.Tensor|int|None, tf.Tensor|int|None)\n '
if with_total:
total_loss = 0
total_constraints = 0
else:
total_loss = None
total_constraints = None
losses_multi_dict = {}
for (name, layer) in sorted(self.layers.items()):
assert isinstance(layer, LayerBase)
extra_name_prefix = None
if self._extra_layer_name_prefix_pattern.match(name):
(extra_name_prefix, name) = name.split(':', 1)
with reuse_name_scope('loss'):
with reuse_name_scope(layer.tf_scope_name):
losses = layer.get_losses_initialized(reduce_func=reduce_func)
for loss_obj in losses:
losses_multi_dict.setdefault(loss_obj.name, []).append((extra_name_prefix, loss_obj))
if with_total:
for loss_obj in losses:
if (loss_obj.get_loss_value_for_objective() is not None):
total_loss = tf_util.optional_add(total_loss, loss_obj.get_loss_value_for_objective())
if with_total:
with reuse_name_scope('constraints'):
with reuse_name_scope(layer.tf_scope_name):
constraints = layer.get_constraints_value()
if (constraints is not None):
total_constraints = tf_util.optional_add(total_constraints, constraints)
losses_dict = {}
for (loss_name, loss_holders) in losses_multi_dict.items():
assert (len(loss_holders) >= 1)
if (len(loss_holders) == 1):
assert (loss_name not in losses_dict)
losses_dict[loss_name] = loss_holders[0][1]
else:
for (extra_name_prefix, loss_holder) in loss_holders:
if (not extra_name_prefix):
name = loss_holder.name
else:
name = ('%s:%s' % (extra_name_prefix, loss_holder.name))
assert (name not in losses_dict)
losses_dict[name] = loss_holder
return (losses_dict, total_loss, total_constraints)
def _construct_objective(self):
self._flatten_layer_with_losses()
with tf.name_scope('objective'):
(losses_dict, total_loss, total_constraints) = self.get_losses_initialized(with_total=True)
self.losses_dict.clear()
self.losses_dict.update(losses_dict)
self.total_loss = total_loss
self.total_constraints = total_constraints
self.total_objective = tf_util.optional_add(total_loss, total_constraints)
if (not tf_util.has_current_control_flow_context()):
tf_compat.v1.summary.scalar('loss', self.total_loss)
tf_compat.v1.summary.scalar('constraints', self.total_constraints)
tf_compat.v1.summary.scalar('objective', self.total_objective)
def maybe_construct_objective(self):
'\n Construct self.total_objective.\n '
if (self.total_objective is None):
self._construct_objective()
def get_objective(self):
'\n :rtype: int|tf.Tensor\n :return: 0 if no loss, or tf.Tensor, scalar. loss + constraints. will be used for the updater.\n '
self.maybe_construct_objective()
return self.total_objective
def get_total_loss(self):
'\n :rtype: int|tf.Tensor\n :return: 0 if no loss, or tf.Tensor, scalar. without constraints. will be used for the updater\n '
self.maybe_construct_objective()
return self.total_loss
def get_total_constraints(self):
'\n :rtype: int|tf.Tensor\n :return: 0 if no constraints, or tf.Tensor, scalar. will be used for the updater\n '
self.maybe_construct_objective()
return self.total_constraints
def _get_all_merged_summaries(self):
'\n :return: merged summaries, serialized string\n :rtype: tf.Tensor\n '
if (self._merge_all_summaries is None):
self._merge_all_summaries = tf_compat.v1.summary.merge_all()
return self._merge_all_summaries
def _flatten_layer_with_losses(self):
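# Summary (annotation): find layers with frame-wise (non-recurrent) losses; where such a layer and its
# pointwise, parameter-free inputs only span the (batch, time) axes plus static dims, rebuild that
# sub-graph on a flattened batch*time axis (inserting FlattenBatchLayer at the boundary) and cache the
# flattened outputs, so that flatten_with_seq_len_mask in the loss computation reuses them instead of
# computing on padded frames.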
from .util.data import BatchInfo
from .layers.basic import SourceLayer, InternalLayer, SubnetworkLayer, CopyLayer, FlattenBatchLayer
from tensorflow.python.util import nest
from pprint import pformat
def _relevant_dims_for_layer(layer_):
'\n :param LayerBase layer_:\n :return: dims to flatten. this assumes _check_push_flattening_to_inputs_for_layer_simple\n :rtype: set[Dim]\n '
relevant_end_points = deps_used_by_end_points[layer_]
dims = set()
for end_point_ in relevant_end_points:
dims.update((end_point_.output.dim_tags[a] for a in [end_point_.output.batch_dim_axis, end_point_.output.time_dim_axis]))
return dims
def _needs_flattening(layer_):
'\n :param LayerBase layer_:\n :rtype: bool\n '
return set(layer_.output.dim_tags).issuperset(_relevant_dims_for_layer(layer_))
def _map_layer_dict_value(v):
if isinstance(v, LayerBase):
v = _resolve_layer(v)
if _needs_flattening(v):
return mapped_layers[v]
return v
def _make_layer(layer_cls, layer_dict, map_opts=True):
'\n Creates the flattened layer\n\n :param type[LayerBase]|LayerBase layer_cls:\n :param dict[str] layer_dict:\n :param bool map_opts:\n :rtype: LayerBase\n '
opts = layer_dict.copy()
if map_opts:
opts = nest.map_structure(_map_layer_dict_value, opts)
opts.pop('output', None)
opts['output'] = layer_cls.get_out_data_from_opts(**opts)
opts.pop('out_shape', None)
opts['output'] = layer_cls.fixup_out_data(**opts)
print(('Loss flattened layer %s/%r output: %r' % (opts['network'].name, opts['name'], opts['output'])), file=log.v3)
with self.layer_creation_scope(layer_class=layer_cls, **opts):
layer__ = layer_cls(**opts)
assert isinstance(layer__, LayerBase)
layer__.post_init(opts)
layer__.output.sanity_check()
return layer__
def _layer_deps(layer_):
'\n :param LayerBase layer_:\n :rtype: list[LayerBase]\n '
return [_resolve_layer(dep_) for dep_ in nest.flatten(layer_.kwargs) if isinstance(dep_, LayerBase)]
def _should_flatten_layer_output(layer_):
'\n Decides whether the layer output has the right properties for flattening.\n\n :param LayerBase layer_:\n :rtype: bool\n '
if ((not layer_.output.have_batch_axis()) or (not layer_.output.have_time_axis())):
return False
if (not layer_.output.is_time_axis_dynamic()):
return False
dims_ = [layer_.output.dim_tags[a] for a in [layer_.output.batch_dim_axis, layer_.output.time_dim_axis]]
if any(((d.dimension is None) for d in set(layer_.output.dim_tags).difference(dims_))):
return False
if layer_.output.beam:
return False
return True
def _check_push_flattening_to_inputs_for_layer_simple(layer_):
'\n Checks preconditions for input flattening\n\n :param LayerBase layer_:\n :rtype: bool\n '
if (layer_ in blacklist):
return False
if layer_.recurrent:
return False
if layer_.have_params:
return False
if isinstance(layer_, (SourceLayer, InternalLayer)):
return False
if (layer_.layer_class in {'random', 'rand_int', 'constant'}):
return False
if (not _should_flatten_layer_output(layer_)):
return False
return True
def _check_push_flattening_to_inputs_for_layer(layer_):
'\n Checks whether the inputs to the layer should be flattened as well.\n\n :param LayerBase layer_:\n :return: False when we should stop here\n :rtype: bool\n '
if (not _check_push_flattening_to_inputs_for_layer_simple(layer_)):
return False
dims = _relevant_dims_for_layer(layer_)
if (len(dims) > 2):
return False
assert set(layer_.output.dim_tags).issuperset(dims)
rem_dims = set(layer_.output.dim_tags).difference(dims)
if any(((d.dimension is None) for d in rem_dims)):
return False
deps = _layer_deps(layer_)
if (not deps):
return False
layer_kwargs = layer_.kwargs.copy()
layer_kwargs.pop('out_shape', None)
layer_kwargs.pop('output', None)
layer_kwargs_flat_values = nest.flatten(layer_kwargs)
if any(((dim in layer_kwargs_flat_values) for dim in dims)):
return False
valid_deps = all(((set(dep_.output.dim_tags).issuperset(dims) or set(dep_.output.dim_tags).isdisjoint(dims)) for dep_ in deps))
if (not valid_deps):
return False
have_any_deps_which_needs_flattening = False
for dep_ in deps:
if dep_.output.beam:
return False
if _needs_flattening(dep_):
if any(((d.dimension is None) for d in set(dep_.output.dim_tags).difference(dims))):
return False
layer_queue.append(dep_)
have_any_deps_which_needs_flattening = True
return have_any_deps_which_needs_flattening
def _resolve_layer(layer_):
'\n Resolves through wrapper layers (SubnetworkLayer output, single-source CopyLayer)\n and returns the underlying layer.\n\n :param LayerBase layer_:\n :return: the resolved layer\n :rtype: LayerBase\n '
while True:
if isinstance(layer_, SubnetworkLayer):
layer_ = layer_.subnetwork.layers['output']
continue
if ((type(layer_) is CopyLayer) and (len(layer_.sources) == 1)):
layer_ = layer_.sources[0]
continue
return layer_
end_points = []
blacklist = set()
for layer in self.layers.values():
if (not layer.loss):
continue
if layer.loss.recurrent:
continue
layer = _resolve_layer(layer)
if (not _check_push_flattening_to_inputs_for_layer_simple(layer)):
continue
cache = tf_util.get_flatten_with_seq_len_mask_cache_for_data(layer.output)
if cache.has_cache():
continue
end_points.append(layer)
if (not end_points):
return
layer_queue = []
for layer in self.layers.values():
if (not layer.loss):
continue
layer = _resolve_layer(layer)
if (layer in end_points):
continue
layer_queue.append(layer)
while layer_queue:
layer = layer_queue.pop(0)
if (layer in blacklist):
continue
blacklist.add(layer)
layer_queue.extend(_layer_deps(layer))
deps_used_by_end_points = {layer: {layer} for layer in end_points}
deps_used_by = {layer: set() for layer in end_points}
for end_point in end_points:
layer_queue = [end_point]
visited = set()
while layer_queue:
layer = layer_queue.pop(0)
if (layer in visited):
continue
visited.add(layer)
for dep in _layer_deps(layer):
deps_used_by_end_points.setdefault(dep, set()).add(end_point)
deps_used_by.setdefault(dep, set()).add(layer)
if (dep in visited):
continue
if (not _check_push_flattening_to_inputs_for_layer_simple(dep)):
continue
layer_queue.append(dep)
starting_points = []
mapped_layers = {}
visited = set()
end_points = [_resolve_layer(layer) for layer in end_points]
layer_queue = list(end_points)
while layer_queue:
layer = layer_queue.pop(0)
if (layer in visited):
continue
visited.add(layer)
if (not _check_push_flattening_to_inputs_for_layer(layer)):
if ((len(visited) == 1) and (not layer_queue)):
return
starting_points.append(layer)
mapped_layers[layer] = _make_layer(FlattenBatchLayer, dict(network=layer.network, sources=[layer], name=('%s_flat' % layer.name)), map_opts=False)
assert starting_points, ('no starting points found, starting from end points %r' % (end_points,))
visited = set()
layer_queue = list(starting_points)
while layer_queue:
layer = layer_queue.pop(0)
if (layer in visited):
continue
visited.add(layer)
if (not _needs_flattening(layer)):
continue
if (layer not in starting_points):
if any(((dep not in mapped_layers) for dep in _layer_deps(layer) if _needs_flattening(dep))):
assert layer_queue
visited.remove(layer)
layer_queue.append(layer)
continue
if (layer not in mapped_layers):
mapped_layers[layer] = _make_layer(type(layer), layer.kwargs)
for next_layer in deps_used_by[layer]:
layer_queue.append(next_layer)
for layer in end_points:
assert (layer in mapped_layers), ('end point %r not mapped.\n end points:\n%s\n mapped:\n%s\n blacklist:\n%s\n starting points:\n%s' % (layer, pformat(end_points), pformat(mapped_layers), pformat(blacklist), pformat(starting_points)))
for (layer, new_layer) in mapped_layers.items():
if (not _should_flatten_layer_output(layer)):
continue
new_out = new_layer.output
if (not new_out.have_batch_axis()):
continue
new_batch = new_out.batch
if (not new_batch):
continue
if (len(new_batch.virtual_dims) != 2):
continue
(new_virt_batch_dim1, new_virt_batch_dim2) = new_batch.virtual_dims
if (not isinstance(new_virt_batch_dim1, BatchInfo.GlobalBatchDim)):
continue
if (not isinstance(new_virt_batch_dim2, BatchInfo.PackedDim)):
continue
if (new_virt_batch_dim2.dim_tag != layer.output.get_time_dim_tag()):
continue
new_out_template = layer.output.copy_template_excluding_time_dim()
new_out_template = new_out_template.copy_template_excluding_axis(new_out_template.batch_dim_axis)
new_out_template = new_out_template.copy_add_dim_by_tag(new_out.get_batch_dim_tag(), unbroadcast=True, axis=0)
new_out = new_out.copy_compatible_to(new_out_template, add_dims=False)
cache = tf_util.get_flatten_with_seq_len_mask_cache_for_data(layer.output)
cache.set_cache(new_out.placeholder)
def get_fetches_dict(self, config=None, should_train=None, should_eval=None, with_summary=False, with_size=False, horovod_collected_reduce_inputs=None):
'\n :param returnn.config.Config|None config:\n :param bool|None should_train:\n :param bool|None should_eval:\n :param bool with_summary:\n :param bool with_size:\n :param dict[str,(tf.Tensor,tf.Tensor)]|None horovod_collected_reduce_inputs: will write into. see below\n :return: values and actions which should be calculated and executed in self.run()\n by the TF session for each step\n :rtype: dict[str,tf.Tensor|tf.Operation]\n '
import os
if (config is None):
config = self.get_config()
if (should_train is None):
should_train = (self.train_flag is not False)
if (should_eval is None):
should_eval = self.eval_flag
use_horovod_reduction = False
if config.is_true('use_horovod'):
import returnn.tf.horovod
if returnn.tf.horovod.get_ctx().should_sync_every_step():
use_horovod_reduction = True
def reduce_sum(x, name, average=False):
'\n :param tf.Tensor x:\n :param str name:\n :param bool average:\n :return: sum(x) if horovod else x\n :rtype: tf.Tensor\n '
if (not use_horovod_reduction):
return x
from returnn.tf.util.basic import global_tensor
import horovod.tensorflow as hvd
out = global_tensor((lambda : hvd.allreduce(x, average=average)), name=('horovod_fetch_reduce_sum__' + name.replace(':', '__').replace('/', '_')))
if ((horovod_collected_reduce_inputs is not None) and (x.name not in horovod_collected_reduce_inputs)):
horovod_collected_reduce_inputs[x.name] = (x, out)
return out
def inv_reduce_sum(x, name):
'\n :param tf.Tensor x:\n :param str name:\n :return: reciprocal(sum(reciprocal(x))) if horovod else x\n :rtype: tf.Tensor\n '
if (not use_horovod_reduction):
return x
return tf_compat.v1.reciprocal(reduce_sum(tf_compat.v1.reciprocal(x), name=name))
d = {}
if with_size:
for key in self.used_data_keys:
data = self.extern_data.get_data(key)
for (dim, v) in data.size_placeholder.items():
d[('size:%s:%i' % (key, dim))] = v
if (should_train or should_eval):
loss = self.get_objective()
if (isinstance(loss, int) and (loss == 0)):
loss = tf_util.global_tensor((lambda : tf.constant(0.0)), name='zero_loss')
else:
assert self.losses_dict
d['loss'] = reduce_sum(loss, name='loss', average=True)
for (loss_name, loss) in self.losses_dict.items():
if (loss.get_only_on_eval() and should_train):
continue
if (loss.get_loss_value_for_fetch() is not None):
d[('cost:%s' % loss_name)] = reduce_sum(loss.get_loss_value_for_fetch(), name=('cost:%s' % loss_name))
if (loss.get_error_value() is not None):
d[('error:%s' % loss_name)] = reduce_sum(loss.get_error_value(), name=('error:%s' % loss_name))
d[('loss_norm_factor:%s' % loss_name)] = inv_reduce_sum(loss.get_norm_factor(), name=('loss_norm_factor:%s' % loss_name))
if with_size:
for layer in self.layers.values():
if (layer.only_on_eval and should_train):
continue
if (layer.target and layer.target.startswith('layer:')):
target_data = layer.loss.target
for (dim, v) in target_data.size_placeholder.items():
d[('size:%s:%i' % (layer.target, dim))] = v
for layer in self.layers.values():
for (k, v) in layer.stats.items():
d[('stats:%s:%s' % (layer.name, k))] = v
if config.bool('tf_log_memory_usage', False):
for dev in tf_util.get_tf_list_local_devices():
if (dev.device_type != 'GPU'):
continue
if (not tf_util.is_gpu_available_in_session()):
continue
d[('mem_usage:%s' % os.path.basename(dev.name.replace('/device:', '/')))] = tf_util.mem_usage_for_dev(dev.name)
if self.get_post_control_dependencies():
d['post_control_dependencies'] = self.get_post_control_dependencies()
if (with_summary and (self._get_all_merged_summaries() is not None)):
d['summary'] = self._get_all_merged_summaries()
return d
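# Example of keys in the returned fetch dict (loss/layer/data names are assumptions):
#   "loss"                       total objective (averaged over workers when horovod sync reduction is on)
#   "cost:output", "error:output", "loss_norm_factor:output"   per registered loss named "output"
#   "size:data:0"                dynamic seq lengths of extern data "data" (only with with_size)
#   "mem_usage:GPU:0", "summary", "post_control_dependencies"  optional extras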
def get_used_targets(self):
'\n :return: sorted list of targets\n :rtype: list[str]\n '
targets = set()
for layer in self.layers.values():
if layer.target:
targets.add(layer.target)
return list(sorted(targets))
def get_default_target(self):
'\n :return: e.g. "classes"\n :rtype: str\n '
targets = self.get_used_targets()
default_target = self.extern_data.default_target
if (not targets):
return default_target
if (len(targets) == 1):
return targets[0]
if (default_target in targets):
return default_target
raise Exception(("multiple targets %r and default_target %r not in list. set 'target' in config" % (targets, default_target)))
def get_output_layers(self):
'\n :rtype: list[LayerBase]\n '
return [layer for (_, layer) in sorted(self.layers.items()) if layer.is_output_layer()]
def get_default_output_layer_name(self):
'\n :rtype: str|None\n :returns: default output layer name if there is one, or None\n '
if ('output' in self.layers):
return 'output'
output_layers = self.get_output_layers()
if (len(output_layers) == 1):
return output_layers[0].name
return None
def get_default_output_layer(self, must_exist=True):
'\n :param bool must_exist: if it does not exist, will raise an exception\n :rtype: LayerBase|None\n :return: the default output layer\n '
name = self.get_default_output_layer_name()
if (not name):
from pprint import pformat
assert (not must_exist), ('%s: default output layer does not exist. Layers:\n%s' % (self, pformat(self.layers)))
return None
return self.layers[name]
def get_layer(self, layer_name):
'\n Normally just self.layers[layer_name] but with some extra logic added,\n such as resolving "base:" prefix to the parent network.\n Raises :class:`LayerNotFound` if the layer is not found.\n\n :param str layer_name:\n :rtype: LayerBase\n '
if layer_name.startswith('base:'):
if (not self.parent_net):
raise LayerNotFound(('layer %r not found, there is no parent net of %r' % (layer_name, self)), layer_name=layer_name, network=self)
return self.parent_net.get_layer(layer_name[len('base:'):])
if (layer_name in self.layers):
return self.layers[layer_name]
orig_layer_name = layer_name
if ('/' in layer_name):
(root_layer_name, sub_layer_name) = layer_name.split('/', 1)
if (root_layer_name in self.subnets):
subnet = self.subnets[root_layer_name]
return subnet.net.get_layer(sub_layer_name)
root_layer = self.get_layer(root_layer_name)
sub_layer = root_layer.get_sub_layer(sub_layer_name)
if (not sub_layer):
raise LayerNotFound(('sub-layer %r not found in layer %r in net %r' % (sub_layer_name, root_layer, self)), layer_name=orig_layer_name, network=self)
return sub_layer
if self._extra_layer_name_prefix_pattern.match(layer_name):
if self.extra_parent_net:
return self.extra_parent_net.get_layer(layer_name)
(prefix, layer_name) = layer_name.split(':', 1)
(extra_net, _) = self._get_extra_net(prefix_name=prefix, auto_create=False)
if (not extra_net):
raise LayerNotFound(('cannot get layer %r, no extra net for %r' % (layer_name, self)), layer_name=orig_layer_name, network=self)
if (layer_name not in extra_net.layers):
raise LayerNotFound(('layer %r not found in extra net %r' % (layer_name, extra_net)), layer_name=orig_layer_name, network=self)
return extra_net.layers[layer_name]
if layer_name.startswith('base:'):
if (not self.parent_net):
raise LayerNotFound(('cannot get layer %r, no parent net for %r' % (layer_name, self)), layer_name=orig_layer_name, network=self)
return self.parent_net.get_layer(layer_name[len('base:'):])
if ((layer_name == 'data') or layer_name.startswith('data:')):
return self.construct_layer(name=layer_name, net_dict={}, check_existing=False)
if self.extra_parent_net:
return self.extra_parent_net.get_layer(layer_name)
if (layer_name not in self.layers):
raise LayerNotFound(('layer %r not found in %r' % (layer_name, self)), layer_name=orig_layer_name, network=self)
return self.layers[layer_name]
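# Resolution order in get_layer above (summary annotation): "base:name" goes to the parent net,
# existing entries in self.layers win next, "root/sub" descends into subnets or sub-layers,
# "extra.<x>:name" looks into the already-created extra net (without creating one), and plain
# "data" / "data:key" falls back to constructing the implicit source layer.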
def get_all_layers_shallow(self):
'\n :return: layers, including extra net, not including sub layers\n :rtype: list[LayerBase]\n '
layer_set = set()
layers = []
for (_, layer) in sorted(self.layers.items()):
if (layer not in layer_set):
layers.append(layer)
layer_set.add(layer)
if self.extra_nets:
for (_, extra_net) in sorted(self.extra_nets.items()):
assert isinstance(extra_net, TFNetwork)
for (_, layer) in sorted(extra_net.layers.items()):
if (layer not in layer_set):
layers.append(layer)
layer_set.add(layer)
return layers
def get_all_layers_deep(self):
'\n :return: all layers, including extra net, including sub layers. duplicates are made unique.\n It might exclude internal layers.\n We ensure that layers are unique by their absolute name.\n :rtype: list[LayerBase]\n '
all_params = set()
layers_by_abs_name = {}
layer_set = set()
layers = []
skipped_layers = []
net_queue = [self]
layer_queue = []
while (net_queue or layer_queue):
if layer_queue:
layer = layer_queue.pop(0)
if (layer in layer_set):
continue
layer_set.add(layer)
layer_abs_name = layer.get_absolute_name()
if (layer_abs_name not in layers_by_abs_name):
layers.append(layer)
layers_by_abs_name[layer_abs_name] = layer
all_params.update(layer.params.values())
else:
skipped_layers.append(layer)
sub_nets = layer.get_sub_networks()
if sub_nets:
net_queue += sub_nets
else:
sub_layers = layer.get_sub_layers()
layer_queue += sub_layers
continue
if net_queue:
net = net_queue.pop(0)
if net.extra_nets:
net_queue[:0] = [extra_net for (_, extra_net) in sorted(net.extra_nets.items())]
for (_, layer) in sorted(net.layers.items()):
if (layer not in layer_set):
layer_queue.append(layer)
continue
for layer in skipped_layers:
for param in layer.params.values():
assert (param in all_params)
return layers
def get_params_list(self):
'\n :return: list of model variables, i.e. from all the layers, excluding auxiliary vars like global_step\n :rtype: list[tf.Variable]\n '
ls = []
for layer in self.get_all_layers_deep():
assert isinstance(layer, LayerBase)
for (param_name, param) in sorted(layer.params.items()):
assert isinstance(param, tf.Variable)
if (param in ls):
continue
ls.append(param)
return ls
def get_saveable_param_replace_dict(self):
'\n :return: params and saveable_param_replace resolved, union of all layers\n :rtype: dict[tf.Variable,tensorflow.python.training.saver.BaseSaverBuilder.SaveableObject]\n '
d = {}
for layer in self.get_all_layers_deep():
assert isinstance(layer, LayerBase)
d.update(layer.saveable_param_replace)
return d
def get_saveable_params_list(self):
'\n :return: list of model variables or SaveableObject, to save/restore\n :rtype: list[tf.Variable|tensorflow.python.training.saver.BaseSaverBuilder.SaveableObject]\n '
state_vars = tf_compat.v1.get_collection(tf_util.CollectionKeys.STATE_VARS)
ls = []
for layer in self.get_all_layers_deep():
assert isinstance(layer, LayerBase)
for (param_name, param) in sorted(layer.get_saveable_params_dict().items()):
if (param in ls):
continue
if (param in state_vars):
continue
ls.append(param)
ls += self.get_auxiliary_params()
ls += self.extra_vars_to_save
return ls
def get_trainable_params(self):
'\n :return: list of variables\n :rtype: list[tf.Variable]\n '
if (self._selected_train_layers is None):
self.declare_train_params()
trainable_vars_col = tf_compat.v1.get_collection(tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES)
assert isinstance(trainable_vars_col, list)
ls = []
for layer_name in sorted(self._selected_train_layers):
layer = self.layers[layer_name]
assert isinstance(layer, LayerBase)
for (param_name, param) in sorted(layer.params.items()):
assert isinstance(param, tf.Variable)
if (param in trainable_vars_col):
ls.append(param)
trainable_vars_col.remove(param)
if self.extra_nets:
for (_, extra_net) in sorted(self.extra_nets.items()):
assert isinstance(extra_net, TFNetwork)
for param in extra_net.get_trainable_params():
if (param not in ls):
ls.append(param)
return ls
def declare_train_params(self, hidden_layer_selection=None, with_output=None, global_trainable=None):
'\n :param list[str]|None hidden_layer_selection:\n :param bool|None with_output:\n :param bool|None global_trainable:\n '
if (global_trainable is None):
global_trainable = self.layers_desc.get('#trainable', True)
if global_trainable:
if (hidden_layer_selection is None):
hidden_layer_selection = [name for (name, layer) in self.layers.items() if (not layer.is_output_layer())]
else:
hidden_layer_selection = list(hidden_layer_selection)
if (with_output is None):
with_output = True
if with_output:
hidden_layer_selection += [name for (name, layer) in self.layers.items() if layer.is_output_layer()]
hidden_layer_selection = set(hidden_layer_selection)
else:
hidden_layer_selection = set()
self._selected_train_layers = sorted(hidden_layer_selection)
if self.extra_nets:
for (_, extra_net) in self.extra_nets.items():
extra_net.declare_train_params(global_trainable=global_trainable)
def get_num_params(self):
'\n :return: number of model parameters, i.e. total dimension\n :rtype: int\n '
num_params = 0
params = self.get_params_list()
for param in params:
shape = param.get_shape().as_list()
if all(shape):
num_params += int(numpy.prod(shape))
return num_params
def initialize_params(self, session):
'\n :param tf.compat.v1.Session session:\n\n Note: This will create a new node to the graph for each call!\n And it will overwrite also the already initialized variables.\n So you should call this only once after network construction and before you maybe load some of the params\n from external sources.\n If you know that you will load all params explicitly, you would not need to call this function.\n '
var_list = (self.get_params_list() + self.get_auxiliary_params())
with tf.name_scope('var_initializer'):
initializer_op = tf_compat.v1.variables_initializer(var_list=var_list)
session.run(initializer_op)
for var in var_list:
custom_post_init = getattr(var, 'custom_post_init', None)
if custom_post_init:
assert callable(custom_post_init)
custom_post_init(session=session)
def get_var_assigner(self, var):
'\n :param tf.Variable var:\n '
if (var in self._assigner_cache):
return self._assigner_cache[var]
with reuse_name_scope('var_assigner'):
assigner = tf_util.VariableAssigner(var)
self._assigner_cache[var] = assigner
return assigner
def get_param_values_dict(self, session):
'\n :param tf.compat.v1.Session session:\n :return: dict: layer_name -> param_name -> variable numpy array\n :rtype: dict[str,dict[str,numpy.ndarray]]\n Note that this excludes auxiliary params.\n '
layers = {}
for layer in self.get_all_layers_deep():
name = layer.get_absolute_name()
assert (name not in layers)
layers[name] = layer.get_param_values_dict(session)
return layers
def set_param_values_by_dict(self, values_dict, ignore_non_existing=False, **kwargs):
'\n :param dict[str,dict[str,numpy.ndarray]] values_dict:\n :param bool ignore_non_existing:\n :param kwargs: passed to :func:`LayerBase.set_param_values_by_dict`\n\n Note that this excludes auxiliary params.\n '
layers = {layer.get_absolute_name(): layer for layer in self.get_all_layers_deep()}
for (layer_name, layer_values_dict) in values_dict.items():
if layer_values_dict:
if (ignore_non_existing and (layer_name not in layers)):
print(('Will not set layer %r because it does not exist.' % (layer_name,)), file=log.v3)
continue
layers[layer_name].set_param_values_by_dict(values_dict=layer_values_dict, **kwargs)
def get_auxiliary_params(self):
'\n :rtype: list[tf.Variable]\n '
var_list = [self.global_train_step_var]
rnd_generator = tf_util.get_global_random_generator(create=False)
if (rnd_generator is not None):
assert isinstance(rnd_generator.state, tf.Variable)
var_list.append(rnd_generator.state)
return var_list
def get_params_serialized(self, session):
'\n :param tf.compat.v1.Session session:\n :rtype: TFNetworkParamsSerialized\n '
return TFNetworkParamsSerialized(values_dict=self.get_param_values_dict(session=session), global_train_step=self.get_global_train_step(session=session))
def set_params_by_serialized(self, serialized, session, **kwargs):
'\n :param TFNetworkParamsSerialized serialized:\n :param tf.compat.v1.Session session:\n :param kwargs: passed to :func:`set_param_values_by_dict`\n '
self.set_param_values_by_dict(serialized.values_dict, session=session, **kwargs)
self.set_global_train_step(serialized.global_train_step, session=session)
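# Hedged usage sketch (assumes a constructed TFNetwork `net` and a tf.compat.v1.Session `session`):
# round-trip all parameters (including the global train step) through numpy arrays.
serialized = net.get_params_serialized(session=session)  # TFNetworkParamsSerialized
# ... e.g. reconstruct the same network in a fresh graph/session, then:
net.set_params_by_serialized(serialized, session=session)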
@property
def global_train_step_var(self):
'\n :rtype: tf.Variable\n '
net = self
while True:
if (net._global_train_step_var is not None):
return net._global_train_step_var
if net.parent_net:
net = net.parent_net
continue
if net.extra_parent_net:
net = net.extra_parent_net
continue
with tf_util.reuse_name_scope('', absolute=True, reuse=getattr(tf_compat.v1, 'AUTO_REUSE', None)):
with tf_util.default_control_flow_ctx(), tf.device('/cpu:0'):
net._global_train_step_var = tf_compat.v1.get_variable(name='global_step', shape=(), dtype=tf.int64, initializer=tf_compat.v1.zeros_initializer(tf.int64), collections=[tf_compat.v1.GraphKeys.GLOBAL_STEP], trainable=False)
return net._global_train_step_var
@property
def global_train_step(self):
'\n :rtype: tf.Tensor|tf.Variable\n '
from returnn.util import BehaviorVersion
net = self
while True:
if (net._global_train_step is not None):
return net._global_train_step
if net.parent_net:
net = net.parent_net
continue
if net.extra_parent_net:
net = net.extra_parent_net
continue
if (BehaviorVersion.get() <= 14):
return net.global_train_step_var
with tf_util.default_control_flow_ctx(), tf_util.reuse_name_scope('', absolute=True), tf.device('/cpu:0'):
net._global_train_step = net.global_train_step_var.read_value()
return net._global_train_step
def set_global_train_step(self, step, session):
'\n :param int step:\n :param tf.compat.v1.Session session:\n '
self.get_var_assigner(self.global_train_step_var).assign(step, session=session)
def get_global_train_step(self, session):
'\n :param tf.compat.v1.Session session:\n :rtype: int\n '
return self.global_train_step_var.eval(session=session)
def get_epoch_step(self):
'\n :return: int64\n :rtype: tf.Tensor\n '
if self.parent_net:
return self.parent_net.get_epoch_step()
if (self.epoch_step is not None):
return self.epoch_step
with reuse_name_scope('', absolute=True):
self.epoch_step = tf_compat.v1.placeholder(name='epoch_step', shape=(), dtype=tf.int64)
return self.epoch_step
def reset_saver(self):
"\n Resets the :class:`tf.train.Saver` object which will be used\n for :func:`load_params_from_file` and :func:`save_params_to_file`.\n Warning: Don't repeat that too often as it will always create new ops in the computation graph.\n "
self.saver = None
def _create_saver(self):
with tf.name_scope('saver'):
self.saver = tf_compat.v1.train.Saver(var_list=self.get_saveable_params_list(), max_to_keep=((2 ** 31) - 1))
def save_params_to_file(self, filename, session):
'\n Will save the model parameters to the filename.\n Note that the model parameters live inside the current TF session.\n\n :param str filename:\n :param tf.compat.v1.Session session:\n '
import os
filename = os.path.abspath(filename)
from returnn.util.basic import maybe_make_dirs
maybe_make_dirs(os.path.dirname(filename))
if (not self.saver):
self._create_saver()
try_again_wait_time = 10
while True:
try:
self.saver.save(sess=session, save_path=filename)
break
except IOError as e:
import errno
import time
if (e.errno in [errno.EBUSY, errno.EDQUOT, errno.EIO, errno.ENOSPC]):
print('Exception while saving:', e, file=log.v3)
print(('Trying again in %s secs.' % try_again_wait_time), file=log.v3)
time.sleep(try_again_wait_time)
continue
raise
def load_params_from_file(self, filename, session):
'\n Will load the model parameters from the filename.\n Note that the model parameters live inside the current TF session.\n\n :param str filename:\n :param tf.compat.v1.Session session:\n '
saveable_params = self.get_saveable_params_list()
must_use_custom_checkpoint_loader = False
if any([have_custom_post_init(param) for param in saveable_params]):
must_use_custom_checkpoint_loader = True
if any([layer.custom_param_importer for layer in self.get_all_layers_deep()]):
must_use_custom_checkpoint_loader = True
ignore_missing_vars = self.get_config().bool('load_ignore_missing_vars', False)
if ignore_missing_vars:
must_use_custom_checkpoint_loader = True
if must_use_custom_checkpoint_loader:
loader = CustomCheckpointLoader(filename=filename, saveable_params=saveable_params, network=self, ignore_missing=ignore_missing_vars)
loader.load_now(session=session)
return
if (not self.saver):
self._create_saver()
try:
self.saver.restore(sess=session, save_path=filename)
except tf.errors.NotFoundError as exc:
print('load_params_from_file: some variables not found:', file=log.v2)
for line in str(exc).splitlines():
if ('not found' in line):
print(f' {line}', file=log.v4)
try:
loader = CustomCheckpointLoader(filename=filename, saveable_params=saveable_params, network=self, ignore_missing=self.get_config().bool('load_ignore_missing_vars', False))
if loader.missing_non_critical_var_names:
print('Did not find non-critical-to-restore vars:', loader.missing_non_critical_var_names, file=log.v2)
elif (not loader.missing_var_names):
print('Strange, nothing missing? Pre-loaded missing variables from other checkpoints?', file=log.v2)
loader.load_now(session=session)
except tf.errors.NotFoundError:
print(('Error, some entry is missing in the checkpoint %r: %s: %s' % (filename, type(exc).__name__, exc)), file=log.v1)
print('CustomCheckpointLoader was not able to recover.', file=log.v2)
raise
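# Hedged usage sketch (assumes a constructed TFNetwork `net`, a tf.compat.v1.Session `session`,
# and a hypothetical checkpoint path): typical init / save / restore sequence.
net.initialize_params(session)  # random init, runs any custom_post_init hooks
net.save_params_to_file('/tmp/model.001', session)
net.load_params_from_file('/tmp/model.001', session)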
def print_network_info(self, name='Network'):
'\n :param str name:\n :return: nothing, prints very brief net topology on log\n '
print(('%s layer topology:' % name), file=log.v2)
print(' extern data:', self.extern_data.get_data_description(), file=log.v2)
print((' used data keys: %s' % list(sorted(self.used_data_keys))), file=log.v2)
print(' layers:', file=log.v2)
for (layer_name, layer) in sorted(self.layers.items()):
layer_dim = ('unknown' if (layer.output.dim is None) else ('%i' % layer.output.dim))
print((' layer %s %r #: %s' % (layer.layer_class, layer_name, layer_dim)), file=log.v2)
if (not self.layers):
print(' (no layers)', file=log.v2)
if self.extra_nets:
for (_, extra_net) in sorted(self.extra_nets.items()):
assert isinstance(extra_net, TFNetwork)
print((' %r layers:' % extra_net.name), file=log.v2)
for (layer_name, layer) in sorted(extra_net.layers.items()):
layer_dim = ('unknown' if (layer.output.dim is None) else ('%i' % layer.output.dim))
print((' layer %s %r #: %s' % (layer.layer_class, layer_name, layer_dim)), file=log.v2)
if (not extra_net.layers):
print(' (no layers)', file=log.v2)
print('net params #:', self.get_num_params(), file=log.v2)
print('net trainable params:', self.get_trainable_params(), file=log.v2)
def cond_on_train(self, fn_train, fn_eval):
'\n Uses fn_train() or fn_eval() based on self.train_flag.\n It will be a branched evaluation.\n\n :param ()->(tf.Tensor|T) fn_train:\n :param ()->(tf.Tensor|T) fn_eval:\n :return: fn_train() if self.train_flag else fn_eval()\n :rtype: tf.Tensor|T\n '
return tf_util.cond(self.train_flag, fn_train, fn_eval)
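# Hedged usage sketch (assumes a TFNetwork `net` and a tf.Tensor `x`):
# apply dropout only when the network runs in training mode.
y = net.cond_on_train(fn_train=lambda: tf.nn.dropout(x, rate=0.1), fn_eval=lambda: x)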
def get_search_choices(self, sources=None, src=None, base_search_choice=None, _layer_to_search_choices=None, debug_stream=None):
'\n Recursively searches through all sources,\n and if there is a :class:`ChoiceLayer` / any layer with search_choices, returns it.\n Could also go to the parent network.\n If there are multiple, it assumes they are on the same search-sequence in the search-tree\n and it will return the last one.\n\n :param LayerBase|None src:\n :param LayerBase|None base_search_choice:\n :param list[LayerBase]|None sources:\n :param dict[LayerBase]|None _layer_to_search_choices:\n keep track of visited layers in case there are circular deps\n :param typing.TextIO|None debug_stream: if given, will print additional debug info into it\n :return: (direct or indirect) source LayerBase which has search_choices, or None\n :rtype: LayerBase|None\n '
if src:
assert (src.get_normalized_layer() == src)
from returnn.tf.layers.basic import SearchChoices
from functools import cmp_to_key
from pprint import pformat
if (_layer_to_search_choices is None):
_layer_to_search_choices = {}
normalized_to_layer = {}
layers = self._get_all_search_choices(sources=sources, src=src, base_search_choice=base_search_choice, _layer_to_search_choices=_layer_to_search_choices, _normalized_to_layer=normalized_to_layer)
def full_trace_for_layer(layer, _layer_trace=None):
'\n :param LayerBase layer: with search choices\n :param list[LayerBase]|None _layer_trace:\n :return: layers with search choices\n :rtype: list[LayerBase]\n '
assert (isinstance(layer, LayerBase) and isinstance(layer.search_choices, SearchChoices))
if (_layer_trace is None):
_layer_trace = []
if (layer not in _layer_trace):
_layer_trace.append(layer)
else:
return _layer_trace
if (layer not in _layer_to_search_choices):
self._get_all_search_choices(base_search_choice=layer, _layer_to_search_choices=_layer_to_search_choices, _normalized_to_layer=normalized_to_layer)
for dep in _layer_to_search_choices[layer]:
full_trace_for_layer(dep, _layer_trace=_layer_trace)
return _layer_trace
def get_debug_dep_map():
'\n :rtype: dict[str,list[str]]\n '
relevant_map = {}
for (key, values) in _layer_to_search_choices.items():
relevant_map[key.get_absolute_name()] = [value.get_absolute_name() for value in values]
return relevant_map
def compare_layer(l1, l2):
'\n Compares two layers with search_choices, to sort them.\n See also: :func:`SearchChoices.compare`.\n\n :param LayerBase l1:\n :param LayerBase l2:\n :return: 0 if equal, -1 if l1 <= l2, else 1 if l1 >= l2\n :rtype: int\n '
assert (isinstance(l1, LayerBase) and isinstance(l1.search_choices, SearchChoices))
assert (isinstance(l2, LayerBase) and isinstance(l2.search_choices, SearchChoices))
l1n = l1.get_normalized_layer()
l2n = l2.get_normalized_layer()
if ((l1 != l1n) and (l2 != l2n)):
(l1, l2) = (l1n, l2n)
if (l1 is l2):
return 0
l1trace_ = full_trace_for_layer(l1)
l2trace_ = full_trace_for_layer(l2)
if ((l1 in l2trace_) and (l2 not in l1trace_)):
return (- 1)
if ((l2 in l1trace_) and (l1 not in l2trace_)):
return 1
raise Exception(('get_search_choices src=%r base_search_choice=%r sources=%r.\nSearch choices cannot be compared.\nLayer 1\n %r\nchoice trace\n%s\nvs layer 2\n %r\nchoice trace\n%s.\nFull dependency map:\n%s\nRelevant layers:\n%s\nNetwork:\n%s' % (src, base_search_choice, sources, l1, pformat(l1trace_), l2, pformat(l2trace_), pformat(get_debug_dep_map()), pformat(layers), pformat(self.layers))))
if debug_stream:
print(('Relevant layers:\n%s' % pformat(layers)), file=debug_stream)
print(('Full dependency map:\n%s' % pformat(get_debug_dep_map())), file=debug_stream)
while (base_search_choice in layers):
layers.remove(base_search_choice)
if (not layers):
return None
layers = sorted(layers, key=cmp_to_key(compare_layer))
return layers[(- 1)]
def _get_all_search_choices(self, sources=None, src=None, base_search_choice=None, _layer_to_search_choices=None, _normalized_to_layer=None):
'\n Recursively searches through all sources,\n and if there is a :class:`ChoiceLayer` / any layer with search_choices, returns it.\n Could also go to the parent network.\n If there are multiple, it assumes they are on the same search-sequence in the search-tree\n and it will return the last one.\n\n :param LayerBase|None src:\n :param LayerBase|None base_search_choice:\n :param list[LayerBase]|None sources:\n :param dict[LayerBase,list[LayerBase]]|None _layer_to_search_choices:\n tracks visited layers in case there are circular deps\n :param dict[LayerBase,LayerBase]|None _normalized_to_layer:\n :return: (direct or indirect) source layers (LayerBase) which have search_choices\n :rtype: list[LayerBase]\n '
if (_layer_to_search_choices is None):
_layer_to_search_choices = {}
if (_normalized_to_layer is None):
_normalized_to_layer = {}
if (src is not None):
assert isinstance(src, LayerBase)
normalized_src = src.get_normalized_layer()
if (normalized_src != src):
assert (_normalized_to_layer.setdefault(normalized_src, src) == src)
if src.search_choices:
assert normalized_src.search_choices, ('normalized %s vs %s (choices %s)' % (normalized_src, src, src.search_choices))
if src.search_choices:
if src.search_choices.is_decided:
return []
return [src]
assert (base_search_choice is None)
base_search_choice = src
if (base_search_choice is not None):
if (base_search_choice in _layer_to_search_choices):
return _layer_to_search_choices[base_search_choice]
else:
_layer_to_search_choices[base_search_choice] = []
normalized_base = base_search_choice.get_normalized_layer()
if (normalized_base != base_search_choice):
assert (_normalized_to_layer.setdefault(normalized_base, base_search_choice) == base_search_choice)
assert (sources is None)
sources = base_search_choice.get_dep_layers()
assert (sources is not None)
layers = []
for src_ in sources:
src_choice_layers = self._get_all_search_choices(src=src_, _layer_to_search_choices=_layer_to_search_choices, _normalized_to_layer=_normalized_to_layer)
for layer in src_choice_layers:
if (base_search_choice and (layer not in _layer_to_search_choices[base_search_choice])):
_layer_to_search_choices[base_search_choice].append(layer)
if (layer not in layers):
layers.append(layer)
if (not layers):
if (self.parent_layer and (not self.is_inside_rec_layer())):
return self.parent_layer.network._get_all_search_choices(sources=self.parent_layer.get_dep_layers())
return []
if (base_search_choice is not None):
normalized_base = base_search_choice.get_normalized_layer()
if (normalized_base != base_search_choice):
normalized_choices = self._get_all_search_choices(base_search_choice=normalized_base, _layer_to_search_choices=_layer_to_search_choices, _normalized_to_layer=_normalized_to_layer)
if (normalized_choices == layers):
return layers
if any([(layer.get_normalized_layer() == layer) for layer in normalized_choices]):
normalized_choices = [layer for layer in normalized_choices if (layer.get_normalized_layer() == layer)]
from pprint import pformat
assert all([(layer in _normalized_to_layer) for layer in normalized_choices]), '\n'.join(['No cur -> prev mapping for some layers.', '', ('Base: %s' % base_search_choice), '', ('Cur (normalized) base: %s' % normalized_base), '', 'Prev choices:', pformat(layers), '', 'Cur (normalized) choices:', pformat(normalized_choices), '', 'Mapping:', pformat(_normalized_to_layer), ''])
layers = [_normalized_to_layer[layer] for layer in normalized_choices]
_layer_to_search_choices[base_search_choice] = layers
return layers
def debug_search_choices(self, base_search_choice):
'\n :param LayerBase base_search_choice:\n :return: nothing, by intention, such that constructs like `assert ..., debug_search_choices(...) or (...)` work\n '
print('debug search choices:')
print(' base:', base_search_choice)
print(' network:')
for (_, layer) in sorted(self.layers.items()):
print(' layer:', layer)
class Visitor(dict):
'\n Wraps around `dict`, to catch any `__setitem__` calls.\n '
def __setitem__(self, key, value):
'\n :param LayerBase key:\n :param value:\n '
print((' visit: %r, search choices %r' % (key, key.search_choices)))
print((' sources: %s' % ', '.join(([('%r search choices %r' % (dep.get_absolute_name(), dep.search_choices)) for dep in key.get_dep_layers()] or ['None']))))
super(Visitor, self).__setitem__(key, value)
search_choices = self.get_search_choices(base_search_choice=base_search_choice, _layer_to_search_choices=Visitor(), debug_stream=sys.stdout)
print('-> search choices:', search_choices)
def get_data_batch_dim(self):
'\n Get the batch-dim size, i.e. amount of sequences in the current batch.\n Consider that the data tensor is usually of shape [batch, time, dim],\n this would return shape(data)[0].\n\n The code currently assumes that the batch-dim can be taken from the extern data.\n If it does not have that available for some reason (e.g. some subnetwork),\n it will try some alternative sources and assumes that they have the correct batch-dim.\n\n Note that the batch-dim usually stays always the same across the whole network\n and also every individual batch sequence will stay related.\n One notable exception of this is the choice layer, where the\n batch-dim will get expanded by the beam search if search is used,\n as well as in all following layers, until there is a decide layer.\n\n :return: int scalar tensor which states the batch-dim\n :rtype: int|tf.Tensor\n '
return self.get_global_batch_info().dim
def get_global_batch_info(self):
'\n :return: global batch info from root network from extern data\n :rtype: returnn.tf.util.data.BatchInfo\n '
root = self.get_root_network()
if root.extern_data.get_batch_info(allow_none=True):
return root.extern_data.get_batch_info()
for layer in LayerBase.get_global_layer_list():
if layer.output.batch:
return layer.output.batch.get_global_base()
raise Exception(('%s: Cannot get global batch info' % root))
def set_rec_step_info(self, i, prev_end_flag=None, prev_end_layer=None, seq_lens=None):
'\n Used by _SubnetworkRecCell.\n\n :param tf.Tensor i: scalar, int32, current step (time)\n :param tf.Tensor|None prev_end_flag: (batch,), bool, says that the current sequence has ended.\n This is about the last frame, not the current!\n :param LayerBase|None prev_end_layer:\n :param tf.Tensor|None seq_lens: (batch,) int32, seq lens\n '
from returnn.tf.layers.rec import RecStepInfoLayer
self.layers[':i'] = RecStepInfoLayer(name=':i', network=self, i=i, prev_end_flag=prev_end_flag, prev_end_layer=prev_end_layer, seq_lens=seq_lens)
def is_inside_rec_layer(self, inside_loop=True):
'\n :param bool inside_loop: only True if we are inside the loop of the most recent rec layer\n :return: whether we are inside a :class:`RecLayer` (with inside_loop: and not optimized out-of-the-loop).\n At template construction inside a rec layer, this is always true, but the rec layer itself does not exist yet.\n :rtype: bool\n\n Also see :func:`get_inside_rec_time_dim` and :func:`get_rec_parent_layer`.\n '
return (self.get_inside_rec_time_dim(inside_loop=inside_loop) is not None)
def get_inside_rec_time_dim(self, inside_loop=True):
'\n :param bool inside_loop: only True if we are inside the loop of the most recent rec layer\n :return: when the net is inside a rec loop (:class:`RecLayer` and not optimized out of the loop),\n this returns the dim tag the rec layer iterates over\n :rtype: Dim|None\n '
if self._inside_rec_time_dim:
return self._inside_rec_time_dim
if self._over_rec_time_dim:
if inside_loop:
return None
return self._over_rec_time_dim
if self.extra_parent_net:
return self.extra_parent_net.get_inside_rec_time_dim(inside_loop=inside_loop)
from returnn.tf.layers.rec import RecLayer
if isinstance(self.parent_layer, RecLayer):
assert (not self._is_rec_layer_inside_net())
if inside_loop:
return None
return self.parent_layer.time_dim_tag
if self.parent_net:
return self.parent_net.get_inside_rec_time_dim(inside_loop=inside_loop)
return None
def get_all_rec_time_dims(self):
'\n :return: all rec time dims, moved out or not, including all parents\n :rtype: set[Dim]\n '
coll = set()
net = self
while net:
if net._inside_rec_time_dim:
coll.add(net._inside_rec_time_dim)
if net._over_rec_time_dim:
coll.add(net._over_rec_time_dim)
if net._over_rec_time_dim_subs:
coll.update(net._over_rec_time_dim_subs)
net = net.parent_net
return coll
def _is_rec_layer_inside_net(self):
'\n :rtype: bool\n '
if self.extra_parent_net:
return self.extra_parent_net._is_rec_layer_inside_net()
from returnn.tf.layers.rec import RecLayer
from returnn.tf.layers.rec import _SubnetworkRecCell
assert isinstance(self.parent_layer, RecLayer)
assert isinstance(self.parent_layer.cell, _SubnetworkRecCell)
return (self is self.parent_layer.cell.net)
def get_rec_parent_layer(self, inside_loop=True):
'\n :param bool inside_loop: only return if the network is constructed within the loop (not moved out)\n of the most recent parent rec layer\n :return: if we are a subnet of a :class:`RecLayer`, will return the RecLayer instance.\n At template construction time, this is always None.\n :rtype: returnn.tf.layers.rec.RecLayer|None\n '
if self.extra_parent_net:
return self.extra_parent_net.get_rec_parent_layer(inside_loop=inside_loop)
from returnn.tf.layers.rec import RecLayer
if isinstance(self.parent_layer, RecLayer):
if inside_loop:
return (self.parent_layer if self._is_rec_layer_inside_net() else None)
return self.parent_layer
if self._inside_rec_time_dim:
return None
if self.parent_net:
return self.parent_net.get_rec_parent_layer(inside_loop=inside_loop)
return None
def have_rec_step_info(self):
'\n :rtype: bool\n '
return (self.get_rec_step_info(must_exist=False) is not None)
def get_rec_step_info(self, must_exist=True):
'\n :param bool must_exist: if True, will throw exception if not available\n :rtype: returnn.tf.layers.rec.RecStepInfoLayer|None\n '
from returnn.tf.layers.rec import RecStepInfoLayer, _SubnetworkRecCell
if ((':i' in self.layers) and isinstance(self.layers[':i'], RecStepInfoLayer)):
return self.layers[':i']
rec_layer = self.get_rec_parent_layer()
if ((not rec_layer) or (len(rec_layer.cell.layers_in_loop) == 0)):
assert (not must_exist), ('%s: We expect to be the subnet of a RecLayer, but we are not.' % self)
return None
assert isinstance(rec_layer.cell, _SubnetworkRecCell)
step_info_layer = rec_layer.cell.net.layers[':i']
assert isinstance(step_info_layer, RecStepInfoLayer)
return step_info_layer
def get_rec_step_index(self):
'\n Assumes that have_rec_step_info is True.\n\n :rtype: tf.Tensor\n :return: scalar, int32\n '
return self.get_rec_step_info().step
def get_config(self, consider_global_config=True, fallback_dummy_config=True):
'\n :param bool consider_global_config: if no config is set, check for global config\n :param bool fallback_dummy_config: if no config, return a new empty Config, otherwise return None\n :rtype: returnn.config.Config|None\n '
from returnn.config import Config, get_global_config
if self._config:
return self._config
if self.parent_net:
return self.parent_net.get_config(consider_global_config=consider_global_config, fallback_dummy_config=fallback_dummy_config)
if self.extra_parent_net:
return self.extra_parent_net.get_config(consider_global_config=consider_global_config, fallback_dummy_config=fallback_dummy_config)
if consider_global_config:
config = get_global_config(raise_exception=False)
if config:
return config
if fallback_dummy_config:
return Config()
return None
@staticmethod
def register_post_control_dependencies(deps):
'\n Will register the control dependencies globally for a session run on this network.\n This can e.g. be called inside `self.post_init`.\n We use UPDATE_OPS, as that is also e.g. used by batchnorm. See:\n https://github.com/tensorflow/tensorflow/issues/1122\n\n :param list[tf.Tensor|tf.Operation] deps:\n :return: nothing\n '
ls = tf_compat.v1.get_collection_ref(tf_compat.v1.GraphKeys.UPDATE_OPS)
assert isinstance(ls, list)
ls.extend(deps)
@staticmethod
def get_post_control_dependencies():
'\n :rtype: list[tf.Operation]\n '
return tf_compat.v1.get_collection(tf_compat.v1.GraphKeys.UPDATE_OPS)
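# Hedged usage sketch (assumes an existing tf.Variable `avg_var` and tf.Tensor `value`):
# register an update op (e.g. a moving average) that should run together with every session step,
# analogous to how batch norm registers its update ops via UPDATE_OPS.
update_op = tf_compat.v1.assign_add(avg_var, 0.01 * (value - avg_var))
TFNetwork.register_post_control_dependencies([update_op])
assert update_op in TFNetwork.get_post_control_dependencies()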
def register_graph_reset_callback(self, cb):
"\n Note: These callbacks are not called automatically.\n You explicitly have to call :func:`call_graph_reset_callbacks`.\n\n Note: We don't store this in the graph itself (e.g. via tf.get_collection),\n as we don't want to serialize this\n (which would also lead to an error, because it cannot be serialized).\n\n Note: Currently these callbacks might get called multiple times,\n so make sure that this is not a problem.\n Also make sure that the network/session is still in a valid state after this has been called,\n e.g. such that further session runs would still work correctly.\n\n Note: These callbacks will only be called if there was not any error.\n\n :param function|()->None cb:\n "
self.get_root_network()._graph_reset_callbacks.append(cb)
def get_graph_reset_callbacks(self):
'\n :rtype: list[()->None]\n '
return self.get_root_network()._graph_reset_callbacks
def call_graph_reset_callbacks(self):
'\n Calls any callbacks registered via :func:`register_graph_reset_callback`.\n '
for cb in self.get_graph_reset_callbacks():
cb()
def set_run_opts(self, epoch, dataset_name):
'\n The run options are valid during one loop over some dataset.\n\n Contrary to epoch_step, train_flag, etc, we do not provide these as TF placeholders,\n for convenience, because it is not needed right now.\n If it is needed, it probably is easier to introduce auxiliary TF variables (on CPU) instead\n and just set them once here.\n\n :param int epoch:\n :param str|None dataset_name:\n '
root_net = self.get_root_network()
root_net._run_opts.update(dict(epoch=epoch, dataset_name=dataset_name))
def get_run_opts(self):
'\n :rtype: dict[str]\n '
opts = self.get_root_network()._run_opts
assert opts, 'set_run_opts not called?'
return opts.copy()
def register_run_finished_callback(self, cb):
'\n :param function|()->None cb:\n '
self.get_root_network()._run_finished_callbacks.append(cb)
def set_run_finished(self, error_occurred=False):
'\n Maybe calls any callbacks registered via :func:`register_run_finished_callback`\n (if no error occurred)\n and cleans up the run opts.\n\n :param bool error_occurred:\n '
root_net = self.get_root_network()
if (not error_occurred):
for cb in root_net._run_finished_callbacks:
cb()
root_net._run_finished_callbacks[:] = []
root_net._run_opts.clear()
@classmethod
def get_network_stack(cls):
'\n :rtype: list[TFNetwork]\n '
from returnn.tf.util.basic import CollectionKeys
coll = tf_compat.v1.get_collection_ref(CollectionKeys.RETURNN_NET_STACK)
assert isinstance(coll, list)
return coll
@classmethod
def get_current_network(cls, must_exist=True):
'\n :param bool must_exist:\n :rtype: TFNetwork|None\n '
coll = cls.get_network_stack()
if must_exist:
assert coll
elif (not coll):
return None
return coll[(- 1)]
@contextlib.contextmanager
def register_network_scope(self):
'\n Registers a ref to this network inside the current TF computation graph.\n '
coll = self.get_network_stack()
coll.append(self)
try:
(yield)
finally:
assert (coll[(- 1)] is self)
coll.pop((- 1))
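# Hedged usage sketch (assumes a constructed TFNetwork `net`): while inside the scope,
# code elsewhere can discover the network via the class-level stack; afterwards it is popped again.
with net.register_network_scope():
  assert TFNetwork.get_current_network() is net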
def get_search_choices_from_beam(self, beam):
'\n Currently we have somewhat redundant information in\n :class:`returnn.tf.util.data.SearchBeam`\n (which is totally independent from other things in RETURNN (which is good))\n and\n :class:`returnn.tf.layers.base.SearchChoices` (which is more dependent on the RETURNN layers,\n and has some more info).\n The :class:`Data` (which is also independent from other things in RETURNN (which is also good))\n only knows about :class:`returnn.tf.util.data.SearchBeam`\n but not about :class:`returnn.tf.layers.base.SearchChoices`.\n Thus there are situations where we only have a ref to the former, but like to get a ref to the latter.\n\n Note that this might (hopefully) get cleaned up at some point...\n\n :param returnn.tf.util.data.SearchBeam beam:\n :rtype: returnn.tf.layers.base.SearchChoices|None\n '
root_net = self.get_root_network()
if (root_net is not self):
return root_net.get_search_choices_from_beam(beam)
return self._map_search_beam_to_search_choices.get(beam, None)
def register_search_choices_for_beam(self, beam, search_choices):
'\n :param returnn.tf.util.data.SearchBeam beam:\n :param returnn.tf.layers.base.SearchChoices search_choices:\n '
root_net = self.get_root_network()
if (root_net is not self):
return root_net.register_search_choices_for_beam(beam, search_choices)
self._map_search_beam_to_search_choices[beam] = search_choices
|
class Subnetwork():
"\n Represents a subnetwork.\n\n Despite the different namespace, optionally some variable sharing,\n and optionally some custom input data,\n layers behave just as in the root network,\n with the same dependency resolution (both ways).\n I.e. a layer outside can depend only on a single sub layer\n and not the whole subnetwork\n (in contrast to :func:`LayerBase.get_sub_layer`).\n\n This is usually used with :class:`SubnetworkLayer`,\n via :func:`LayerBase:cls_get_sub_network`.\n\n This works for custom calls on :func:`TFNetwork.construct_layer`\n with custom ``get_layer`` or ``add_layer``\n e.g. in template construction from the :class:`RecLayer` subnetwork\n and doesn't require extra logic for this.\n\n This has also a mode to start its own template construction,\n for the case this layer is embedded in another layer\n (e.g. :class:`CondLayer` or :class:`MaskedComputationLayer`,\n in contrast to :class:`SubnetworkLayer`).\n This is triggered by a special type of extra parent network\n with ``extra_only_template`` set.\n This implies that the parent (non-extra) network\n can not directly access the sub network,\n which is important for the template construction here\n (see :func:`_construct_template_subnet`).\n\n A special extra parent can also have the ``extra_boundary`` flag set,\n which triggers that we have our own construction code\n (but not using templates, but constructing the real layers).\n This is used also for the embedded case (e.g. :class:`MaskedComputationLayer`).\n This is needed when the parent (non-extra) network\n cannot directly access this sub network.\n "
def __init__(self, parent_net, name, opts=None):
'\n :param TFNetwork parent_net:\n :param str name:\n :param dict[str]|None opts:\n '
from .layers.basic import InternalLayer
self.parent_net = parent_net
self.name = name
if parent_net.extra_name_prefix:
self.name_in_parent = ('%s:%s' % (parent_net.extra_name_prefix, name))
else:
self.name_in_parent = name
template = (parent_net.extra_parent_net and parent_net.extra_only_template)
self.template = template
self.parent_cannot_access = (template or (parent_net.extra_parent_net and parent_net.is_root_in_ctx))
subnet_layer_dict = opts.copy()
self._net_dict = subnet_layer_dict.pop('subnetwork')
from_arg = subnet_layer_dict.pop('from', subnet_layer_dict.pop('_from', 'data'))
self._from_arg = (list(from_arg) if isinstance(from_arg, (list, tuple)) else [from_arg])
self._concat_sources = subnet_layer_dict.pop('concat_sources', True)
self._dropout = subnet_layer_dict.pop('dropout', 0)
self._dropout_noise_shape = subnet_layer_dict.pop('dropout_noise_shape', None)
subnet_layer_dict.pop('class', None)
subnet_layer_dict.pop('_network', None)
subnet_layer_dict.pop('_name', None)
subnet_layer_dict.pop('loss', None)
subnet_layer_dict.pop('loss_scale', None)
subnet_layer_dict.pop('loss_opts', None)
subnet_layer_dict.pop('rec_previous_layer', None)
subnet_layer_dict.pop('load_on_init', None)
subnet_layer_dict['name'] = name
subnet_layer_dict['network'] = parent_net
subnet_layer_dict['output'] = Data(name='dummy_output', shape=())
subnet_layer_dict.pop('sources', None)
subnet_layer_dict.pop('n_out', None)
subnet_layer_dict.pop('out_type', None)
subnet_layer_dict.pop('out_shape', None)
self.layer = InternalLayer(**subnet_layer_dict)
self.layer.post_init(subnet_layer_dict)
self.net = TFNetwork(name=('%s/%s' % (parent_net.name, name)), extern_data=ExternData(), train_flag=parent_net.train_flag, search_flag=parent_net.search_flag, parent_layer=self.layer, rnd_seed=(0 if template else None))
self.net.layers_desc.update(self._net_dict)
for (key, value) in self.parent_net.extern_data.data.items():
if (key != 'data'):
self.net.extern_data.data[key] = value
def __repr__(self):
return ('%s{%s}' % (self.__class__.__name__, self.net.name))
def get_data(self, name, get_layer):
'\n :param str name:\n :param GetLayer get_layer:\n :rtype: (GetLayer,str)\n '
layer_name = ('data:%s' % name)
assert self._from_arg, ('%s: set_sources_args not called? or no source but asked for %r' % (self, name))
def base_get_layer(name_):
'\n :param str name_:\n :rtype: (GetLayer,str)\n '
return (get_layer, ('base:' + name_))
if self._concat_sources:
if (name == 'data'):
if ((len(self._from_arg) == 1) and (not self._dropout)):
return base_get_layer(self._from_arg[0])
self._net_dict.update({layer_name: {'class': 'copy', 'from': [('base:%s' % arg) for arg in self._from_arg], 'dropout': self._dropout, 'dropout_noise_shape': self._dropout_noise_shape}})
return (get_layer, layer_name)
else:
return base_get_layer(('data:%s' % name))
for (i, arg) in enumerate(self._from_arg):
if ((name == arg) or (name == str(i)) or ((name == 'data') and (i == 0))):
if (not self._dropout):
return base_get_layer(arg)
self._net_dict.update({layer_name: {'class': 'copy', 'from': ('base:%s' % arg), 'dropout': self._dropout, 'dropout_noise_shape': self._dropout_noise_shape}})
return (get_layer, layer_name)
return base_get_layer(('data:%s' % name))
def get_sub_layer_func(self, base_get_layer):
'\n :param GetLayer|((str)->LayerBase)|None base_get_layer:\n :rtype: GetLayer\n '
return GetLayer(self.net, net_dict=self._net_dict, subnetwork=self, parent_get_layer=base_get_layer)
def construct_layer(self, name, parent_get_layer=None):
'\n With default parent_get_layer,\n this will not trigger recursive constructions in the parent net,\n but any recursive construction in this subnet.\n\n :param str name:\n :param GetLayer|((str)->LayerBase)|None parent_get_layer:\n :rtype: LayerBase\n '
return self.get_sub_layer_func(parent_get_layer)(name)
def construct_all(self, parent_get_layer=None):
'\n Trigger the standard construction of all layers in the net dict.\n\n :param GetLayer|((str)->LayerBase)|None parent_get_layer:\n '
if self.template:
self._construct_template_subnet(get_parent_layer=parent_get_layer)
return
self.net.construct_from_dict(self._net_dict, get_layer=self.get_sub_layer_func(parent_get_layer))
def complete_construction_parent_subnet_layer(self, parent_get_layer=None):
'\n :param GetLayer|((str)->LayerBase)|None parent_get_layer:\n :rtype: returnn.tf.layers.basic.SubnetworkLayer\n '
from returnn.tf.layers.basic import SubnetworkLayer
self.construct_all(parent_get_layer=parent_get_layer)
parent_net_dict = self.parent_net.layers_desc
name = self.name_in_parent
layer = self.parent_net.construct_layer(parent_net_dict, name, get_layer=parent_get_layer)
assert isinstance(layer, SubnetworkLayer)
assert ((layer.subnetwork_ is self) and (layer.subnetwork is self.net))
layer.update_params_from_subnet()
layer.update_load_on_init()
return layer
def have_layer(self, name):
'\n :param str name:\n :rtype: bool\n '
return (name in self._net_dict)
def get_layer_desc(self, name):
'\n :param str name:\n :rtype: dict[str]\n '
return self._net_dict[name]
def get_layer_class(self, name):
'\n :param str name:\n :rtype: type[LayerBase]\n '
layer_desc = self.get_layer_desc(name)
class_name = layer_desc['class']
return get_layer_class(class_name)
def _construct_template_subnet(self, get_parent_layer=None):
'\n Very similar to _SubnetworkRecCell._construct_template, but simpler.\n\n :param GetLayer|((str)->LayerBase)|None get_parent_layer:\n :rtype: Subnetwork\n '
from pprint import pformat
from returnn.tf.layers.rec import _TemplateLayer
subnet = self.net
net_dict = self._net_dict
assert ('output' in net_dict), ("%s: 'output' layer missing in %s" % (self, pformat(net_dict)))
if ('output' in subnet.layers):
return
def add_templated_layer(name, layer_class, **layer_desc):
'\n :param str name:\n :param type[LayerBase]|LayerBase layer_class:\n :param layer_desc:\n :rtype: LayerBase\n '
layer_ = _TemplateLayer(name=name, network=subnet)
subnet.layers[name] = layer_
layer_desc = layer_desc.copy()
layer_desc['name'] = name
layer_desc['network'] = subnet
if ('output' not in layer_desc):
layer_desc['output'] = layer_class.get_out_data_from_opts(**layer_desc)
layer_desc['output'] = layer_class.fixup_out_data(**layer_desc)
layer_.init(layer_class=layer_class, **layer_desc)
if layer_class.recurrent:
subnet.recurrent = True
from tensorflow.python.util import nest
layers_flat = [v for v in nest.flatten(layer_desc) if isinstance(v, LayerBase)]
for dep_layer in layers_flat:
layer_.add_dependency(dep_layer, is_prev_time_frame=False)
return layer_
assert self.parent_cannot_access
get_templated_layer = GetLayer(subnet, subnetwork=self, parent_get_layer=get_parent_layer, add_layer_func=add_templated_layer)
try:
get_templated_layer('output')
assert ('output' in subnet.layers)
for (layer_name, layer) in net_dict.items():
if (subnet.eval_flag and layer.get('loss')):
get_templated_layer(layer_name)
for (layer_name, layer) in net_dict.items():
if layer.get('is_output_layer'):
get_templated_layer(layer_name)
except Exception as exc:
import sys
(etype, value, tb) = sys.exc_info()
from returnn.util import better_exchook
from io import StringIO
ss = StringIO()
print(('%s: Exception constructing template network (for deps and data shapes): %s %s' % (self, type(exc).__name__, exc)), file=ss)
print('Template network so far:', file=ss)
from pprint import pprint
pprint(subnet.layers, stream=ss)
better_exchook.better_exchook(etype, value, tb, file=ss)
new_exc = Exception(ss.getvalue())
new_exc.__traceback__ = tb
raise new_exc
|
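A minimal net-dict sketch of the kind of construct :class:`Subnetwork` handles (layer names and dimensions below are made up for illustration): inside the subnetwork, "data" refers to its "from" sources, while a "base:" prefix escapes back to the parent network.
network = {
  'enc': {'class': 'linear', 'activation': 'relu', 'n_out': 32, 'from': 'data'},
  'joint': {
    'class': 'subnetwork', 'from': 'enc',
    'subnetwork': {
      'hidden': {'class': 'linear', 'activation': 'tanh', 'n_out': 16, 'from': 'data'},
      'output': {'class': 'linear', 'activation': None, 'n_out': 8, 'from': ['hidden', 'base:enc']},
    },
  },
  'output': {'class': 'softmax', 'loss': 'ce', 'from': 'joint'},
}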
class TFNetworkParamsSerialized(object):
'\n Holds all the params as numpy arrays, including auxiliary params.\n '
def __init__(self, values_dict, global_train_step):
'\n :param dict[str,dict[str,numpy.ndarray]] values_dict: dict: layer_name -> param_name -> variable numpy array\n :param int global_train_step:\n '
self.values_dict = values_dict
self.global_train_step = global_train_step
|
class GetLayer():
'\n Helper object which represents the get_layer function which also triggers layer construction.\n This is implemented to better handle subnetworks and to avoid a deep stack of get_layer functions.\n Instead of defining another wrapped get_layer function,\n any subnetwork can instead create a new instance of this object.\n https://github.com/rwth-i6/returnn/issues/993\n '
def __init__(self, network, net_dict=None, subnetwork=None, add_layer_func=None, parent_get_layer=None):
'\n :param TFNetwork network:\n :param dict[str]|None net_dict:\n :param Subnetwork|None subnetwork:\n :param ((str,LayerBase,dict)->LayerBase)|None add_layer_func: by default TFNetwork.add_layer\n :param GetLayer|((str)->LayerBase)|None parent_get_layer:\n '
self.network = network
if (net_dict is None):
net_dict = network.layers_desc
self._net_dict = net_dict
if subnetwork:
assert (subnetwork.net is network)
self.subnetwork = subnetwork
self._add_layer_func = add_layer_func
self._parent_get_layer = parent_get_layer
def __repr__(self):
args = [repr(self.network.name)]
if self._add_layer_func:
args.append(('add_layer=%s' % self._add_layer_func))
if self._parent_get_layer:
args.append(('parent_get_layer=%s' % self._parent_get_layer))
return ('<GetLayer %s>' % ' '.join(args))
def copy(self):
'\n :rtype: GetLayer\n '
return GetLayer(network=self.network, net_dict=self._net_dict, subnetwork=self.subnetwork, add_layer_func=self._add_layer_func, parent_get_layer=self._parent_get_layer)
def _get_parent_get_layer(self):
'\n This assumes that it exists and is a GetLayer.\n\n :rtype: GetLayer\n '
assert self.network.parent_net
if self._parent_get_layer:
return self._parent_get_layer
else:
return GetLayer(self.network.parent_net)
def _transform_base_get_layer(self, name):
if ((not self.network.parent_net) and (not self._parent_get_layer)):
raise LayerNotFound(('Base layer %r not found in network %r.' % (name, self.network)), layer_name=name, network=self.network, net_dict=self._net_dict)
if name.startswith('base:'):
name = name[len('base:'):]
else:
assert (self.subnetwork and (not self.subnetwork.parent_cannot_access))
name = ((self.subnetwork.name_in_parent + '/') + name)
get_layer = self._get_parent_get_layer()
return (get_layer, name)
def __call__(self, layer_name):
'\n :param str layer_name:\n :rtype: LayerBase\n '
get_layer = self
name = layer_name
is_prev = False
while True:
assert isinstance(get_layer, GetLayer)
assert isinstance(name, str)
name_ = (('prev:' + name) if is_prev else name)
if (name_ in get_layer.network.layers):
return get_layer.network.layers[name_]
if name.startswith('base:'):
(get_layer, name) = get_layer._transform_base_get_layer(name)
if (not isinstance(get_layer, GetLayer)):
name = (('prev:' + name) if is_prev else name)
assert callable(get_layer)
return get_layer(name)
continue
if get_layer.subnetwork:
if (((name == 'data') or name.startswith('data:')) and (name_ not in get_layer._net_dict)):
if (name == 'data'):
name = 'data:data'
continue
(get_layer, name) = get_layer.subnetwork.get_data(name=name[len('data:'):], get_layer=get_layer)
continue
if name.startswith('prev:'):
if is_prev:
raise Exception(("Multiple 'prev:' prefixes are not allowed. %r %r" % (self, layer_name)))
is_prev = True
name = name[len('prev:'):]
if (get_layer.subnetwork and (not get_layer.subnetwork.parent_cannot_access)):
(get_layer, name) = get_layer._transform_base_get_layer(name)
if (not isinstance(get_layer, GetLayer)):
name = (('prev:' + name) if is_prev else name)
assert callable(get_layer)
return get_layer(name)
continue
break
name = (('prev:' + name) if is_prev else name)
return get_layer.network.construct_layer(net_dict=get_layer._net_dict, name=name, get_layer=get_layer, add_layer=get_layer._add_layer_func)
|
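A rough usage sketch of :class:`GetLayer` (assuming a constructed TFNetwork `net` whose `layers_desc` contains a layer named "encoder"): layers are constructed on demand on the first call and returned directly afterwards; "base:" names are delegated to the parent network's GetLayer, and "prev:" names to the previous-frame layer inside a rec loop.
get_layer = GetLayer(net)        # uses net.layers_desc as the net dict
encoder = get_layer('encoder')   # constructs 'encoder' (and its dependencies) now
assert get_layer('encoder') is encoder  # second call: already in net.layers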
class LossHolder():
'\n This object just keeps a reference to the loss/error value,\n and does the necessary logic to collect it, and also the normalization logic.\n Every new computation (nodes in the computation graph) must be constructed on demand,\n to allow first to collect all possible losses without calculating them,\n and then calculating them in the right context (e.g. inside a while_loop, or so).\n '
def __init__(self, name, loss, layer_output, reduce_func=None, layer=None, loss_value=None, error_value=None, norm_factor=None, only_on_eval=None, network=None):
'\n After construction, you should call init() before usage, in case you do not provide `layer` here.\n\n :param str name: The name uniquely identifies the loss. Earlier, this was the same as the layer name.\n This is still true for simple cases,\n but for losses coming from a subnetwork or other extended losses,\n it can be something else.\n It could look like "output", or "output/sublayer".\n :param LayerBase layer:\n We can always point to a layer where this comes from (either in the subnet, or the parent layer).\n :param Data layer_output: template describing the layer output\n :param TFNetwork network: for which network to create this LossHolder. might be different from layer.network\n :param returnn.tf.layers.base.Loss loss:\n :param ((tf.Tensor)->tf.Tensor)|None reduce_func: if given, will overwrite the reduce func for the loss.\n By default, every loss_value and error_value is a scalar\n (sum or average over the batches, and over the frames for frame-wise losses).\n However, if you provide reduce_func = TFUtil.identity, you can get the unreduced tensor.\n :param tf.Tensor|None loss_value:\n :param tf.Tensor|None error_value:\n :param tf.Tensor norm_factor:\n :param bool only_on_eval:\n '
if (layer and (not network)):
network = layer.network
if (layer and (only_on_eval is None)):
only_on_eval = layer.only_on_eval
assert (name and loss and network)
if layer:
assert isinstance(layer, LayerBase)
if reduce_func:
loss.reduce_func = reduce_func
(_, prefix) = network.get_root_ctx_network()
self.name = (prefix + name)
self.loss = loss
self.layer_output = layer_output
self.reduce_func = reduce_func
self._network = network
self._layer = layer
self._is_prepared = False
self._loss_value = loss_value
self._loss_value_for_fetch = None
self._loss_value_for_objective = None
self._error_value = error_value
self._norm_factor = norm_factor
self._only_on_eval = only_on_eval
def __repr__(self):
return ('<LossHolder name=%r loss=%r>' % (self.name, self.loss))
def init(self, layer):
'\n It will just set the layer.\n The `LossHolder` is initialized if the layer is set.\n\n :param LayerBase layer:\n :return: self\n :rtype: LossHolder\n '
self._layer = layer
if (self._only_on_eval is None):
self._only_on_eval = layer.only_on_eval
if (self._network is None):
self._network = layer.network
return self
def get_layer(self):
'\n :return: layer. assumes that it is set\n :rtype: LayerBase\n '
assert self._layer, 'call init()'
return self._layer
def get_only_on_eval(self):
'\n :return: only_on_eval flag. assumes that it is set\n :rtype: bool\n '
assert (self._only_on_eval is not None), 'call init()'
return self._only_on_eval
def get_tf_name(self):
'\n :return: name which can be used for a TF op, thus contains no "/" or other special chars\n :rtype: str\n '
return LayerBase.cls_get_tf_scope_name(self.name.replace('/', '_'))
def get_loss_value(self):
'\n :return: loss value. scalar\n :rtype: tf.Tensor|None\n '
self._prepare()
return self._loss_value
def get_loss_value_for_fetch(self):
'\n :return: loss value for fetch. scalar. same as loss_value, but maybe with additional checks\n :rtype: tf.Tensor|None\n '
self._prepare()
return self._loss_value_for_fetch
def get_loss_value_for_objective(self):
'\n :return: loss value for objective. scalar. might be scaled (scale) and/or normalized (use_normalized_loss)\n :rtype: tf.Tensor|None\n '
self._prepare()
return self._loss_value_for_objective
def get_error_value(self):
'\n :return: error value for fetch. scalar\n :rtype: tf.Tensor|None\n '
self._prepare()
return self._error_value
def get_norm_factor(self):
'\n :return: norm factor for loss and error. scalar\n :rtype: tf.Tensor\n '
self._prepare()
return self._norm_factor
def _normalized_value_per_seq(self, value):
'\n :param tf.Tensor|None value: (batch*time,) or (time*batch,)\n :return: (batch,) or None if value is None\n :rtype: tf.Tensor|None\n '
if (value is None):
return None
return self.loss.reduce_to_batch(value, normalize=True)
def get_normalized_loss_value_per_seq(self):
'\n :return: (batch,) or None if loss is None\n :rtype: tf.Tensor|None\n '
self._prepare()
return self._normalized_value_per_seq(self._loss_value)
def get_normalized_error_value_per_seq(self):
'\n :return: (batch,) or None if error is None\n :rtype: tf.Tensor|None\n '
self._prepare()
return self._normalized_value_per_seq(self._error_value)
def _value_per_pos(self, value):
'\n :param tf.Tensor|None value: (batch*time,) or (time*batch,)\n :return: (batch,time) or None if value is None\n :rtype: tf.Tensor|None\n '
if (value is None):
return None
value = tf.reshape(value, tf.shape(self.loss.output.placeholder)[:2])
if (self.loss.output.time_dim_axis == 0):
value = tf_util.swapaxes(value, 0, 1)
return value
def get_loss_value_per_pos(self):
'\n :return: (batch,time) or None if loss is None\n :rtype: tf.Tensor|None\n '
self._prepare()
return self._value_per_pos(self._loss_value)
def get_error_value_per_pos(self):
'\n :return: (batch,time) or None if error is None\n :rtype: tf.Tensor|None\n '
self._prepare()
return self._value_per_pos(self._error_value)
def _tf_summary(self):
'\n This gets called inside a loss name scope of the layer.\n\n :return: nothing, will use tf.summary\n '
if self._network.parent_net:
return
if tf_util.has_current_control_flow_context():
return
name = self.get_tf_name()
if (self._loss_value is not None):
if (self._loss_value.get_shape().ndims == 0):
tf_compat.v1.summary.scalar(('loss_%s' % name), (self._loss_value * self._norm_factor))
if self._network.get_config().bool('calculate_exp_loss', False):
tf_compat.v1.summary.scalar(('exp_loss_%s' % name), tf.exp((self._loss_value * self._norm_factor)))
if self._network.get_config().bool('debug_unnormalized_loss_summaries', False):
tf_compat.v1.summary.scalar(('unnormalized_loss_%s' % name), self._loss_value)
if (self._network.get_config().bool('debug_objective_loss_summaries', False) and (self._loss_value_for_objective is not None)):
tf_compat.v1.summary.scalar(('objective_loss_%s' % name), self._loss_value_for_objective)
if (self._error_value is not None):
if (self._error_value.get_shape().ndims == 0):
tf_compat.v1.summary.scalar(('error_%s' % name), (self._error_value * self._norm_factor))
def _prepare(self):
'\n This gets called inside a loss name scope of the layer.\n\n :return: nothing, will prepare\n '
if self._is_prepared:
return
assert self._layer, 'call init()'
self.loss.init_by_layer(layer=self._layer, layer_output_template=self.layer_output)
if ((self._loss_value is None) and (self._error_value is None)):
with reuse_name_scope('loss'):
if self._only_on_eval:
self._loss_value = self._layer._cond_only_on_eval_opt(self.loss.get_value, default_value=0.0)
else:
self._loss_value = self.loss.get_value()
with reuse_name_scope('error'):
if self._only_on_eval:
self._error_value = self._layer._cond_only_on_eval_opt(self.loss.get_error, default_value=0.0)
else:
self._error_value = self.loss.get_error()
assert ((self._loss_value is not None) or (self._error_value is not None)), ('layer %r loss %r return None for loss and error' % (self._layer, self.loss))
if (self._norm_factor is None):
self._norm_factor = self.loss.get_normalization_factor()
loss_value = self._loss_value
if (loss_value is not None):
if self._network.get_config().bool('debug_add_check_numerics_on_output', False):
print(('debug_add_check_numerics_on_output: add for layer loss %r: %r' % (self._layer.name, self._layer.output.placeholder)))
from returnn.tf.util.basic import identity_with_check_numerics
loss_value = identity_with_check_numerics(loss_value, name=('%s_identity_with_check_numerics_loss' % self.get_tf_name()))
self._loss_value_for_fetch = loss_value
if ((self.loss.scale != 1) and (loss_value is not None)):
if (not self.loss.scale):
loss_value = None
else:
loss_value *= self.loss.scale
if (self.loss.use_normalized_loss and (loss_value is not None)):
loss_value *= self._norm_factor
self._loss_value_for_objective = loss_value
self._tf_summary()
self._is_prepared = True
def copy_new_base(self, name=None, layer=None, network=None, reduce_func=None):
'\n :param LayerBase layer:\n :param TFNetwork network:\n :param str name:\n :param ((tf.Tensor)->tf.Tensor)|None reduce_func:\n :return: new copy of LossHolder\n :rtype: LossHolder\n '
if (not layer):
layer = self._layer
if (not network):
network = self._network
if (not name):
name = self.name
loss_value = self._loss_value
error_value = self._error_value
if (reduce_func is None):
reduce_func = self.loss.reduce_func
if (reduce_func and (reduce_func != self.loss.reduce_func)):
loss_value = None
error_value = None
return LossHolder(name=name, layer=layer, layer_output=self.layer_output, network=network, loss=self.loss, reduce_func=reduce_func, loss_value=loss_value, error_value=error_value, norm_factor=self._norm_factor, only_on_eval=self._only_on_eval)
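# Hedged usage sketch (assumes `loss` is a returnn.tf.layers.base.Loss, `layer` a LayerBase and
# `net` a TFNetwork): nothing is computed at construction time; the TF ops for loss/error are
# only created on the first get_*() call, in whatever graph context is active then.
holder = LossHolder(name=layer.name, loss=loss, layer_output=layer.output, network=net).init(layer)
loss_t = holder.get_loss_value()   # scalar tf.Tensor or None
error_t = holder.get_error_value()
norm = holder.get_norm_factor()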
|
class NetworkLayerException(Exception):
'\n Some exception by the network, e.g. during construction.\n '
def __init__(self, message, layer_name, network, net_dict=None):
'\n :param str message:\n :param str layer_name:\n :param TFNetwork network:\n :param dict[str]|None net_dict:\n '
super(NetworkLayerException, self).__init__(message)
self.layer_name = layer_name
self.network = network
self.net_dict = (net_dict or network.layers_desc)
|
class NetworkConstructionDependencyLoopException(NetworkLayerException):
'\n This is raised when there is a dependency loop in the network construction.\n '
def __init__(self, network, layer_name, constructing_layers, net_dict):
'\n :param TFNetwork network:\n :param str layer_name:\n :param list[str] constructing_layers:\n :param dict[str,dict[str]] net_dict:\n '
msg = ('%s: Error: There is a dependency loop on layer %r.' % (network, layer_name))
msg += '\nConstruction stack (most recent first):'
for layer_name_ in reversed(constructing_layers):
msg += ('\n %s' % layer_name_)
super(NetworkConstructionDependencyLoopException, self).__init__(msg, network=network, layer_name=layer_name, net_dict=net_dict)
|
class _DelayedConstructionException(Exception):
'\n When we want to do a flat construction.\n '
def __init__(self, network, layer_name, other_kwargs):
'\n :param TFNetwork network:\n :param str layer_name:\n :param dict[str] other_kwargs:\n '
self.network = network
self.layer_name = layer_name
self.other_kwargs = other_kwargs
def __repr__(self):
return ('%s(layer_name=%r)' % (self.__class__.__name__, self.layer_name))
def delayed_construction(self):
'\n Call :func:`TFNetwork.construct_layer` again now.\n\n :rtype: LayerBase\n '
print('Delayed flat layer construction:', self.layer_name, file=log.v5)
return self.network.construct_layer(name=self.layer_name, **self.other_kwargs)
|
class LayerNotFound(NetworkLayerException):
'\n Via :func:`TFNetwork.get_layer`.\n '
|
def _help_data_or_array(value):
'\n :param numpy.ndarray|bool|object value:\n :return: (info,(min,max))\n :rtype: (str,(int|float,int|float))\n '
import numpy
if isinstance(value, numpy.ndarray):
info = ('shape %s, dtype %s' % (value.shape, value.dtype))
if (value.size > 0):
v_minmax = (numpy.min(value), numpy.max(value))
info += (', min/max %s/%s' % v_minmax)
if (value.dtype.kind == 'f'):
info += (', mean/stddev %s/%s' % (numpy.mean(value), numpy.std(value)))
if (value.ndim <= 1):
info += (', (%s)' % numpy.array2string(value))
else:
v_minmax = (0, 0)
info += ', EMPTY'
elif isinstance(value, (numpy.floating, numpy.integer, numpy.bool_, float, int, bool, str, bytes)):
v_minmax = (0, 1)
info = ('%s(%s)' % (type(value).__name__, value))
elif (value is None):
v_minmax = ((- 1), (- 1))
info = 'None'
else:
v_minmax = ((- 1), (- 1))
info = ('type %r' % type(value))
return (info, v_minmax)
|
def help_on_tf_exception(session, exception, fetches, feed_dict=None, meta_step_info=None, extern_data=None, file=sys.stdout):
'\n Generic debugging helper, on any TF exception (or even any other exception as well).\n Will try to provide as much helpful context information as possible.\n (This is not in :mod:`TFUtil` because it depends on `ExternData`, which is only defined here.)\n\n :param tf.compat.v1.Session session:\n :param tf.errors.OpError|BaseException exception:\n :param tf.Tensor|list[tf.Tensor]|dict[str,tf.Tensor]|object|None fetches:\n :param dict[tf.Tensor,numpy.ndarray]|None feed_dict:\n :param dict[str]|None meta_step_info:\n :param ExternData|None extern_data:\n :param typing.IO[str]|io.TextIOBase|io.StringIO file:\n '
from pprint import pprint, pformat
import traceback
from returnn.tf.util.basic import get_base_name, find_ops_with_tensor_input, find_ops_path_output_to_input
from tensorflow.python.util import nest
if (fetches is not None):
fetches = nest.flatten(fetches)
if isinstance(exception, tf.errors.OpError):
op = exception.op
print('Failing op:', repr(op), file=file)
assert ((op is None) or isinstance(op, tf.Operation))
show_verbose_op_inputs = True
if (op and (op.type == 'Placeholder')):
using_ops = find_ops_with_tensor_input(op.outputs[0], fetches=fetches)
print('Used by:', repr(using_ops), file=file)
for op_ in using_ops:
print(''.join(traceback.format_list(op_.traceback)), file=file)
if fetches:
input_to_output_ops = find_ops_path_output_to_input(op.outputs[0], fetches=fetches)
print('Input to output:', file=file)
pprint(input_to_output_ops, stream=file)
show_verbose_op_inputs = False
if (op and op.type.startswith('Horovod')):
show_verbose_op_inputs = False
if isinstance(exception, tf.errors.ResourceExhaustedError):
show_verbose_op_inputs = False
if (op and op.inputs and show_verbose_op_inputs):
try:
assert fetches
input_to_output_ops = find_ops_path_output_to_input(op.inputs[0], fetches=fetches)
assert input_to_output_ops, ('op.inputs[0] %r not in fetches\n%s' % (op.inputs[0], pformat(fetches)))
debug_fetch = None
for x in input_to_output_ops:
if ((not tf_util.has_control_flow_context(x)) or (x.type == 'Exit')):
debug_fetch = x
break
assert (debug_fetch is not None), ('ops: %r, fetches: %r' % (input_to_output_ops, fetches))
stop_at_ts = list((feed_dict.keys() if feed_dict else ()))
for op_ in op.graph.get_operations():
assert isinstance(op_, tf.Operation)
if tf_util.has_control_flow_context(op_):
continue
for x in ((list(op_.inputs) + list(op_.outputs)) + list(op.control_inputs)):
if isinstance(x, tf.Operation):
continue
assert isinstance(x, tf.Tensor)
if (x.dtype._is_ref_dtype and (x not in stop_at_ts)):
stop_at_ts.append(x)
tf_compat.v1.GraphKeys.VARIABLES = tf_compat.v1.GraphKeys.GLOBAL_VARIABLES
from returnn.tf.util.basic import FetchHelper
(debug_fetch, fetch_helpers, op_copied) = FetchHelper.copy_graph(debug_fetch, target_op=op, fetch_helper_tensors=list(op.inputs), stop_at_ts=stop_at_ts, verbose_stream=file)
try:
print('Execute again to debug the op inputs...', file=file)
session.run(debug_fetch, feed_dict=feed_dict)
except tf.errors.OpError as sub_exc:
if (sub_exc.op is op_copied):
pass
else:
print(('We tried to fetch the op inputs (%r) but got another exception:' % (list(op.inputs),)), file=file)
print(sub_exc, file=file)
print('Maybe we still get some values via the fetch helpers, though...', file=file)
except Exception as sub_exc:
print(('We tried to fetch the op inputs (%r) but got another exception:' % (list(op.inputs),)), file=file)
print(sub_exc, file=file)
from returnn.util import better_exchook
better_exchook.better_exchook(*sys.exc_info(), autodebugshell=False, file=file)
else:
print('Op inputs:', file=file)
for (input_t, fetch_helper) in zip(op.inputs, fetch_helpers):
(info, _) = _help_data_or_array(fetch_helper.most_recent_value)
print((' %r: %s' % (input_t, info)), file=file)
if ((op is None) and isinstance(exception, tf.errors.InvalidArgumentError) and ('Retval[0]' in exception.message)):
if (fetches is not None):
found_fetch = None
for fetch in fetches:
try:
session.run(fetch, feed_dict=feed_dict)
except Exception as exc_:
print(('Exception for fetch %s: %s: %s' % (fetch, type(exc_).__name__, exc_)), file=file)
found_fetch = fetch
break
if (found_fetch is not None):
if isinstance(found_fetch, tf.Tensor):
found_fetch = found_fetch.op
assert isinstance(found_fetch, tf.Operation)
for fetch in (list(found_fetch.control_inputs) + list(found_fetch.inputs)):
if (isinstance(fetch, tf.Tensor) and (fetch.op.type == 'ScalarSummary')):
fetch = tf_compat.v1.summary.merge([fetch])
try:
session.run(fetch, feed_dict=feed_dict)
except Exception as exc_:
print(('Exception for fetch %s: %s: %s' % (fetch, type(exc_).__name__, exc_)), file=file)
input_to_output_ops = find_ops_path_output_to_input(list(feed_dict.keys()), fetches=fetch)
print('Input to output op path:', file=file)
pprint(input_to_output_ops, stream=file)
if (not input_to_output_ops):
tf_util.print_graph_output(fetch, file=file)
break
print('Step meta information:', file=file)
pprint(meta_step_info, stream=file)
print('Feed dict:', file=file)
if isinstance(feed_dict, dict):
for (key, value) in sorted(feed_dict.items(), key=(lambda item: item[0].name)):
assert isinstance(key, tf.Tensor)
(info, v_minmax) = _help_data_or_array(value)
data = None
if key.name.startswith('extern_data/'):
data_key = get_base_name(key)
if (extern_data and (data_key in extern_data.data)):
data = extern_data.data[data_key]
info += (', %s' % data)
print((' %r: %s' % (key, info)), file=file)
if (data and data.sparse):
if ((v_minmax[0] < 0) or (v_minmax[1] >= data.dim)):
print(' WARNING, invalid label for data', data, file=file)
elif (feed_dict is None):
print('None', file=file)
else:
print(('(unexpected type %r)' % type(feed_dict)), file=file)
pprint(feed_dict, stream=file)
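def _example_run_with_tf_exception_help(session, fetches, feed_dict):
    # Illustrative sketch (not part of the original API): wrap a session.run call and dump
    # debug info via help_on_tf_exception on failure. `session`, `fetches` and `feed_dict`
    # are assumed to be provided by the caller.
    try:
        return session.run(fetches, feed_dict=feed_dict)
    except tf.errors.OpError as exc:
        help_on_tf_exception(
            session=session, exception=exc, fetches=fetches, feed_dict=feed_dict, file=sys.stderr)
        raise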
|
class CustomCheckpointLoader():
'\n This uses `tf.train.NewCheckpointReader`.\n\n It will do automatic conversions if needed, e.g. between different LSTM implementations.\n However, be careful that for some LSTM implementations, there is an additional ``forget_bias``\n option, which is an additional scalar which gets added (not to the param, but to the forget value directly).\n When we convert the parameters, this is ignored, and you must take care of that explicitly\n to make sure you get the same results.\n\n It tries to automatically resolve renames, similar to this:\n\n https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/tools/checkpoint_convert.py\n\n Also see:\n\n https://github.com/tensorflow/tensorflow/issues/11168\n https://github.com/tensorflow/tensorflow/commit/92da8abfd35b93488ed7a55308b8f589ee23b622\n https://github.com/tensorflow/tensorflow/commit/157370e5916b85c65958ed8383ae31d727228ed7\n\n '
def __init__(self, filename, saveable_params, params_prefix='', load_if_prefix='', ignore_missing=False, ignore_params=(), ignore_params_prefixes=(), var_name_mapping=None, network=None, custom_missing_load_func: Optional[CustomLoadParamFunc]=None):
'\n :param str filename: filepattern for NewCheckpointReader or .index/.meta file path\n :param list[tf.Variable|tensorflow.python.training.saver.BaseSaverBuilder.SaveableObject] saveable_params:\n :param str params_prefix: expect that all vars in saveable_params have this prefix, and remove it\n :param str load_if_prefix: if given, only load variables with a name containing this string.\n the variables in the file are expected to have the same name but without this string.\n :param bool ignore_missing: any vars in the model, which are not found in the checkpoint, will be ignored.\n however, if not a single var is found in the checkpoint, this is still an error.\n :param typing.Container[str] ignore_params: these params (by name) will not be loaded\n :param typing.Iterable[str] ignore_params_prefixes: these params (matched by name prefix) will not be loaded\n :param dict[str,str] var_name_mapping: defines a custom mapping (new_name -> name_in_checkpoint) for\n renamed vars in the checkpoint\n :param TFNetwork network:\n :param CustomLoadParamFunc|None custom_missing_load_func:\n '
self.filepattern = util.get_checkpoint_filepattern(filename)
self.network = network
self.custom_missing_load_func = custom_missing_load_func
self.ignore_missing = ignore_missing
self.params_prefix = params_prefix
self.load_if_prefix = load_if_prefix
self.var_name_mapping = (var_name_mapping or {})
self.saveable_params = []
count = 0
for param in saveable_params:
param_name = self._get_param_name(param, assert_load_if_prefix_match=False)
if (load_if_prefix and (param_name is None)):
continue
if (param_name in ignore_params):
print(('%s: Ignoring variable %s' % (self, param)), file=log.v3)
continue
if any([param_name.startswith(prefix) for prefix in ignore_params_prefixes]):
print(('%s: Ignoring variable %s' % (self, param)), file=log.v3)
continue
count += 1
if have_custom_post_init(param):
print(('%s: Not loading pre-initialized variable %s' % (self, param)), file=log.v3)
continue
self.saveable_params.append(param)
assert (count > 0), ('%s: no saveable vars' % self)
self.reader = tf_compat.v1.train.NewCheckpointReader(self.filepattern)
self.net_vars = [v for v in self.saveable_params if isinstance(v, tf.Variable)]
self.net_saveables = [v for v in self.saveable_params if (not isinstance(v, tf.Variable))]
self.var_ckpt_names = set(self.reader.get_variable_to_shape_map())
self.var_net_names = {self._get_param_name(v): v for v in self.saveable_params}
self.missing_var_names = []
self.missing_non_critical_var_names = []
for (name, v) in sorted(self.var_net_names.items()):
if (name in self.var_ckpt_names):
continue
if getattr(v, 'RETURNN_non_critical_for_restore', False):
self.missing_non_critical_var_names.append(name)
continue
self.missing_var_names.append(name)
self.obsolete_var_names = [v for v in sorted(self.var_ckpt_names) if (v not in self.var_net_names)]
self.custom_param_importers = ([self.CustomParamImporter(layer=layer, checkpoint_loader=self) for layer in network.layers.values() if layer.custom_param_importer] if network else [])
def __repr__(self):
keys = ['filename', 'params_prefix', 'load_if_prefix', 'ignore_missing', 'network']
return ('%s(%s)' % (self.__class__.__name__, ', '.join([('%s=%r' % (key, getattr(self, key, '<unset>'))) for key in keys])))
class CustomParamImporter():
'\n Helper class for custom param loading.\n '
def __init__(self, layer, checkpoint_loader):
'\n :param LayerBase layer:\n :param CustomCheckpointLoader checkpoint_loader:\n '
self.layer = layer
self.prefix_param_name = layer.get_absolute_name_scope_prefix()
self.checkpoint_param_names = []
self.var_name_mapping = checkpoint_loader.var_name_mapping
prefix = self.prefix_param_name
for name in list(checkpoint_loader.var_ckpt_names):
if name.startswith(prefix):
checkpoint_loader.var_ckpt_names.remove(name)
self.checkpoint_param_names.append(name[len(prefix):])
for name in list(checkpoint_loader.missing_var_names):
if name.startswith(prefix):
checkpoint_loader.missing_var_names.remove(name)
for name in list(checkpoint_loader.obsolete_var_names):
if name.startswith(prefix):
checkpoint_loader.obsolete_var_names.remove(name)
self.reader = checkpoint_loader.reader
self.assigned = False
def __repr__(self):
return ('<CustomParamImporter %r on layer %r>' % (self.layer.custom_param_importer, self.layer.name))
def assign_var(self, var, session):
'\n :param tf.Variable var:\n :param tf.compat.v1.Session session:\n '
if self.assigned:
return
self.assigned = True
values_dict = {name: self.reader.get_tensor(self.var_name_mapping.get(name, (self.prefix_param_name + name))) for name in self.checkpoint_param_names}
self.reader = None
print(('Custom param import of layer %r with original params %r.' % (self.layer, sorted(values_dict.keys()))), file=log.v3)
self.layer.set_param_values_by_dict(values_dict=values_dict, session=session)
def _find_custom_param_importer(self, v_name):
'\n :param str v_name:\n :rtype: CustomParamImporter|None\n '
for importer in self.custom_param_importers:
if v_name.startswith(importer.prefix_param_name):
return importer
return None
def _get_param_name(self, v, assert_load_if_prefix_match=True):
'\n :param tf.Variable|tensorflow.python.training.saver.BaseSaverBuilder.SaveableObject v:\n :param bool assert_load_if_prefix_match: only has an effect with self.load_if_prefix.\n if True, auto resolve load_if_prefix. if False and no match, return None.\n :return: var name. self.params_prefix removed if given\n :rtype: str|None\n '
if isinstance(v, tf.Variable):
v_name = v.name[:(- 2)]
else:
v_name = v.name
if self.params_prefix:
assert v_name.startswith(self.params_prefix), ('did not expect %r' % v)
v_name = v_name[len(self.params_prefix):]
if self.load_if_prefix:
if (self.load_if_prefix not in v_name):
assert (not assert_load_if_prefix_match), ('var %r not expected with load_if_prefix %r' % (v, self.load_if_prefix))
return None
v_name = v_name.replace(self.load_if_prefix, '')
return v_name
class VariableValue():
'\n Helper to assign some variable.\n '
def __init__(self, value=None, custom_param_importer=None):
'\n :param numpy.ndarray|None value:\n :param CustomCheckpointLoader.CustomParamImporter|None custom_param_importer:\n '
assert ((value is not None) or custom_param_importer)
self.value = value
self.custom_param_importer = custom_param_importer
def assign_var(self, var, session):
'\n :param tf.Variable var:\n :param tf.compat.v1.Session session:\n '
if (self.value is not None):
tf_util.VariableAssigner(var=var).assign(value=self.value, session=session)
else:
self.custom_param_importer.assign_var(var=var, session=session)
def get_variable_value_map(self):
'\n :return: var -> numpy array\n :rtype: dict[tf.Variable,CustomCheckpointLoader.VariableValue]\n '
variable_values = {}
if ((not self.missing_var_names) and (not self.custom_param_importers)):
for v in self.saveable_params:
assert isinstance(v, tf.Variable), 'not yet implemented otherwise...'
v_name = self._get_param_name(v)
if (not self.reader.has_tensor(v_name)):
if getattr(v, 'RETURNN_non_critical_for_restore', False):
continue
raise tf.errors.NotFoundError(None, None, ('var %r not found in checkpoint' % v_name))
value = self.reader.get_tensor(v_name)
variable_values[v] = self.VariableValue(value=value)
return variable_values
reader = self.reader
net_vars = self.net_vars
net_saveables = self.net_saveables
var_ckpt_names = self.var_ckpt_names
var_net_names = self.var_net_names
missing_var_names = self.missing_var_names
obsolete_var_names = self.obsolete_var_names
map_list = {'lstm_cell/biases': 'lstm_cell/bias', 'lstm_cell/weights': 'lstm_cell/kernel', 'lstm_cell/bias': 'rnn/lstm_cell/bias', 'lstm_cell/kernel': 'rnn/lstm_cell/weights', 'rnn/lstm_cell/bias': 'lstm_cell/bias', 'rnn/lstm_cell/kernel': 'lstm_cell/kernel', 'cudnn/params_canonical/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/bias': 'lstm_fused_cell/bias', 'cudnn/params_canonical/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/kernel': 'lstm_fused_cell/kernel'}
print('Variables to restore which are not in checkpoint:', missing_var_names, file=log.v2)
var_name_map = {}
def make_load_renamed(old_name):
'\n :param str old_name:\n :rtype: () -> numpy.ndarray\n '
def load_old():
'\n :rtype: numpy.ndarray\n '
return reader.get_tensor(old_name)
return load_old
def make_load_renamed_flatten(old_name):
'\n :param str old_name:\n :rtype: () -> numpy.ndarray\n '
def load_old():
'\n :rtype: numpy.ndarray\n '
return reader.get_tensor(old_name).flatten()
return load_old
def make_load_weights_nativelstm_to_basic(new_name, postfix):
'\n :param str new_name:\n :param str postfix: "/lstm_cell/kernel" or "/rnn/lstm_cell/kernel"\n :rtype: ()->numpy.ndarray\n '
assert new_name.endswith(postfix)
old_name1 = (new_name[:(- len(postfix))] + '/W_re')
old_name2 = (new_name[:(- len(postfix))] + '/W')
def load_native_lstm_weights():
'\n :rtype: numpy.ndarray\n '
w_re = reader.get_tensor(old_name1)
w_ff = reader.get_tensor(old_name2)
assert ((w_re.ndim == w_ff.ndim == 2) and (w_re.shape[1] == w_ff.shape[1]) and ((w_re.shape[1] // 4) == w_re.shape[0]))
w = numpy.concatenate([w_ff, w_re], axis=0)
(w_j, w_i, w_f, w_o) = numpy.split(w, 4, axis=1)
w = numpy.concatenate([w_i, w_j, w_f, w_o], axis=1)
return w
return load_native_lstm_weights
def make_load_bias_nativelstm_to_basic(new_name, postfix):
'\n :param str new_name:\n :param str postfix: "/lstm_cell/bias" or "/rnn/lstm_cell/bias"\n :rtype: ()->numpy.ndarray\n '
assert new_name.endswith(postfix)
old_name = (new_name[:(- len(postfix))] + '/b')
def load_native_lstm_bias():
'\n :rtype: numpy.ndarray\n '
b = reader.get_tensor(old_name)
assert (b.ndim == 1)
(b_j, b_i, b_f, b_o) = numpy.split(b, 4, axis=0)
b = numpy.concatenate([b_i, b_j, b_f, b_o], axis=0)
return b
return load_native_lstm_bias
class MakeLoadBasicToNativeLstm():
'\n BasicLSTM -> NativeLSTM converter.\n '
def __init__(self, basic_kernel, basic_bias):
'\n :param str basic_kernel:\n :param str basic_bias:\n '
self.basic_kernel = basic_kernel
self.basic_bias = basic_bias
self._w_ff = None
self._w_re = None
self._bias = None
def _calc(self):
if (self._w_ff is not None):
return
old_w_ff_re = reader.get_tensor(self.basic_kernel)
assert (old_w_ff_re.ndim == 2)
old_bias = reader.get_tensor(self.basic_bias)
assert ((old_bias.ndim == 1) and (old_bias.shape[0] == old_w_ff_re.shape[1]) and ((old_bias.shape[0] % 4) == 0))
n_out = (old_bias.shape[0] // 4)
assert (old_w_ff_re.shape[0] > n_out)
n_in = (old_w_ff_re.shape[0] - n_out)
(old_w_ff_re_i, old_w_ff_re_j, old_w_ff_re_f, old_w_ff_re_o) = numpy.split(old_w_ff_re, 4, axis=1)
(old_bias_i, old_bias_j, old_bias_f, old_bias_o) = numpy.split(old_bias, 4, axis=0)
new_w_ff_re = numpy.concatenate([old_w_ff_re_j, old_w_ff_re_i, old_w_ff_re_f, old_w_ff_re_o], axis=1)
(new_w_ff, new_w_re) = numpy.split(new_w_ff_re, [n_in], axis=0)
new_bias = numpy.concatenate([old_bias_j, old_bias_i, old_bias_f, old_bias_o], axis=0)
self._w_ff = new_w_ff
self._w_re = new_w_re
self._bias = new_bias
def get_w_re(self):
'\n :rtype: numpy.ndarray\n '
self._calc()
return self._w_re
def get_w(self):
'\n :rtype: numpy.ndarray\n '
self._calc()
return self._w_ff
def get_b(self):
'\n :rtype: numpy.ndarray\n '
self._calc()
return self._bias
class MakeLoadCudnnRnn():
'\n Helper to load the CuDNN params.\n '
cudnn_postfix = '/cudnn/CudnnRNNParamsToCanonical:0'
def __init__(self, prefix, target='lstm_block_wrapper/'):
self.target = target
self.keys = [(target + 'bias'), (target + 'kernel')]
self.prefix = prefix
self.data = None
def _load(sself):
from returnn.tf.layers.rec import RecLayer
sself.data = RecLayer.convert_cudnn_canonical_to_lstm_block(reader=reader, prefix=sself.prefix, target=sself.target)
def make_getter(self, key):
'\n :param str key:\n :rtype: ()->numpy.ndarray\n '
def get():
'\n :rtype: numpy.ndarray\n '
if (self.data is None):
self._load()
return self.data[key]
return get
def get_lazy_dict(self):
'\n :rtype: dict[str,()->numpy.ndarray]\n '
return {(self.prefix + k): self.make_getter((self.prefix + k)) for k in self.keys}
for v in missing_var_names:
for postfix in ['/rnn/lstm_cell/kernel', '/lstm_cell/kernel', '/rnn/basic_lstm_cell/kernel', '/basic_lstm_cell/kernel']:
if v.endswith(postfix):
old_name1 = (v[:(- len(postfix))] + '/W_re')
old_name2 = (v[:(- len(postfix))] + '/W')
if ((old_name1 in obsolete_var_names) and (old_name2 in obsolete_var_names)):
var_name_map[v] = make_load_weights_nativelstm_to_basic(v, postfix=postfix)
break
for postfix in ['/rnn/lstm_cell/bias', '/lstm_cell/bias', '/rnn/basic_lstm_cell/bias', '/basic_lstm_cell/bias']:
if v.endswith(postfix):
old_name = (v[:(- len(postfix))] + '/b')
if (old_name in obsolete_var_names):
var_name_map[v] = make_load_bias_nativelstm_to_basic(v, postfix=postfix)
if v.endswith('/rec/W_re'):
prefix = v[:(- len('/rec/W_re'))]
cur_name_w = ('%s/rec/W' % prefix)
cur_name_b = ('%s/rec/b' % prefix)
old_name_kernel = ('%s/rec/rnn/lstm_cell/kernel' % prefix)
old_name_bias = ('%s/rec/rnn/lstm_cell/bias' % prefix)
old_name2_kernel = ('%s/rec/lstm_cell/kernel' % prefix)
old_name2_bias = ('%s/rec/lstm_cell/bias' % prefix)
if ((old_name_kernel in obsolete_var_names) and (old_name_bias in obsolete_var_names) and (cur_name_w in missing_var_names) and (cur_name_b in missing_var_names)):
loader = MakeLoadBasicToNativeLstm(basic_kernel=old_name_kernel, basic_bias=old_name_bias)
var_name_map[v] = loader.get_w_re
var_name_map[cur_name_w] = loader.get_w
var_name_map[cur_name_b] = loader.get_b
elif ((old_name2_kernel in obsolete_var_names) and (old_name2_bias in obsolete_var_names) and (cur_name_w in missing_var_names) and (cur_name_b in missing_var_names)):
loader = MakeLoadBasicToNativeLstm(basic_kernel=old_name2_kernel, basic_bias=old_name2_bias)
var_name_map[v] = loader.get_w_re
var_name_map[cur_name_w] = loader.get_w
var_name_map[cur_name_b] = loader.get_b
m = re.match('^(.*)/batch_norm/(beta|gamma|mean|variance)$', v)
if m:
prefix = m.group(1)
name = m.group(2)
matching_obsolete_var_names = [old_name for old_name in obsolete_var_names if re.match(('^%s/batch_norm/.*_%s$' % (re.escape(prefix), name)), old_name)]
if (len(matching_obsolete_var_names) == 1):
var_name_map[v] = make_load_renamed(old_name=matching_obsolete_var_names[0])
m = re.match('^(.*)/batch_norm/v2_(beta|gamma|mean|variance)$', v)
if m:
prefix = m.group(1)
name = m.group(2)
matching_obsolete_var_names = [old_name for old_name in obsolete_var_names if (re.match(('^%s/batch_norm/.*_%s$' % (re.escape(prefix), name)), old_name) or re.match(('^%s/batch_norm/%s$' % (re.escape(prefix), name)), old_name))]
if (len(matching_obsolete_var_names) == 1):
var_name_map[v] = make_load_renamed_flatten(old_name=matching_obsolete_var_names[0])
for v in obsolete_var_names:
for (k_old, k_new) in map_list.items():
if v.endswith(('/%s' % k_old)):
v2 = (v[:(- len(k_old))] + k_new)
if (v2 in missing_var_names):
var_name_map[v2] = make_load_renamed(old_name=v)
break
if v.endswith(MakeLoadCudnnRnn.cudnn_postfix):
var_name_map.update(MakeLoadCudnnRnn(prefix=v[:((- len(MakeLoadCudnnRnn.cudnn_postfix)) + 1)]).get_lazy_dict())
var_name_map.update({name: make_load_renamed(old_name) for (name, old_name) in self.var_name_mapping.items()})
if self.custom_missing_load_func:
for var_name in missing_var_names:
if (var_name in var_name_map):
continue
var = self.var_net_names[var_name]
var_shape = tuple(var.get_shape().as_list())
assert all((isinstance(d, int) for d in var_shape)), f'var {var_name} {var} unknown?'
res = self.custom_missing_load_func(name=var_name, shape=var_shape, reader=self.reader)
if (res is not None):
assert isinstance(res, numpy.ndarray)
assert (res.shape == var_shape)
print(f'custom_missing_load_func loaded {var_name} with shape {var_shape}.', file=log.v4)
var_name_map[var_name] = (lambda res_: (lambda : res_))(res)
could_not_find_map_list = [v for v in missing_var_names if (v not in var_name_map)]
if (self.ignore_missing or (not could_not_find_map_list)):
print('We found these corresponding variables in the checkpoint:', var_name_map, file=log.v2)
print('Custom param importers:', self.custom_param_importers, file=log.v2)
print('Loading now...', file=log.v3)
for v in self.saveable_params:
v_name = self._get_param_name(v)
custom_importer = self._find_custom_param_importer(v_name)
if custom_importer:
variable_values[v] = self.VariableValue(custom_param_importer=custom_importer)
elif (v_name in var_ckpt_names):
variable_values[v] = self.VariableValue(value=reader.get_tensor(v_name))
else:
if (self.ignore_missing and (v_name not in var_name_map)):
print(('Warning, did not find match for var %r (%r, params_prefix %r, load_if_prefix %r) in checkpoint %r.' % (v, v_name, self.params_prefix, self.load_if_prefix, self.filepattern)), file=log.v3)
continue
variable_values[v] = self.VariableValue(value=var_name_map[v_name]())
assert variable_values, ('no vars to load; saveable vars are %r. load_if_prefix %r.' % (self.saveable_params, self.load_if_prefix))
print('Successfully loaded all variables. Any new save will use the updated variable names.', file=log.v3)
return variable_values
else:
print('Could not find mappings for these variables:', could_not_find_map_list, 'var_name_map:', var_name_map, file=log.v3)
print('All variables in checkpoint:', file=log.v3)
print(reader.debug_string().decode('utf8'), file=log.v3)
print('All variables to restore:', file=log.v3)
for v in (net_vars + net_saveables):
print(v, file=log.v3)
print(file=log.v3)
print('Variables to restore which are not in checkpoint:', file=log.v3)
for v in sorted(var_net_names):
if (v in var_ckpt_names):
continue
print(v, file=log.v3)
print(file=log.v3)
print('Variables in checkpoint which are not needed for restore:', file=log.v3)
for v in sorted(var_ckpt_names):
if (v in var_net_names):
continue
print(v, file=log.v3)
print(file=log.v3)
print('Probably we can restore these:', file=log.v3)
for v in sorted(var_name_map.keys()):
print(v, file=log.v3)
if (not var_name_map):
print('(None)', file=log.v3)
print(file=log.v3)
raise tf.errors.NotFoundError(node_def=None, op=None, message=('CustomCheckpointLoader. could_not_find_map_list: %r' % (could_not_find_map_list,)))
def load_now(self, session):
'\n :param tf.compat.v1.Session session:\n :return: nothing, will assign the variables in the session\n '
for (var, value) in self.get_variable_value_map().items():
value.assign_var(var=var, session=session)
def set_as_custom_init(self):
'\n Make sure that this loader is used during initialization.\n '
var_value_map = self.get_variable_value_map()
read_vars = set()
def make_var_post_init(var):
'\n :param tf.Variable var:\n :return: function\n :rtype: (tf.compat.v1.Session)->None\n '
def var_post_init(session):
'\n :param tf.compat.v1.Session session:\n '
assert (var not in read_vars), 'Cannot initialize this twice. On purpose, to free memory.'
read_vars.add(var)
value = var_value_map.pop(var)
value.assign_var(var=var, session=session)
return var_post_init
for var in self.saveable_params:
if (self.ignore_missing and (var not in var_value_map)):
continue
if self.load_if_prefix:
print(('%s registered for pre-loading via prefix %r.' % (var.name, self.load_if_prefix)), file=log.v2)
set_custom_post_init(var=var, func=make_var_post_init(var))
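def _example_custom_checkpoint_loader_usage(session, filename):
    # Illustrative sketch (not part of the original API): restore all global variables from
    # an older checkpoint, ignoring vars which are missing in that file.
    # `session` and `filename` are assumed to be provided by the caller.
    loader = CustomCheckpointLoader(
        filename=filename,
        saveable_params=tf_compat.v1.global_variables(),
        ignore_missing=True)
    loader.load_now(session=session)  # or loader.set_as_custom_init() before param initialization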
|
class CustomLoadParamFunc(Protocol):
'\n This is a custom param importer function.\n '
def __call__(self, *, name: str, shape: Tuple[int, ...], reader: tf.compat.v1.train.NewCheckpointReader) -> Optional[numpy.ndarray]:
...
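def _example_missing_param_loader(*, name: str, shape: Tuple[int, ...], reader) -> Optional[numpy.ndarray]:
    # Illustrative sketch of a function satisfying the CustomLoadParamFunc protocol above
    # (not part of the original API): initialize any missing parameter with zeros.
    # A real implementation would typically derive the value from other tensors in `reader`.
    print("custom init for missing var %r with shape %r" % (name, shape))
    return numpy.zeros(shape, dtype="float32")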
|
def set_custom_post_init(var, func):
'\n It registers the provided `func` such that it gets called for this variable\n in :func:`TFNetwork.initialize_params`.\n\n :param tf.Variable var:\n :param (tf.compat.v1.Session)->None func:\n '
assert callable(func)
var.custom_post_init = func
|
def have_custom_post_init(var):
'\n :param tf.Variable var:\n :return: whether :func:`set_custom_post_init` was called on this var, i.e. we have custom init\n :rtype: bool\n '
custom_post_init = getattr(var, 'custom_post_init', None)
if custom_post_init:
assert callable(custom_post_init)
return True
return False
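def _example_custom_post_init(var, value):
    # Illustrative sketch (not part of the original API): register a post-init function which
    # assigns a precomputed numpy `value` to `var` when TFNetwork.initialize_params runs.
    def _post_init(session):
        tf_util.VariableAssigner(var=var).assign(value=value, session=session)
    set_custom_post_init(var=var, func=_post_init)
    assert have_custom_post_init(var)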
|
def py_get_sprint_automata_for_batch(sprint_opts, tags):
'\n :param dict[str] sprint_opts:\n :param list[str] tags:\n :return: (edges, weights, start_end_states)\n :rtype: (numpy.ndarray, numpy.ndarray, numpy.ndarray)\n '
sprint_instance_pool = SprintInstancePool.get_global_instance(sprint_opts=sprint_opts)
with sprint_instance_pool.lock:
(edges, weights, start_end_states) = sprint_instance_pool.get_automata_for_batch(tags)
edges = edges.astype('int32')
start_end_states = start_end_states.astype('int32')
return (edges, weights, start_end_states)
|
def get_sprint_automata_for_batch_op(sprint_opts, tags):
'\n :param dict[str] sprint_opts:\n :param tf.Tensor tags: shape (batch,), of dtype string\n :return: (edges, weights, start_end_states). all together in one automaton.\n edges are of shape (4, num_edges), each (from, to, emission-idx, seq-idx), of dtype int32.\n weights are of shape (num_edges,), of dtype float32.\n start_end_states are of shape (2, batch), each (start,stop) state idx, batch = len(tags), of dtype int32.\n :rtype: (tf.Tensor, tf.Tensor, tf.Tensor)\n '
def py_wrap_get_sprint_automata_for_batch(py_tags):
'\n :param list[str] py_tags: len batch\n :return: (edges, weights, start_end_states)\n :rtype: (numpy.ndarray, numpy.ndarray, numpy.ndarray)\n '
try:
return py_get_sprint_automata_for_batch(sprint_opts=sprint_opts, tags=py_tags)
except Exception:
print('Exception in py_wrap_get_sprint_automata_for_batch:')
import sys
sys.excepthook(*sys.exc_info())
raise
tags.set_shape((None,))
(edges, weights, start_end_states) = tf_compat.v1.py_func(py_wrap_get_sprint_automata_for_batch, [tags], [tf.int32, tf.float32, tf.int32], name='get_sprint_automata_for_batch')
assert isinstance(edges, tf.Tensor)
assert isinstance(weights, tf.Tensor)
assert isinstance(start_end_states, tf.Tensor)
edges.set_shape((4, None))
weights.set_shape((None,))
start_end_states.set_shape((2, tags.get_shape().dims[0]))
return (edges, weights, start_end_states)
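def _example_sprint_automata_op_usage(sprint_opts):
    # Illustrative sketch (not part of the original API); requires a working Sprint/RASR setup,
    # and `sprint_opts` is assumed to be a valid Sprint options dict.
    tags = tf_compat.v1.placeholder(tf.string, shape=(None,), name="seq_tags")
    edges, weights, start_end_states = get_sprint_automata_for_batch_op(sprint_opts=sprint_opts, tags=tags)
    # edges: (4, num_edges) int32, weights: (num_edges,) float32, start_end_states: (2, batch) int32
    return edges, weights, start_end_states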
|
def py_get_sprint_loss_and_error_signal(sprint_opts, log_posteriors, seq_lengths, seq_tags):
'\n :param dict[str] sprint_opts:\n :param numpy.ndarray log_posteriors: 3d (time,batch,label)\n :param numpy.ndarray seq_lengths: 1d (batch)\n :param list[str] seq_tags: seq names\n :return: (loss, error_signal), error_signal has the same shape as posteriors. loss is a 1d-array (batch).\n :rtype: (numpy.ndarray, numpy.ndarray)\n '
sprint_instance_pool = SprintInstancePool.get_global_instance(sprint_opts=sprint_opts)
with sprint_instance_pool.lock:
(loss, error_signal) = sprint_instance_pool.get_batch_loss_and_error_signal(log_posteriors=log_posteriors, seq_lengths=seq_lengths, tags=seq_tags)
return (loss, error_signal)
|
def get_sprint_loss_and_error_signal(sprint_opts, log_posteriors, seq_lengths, seq_tags):
'\n :param dict[str] sprint_opts:\n :param tf.Tensor log_posteriors: 3d (time,batch,label)\n :param tf.Tensor seq_lengths: 1d (batch,)\n :param tf.Tensor seq_tags: 1d (batch,), seq names\n :return: (loss, error_signal), error_signal has the same shape as posteriors. loss is a 1d-array (batch).\n :rtype: (tf.Tensor, tf.Tensor)\n '
def py_wrap_get_sprint_loss_and_error_signal(py_log_posteriors, py_seq_lengths, py_seq_tags):
'\n :param numpy.ndarray py_log_posteriors: 3d (time,batch,label)\n :param numpy.ndarray py_seq_lengths: 1d (batch)\n :param list[str] py_seq_tags:\n :return: (loss, error_signal), error_signal has the same shape as posteriors. loss is a 1d-array (batch).\n :rtype: (numpy.ndarray, numpy.ndarray)\n '
try:
return py_get_sprint_loss_and_error_signal(sprint_opts=sprint_opts, log_posteriors=py_log_posteriors, seq_lengths=py_seq_lengths, seq_tags=py_seq_tags)
except Exception:
print('Exception in py_wrap_get_sprint_loss_and_error_signal:')
import sys
sys.excepthook(*sys.exc_info())
raise
log_posteriors.set_shape((None, None, None))
seq_lengths.set_shape((None,))
seq_tags.set_shape((None,))
(loss, error_signal) = tf_compat.v1.py_func(py_wrap_get_sprint_loss_and_error_signal, [log_posteriors, seq_lengths, seq_tags], [tf.float32, tf.float32], name='get_sprint_loss_and_error_signal')
assert isinstance(loss, tf.Tensor)
assert isinstance(error_signal, tf.Tensor)
loss.set_shape((None,))
error_signal.set_shape(log_posteriors.get_shape())
return (loss, error_signal)
|
def _init_optimizer_classes_dict():
global _OptimizerClassesDictInitialized
if _OptimizerClassesDictInitialized:
return
_OptimizerClassesDictInitialized = True
potential_list = list(globals().items())
potential_list += list(vars(tf_compat.v1.train).items())
if (tf_version_tuple() >= (1, 2, 0)):
try:
from tensorflow.contrib import opt
potential_list += list(vars(opt).items())
except ImportError:
pass
allowed_types = (Optimizer,)
if KerasOptimizer:
potential_list += list(vars(tf_compat.v2.keras.optimizers).items())
allowed_types += (KerasOptimizer,)
potential_list += [('NadamKeras', tf_compat.v2.keras.optimizers.Nadam)]
for (name, v) in potential_list:
assert isinstance(name, str)
if (name.lower() in _OptimizerClassesDict):
continue
if (v is Optimizer):
continue
if (KerasOptimizer and (v is KerasOptimizer)):
continue
if (not isinstance(v, type)):
continue
if (not issubclass(v, allowed_types)):
continue
if (v is _KerasOptimizerWrapper):
continue
register_optimizer_class(v, name=name)
|
def _check_valid_optimizer(optimizer_class):
'\n :param type optimizer_class:\n '
if KerasOptimizer:
assert issubclass(optimizer_class, (Optimizer, KerasOptimizer))
else:
assert issubclass(optimizer_class, Optimizer)
|
def register_optimizer_class(cls, name=None):
'\n :param type[Optimizer|KerasOptimizer] cls:\n :param str|None name:\n '
_init_optimizer_classes_dict()
if (not name):
name = cls.__name__
_check_valid_optimizer(cls)
assert (name.lower() not in _OptimizerClassesDict)
_OptimizerClassesDict[name.lower()] = cls
if name.endswith('Optimizer'):
name = name[:(- len('Optimizer'))]
assert (name.lower() not in _OptimizerClassesDict)
_OptimizerClassesDict[name.lower()] = cls
|
def get_optimizer_class(class_name):
'\n :param str|function|type[Optimizer|KerasOptimizer] class_name: e.g. "adam"\n :return: the class\n :rtype: type[Optimizer|KerasOptimizer]\n '
_init_optimizer_classes_dict()
if isinstance(class_name, type):
_check_valid_optimizer(class_name)
return class_name
if callable(class_name):
class_name = class_name()
assert isinstance(class_name, str)
return _OptimizerClassesDict[class_name.lower()]
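def _example_optimizer_class_lookup():
    # Illustrative sketch (not part of the original API): resolve a built-in optimizer by name,
    # and register a custom subclass so that it can be referenced from the config.
    adam_class = get_optimizer_class("adam")  # typically tf.compat.v1.train.AdamOptimizer
    class _MyOptimizer(tf_compat.v1.train.GradientDescentOptimizer):
        pass
    register_optimizer_class(_MyOptimizer, name="MyOptimizer")
    # After registration, the class is available e.g. via optimizer = {"class": "my"} in the config.
    return adam_class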
|
class Updater(object):
'\n This will create the :class:`tf.compat.v1.train.Optimizer` instance given the config\n and the update-op for all trainable vars.\n See the code of :func:`Updater.create_optimizer` for valid config options.\n\n Wraps one or multiple tf.compat.v1.train.Optimizer, and extends it by some further functionality.\n\n Note: `Vincent Vanhoucke says <https://github.com/tensorflow/tensorflow/issues/323#issuecomment-159116515>`_,\n in case you get nans, consider increasing the epsilon (for Adam, Nadam and similar).\n This is the config option ``optimizer_epsilon``.\n In some places in our Theano code, 1e-16 is our default epsilon, in some other parts, 1e-8 is.\n 1e-8 might be more stable. Or even 1e-6.\n Note that when the gradient is suddenly zero in one step, the update can be proportional to lr / eps.\n\n From the :class:`tf.compat.v1.train.AdamOptimizer` documentation:\n\n The default value of 1e-8 for epsilon might not be a good default in\n general. For example, when training an Inception network on ImageNet a\n current good choice is 1.0 or 0.1. Note that since AdamOptimizer uses the\n formulation just before Section 2.1 of the Kingma and Ba paper rather than\n the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon\n hat" in the paper.\n\n More from Vincent Vanhoucke:\n\n One thing you can do is run with a tiny learning rate, or even zero learning rate.\n If you still have divergence then, you have a bug in your setup.\n If not, increase your rate slowly and see if there is a regime in which things train without diverging.\n It\'s completely possible to have weights that are in a good range,\n but activations or gradients going to infinity because of the shape of the loss, or too high a learning rate.\n It\'s obviously always a possibility that there is a bug in the optimizers, but in my experience,\n every single instance of this kind of problem could be traced back to a weirdly wired model,\n learning rate issues, bad randomization of the input examples,\n or - in the case of Adam or RMSProp - issues with the epsilon value.\n\n In addition, you might also want to try ``gradient_nan_inf_filter`` or maybe set beta1=0.5.\n\n For further debugging, see :func:`tf.add_check_numerics_ops` or :func:`add_check_numerics_ops_and_debug_print`,\n which is config option ``debug_add_check_numerics_ops``.\n Also relevant are config options ``debug_add_check_numerics_on_output`` and ``debug_grad_summaries``.\n '
def __init__(self, config, network, initial_learning_rate=1.0):
'\n :param returnn.config.Config config:\n :param TFNetwork network:\n :param float initial_learning_rate:\n '
self.config = config
self.learning_rate_var = tf.Variable(name='learning_rate', initial_value=0.0, trainable=False, dtype='float32')
self.initial_learning_rate = initial_learning_rate
self.learning_rate = None
self.trainable_vars = []
self.network = network
self.global_train_step = self.network.global_train_step
self.use_locking = self.config.bool('optimizer_use_locking', False)
self.loss = network.get_objective()
self.decouple_constraints = self.config.bool('decouple_constraints', False)
self.optimizers = OrderedDict()
self.optim_op = None
self.optim_meta_losses_dict = None
self.optimizer_vars = []
self.optimizer_init_vars_op = None
if self.config.is_true('deterministic_train'):
non_det_ops = tf_util.get_non_deterministic_ops_from_graph()
if non_det_ops:
print('WARNING: The graph uses these non-deterministic ops: {}'.format(non_det_ops), file=log.v1)
def reset_optim_op(self):
'\n Call this if something is changed which the optim_op depends on.\n See self.create_optim_op().\n '
self.optim_op = None
def set_trainable_vars(self, trainable_vars):
'\n :param list[tf.Variable] trainable_vars:\n '
if (trainable_vars == self.trainable_vars):
return
self.trainable_vars = trainable_vars
self.reset_optim_op()
def set_learning_rate(self, value, session):
'\n :param float value:\n :param tf.compat.v1.Session session:\n '
from returnn.tf.util.basic import VariableAssigner
VariableAssigner(self.learning_rate_var).assign(value, session=session)
def get_current_step_learning_rate(self):
'\n :rtype: tf.Tensor\n '
lr = self.learning_rate_var
if callable(self.config.typed_dict.get('dynamic_learning_rate')):
import inspect
learning_rate_function = self.config.typed_dict.get('dynamic_learning_rate')
signature = inspect.signature(learning_rate_function)
assert any([(arg.kind == inspect.Parameter.VAR_KEYWORD) for arg in signature.parameters.values()]), 'please specify **kwargs in dynamic_learning_rate for future compatibility'
if ('epoch' in signature.parameters):
raise NotImplementedError('TF updater: dynamic_learning_rate with epoch not supported currently')
lr = learning_rate_function(network=self.network, global_train_step=self.global_train_step, learning_rate=lr)
elif self.config.typed_dict.get('dynamic_learning_rate'):
with tf.name_scope('dynamic_learning_rate'):
from returnn.util.basic import CollectionReadCheckCovered
opts = CollectionReadCheckCovered(self.config.typed_dict['dynamic_learning_rate'])
interval_steps = tf.constant(opts['interval'], name='interval', dtype=self.global_train_step.dtype)
step_in_interval = tf_compat.v1.mod(self.global_train_step, interval_steps, name='step_in_interval')
factor = tf.pow(tf.constant(opts['decay'], name='decay', dtype=tf.float32), tf.cast(step_in_interval, dtype=tf.float32, name='step_in_interval_float'), name='factor')
lr = (lr * factor)
opts.assert_all_read()
if (self.config.is_true('use_horovod') and self.config.is_true('horovod_scale_lr')):
import horovod.tensorflow as hvd
lr = (lr * hvd.size())
return lr
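# Illustrative config sketches for the `dynamic_learning_rate` option handled above
# (values are made up; these lines belong into the RETURNN config, not into this module):
#
#   dynamic_learning_rate = {"interval": 10000, "decay": 0.5}
#
#   def dynamic_learning_rate(*, global_train_step, learning_rate, **kwargs):
#       return learning_rate * tf.minimum(1.0, tf.cast(global_train_step, tf.float32) / 10000.0)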
def create_optim_op(self):
'\n Creates the optimize TF op.\n\n :return: nothing, will just set self.optim_op\n '
assert isinstance(self.loss, tf.Tensor), 'no loss defined?'
assert self.trainable_vars, 'no variables to update/optimize'
from returnn.tf.util.basic import MetaLosses
from returnn.tf.util.gradient_checkpoint import prepare_gradient_checkpointing
prepare_gradient_checkpointing()
all_prev_existing_vars = tf_compat.v1.global_variables()
trainable_vars_for_gradients = list(self.trainable_vars)
trainable_vars_custom_update = []
for v in self.trainable_vars:
if hasattr(v, 'returnn_custom_update'):
trainable_vars_custom_update.append(v)
trainable_vars_for_gradients.remove(v)
self.learning_rate = self.get_current_step_learning_rate()
self.optimizers.clear()
self.create_all_needed_optimizers(trainable_vars_for_gradients)
with tf_compat.v1.variable_scope('optimize'):
meta_losses_scope = MetaLosses.enter_gradient_scope()
apply_grads = self.get_apply_grads_op(self.loss, trainable_vars_for_gradients)
meta_losses_scope.exit()
self.optim_meta_losses_dict = meta_losses_scope.losses_as_fetch_dict()
if meta_losses_scope.losses:
with tf.name_scope('meta_loss'):
meta_loss = meta_losses_scope.summed_loss_for_optimization()
meta_apply_grads = self.get_apply_grads_op(meta_loss, trainable_vars_for_gradients)
apply_grads = tf.group(apply_grads, meta_apply_grads)
self.optim_op = apply_grads
if trainable_vars_custom_update:
with tf_compat.v1.variable_scope('custom_update'):
updates = [self.optim_op]
for param in trainable_vars_custom_update:
custom_update = getattr(param, 'returnn_custom_update')
assert isinstance(custom_update, CustomUpdate)
updates.append(custom_update.update_var(param))
self.optim_op = tf.group(*updates)
if self.config.opt_typed_value('extra_updates'):
extra_updates = self.config.typed_dict['extra_updates']
assert isinstance(extra_updates, dict)
vars_by_name = {v.name[:(- 2)]: v for v in all_prev_existing_vars}
extra_updates_op_list = []
from returnn.util.basic import getargspec
from returnn.tf.util.basic import get_var_update_ops, get_variable_grad_from_update_ops
for (var_name, func) in extra_updates.items():
func_arg_names = getargspec(func).args
assert (var_name in vars_by_name), ('var with name %r not found. vars:\n%s' % (var_name, '\n'.join(sorted(vars_by_name.keys()))))
var = vars_by_name[var_name]
assert isinstance(var, tf.Variable)
ops = get_var_update_ops(var, fetches=self.optim_op)
with tf.control_dependencies(ops):
func_kwargs = {'var': var}
if ('network' in func_arg_names):
func_kwargs['network'] = self.network
if ('update_ops' in func_arg_names):
func_kwargs['update_ops'] = ops
if ('grad' in func_arg_names):
func_kwargs['grad'] = get_variable_grad_from_update_ops(var, ops)
op = func(**func_kwargs)
assert isinstance(op, (tf.Operation, tf.Tensor))
extra_updates_op_list.append(op)
self.optim_op = tf.group(self.optim_op, *extra_updates_op_list)
slot_names_per_optimizer = self.get_slot_names_per_optimizer()
slot_vars = []
for (opt_key, slot_names) in slot_names_per_optimizer.items():
print(('Initialize optimizer (%s) with slots %s.' % ((opt_key or 'default'), slot_names)), file=log.v3)
for slot_name in slot_names:
for v in self.filter_var_list_per_optimizer_key(trainable_vars_for_gradients, opt_key=opt_key):
slot_var = self.get_slot(var=v, name=slot_name)
if (slot_var is None):
print(('Warning: No slot_var found for variable %r, slot_name %r. Maybe no gradient for this var?' % (v, slot_name)), file=log.v3)
else:
assert isinstance(slot_var, tf.Variable)
slot_vars.append(slot_var)
self.optimizer_vars = slot_vars
other_new_vars = []
for v in tf_compat.v1.global_variables():
if (v in all_prev_existing_vars):
continue
if (v in self.optimizer_vars):
continue
other_new_vars.append(v)
if other_new_vars:
print(('These additional variables were created by the optimizer: %s.' % other_new_vars), file=log.v3)
self.optimizer_vars += other_new_vars
with tf.name_scope('optimizer_init_vars'):
self.optimizer_init_vars_op = tf_compat.v1.variables_initializer(self.optimizer_vars, name='init_optim_slot_vars')
if self.config.bool_or_other('debug_grad_summaries', False):
from returnn.tf.util.basic import variable_summaries, get_base_name, reuse_name_scope_of_tensor
for key in self.network.used_data_keys:
data = self.network.extern_data.data[key]
if data.sparse:
continue
with reuse_name_scope_of_tensor(data.placeholder):
variable_summaries(data.placeholder)
if self.config.bool('debug_add_check_numerics_ops', False):
print('Adding checks for inf/nan.', file=log.v3)
self.optim_op = tf.group(self.optim_op, add_check_numerics_ops([self.optim_op]))
with tf.control_dependencies([self.optim_op, self.network.global_train_step]):
incr_step_op = tf_compat.v1.assign_add(self.network.global_train_step_var, 1, name='global_train_step_increment')
self.optim_op = tf.group(self.optim_op, incr_step_op, name='optim_and_step_incr')
if self.config.bool('debug_save_updater_vars', False):
print('Save updater/optimizer vars:', file=log.v3)
print(self.optimizer_vars, file=log.v3)
for v in self.optimizer_vars:
if (v not in self.network.extra_vars_to_save):
self.network.extra_vars_to_save.append(v)
self.network.reset_saver()
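# Illustrative config sketch for the `extra_updates` option handled in create_optim_op above
# (the variable name "output/W" is hypothetical; these lines belong into the RETURNN config):
#
#   def _clip_weights_after_update(var):
#       return tf_compat.v1.assign(var, tf.clip_by_value(var, -1.0, 1.0))
#
#   extra_updates = {"output/W": _clip_weights_after_update}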
def get_optim_op(self, callback_on_new=None):
'\n :param None|()->None callback_on_new:\n :rtype: tf.Operation\n '
if (self.optim_op is None):
self.create_optim_op()
if callback_on_new:
callback_on_new()
return self.optim_op
def init_optimizer_vars(self, session):
'\n :param tf.compat.v1.Session session:\n '
self.get_optim_op()
session.run(self.optimizer_init_vars_op)
def get_default_optimizer(self):
'\n :rtype: tf.compat.v1.train.Optimizer\n '
return self.get_default_optimizer_item(auto_create_new=False)[1]
def get_default_optimizer_item(self, auto_create_new):
'\n :param bool auto_create_new:\n :return: key, optimizer\n :rtype: (object, tf.compat.v1.train.Optimizer)\n '
return self._get_optimizer_item_for_opts(None, auto_create_new=auto_create_new)
def create_all_needed_optimizers(self, train_vars):
'\n :param list[tf.Variable] train_vars:\n '
for var in train_vars:
self._get_optimizer_item_for_variable(var, auto_create_new=True)
def _get_optimizer_item_for_variable(self, var, auto_create_new=False):
'\n :param tf.Variable var:\n :param bool auto_create_new:\n :return: key, optimizer\n :rtype: (object, tf.compat.v1.train.Optimizer)\n '
updater_opts = getattr(var, 'RETURNN_updater_opts', None)
if (not updater_opts):
return self.get_default_optimizer_item(auto_create_new=auto_create_new)
from returnn.util.basic import CollectionReadCheckCovered
assert isinstance(updater_opts, CollectionReadCheckCovered)
optimizer_opts = updater_opts.get('optimizer', None)
if (not optimizer_opts):
return self.get_default_optimizer_item(auto_create_new=auto_create_new)
assert isinstance(optimizer_opts, dict)
return self._get_optimizer_item_for_opts(optimizer_opts, auto_create_new=auto_create_new)
def _get_optimizer_item_for_opts(self, optimizer_opts, auto_create_new):
'\n :param dict[str]|str|None optimizer_opts:\n :param bool auto_create_new:\n :return: key, optimizer\n :rtype: (object, tf.compat.v1.train.Optimizer)\n '
from returnn.util.basic import make_hashable
key = make_hashable(optimizer_opts)
if (key in self.optimizers):
return (key, self.optimizers[key])
assert auto_create_new, ('no optimizer found for opts %r' % (optimizer_opts,))
optimizer = self._create_optimizer(optimizer_opts)
self.optimizers[key] = optimizer
return (key, optimizer)
def _create_optimizer(self, optimizer_opts):
'\n :param dict[str]|str|None optimizer_opts: if dict, contains "class": opt_name. if str, then opt_name.\n :rtype: tf.compat.v1.train.Optimizer\n '
if (optimizer_opts is None):
return self._create_default_optimizer()
lr = self.learning_rate
epsilon = self.config.float('optimizer_epsilon', 1e-16)
use_locking = self.use_locking
momentum = self.config.float('momentum', 0.0)
if isinstance(optimizer_opts, str):
optimizer_opts = {'class': optimizer_opts}
assert isinstance(optimizer_opts, dict)
optimizer_opts = optimizer_opts.copy()
if ('class' in optimizer_opts):
optim_class_name = optimizer_opts.pop('class')
optim_class = get_optimizer_class(optim_class_name)
else:
(_, default_opt) = self._get_optimizer_item_for_opts(None, auto_create_new=True)
optim_class = default_opt.__class__
from returnn.util.basic import collect_class_init_kwargs
optim_class_kwargs = collect_class_init_kwargs(optim_class)
if ('epsilon' in optim_class_kwargs):
optimizer_opts.setdefault('epsilon', epsilon)
if (('momentum' in optim_class_kwargs) and momentum):
optimizer_opts.setdefault('momentum', momentum)
if (('use_locking' in optim_class_kwargs) and use_locking):
optimizer_opts.setdefault('use_locking', use_locking)
assert ('learning_rate' not in optimizer_opts), 'learning_rate will be set implicitly'
if ('learning_rate_multiplier' in optimizer_opts):
lr = (lr * optimizer_opts.pop('learning_rate_multiplier'))
optimizer_opts['learning_rate'] = lr
print(('Create optimizer %s with options %r.' % (optim_class, optimizer_opts)), file=log.v2)
if (KerasOptimizer and issubclass(optim_class, KerasOptimizer)):
optim_class = _KerasOptimizerWrapper.get_factory(optim_class)
optimizer = optim_class(**optimizer_opts)
assert isinstance(optimizer, Optimizer)
return optimizer
def _create_default_optimizer(self):
'\n :rtype: tf.compat.v1.train.Optimizer\n '
lr = self.learning_rate
epsilon = self.config.float('optimizer_epsilon', 1e-16)
use_locking = self.use_locking
momentum = self.config.float('momentum', 0.0)
optim_config = self.config.typed_value('optimizer')
behavior_valid_optimizer = False
if optim_config:
assert isinstance(optim_config, (dict, str))
assert ('class' in optim_config)
optimizer = self._create_optimizer(optim_config)
behavior_valid_optimizer = True
elif self.config.bool('adam', False):
assert (not momentum)
print('Create Adam optimizer.', file=log.v2)
optimizer = tf_compat.v1.train.AdamOptimizer(learning_rate=lr, epsilon=epsilon, use_locking=use_locking)
elif self.config.bool('nadam', False):
assert_min_tf_version((1, 2, 0), 'NadamOptimizer introduced in TF 1.2.0')
assert (not momentum)
print('Create NAdam optimizer.', file=log.v2)
try:
from tensorflow.contrib.opt import NadamOptimizer
optimizer = NadamOptimizer(learning_rate=lr, epsilon=epsilon, use_locking=use_locking)
except ImportError:
optimizer = tf.keras.optimizers.Nadam(learning_rate=lr, epsilon=epsilon)
optimizer = _KerasOptimizerWrapper(optimizer)
elif self.config.bool('adadelta', False):
assert (not momentum)
print('Create Adadelta optimizer.', file=log.v2)
optimizer = tf_compat.v1.train.AdadeltaOptimizer(learning_rate=lr, epsilon=epsilon, use_locking=use_locking)
elif self.config.bool('adagrad', False):
assert (not momentum)
print('Create Adagrad optimizer.', file=log.v2)
optimizer = tf_compat.v1.train.AdagradOptimizer(learning_rate=lr, use_locking=use_locking)
elif self.config.is_of_type('rmsprop', float):
print(('Create RMSProp optimizer with decay %f.' % self.config.float('rmsprop', 0.9)), file=log.v2)
optimizer = tf_compat.v1.train.RMSPropOptimizer(decay=self.config.float('rmsprop', 0.9), learning_rate=lr, momentum=momentum, epsilon=epsilon, use_locking=use_locking)
elif self.config.bool('rmsprop', False):
print('Create RMSProp optimizer.', file=log.v2)
optimizer = tf_compat.v1.train.RMSPropOptimizer(learning_rate=lr, momentum=momentum, epsilon=epsilon, use_locking=use_locking)
elif momentum:
print('Create Momentum optimizer.', file=log.v2)
optimizer = tf_compat.v1.train.MomentumOptimizer(learning_rate=lr, momentum=momentum, use_locking=use_locking)
else:
print('Create SGD optimizer.', file=log.v2)
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=lr, use_locking=use_locking)
behavior_valid_optimizer = True
BehaviorVersion.require(condition=behavior_valid_optimizer, message="Please define an optimizer specifically via the 'optimizer=...' parameter", version=2)
return optimizer
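# Illustrative config sketches for the `optimizer` option resolved above
# (values are made up; these lines belong into the RETURNN config, not into this module):
#
#   optimizer = {"class": "adam", "epsilon": 1e-8}
#   optimizer = {"class": "rmsprop", "decay": 0.9, "learning_rate_multiplier": 0.5}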
def _compute_gradients(self, loss, var_list):
'\n :param tf.Tensor loss:\n :param list[tf.Variable] var_list:\n :return: list of (gradient, variable) pairs\n :rtype: list[(tf.Tensor,tf.Variable)]\n '
if self.config.is_true('deterministic_train'):
aggregation_method = tf.AggregationMethod.ADD_N
else:
aggregation_method = tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
default_opt = self.get_default_optimizer()
return default_opt.compute_gradients(loss=loss, var_list=var_list, aggregation_method=aggregation_method)
def _apply_gradients(self, grads_and_vars, opt_key, accum_grad_multiple_num_steps=0):
'\n :param list[(tf.Tensor,tf.Variable)] grads_and_vars:\n :param object opt_key:\n :param int accum_grad_multiple_num_steps:\n :rtype: tf.Operation\n '
optimizer = self.optimizers[opt_key]
assert isinstance(optimizer, Optimizer)
if (accum_grad_multiple_num_steps >= 1):
return tf.cond(tf.equal(tf_compat.v1.mod(self.global_train_step, accum_grad_multiple_num_steps), (accum_grad_multiple_num_steps - 1)), true_fn=(lambda : optimizer.apply_gradients(grads_and_vars)), false_fn=(lambda : tf.no_op()), name='apply_grads/accum_grad_multiple_step')
return optimizer.apply_gradients(grads_and_vars)
def get_slot_names_per_optimizer(self):
'\n :return: ordered dict: opt key -> slot names\n :rtype: dict[object, list[str]]\n '
from collections import OrderedDict
res = OrderedDict()
for (key, optimizer) in self.optimizers.items():
assert isinstance(optimizer, Optimizer)
res[key] = optimizer.get_slot_names()
return res
def filter_var_list_per_optimizer_key(self, var_list, opt_key):
'\n :param list[tf.Variable] var_list:\n :param object opt_key: should be in self.optimizer\n :rtype: list[tf.Variable]\n '
res = []
for var in var_list:
(key, _) = self._get_optimizer_item_for_variable(var)
if (key == opt_key):
res.append(var)
return res
def get_slot(self, var, name):
'\n :param tf.Variable var:\n :param str name:\n :rtype: tf.Variable|None\n '
(_, opt) = self._get_optimizer_item_for_variable(var)
return opt.get_slot(var, name)
class _GetGlobalInfo():
def __init__(self, updater, all_vars, var_grads):
'\n :param Updater updater:\n :param list[tf.Variable] all_vars:\n :param dict[tf.Variable,tf.Tensor] var_grads:\n '
self.updater = updater
self.all_vars = all_vars
self.var_grads = var_grads
self.all_grads = list(var_grads.values())
self.vars_by_tag = self._build_vars_by_tag_dict()
self._l2loss_cache = {}
self._global_grad_norm = None
self._global_grad_norm_per_tag = {}
self._maximize_grad_norm_var_grads = None
def _build_vars_by_tag_dict(self):
'\n :return: tag name -> set of vars\n :rtype: dict[str,set[tf.Variable]]\n '
res = {}
for var in self.all_vars:
opts = self.updater._get_updater_opts_from_var(var)
var_tags = opts.get('tags', [])
for tag in var_tags:
res.setdefault(tag, set()).add(var)
return res
def get_l2loss(self, x):
'\n :param tf.Tensor|tf.IndexedSlices x:\n :return: tf.nn.l2_loss(x) (which is l2norm(x)**2 / 2, or sum(x**2) / 2)\n :rtype: tf.Tensor\n '
if (x not in self._l2loss_cache):
with tf_compat.v1.colocate_with(x):
values = x
if isinstance(values, tf.IndexedSlices):
values = values.values
self._l2loss_cache[x] = tf.nn.l2_loss(values)
return self._l2loss_cache[x]
def _global_norm(self, grads):
'\n :param list[tf.Tensor]|set[tf.Tensor] grads:\n :rtype: tf.Tensor\n '
if (not isinstance(grads, (list, tuple))):
grads = sorted(grads, key=(lambda v: v.name))
with tf.name_scope('global_norm'):
half_squared_norms = [self.get_l2loss(grad) for grad in grads if (grad is not None)]
half_squared_norm = tf.reduce_sum(tf.stack(half_squared_norms))
norm = tf.sqrt((half_squared_norm * tf.constant(2.0, dtype=half_squared_norm.dtype)), name='global_norm')
return norm
def get_global_grad_norm(self, tag=None):
'\n :param str|None tag:\n :return: sqrt(sum(t**2 for t in all_grads))\n :rtype: tf.Tensor\n '
if tag:
return self.get_global_grad_norm_per_tag(tag=tag)
if (self._global_grad_norm is None):
self._global_grad_norm = self._global_norm(self.all_grads)
return self._global_grad_norm
def get_global_grad_norm_per_tag(self, tag):
'\n :param str tag:\n :return: sqrt(sum(t**2 for t in grads_of_vars_of_this_tag))\n :rtype: tf.Tensor\n '
if (tag not in self._global_grad_norm_per_tag):
from returnn.tf.util.basic import get_valid_scope_name_from_str
with tf.name_scope(('global_norm_for_tag_%s' % get_valid_scope_name_from_str(tag))):
norm = self._global_norm({self.var_grads[var] for var in self.vars_by_tag[tag]})
if self.updater.config.bool_or_other('debug_grad_summaries', False):
tf_compat.v1.summary.scalar(('global_norm_for_tag_%s' % get_valid_scope_name_from_str(tag)), norm)
self._global_grad_norm_per_tag[tag] = norm
return self._global_grad_norm_per_tag[tag]
def get_maximize_grad_norm_var_grads(self, factor):
'\n :param tf.Tensor|float factor:\n :return: dict: var -> grad\n :rtype: dict[tf.Variable,tf.Tensor]\n '
if (self._maximize_grad_norm_var_grads is None):
loss_ext = (self.get_global_grad_norm() * (- factor))
grads_and_vars_ext = self.updater._compute_gradients(loss_ext, var_list=self.all_vars)
self._maximize_grad_norm_var_grads = {var: grad for (grad, var) in grads_and_vars_ext if (grad is not None)}
return self._maximize_grad_norm_var_grads
def get_maximize_grad_norm_grad(self, factor, var):
'\n :param float|tf.Tensor factor:\n :param tf.Variable var:\n :rtype: tf.Tensor|None\n '
return self.get_maximize_grad_norm_var_grads(factor).get(var, None)
def clip_by_global_norm(self, grad, clip_norm, global_norm_tag=None):
'\n Wraps tf.clip_by_global_norm.\n\n :param tf.Tensor grad:\n :param tf.Tensor|float clip_norm:\n :param str|None global_norm_tag:\n :rtype: tf.Tensor\n '
norm = self.get_global_grad_norm(tag=global_norm_tag)
((grad,), _) = tf.clip_by_global_norm([grad], clip_norm=clip_norm, use_norm=norm)
return grad
def set_zero_on_high_global_norm(self, grad, grad_norm_threshold, global_norm_tag=None):
'\n :param tf.Tensor grad:\n :param float grad_norm_threshold:\n :param str|None global_norm_tag:\n :rtype: tf.Tensor\n '
norm = self.get_global_grad_norm(tag=global_norm_tag)
zero_cond = tf.logical_or(tf_compat.v1.is_nan(norm), tf_compat.v1.is_inf(norm))
zero_cond = tf.logical_or(zero_cond, tf.greater(norm, grad_norm_threshold))
return tf.where(zero_cond, tf.zeros_like(grad), grad)
@classmethod
def _get_updater_opts_from_var(cls, var):
'\n :param tf.Variable var:\n :rtype: returnn.util.basic.CollectionReadCheckCovered\n '
from returnn.util.basic import CollectionReadCheckCovered
updater_opts = getattr(var, 'RETURNN_updater_opts', None)
if (updater_opts is None):
updater_opts = CollectionReadCheckCovered({})
assert isinstance(updater_opts, CollectionReadCheckCovered)
return updater_opts
def _post_process_grad(self, grad, var, global_info):
'\n :param tf.Tensor grad:\n :param tf.Variable var:\n :param Updater._GetGlobalInfo global_info:\n :return: new grad, apply grad opts\n :rtype: (tf.Tensor, dict[str])\n '
updater_opts = self._get_updater_opts_from_var(var)
accum_grad_multiple_num_steps = updater_opts.get('accum_grad_multiple_step', self.config.int('accum_grad_multiple_step', 0))
grad_noise = updater_opts.get('gradient_noise', self.config.float('gradient_noise', 0.0))
grad_clip = updater_opts.get('gradient_clip', self.config.float('gradient_clip', 0.0))
grad_clip_norm = updater_opts.get('gradient_clip_norm', self.config.float('gradient_clip_norm', 0.0))
grad_clip_avg_norm = updater_opts.get('gradient_clip_avg_norm', self.config.float('gradient_clip_avg_norm', 0.0))
grad_clip_global_norm = updater_opts.get('gradient_clip_global_norm', self.config.float('gradient_clip_global_norm', 0.0))
global_norm_tag = updater_opts.get('global_norm_tag', self.config.value('global_norm_tag', None))
grad_clip_global_norm_tag = updater_opts.get('gradient_clip_global_norm_tag', self.config.value('gradient_clip_global_norm_tag', global_norm_tag))
grad_norm_to_clip_to_zero = updater_opts.get('grad_norm_to_clip_to_zero', self.config.float('grad_norm_to_clip_to_zero', 0.0))
maximize_grad_norm = updater_opts.get('maximize_grad_norm', self.config.float('maximize_grad_norm', 0))
if maximize_grad_norm:
grad_ext = global_info.get_maximize_grad_norm_grad(maximize_grad_norm, var)
if (grad_ext is not None):
grad += grad_ext
if (accum_grad_multiple_num_steps is None):
accum_grad_multiple_num_steps = 0
if (accum_grad_multiple_num_steps >= 1):
grad = accum_grad_multiple_step(grad, var, train_step=self.global_train_step, num_accum_steps=accum_grad_multiple_num_steps)
if updater_opts.get('debug_grad_summaries', self.config.bool_or_other('debug_grad_summaries', False)):
from returnn.tf.util.basic import variable_summaries, get_base_name, reuse_name_scope_of_tensor
with reuse_name_scope_of_tensor(grad, prefix='grads/'):
variable_summaries(grad, name=('grad_of_%s' % get_base_name(var)))
with reuse_name_scope_of_tensor(var, prefix='vars/'):
variable_summaries(var, name=get_base_name(var))
if grad_noise:
assert (grad_noise > 0)
from returnn.tf.util.basic import add_scaled_noise_to_gradients
with tf.name_scope('grad_noise'):
((grad, var),) = add_scaled_noise_to_gradients([(grad, var)], grad_noise)
if grad_clip:
assert (grad_clip > 0)
with tf.name_scope('grad_clip'):
grad = tf.clip_by_value(grad, (- grad_clip), grad_clip)
if grad_clip_norm:
assert (grad_clip_norm > 0)
with tf.name_scope('grad_clip_norm'):
grad = tf.clip_by_norm(grad, grad_clip_norm)
if grad_clip_avg_norm:
assert (grad_clip_avg_norm > 0)
with tf.name_scope('grad_clip_avg_norm'):
grad = tf_compat.v1.clip_by_average_norm(grad, grad_clip_avg_norm)
if grad_clip_global_norm:
assert (grad_clip_global_norm > 0)
with tf.name_scope('grad_clip_global_norm'):
grad = global_info.clip_by_global_norm(grad, clip_norm=grad_clip_global_norm, global_norm_tag=grad_clip_global_norm_tag)
if updater_opts.get('gradient_nan_inf_filter', self.config.bool('gradient_nan_inf_filter', False)):
from returnn.tf.util.basic import nan_to_num
grad = nan_to_num(grad, nan_num=0.0, inf_num=0.0)
if grad_norm_to_clip_to_zero:
with tf.name_scope('grad_norm_to_clip_to_zero'):
grad = global_info.set_zero_on_high_global_norm(grad, grad_norm_threshold=grad_norm_to_clip_to_zero, global_norm_tag=global_norm_tag)
updater_opts.assert_all_read()
(opt_key, _) = self._get_optimizer_item_for_variable(var)
apply_grad_opts = {'opt_key': opt_key, 'accum_grad_multiple_num_steps': accum_grad_multiple_num_steps}
return (grad, apply_grad_opts)
def get_apply_grads_op(self, loss, var_list):
'\n :param tf.Tensor loss:\n :param list[tf.Variable] var_list:\n :return: op with all variable updates combined, using the optimizer\n :rtype: tf.Operation\n '
from returnn.util.basic import make_hashable
if (not var_list):
return tf.no_op(name='no_grad_vars_no_op')
grads_and_vars = self._compute_gradients(loss, var_list=var_list)
if self.config.is_true('use_horovod'):
import returnn.tf.horovod
if returnn.tf.horovod.get_ctx().is_reduce_type_grad():
import horovod.tensorflow as hvd
grads_and_vars = [((hvd.allreduce(grad, average=self.config.is_true('horovod_avg_grad')) if (grad is not None) else None), var) for (grad, var) in grads_and_vars]
var_grads = {var: grad for (grad, var) in grads_and_vars if (grad is not None)}
if (not var_grads):
raise Exception('no single variable to train')
global_info = self._GetGlobalInfo(updater=self, all_vars=var_list, var_grads=var_grads)
if self.config.bool_or_other('debug_grad_summaries', False):
tf_compat.v1.summary.scalar('global_grad_norm', global_info.get_global_grad_norm())
grads_per_apply_grad_opts = {}
for (grad, var) in grads_and_vars:
assert (var in var_list)
if (grad is None):
continue
(new_grad, apply_grad_opts) = self._post_process_grad(grad=grad, var=var, global_info=global_info)
grads_per_apply_grad_opts.setdefault(make_hashable(apply_grad_opts), []).append((new_grad, var))
if self.decouple_constraints:
with tf_compat.v1.variable_scope('optimize_constraints'):
with tf_compat.v1.variable_scope('factor'):
factor = (self.learning_rate / float(self.initial_learning_rate))
factor *= self.config.float('decouple_constraints_factor', 0.025)
for (apply_grad_opts, grads_and_vars_per_opts) in grads_per_apply_grad_opts.items():
for (i, (grad, var)) in enumerate(grads_and_vars_per_opts):
assert isinstance(var, tf.Variable)
l2 = getattr(var, 'RETURNN_constraint_L2', None)
if (not l2):
continue
with tf.control_dependencies([grad]):
def _get_apply_constraints_op():
return var.assign_sub((var * (l2 * factor * 2.0)), use_locking=self.use_locking, read_value=False)
accum_grad_multiple_num_steps = apply_grad_opts.get('accum_grad_multiple_num_steps', 0)
if (accum_grad_multiple_num_steps > 1):
apply_constraint = tf.cond(tf.equal(tf_compat.v1.mod(self.global_train_step, accum_grad_multiple_num_steps), (accum_grad_multiple_num_steps - 1)), true_fn=_get_apply_constraints_op, false_fn=tf.no_op, name='apply_decoupled_constraints/accum_grad_multiple_step')
else:
apply_constraint = _get_apply_constraints_op()
with tf.control_dependencies([apply_constraint]):
grad = tf.identity(grad)
grads_and_vars_per_opts[i] = (grad, var)
all_apply_grads = []
assert grads_per_apply_grad_opts
for (apply_grad_opts, grads_and_vars_per_opts) in grads_per_apply_grad_opts.items():
all_apply_grads.append(self._apply_gradients(grads_and_vars_per_opts, **apply_grad_opts))
if (len(all_apply_grads) == 1):
return all_apply_grads[0]
return tf.group(*all_apply_grads)
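# Hedged illustration (not part of the original module): the option names below are the
# keys read in _post_process_grad() above, from the config or per-variable updater opts.
# Their grouping into one flat dict and the chosen values are assumptions for the example.
_example_grad_postprocess_options = {
    'accum_grad_multiple_step': 4,       # accumulate grads over 4 steps before applying
    'gradient_noise': 0.0,               # add scaled noise to each gradient if > 0
    'gradient_clip': 0.0,                # element-wise value clipping if > 0
    'gradient_clip_norm': 0.0,           # per-tensor norm clipping if > 0
    'gradient_clip_global_norm': 5.0,    # clip by the global norm over all grads
    'grad_norm_to_clip_to_zero': 0.0,    # zero all grads if the global norm exceeds this
    'gradient_nan_inf_filter': True,     # replace NaN/Inf gradient values by 0
}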
|
def accum_grad_multiple_step(grad, var, train_step, num_accum_steps):
'\n :param tf.Tensor|tf.IndexedSlices grad:\n :param tf.Variable var:\n :param tf.Tensor train_step: int, scalar\n :param int num_accum_steps:\n :return: modified grad\n :rtype: tf.Tensor\n '
from returnn.tf.util.basic import reuse_name_scope_of_tensor, get_base_name
with reuse_name_scope_of_tensor(grad, postfix=('/%s_accum_grad' % get_base_name(grad))):
shape = var.get_shape().as_list()
v = tf_compat.v1.get_variable(name='var_accum_grad', shape=shape, dtype=grad.dtype, initializer=tf.zeros_initializer(), trainable=False)
return tf.cond(tf.less_equal(tf_compat.v1.mod(train_step, num_accum_steps), 0), (lambda : tf_compat.v1.assign(v, grad)), (lambda : tf_compat.v1.assign_add(v, grad)))
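# Hedged sketch of the accumulation schedule implemented above: at train steps where
# step % num_accum_steps == 0 the accumulator is reset to the current grad, otherwise the
# grad is added on top. This pure-Python mirror is only for illustration.
def _accum_schedule_demo(grads, num_accum_steps):
    acc = 0.0
    history = []
    for step, g in enumerate(grads):
        acc = g if step % num_accum_steps == 0 else acc + g
        history.append(acc)
    return history
# e.g. _accum_schedule_demo([1., 1., 1., 1.], num_accum_steps=2) -> [1.0, 2.0, 1.0, 2.0]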
|
class _KerasOptimizerWrapper(Optimizer):
'\n Wraps a Keras optimizer (:class:`tf.keras.optimizers.Optimizer`) into a standard TF (:class:`tf.compat.v1.train.Optimizer`) optimizer.\n '
@classmethod
def get_factory(cls, keras_class):
'\n :param type[T] keras_class: e.g. tf.keras.optimizers.Nadam\n :return: function (kwargs)->Optimizer\n '
def creator(**kwargs):
'\n Factory.\n :rtype: T\n '
kwargs = kwargs.copy()
kwargs.pop('use_locking', None)
opt = keras_class(**kwargs)
return cls(opt, name=kwargs.get('name', None))
return creator
def __init__(self, optimizer, name=None):
'\n :param tf.keras.optimizers.Optimizer optimizer:\n :param str|None name:\n '
if (not name):
name = optimizer._name
super(_KerasOptimizerWrapper, self).__init__(name=name, use_locking=True)
self.keras_optimizer = optimizer
self._var_list = None
def _create_slots(self, var_list):
self._var_list = var_list
self.keras_optimizer._create_all_weights(var_list)
def _prepare(self):
self.keras_optimizer._prepare(self._var_list)
def _apply_dense(self, grad, var):
return self._resource_apply_dense(grad, var)
def _apply_sparse(self, grad, var):
return self._resource_apply_sparse(grad.values, var, grad.indices)
def _resource_apply_dense(self, grad, handle):
return self.keras_optimizer._resource_apply_dense(grad, handle, None)
def _resource_apply_sparse(self, grad, handle, indices):
return self.keras_optimizer._resource_apply_sparse(grad, handle, indices, None)
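# Hedged usage sketch (comments only; assumes a TF 2.x version where
# tf.keras.optimizers.Nadam is still the legacy OptimizerV2 implementation, and the
# learning rate value is arbitrary): get_factory() returns a creator that builds the
# Keras optimizer and wraps it behind the TF1-style Optimizer interface.
# creator = _KerasOptimizerWrapper.get_factory(tf.keras.optimizers.Nadam)
# wrapped_opt = creator(learning_rate=1e-3)  # drops 'use_locking' before calling Nadam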
|
class BaseCustomOptimizer(Optimizer):
'\n Base class for our own optimizer implementations.\n This simplifies the interface to be implemented a bit, compared to :class:`Optimizer`.\n You just have to implement :func:`_apply` here.\n See :class:`CustomGradientDescentOptimizer` or :class:`CustomAdamOptimizer` as examples.\n '
def __init__(self, learning_rate, use_locking=False, name=None):
'Construct a new optimizer.\n\n Args:\n learning_rate: A Tensor or a floating point value. The learning\n rate to use.\n use_locking: If True use locks for update operations.\n name: Optional name prefix for the operations created when applying\n gradients. Defaults to `self.__class__.__name__`.\n '
if (name is None):
name = self.__class__.__name__
super(BaseCustomOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
def _prepare(self):
self._learning_rate_tensor = tf.convert_to_tensor(self._learning_rate, name='learning_rate')
def _apply(self, grad, var, indices=None):
'\n :param tf.Tensor grad:\n :param tf.Variable|resource_variable_ops.ResourceVariable var:\n :param tf.Tensor|None indices: if this is a sparse update, the indices of the grad values\n :return: update\n :rtype: tf.Tensor|tf.Operation\n '
raise NotImplementedError
def _apply_dense(self, grad, var):
return self._apply(grad=grad, var=var)
def _resource_apply_dense(self, grad, handle):
return self._apply_dense(grad=grad, var=handle)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
return self._apply(grad=grad, var=handle, indices=indices)
def _resource_apply_sparse(self, grad, handle, indices):
return self._resource_apply_sparse_duplicate_indices(grad=grad, handle=handle, indices=indices)
def _apply_sparse_duplicate_indices(self, grad, var):
return self._apply(grad=grad.values, var=var, indices=grad.indices)
def _apply_sparse(self, grad, var):
return self._apply_sparse_duplicate_indices(grad=grad, var=var)
def _assign(self, ref, updates, indices=None):
if (indices is not None):
if isinstance(ref, tf.Variable):
return tf_compat.v1.scatter_update(ref, indices, updates, use_locking=self._use_locking)
elif isinstance(ref, resource_variable_ops.ResourceVariable):
with tf.control_dependencies([resource_variable_ops.resource_scatter_update(ref.handle, indices, updates)]):
return ref.value()
else:
raise TypeError(('did not expect type %r' % type(ref)))
else:
return tf_compat.v1.assign(ref, updates, use_locking=self._use_locking)
def _assign_add(self, ref, updates, indices=None):
if (indices is not None):
if isinstance(ref, tf.Variable):
return tf_compat.v1.scatter_add(ref, indices, updates, use_locking=self._use_locking)
elif isinstance(ref, resource_variable_ops.ResourceVariable):
with tf.control_dependencies([resource_variable_ops.resource_scatter_add(ref.handle, indices, updates)]):
return ref.value()
else:
raise TypeError(('did not expect type %r' % type(ref)))
else:
return tf_compat.v1.assign_add(ref, updates, use_locking=self._use_locking)
def _assign_sub(self, ref, updates, indices=None):
if (indices is not None):
if isinstance(ref, tf.Variable):
return tf_compat.v1.scatter_sub(ref, indices, updates, use_locking=self._use_locking)
elif isinstance(ref, resource_variable_ops.ResourceVariable):
with tf.control_dependencies([resource_variable_ops.resource_scatter_add(ref.handle, indices, (- updates))]):
return ref.value()
else:
raise TypeError(('did not expect type %r' % type(ref)))
else:
return tf_compat.v1.assign_sub(ref, updates, use_locking=self._use_locking)
def _gather(self, dense, indices=None):
'\n This is a simple helper to implement :func:`_apply`.\n\n :param tf.Tensor dense:\n :param tf.Tensor|None indices: if this is a sparse update, the indices of the grad values\n :rtype: tf.Tensor\n '
if (indices is not None):
return tf.gather(dense, indices=indices)
return dense
|
class CustomGradientDescentOptimizer(BaseCustomOptimizer):
'\n Just an example implementation for simple gradient descent.\n '
def _apply(self, grad, var, indices=None):
'\n :param tf.Tensor grad:\n :param tf.Variable|resource_variable_ops.ResourceVariable var:\n :param tf.Tensor|None indices: if this is a sparse update, the indices of the grad values\n :return: update\n :rtype: tf.Tensor|tf.Operation\n '
lr = tf.cast(self._learning_rate_tensor, grad.dtype.base_dtype)
return self._assign_sub(ref=var, updates=(lr * grad), indices=indices).op
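# Hedged usage sketch (assumes TF1-style graph mode; `Optimizer` is the tf.compat.v1
# train Optimizer base class, so minimize() is inherited; the variable and loss are made up):
def _custom_sgd_demo():
    with tf.Graph().as_default(), tf_compat.v1.Session() as session:
        x = tf_compat.v1.get_variable('x', initializer=3.0)
        loss = tf.square(x)
        opt = CustomGradientDescentOptimizer(learning_rate=0.1)
        train_op = opt.minimize(loss)  # ends up calling _apply(grad, var) per variable
        session.run(tf_compat.v1.global_variables_initializer())
        session.run(train_op)  # x <- x - 0.1 * 2 * x = 2.4
        return session.run(x)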
|
class NormalizedSGD(CustomGradientDescentOptimizer):
"\n All grads are L2 normalized (via :func:`tf.nn.l2_normalize`), otherwise it's standard SGD.\n Via: https://github.com/kmkolasinski/deep-learning-notes/tree/master/max-normed-optimizer\n "
def _apply(self, grad, var, indices=None):
'\n :param tf.Tensor grad:\n :param tf.Variable|resource_variable_ops.ResourceVariable var:\n :param tf.Tensor|None indices: if this is a sparse update, the indices of the grad values\n :return: update\n :rtype: tf.Tensor|tf.Operation\n '
return super(NormalizedSGD, self)._apply(grad=tf.nn.l2_normalize(grad, None), var=var, indices=indices)
|
class NeuralOptimizer1(BaseCustomOptimizer):
'\n Via Neural Optimizer Search with Reinforcement Learning (https://proceedings.mlr.press/v70/bello17a/bello17a.pdf).\n\n Similar to the optimizer g * exp(sign(g) * sign(m)) from the paper, we use:\n\n g * where(sign(g) == sign(m), 1.0, decrease_factor)\n\n where m is the running average of g.\n\n Calculation of m: m_t <- beta1 * m_{t-1} + (1 - beta1) * g\n Same beta1 default as in Adam and in the paper: beta1=0.9\n '
def __init__(self, beta1=0.9, decrease_factor=0.1, **kwargs):
'\n :param float beta1: used for the running average of m\n :param float decrease_factor: in the original paper, it is e^-2 ~= 0.135\n '
super(NeuralOptimizer1, self).__init__(**kwargs)
self._beta1 = beta1
self._decrease_factor = decrease_factor
def _prepare(self):
super(NeuralOptimizer1, self)._prepare()
self._beta1_t = tf.convert_to_tensor(self._beta1, name='beta1')
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, 'm', self._name)
def _apply(self, grad, var, indices=None):
lr = tf.cast(self._learning_rate_tensor, var.dtype.base_dtype)
m = self.get_slot(var, 'm')
beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
m_scaled_g_values = (grad * (1 - beta1_t))
m_t = tf_compat.v1.assign(m, (m * beta1_t), use_locking=self._use_locking)
with tf.control_dependencies([m_t]):
m_t = self._assign_add(m, updates=m_scaled_g_values, indices=indices)
m_gathered = self._gather(m_t, indices=indices)
ones = tf.ones_like(grad)
update = ((lr * grad) * tf.where(tf.equal(tf.sign(m_gathered), tf.sign(grad)), ones, (ones * self._decrease_factor)))
var_update = self._assign_sub(ref=var, updates=update, indices=indices)
return tf.group(*[var_update, m_t])
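# Hedged numeric illustration of the update rule above (pure numpy, names made up):
# the step is scaled by 1.0 where the gradient agrees in sign with its running average m,
# and by decrease_factor where it disagrees.
def _neural_opt1_update_demo(g, m, lr=0.1, decrease_factor=0.1):
    import numpy
    g, m = numpy.asarray(g), numpy.asarray(m)
    scale = numpy.where(numpy.sign(g) == numpy.sign(m), 1.0, decrease_factor)
    return lr * g * scale
# e.g. _neural_opt1_update_demo([1.0, -1.0], [0.5, 0.5]) -> [0.1, -0.01]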
|
class GradVarianceScaledOptimizer(BaseCustomOptimizer):
'\n Let m be the running average of g.\n Calculation of m: m_t <- beta1 * m_{t-1} + (1 - beta1) * g\n Same beta1 default as in Adam and in the paper: beta1=0.9\n\n Let v be the running average of the variance of g, i.e. of (g - m)^2.\n '
def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-08, **kwargs):
'\n :param float beta1: used for the running average of g (m)\n :param float beta2: used for the running average of variance of g (v)\n :param float epsilon:\n '
super(GradVarianceScaledOptimizer, self).__init__(**kwargs)
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
def _prepare(self):
super(GradVarianceScaledOptimizer, self)._prepare()
self._beta1_t = tf.convert_to_tensor(self._beta1, name='beta1')
self._beta2_t = tf.convert_to_tensor(self._beta2, name='beta2')
self._epsilon_t = tf.convert_to_tensor(self._epsilon, name='epsilon')
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, 'm', self._name)
self._zeros_slot(v, 'v', self._name)
def _apply(self, grad, var, indices=None):
lr = tf.cast(self._learning_rate_tensor, var.dtype.base_dtype)
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)
m_scaled_g_values = (grad * (1 - beta1_t))
m_t = tf_compat.v1.assign(m, (m * beta1_t), use_locking=self._use_locking)
with tf.control_dependencies([m_t]):
m_t = self._assign_add(m, updates=m_scaled_g_values, indices=indices)
m_gathered = self._gather(m_t, indices=indices)
variance = tf_compat.v1.squared_difference(grad, m_gathered)
v_scaled_new_values = (variance * (1 - beta2_t))
v_t = tf_compat.v1.assign(v, (v * beta2_t), use_locking=self._use_locking)
with tf.control_dependencies([v_t]):
v_t = self._assign_add(v, updates=v_scaled_new_values, indices=indices)
v_gathered = self._gather(v_t, indices=indices)
factor = (v_gathered / (variance + epsilon_t))
update = ((lr * grad) * tf.minimum(factor, 1.0))
var_update = self._assign_sub(ref=var, updates=update, indices=indices)
return tf.group(*[var_update, m_t])
|
class NadamOptimizer(tf_compat.v1.train.AdamOptimizer):
'\n Optimizer that implements the Nadam algorithm.\n See [Dozat, T., 2015](http://cs229.stanford.edu/proj2015/054_report.pdf).\n\n Copied from:\n https://github.com/tensorflow/tensorflow/blob/v1.15.5/tensorflow/contrib/opt/python/training/nadam_optimizer.py\n\n We have this here to have this Nadam variant available in TF 2\n because the Keras Nadam behaves a bit differently.\n https://github.com/rwth-i6/returnn/issues/766\n https://github.com/tensorflow/tensorflow/issues/53204\n\n We can still use this old code because the underlying kernel still supports the ``use_nesterov`` option.\n '
def _apply_dense(self, grad, var):
from tensorflow.python.training import training_ops
from tensorflow.python.ops import math_ops
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
(beta1_power, beta2_power) = self._get_beta_accumulators()
return training_ops.apply_adam(var, m, v, math_ops.cast(beta1_power, var.dtype.base_dtype), math_ops.cast(beta2_power, var.dtype.base_dtype), math_ops.cast(self._lr_t, var.dtype.base_dtype), math_ops.cast(self._beta1_t, var.dtype.base_dtype), math_ops.cast(self._beta2_t, var.dtype.base_dtype), math_ops.cast(self._epsilon_t, var.dtype.base_dtype), grad, use_locking=self._use_locking, use_nesterov=True).op
def _resource_apply_dense(self, grad, var):
from tensorflow.python.training import training_ops
from tensorflow.python.ops import math_ops
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
(beta1_power, beta2_power) = self._get_beta_accumulators()
return training_ops.resource_apply_adam(var.handle, m.handle, v.handle, math_ops.cast(beta1_power, grad.dtype.base_dtype), math_ops.cast(beta2_power, grad.dtype.base_dtype), math_ops.cast(self._lr_t, grad.dtype.base_dtype), math_ops.cast(self._beta1_t, grad.dtype.base_dtype), math_ops.cast(self._beta2_t, grad.dtype.base_dtype), math_ops.cast(self._epsilon_t, grad.dtype.base_dtype), grad, use_locking=self._use_locking, use_nesterov=True)
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
(beta1_power, beta2_power) = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = ((lr_t * math_ops.sqrt((1 - beta2_power))) / (1 - beta1_power))
m = self.get_slot(var, 'm')
m_scaled_g_values = (grad * (1 - beta1_t))
m_t = state_ops.assign(m, (m * beta1_t), use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
m_bar = (m_scaled_g_values + (beta1_t * array_ops.gather(m_t, indices)))
v = self.get_slot(var, 'v')
v_scaled_g_values = ((grad * grad) * (1 - beta2_t))
v_t = state_ops.assign(v, (v * beta2_t), use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
v_t_slice = array_ops.gather(v_t, indices)
v_sqrt = math_ops.sqrt(v_t_slice)
var_update = scatter_add(var, indices, (((- lr) * m_bar) / (v_sqrt + epsilon_t)))
return control_flow_ops.group(*[var_update, m_bar, v_t])
|
class CustomAdamOptimizer(BaseCustomOptimizer):
'\n Reimplementation of Adam.\n See also :class:`tf.compat.v1.train.AdamOptimizer`.\n\n ```\n t <- t + 1\n lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)\n\n m_t <- beta1 * m_{t-1} + (1 - beta1) * g\n v_t <- beta2 * v_{t-1} + (1 - beta2) * g * g\n variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)\n ```\n '
def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-08, **kwargs):
'\n :param float beta1: used for the running average of g (m)\n :param float beta2: used for the running average of g*g (v)\n :param float epsilon:\n '
super(CustomAdamOptimizer, self).__init__(**kwargs)
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
def _prepare(self):
super(CustomAdamOptimizer, self)._prepare()
self._beta1_t = tf.convert_to_tensor(self._beta1, name='beta1')
self._beta2_t = tf.convert_to_tensor(self._beta2, name='beta2')
self._epsilon_t = tf.convert_to_tensor(self._epsilon, name='epsilon')
def _create_slots(self, var_list):
self._beta1_power = tf.Variable(initial_value=self._beta1, name='beta1_power')
self._beta2_power = tf.Variable(initial_value=self._beta2, name='beta2_power')
for v in var_list:
self._zeros_slot(v, 'm', self._name)
self._zeros_slot(v, 'v', self._name)
def _apply(self, grad, var, indices=None):
'\n :param tf.Tensor grad:\n :param tf.Variable|resource_variable_ops.ResourceVariable var:\n :param tf.Tensor|None indices: if this is a sparse update, the indices of the grad values\n :return: update\n :rtype: tf.Tensor|tf.Operation\n '
lr = tf.cast(self._learning_rate_tensor, var.dtype.base_dtype)
beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
lr *= (tf.sqrt((1.0 - self._beta2_power)) / (1.0 - self._beta1_power))
m = self._assign(m, updates=((beta1_t * self._gather(m, indices)) + ((1.0 - beta1_t) * grad)), indices=indices)
v = self._assign(v, updates=((beta2_t * self._gather(v, indices)) + ((1.0 - beta2_t) * (grad * grad))), indices=indices)
update = (lr * (self._gather(m, indices) / (tf.sqrt(self._gather(v, indices)) + epsilon_t)))
var_update = self._assign_sub(ref=var, updates=update, indices=indices)
return tf.group(*[var_update, m, v])
def _finish(self, update_ops, name_scope):
with tf.control_dependencies(update_ops), tf_compat.v1.colocate_with(self._beta1_power):
update_beta1 = self._beta1_power.assign((self._beta1_power * self._beta1_t), use_locking=self._use_locking)
update_beta2 = self._beta2_power.assign((self._beta2_power * self._beta2_t), use_locking=self._use_locking)
return tf.group(*(update_ops + [update_beta1, update_beta2]), name=name_scope)
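# Hedged numeric mirror of the update equations in the CustomAdamOptimizer docstring
# (pure numpy, for illustration only; the function and argument names are made up;
# t is the step count after the increment t <- t + 1):
def _adam_step_demo(theta, g, m, v, t, lr=1e-3, beta1=0.9, beta2=0.999, epsilon=1e-8):
    import numpy
    m = beta1 * m + (1.0 - beta1) * g
    v = beta2 * v + (1.0 - beta2) * g * g
    lr_t = lr * numpy.sqrt(1.0 - beta2 ** t) / (1.0 - beta1 ** t)
    theta = theta - lr_t * m / (numpy.sqrt(v) + epsilon)
    return theta, m, v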
|
class AMSGradOptimizer(Optimizer):
'\n https://colab.research.google.com/notebook#fileId=1xXFAuHM2Ae-OmF5M8Cn9ypGCa_HHBgfG&scrollTo=N1-2wPHN1Otn\n https://openreview.net/pdf?id=ryQu7f-RZ\n https://keras.io/optimizers/\n https://ruder.io/deep-learning-optimization-2017/index.html#fixingtheexponentialmovingaverage\n https://github.com/taki0112/AMSGrad-Tensorflow\n '
def __init__(self, learning_rate=0.001, decay=False, beta1=0.9, beta2=0.99, epsilon=0.0, var_list=()):
super(AMSGradOptimizer, self).__init__(name='AMSGradOptimizer', use_locking=False)
self.learning_rate = learning_rate
self.decay = decay
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.var_list = var_list
self.m = {}
self.v = {}
self.v_hat = {}
self.t = tf.Variable(0.0, trainable=False)
for var in self.var_list:
self.m[var] = tf.Variable(tf.zeros(tf.shape(var.initial_value)), trainable=False)
self.v[var] = tf.Variable(tf.zeros(tf.shape(var.initial_value)), trainable=False)
self.v_hat[var] = tf.Variable(tf.zeros(tf.shape(var.initial_value)), trainable=False)
def apply_gradients(self, gradient_variables):
'\n :param list[(tf.Tensor,tf.Variable)] gradient_variables:\n :rtype: tf.Operation\n '
with tf.control_dependencies([self.t.assign_add(1.0)]):
learning_rate = self.learning_rate
if self.decay:
learning_rate /= tf.sqrt(self.t)
update_ops = []
for (g, var) in gradient_variables:
m = self.m[var].assign(((self.beta1 * self.m[var]) + ((1 - self.beta1) * g)))
v = self.v[var].assign(((self.beta2 * self.v[var]) + (((1 - self.beta2) * g) * g)))
v_hat = self.v_hat[var].assign(tf.maximum(self.v_hat[var], v))
update = (((- learning_rate) * m) / (self.epsilon + tf.sqrt(v_hat)))
update_ops.append(var.assign_add(update))
return tf.group(*update_ops)
def _apply_dense(self, grad, var):
raise NotImplementedError
def _resource_apply_dense(self, grad, handle):
raise NotImplementedError
def _resource_apply_sparse(self, grad, handle, indices):
raise NotImplementedError
def _apply_sparse(self, grad, var):
raise NotImplementedError
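# Hedged usage sketch (graph mode; variable and loss are made up): unlike the other
# optimizers here, this class takes var_list at construction time and exposes
# apply_gradients(grads_and_vars) directly instead of minimize().
def _amsgrad_demo():
    with tf.Graph().as_default(), tf_compat.v1.Session() as session:
        x = tf_compat.v1.get_variable('x', initializer=3.0)
        loss = tf.square(x)
        opt = AMSGradOptimizer(learning_rate=0.1, var_list=[x])
        grads = tf.gradients(loss, [x])
        train_op = opt.apply_gradients(list(zip(grads, [x])))
        session.run(tf_compat.v1.global_variables_initializer())
        session.run(train_op)
        return session.run(x)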
|
def FeatureDim(description, dimension, **kwargs):
'\n DEPRECATED. Use :class:`Dim` instead, and setting the `kind` is not needed anymore.\n\n :param str description:\n :param int|None dimension:\n :rtype: Dim\n '
return Dim(kind=Dim.Types.Feature, description=description, dimension=dimension, **kwargs)
|
def SpatialDim(description, dimension=None, **kwargs):
'\n DEPRECATED. Use :class:`Dim` instead, and setting the `kind` is not needed anymore.\n\n :param str description:\n :param int|None dimension:\n :rtype: Dim\n '
return Dim(kind=Dim.Types.Spatial, description=description, dimension=dimension, **kwargs)
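# Hedged illustration (the variable names are made up): the two deprecated helpers above
# simply forward to Dim with a fixed `kind`.
feature_dim = FeatureDim('feature', 128)  # same as Dim(kind=Dim.Types.Feature, description='feature', dimension=128)
time_dim = SpatialDim('time')             # same as Dim(kind=Dim.Types.Spatial, description='time', dimension=None)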
|
class BatchInfo():
'\n A batched tensor is a tensor with batch dimension,\n i.e. consisting of multiple samples/sequences\n which are supposed to be totally independent of each other.\n\n The batch dim can consist of one or more flattened "virtual" dims,\n which :class:`BatchInfo` keeps track of.\n This class provides some additional information\n about the batch dimension.\n Only one instance per different type of batch-dim is expected\n (i.e. `batch_info1 is batch_info2` <==> same batch info).\n\n When we pass data from the dataset to the network\n (in all cases (training, inference ...) via :class:`Runner` in the TF engine),\n we get a batch dimension due to the minibatch construction.\n This is a global batch size\n (usually dynamic, because every minibatch/step can have a different number of samples,\n although we can also support static sizes, which is needed e.g. for TPUs)\n represented by :class:`BatchInfo.GlobalBatchDim`.\n\n When we do beam search (see :class:`SearchBeam`),\n we have multiple hypotheses per batch item,\n and thus a different batch dimension.\n\n We can also pack arrays (multiple sequences)\n (also referred to as "flattened tensors", "non-padded tensors", "ragged tensors").\n See e.g. :class:`FlattenBatchLayer` or :func:`flatten_with_seq_len_mask`.\n Also see :class:`tf.RaggedTensor` which also represents\n packed tensors (but only batch-major, although this is just a reinterpretation).\n We do not directly use :class:`tf.RaggedTensor` in :class:`Data`\n to have robust and reliable code (which expects :class:`tf.Tensor`).\n However, we may be able to make use of some of the functions in :mod:`tf.ragged`.\n '
class VirtualDimBase(object):
'\n Represents one virtual dim, flattened into the batch dim.\n '
def short_repr(self):
'\n :rtype: str\n '
raise NotImplementedError
def __repr__(self):
return ('%s{%s}' % (self.__class__.__name__, self.short_repr()))
class FixedDim(VirtualDimBase):
'\n Represents a dim with fixed size.\n '
def __init__(self, size: Union[(tf.Tensor, int)], dim_tag: Optional[Dim]=None):
'\n :param size:\n :param dim_tag:\n '
self.size = size
self.dim_tag = dim_tag
def short_repr(self):
'\n :rtype: str\n '
if isinstance(self.size, int):
return ('F(%i)' % self.size)
return 'F(?)'
class GlobalBatchDim(FixedDim):
'\n Represents the global batch dim by the network (minibatch construction from the dataset).\n '
def __init__(self, size: Union[(tf.Tensor, int)], dim_tag: Optional[Dim]=None):
if (not dim_tag):
dim_tag = batch_dim
super().__init__(size=size, dim_tag=dim_tag)
def short_repr(self):
'\n :rtype: str\n '
if (isinstance(self.size, int) and (self.size >= 0)):
return ('B(%i)' % self.size)
return 'B'
class BeamDim(FixedDim):
'\n Represents a search beam.\n '
def __init__(self, beam):
'\n :param SearchBeam beam:\n '
super(BatchInfo.BeamDim, self).__init__(size=beam.beam_size)
self.beam = beam
def short_repr(self):
'\n :rtype: str\n '
return ('Beam{%r}(%s)' % (self.beam.name, self.size))
class PaddedDim(FixedDim):
'\n Represents a dim with variable size, which is flattened with padding (not packed) into the batch.\n '
def __init__(self, dim_tag):
'\n :param Dim dim_tag:\n '
super(BatchInfo.PaddedDim, self).__init__(size=dim_tag.get_dim_value())
self.dim_tag = dim_tag
def short_repr(self):
'\n :rtype: str\n '
return ('Padded{%r}' % self.dim_tag.description)
class PackedDim(VirtualDimBase):
'\n Represents a dim with variable sizes, which is packed (un-padded) into the batch.\n Variable w.r.t. other dims (must be per batch entry).\n '
def __init__(self, dim_tag, key_axes):
'\n :param Dim dim_tag:\n :param list[BatchInfo.VirtualDimBase] key_axes:\n most common case would be [GlobalBatchDim(...)],\n but [GlobalBatchDim(...),BeamDim(...)] is also common.\n '
self.dim_tag = dim_tag
self.key_axes = key_axes
@property
def sizes(self):
'\n :return: shape [B_flat]\n :rtype: tf.Tensor\n '
assert (self.dim_tag.dyn_size is not None)
return self.dim_tag.dyn_size
def short_repr(self):
'\n :rtype: str\n '
return ('Packed{%r}' % (self.dim_tag.description,))
def __init__(self, base, new_dim, new_dim_index=None):
'\n :param BatchInfo|None base:\n If this is extended or based on another batch.\n Except for the batch dim of the dataset minibatch,\n we would always have a base.\n :param BatchInfo.VirtualDimBase|None new_dim:\n :param int|None new_dim_index:\n\n In most cases, this constructor would probably not be used directly by the user.\n '
self.base = base
virtual_dims = (list(base.virtual_dims) if base else [])
if new_dim:
if (new_dim_index is None):
assert (not virtual_dims)
new_dim_index = 0
if (new_dim_index < 0):
assert (new_dim_index == (- 1))
virtual_dims.append(new_dim)
else:
virtual_dims.insert(new_dim_index, new_dim)
self.virtual_dims = virtual_dims
self._dim = None
self.batch_dim_tag: Optional[Dim] = None
if ((not base) and isinstance(new_dim, BatchInfo.GlobalBatchDim)):
self.batch_dim_tag = new_dim.dim_tag
else:
self.batch_dim_tag = Dim(kind=Dim.Types.Batch, description=('batch:%s' % self.short_repr()), batch=self, dimension=self.static_dim)
self._global_beam_dims_by_beam_name = {}
self._global_padded_dims_by_dim_tag = {}
self._packed_dims_by_dim_tag = {}
self.descendants = []
self._descendants_by_beam_name = {}
self._global_descendants_by_virtual_dims = {}
if base:
base.descendants.append(self)
if isinstance(new_dim, BatchInfo.BeamDim):
beam = new_dim.beam
assert (beam.name not in base._descendants_by_beam_name)
base._descendants_by_beam_name[beam.name] = self
global_base = self.get_global_base()
assert (tuple(self.virtual_dims) not in global_base._global_descendants_by_virtual_dims)
global_base._global_descendants_by_virtual_dims[tuple(self.virtual_dims)] = self
@classmethod
def make_global_batch_info(cls, batch_dim):
'\n :param tf.Tensor|int batch_dim:\n :return: global batch info w.r.t. the network / graph\n :rtype: BatchInfo\n '
return BatchInfo(base=None, new_dim=BatchInfo.GlobalBatchDim(size=batch_dim))
_global_broadcast_batch = None
@classmethod
def make_global_broadcast_batch_info(cls):
'\n :return: BatchInfo with no virtual dims, s.t. the dimension is 1 (== prod([])) (broadcastable)\n :rtype: BatchInfo\n '
if cls._global_broadcast_batch:
return cls._global_broadcast_batch
cls._global_broadcast_batch = BatchInfo(base=None, new_dim=None)
return cls._global_broadcast_batch
@classmethod
def get_common_batch_info(cls, batches):
'\n :param list[BatchInfo|None] batches:\n :rtype: BatchInfo|None\n '
if (not batches):
return None
if (len(batches) == 1):
return batches[0]
batches_ = []
for batch in batches:
if (batch and (batch not in batches_)):
batches_.append(batch)
batches = batches_
if (not batches_):
return None
if (len(batches) == 1):
return batches[0]
base = batches[0].get_global_base()
all_virtual_dims = []
for batch in batches:
for dim in batch.virtual_dims:
if (dim not in all_virtual_dims):
same_type_last_idx = None
for (i, dim_) in enumerate(all_virtual_dims):
if (type(dim_) == type(dim)):
same_type_last_idx = i
if (same_type_last_idx is not None):
all_virtual_dims.insert((same_type_last_idx + 1), dim)
else:
all_virtual_dims.append(dim)
for batch in batches:
if (set(batch.virtual_dims) == set(all_virtual_dims)):
return batch
global_batch_dims = [dim for dim in all_virtual_dims if isinstance(dim, BatchInfo.GlobalBatchDim)]
assert (len(global_batch_dims) == 1)
global_batch_dim = global_batch_dims[0]
assert (base.virtual_dims == [global_batch_dim])
beams = [dim for dim in all_virtual_dims if isinstance(dim, BatchInfo.BeamDim)]
if beams:
base = base.copy_extend_with_beam(SearchBeam.get_combined_beam(*(b.beam for b in beams)))
dim_idx = 0
for dim in all_virtual_dims:
if (dim in global_batch_dims):
dim_idx += (1 + len(beams))
continue
if (dim in beams):
continue
base = base._copy_extend_dim(new_dim=dim, new_dim_idx=dim_idx)
dim_idx += 1
return base
def __repr__(self):
return ('BatchInfo{%s}' % ', '.join([dim.short_repr() for dim in self.virtual_dims]))
def short_repr(self):
'\n :rtype: str\n '
return '&'.join(([dim.short_repr() for dim in self.virtual_dims] or ['Bx']))
def __getstate__(self):
raise Exception(('Pickling of BatchInfo is not supported. (%s)' % self))
@property
def dim(self):
'\n :rtype: tf.Tensor|int\n '
if (self._dim is not None):
return self._dim
if (not self.virtual_dims):
return 1
if (len(self.virtual_dims) == 1):
dim = self.virtual_dims[0]
assert isinstance(dim, BatchInfo.FixedDim)
return dim.size
from returnn.tf.util.basic import same_control_flow_ctx, optional_mul
if all((isinstance(dim, BatchInfo.FixedDim) for dim in self.virtual_dims)):
dims = self.virtual_dims
sizes = [dim.size for dim in dims]
with same_control_flow_ctx(sizes):
value = optional_mul(*sizes)
self._dim = value
return value
if all((isinstance(dim, (BatchInfo.PackedDim, BatchInfo.GlobalBatchDim)) for dim in self.virtual_dims)):
dims = [dim for dim in self.virtual_dims if isinstance(dim, BatchInfo.PackedDim)]
if (len(dims) > 1):
raise NotImplementedError(('%s: currently only support one packed dim but have %r' % (self, dims)))
(dim,) = dims
assert isinstance(dim, BatchInfo.PackedDim)
with same_control_flow_ctx(dim.dim_tag.dyn_size_ext.placeholder):
value = tf.reduce_sum(dim.dim_tag.dyn_size_ext.placeholder)
self._dim = value
return value
raise NotImplementedError(('%r.dim()' % self))
@dim.setter
def dim(self, value):
'\n Can only set the global batch dim.\n\n :param tf.Tensor|int value:\n '
assert (len(self.virtual_dims) == 1)
dim = self.virtual_dims[0]
assert isinstance(dim, BatchInfo.GlobalBatchDim)
dim.size = value
if dim.dim_tag:
dim.dim_tag.capacity = dim.dim_tag.size = (value if (isinstance(value, int) and (value > 0)) else None)
self._dim = value
@property
def static_dim(self):
'\n :rtype: int|None\n '
if (self._dim is not None):
return (self._dim if isinstance(self._dim, int) else None)
if (not self.virtual_dims):
return 1
if (len(self.virtual_dims) == 1):
dim = self.virtual_dims[0]
assert isinstance(dim, BatchInfo.FixedDim)
return (dim.size if isinstance(dim.size, int) else None)
from functools import reduce
from operator import mul
if all((isinstance(dim, BatchInfo.FixedDim) for dim in self.virtual_dims)):
dims = self.virtual_dims
sizes = [dim.size for dim in dims]
if all((isinstance(s, int) for s in sizes)):
return reduce(mul, sizes, 1)
return None
return None
@property
def beam(self):
'\n :rtype: SearchBeam|None\n '
beams = [dim for dim in self.virtual_dims if isinstance(dim, BatchInfo.BeamDim)]
if beams:
return beams[0].beam
return None
def get_base_chain(self):
'\n :rtype: list[BatchInfo]\n '
bases = []
base = self.base
while base:
bases.append(base)
base = base.base
return bases
def get_global_base(self):
'\n :rtype: BatchInfo\n '
if (not self.base):
return self
return self.get_base_chain()[(- 1)]
def get_global_batch_dim(self):
'\n :rtype: BatchInfo.GlobalBatchDim\n '
global_beam_dims = [dim for dim in self.virtual_dims if isinstance(dim, BatchInfo.GlobalBatchDim)]
assert (len(global_beam_dims) == 1)
return global_beam_dims[0]
def is_global_batch(self):
'\n :rtype: bool\n '
global_beam_dims = [dim for dim in self.virtual_dims if isinstance(dim, BatchInfo.GlobalBatchDim)]
return ((len(global_beam_dims) == 1) and (len(self.virtual_dims) == 1))
def is_broadcast(self):
'\n :rtype: bool\n '
return (len(self.virtual_dims) == 0)
def _make_beam_dim(self, beam):
'\n :param SearchBeam beam:\n :rtype: BatchInfo.BeamDim\n '
assert self.virtual_dims
root = self.get_global_base()
if (beam.name in root._global_beam_dims_by_beam_name):
return root._global_beam_dims_by_beam_name[beam.name]
new_dim = BatchInfo.BeamDim(beam=beam)
root._global_beam_dims_by_beam_name[beam.name] = new_dim
return new_dim
def _make_packed_dim(self, dim_tag):
'\n :param Dim dim_tag:\n :rtype: BatchInfo.PackedDim\n '
assert self.virtual_dims
assert (dim_tag.dyn_size is not None)
dim_tag_base = dim_tag.get_same_base()
if (dim_tag_base in self._packed_dims_by_dim_tag):
return self._packed_dims_by_dim_tag[dim_tag_base]
new_dim = BatchInfo.PackedDim(dim_tag=dim_tag, key_axes=self.virtual_dims)
self._packed_dims_by_dim_tag[dim_tag_base] = new_dim
return new_dim
def _make_padded_dim(self, dim_tag):
'\n :param Dim dim_tag:\n :rtype: BatchInfo.PaddedDim\n '
assert self.virtual_dims
root = self.get_global_base()
assert (dim_tag.dyn_size is not None)
dim_tag_base = dim_tag.get_for_batch_ctx(self, dim_tag.control_flow_ctx)
if (dim_tag_base in root._global_padded_dims_by_dim_tag):
return root._global_padded_dims_by_dim_tag[dim_tag_base]
new_dim = BatchInfo.PaddedDim(dim_tag=dim_tag_base)
root._global_padded_dims_by_dim_tag[dim_tag_base] = new_dim
return new_dim
def _next_spatial_major_index(self):
idx = None
for (i, dim) in enumerate(self.virtual_dims):
if isinstance(dim, BatchInfo.GlobalBatchDim):
break
if isinstance(dim, BatchInfo.BeamDim):
break
assert isinstance(dim, BatchInfo.FixedDim)
idx = (i + 1)
if (idx is not None):
return idx
return 0
def copy_extend_with_beam(self, beam):
'\n :param SearchBeam beam:\n :rtype: BatchInfo\n '
assert self.virtual_dims
if (self.beam == beam):
return self
if (beam.name in self._descendants_by_beam_name):
return self._descendants_by_beam_name[beam.name]
return BatchInfo(base=self, new_dim=self._make_beam_dim(beam), new_dim_index=(self.virtual_dims.index(self.get_global_batch_dim()) + 1))
def copy_remove_beam(self):
'\n :rtype: BatchInfo\n '
if (not self.beam):
return self
assert self.virtual_dims
root = self.get_global_base()
dims_wo_beam = [dim for dim in self.virtual_dims if (not isinstance(dim, BatchInfo.BeamDim))]
return root._global_descendants_by_virtual_dims[tuple(dims_wo_beam)]
def copy_remove_dim(self, remove_dim):
'\n :param VirtualDimBase remove_dim:\n :rtype: BatchInfo\n '
assert self.virtual_dims
root = self.get_global_base()
dims_wo_dim = [dim for dim in self.virtual_dims if (dim != remove_dim)]
return root._global_descendants_by_virtual_dims[tuple(dims_wo_dim)]
def copy_set_beam(self, beam):
'\n :param SearchBeam|None beam:\n :rtype: BatchInfo\n '
batch = self.copy_remove_beam()
if beam:
batch = batch.copy_extend_with_beam(beam)
return batch
def copy_extend_with_packed_dim_tag(self, dim_tag, batch_major):
'\n :param Dim dim_tag:\n :param bool batch_major: if True, add new dim in front. otherwise, add new dim at the end\n :rtype: BatchInfo\n '
new_dim = self._make_packed_dim(dim_tag)
new_dim_idx = ((- 1) if batch_major else self._next_spatial_major_index())
return self._copy_extend_dim(new_dim=new_dim, new_dim_idx=new_dim_idx)
def copy_extend_with_padded_dim_tag(self, dim_tag, batch_major=None, new_dim_idx=None):
'\n :param Dim dim_tag:\n :param bool|None batch_major: if True, add new dim in front. otherwise, add new dim at the end\n :param int|None new_dim_idx:\n :rtype: BatchInfo\n '
new_dim = self._make_padded_dim(dim_tag)
if (new_dim_idx is None):
assert (batch_major is not None)
new_dim_idx = ((- 1) if batch_major else self._next_spatial_major_index())
else:
assert (batch_major is None)
return self._copy_extend_dim(new_dim=new_dim, new_dim_idx=new_dim_idx)
def copy_extend_with_padded_or_fixed_dim_tag(self, dim_tag, batch_major=None, new_dim_idx=None):
'\n :param Dim dim_tag:\n :param bool|None batch_major: if True, add new dim in front. otherwise, add new dim at the end\n :param int|None new_dim_idx:\n :rtype: BatchInfo\n '
if (dim_tag.dyn_size is not None):
new_dim = self._make_padded_dim(dim_tag)
else:
new_dim = BatchInfo.FixedDim(size=dim_tag.get_dim_value(), dim_tag=dim_tag)
if (new_dim_idx is None):
assert (batch_major is not None)
new_dim_idx = ((- 1) if batch_major else self._next_spatial_major_index())
else:
assert (batch_major is None)
return self._copy_extend_dim(new_dim=new_dim, new_dim_idx=new_dim_idx)
def _copy_extend_dim(self, new_dim, new_dim_idx):
'\n :param BatchInfo.VirtualDimBase new_dim:\n :param int new_dim_idx:\n :rtype: BatchInfo\n '
assert self.virtual_dims
root = self.get_global_base()
virtual_dims = list(self.virtual_dims)
if (new_dim_idx < 0):
assert (new_dim_idx == (- 1))
virtual_dims.append(new_dim)
else:
virtual_dims.insert(new_dim_idx, new_dim)
if (tuple(virtual_dims) in root._global_descendants_by_virtual_dims):
return root._global_descendants_by_virtual_dims[tuple(virtual_dims)]
return BatchInfo(base=self, new_dim=new_dim, new_dim_index=new_dim_idx)
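# Hedged usage sketch (comments only; the beam name and sizes are made up): a global
# batch info is created from the minibatch batch dim, and a beam-search batch is derived
# from it via copy_extend_with_beam(); the virtual dims then list the global batch dim
# followed by the beam dim, and `dim` is their product.
# base = BatchInfo.make_global_batch_info(batch_dim=8)
# beam_batch = base.copy_extend_with_beam(SearchBeam(beam_size=12, name='dec/beam'))
# beam_batch.virtual_dims  # -> [GlobalBatchDim{B(8)}, BeamDim{'dec/beam'}(12)]
# beam_batch.dim           # -> 96 (8 * 12)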
|
class SearchBeam():
'\n Represents info about the beam from some beam search (e.g. via :func:`beam_search`),\n e.g. such as the beam size, but also the dependencies.\n This is somewhat parallel to :class:`SearchChoices`, but simpler,\n and independent from the layers/network (:class:`returnn.tf.layers.base.LayerBase`).\n '
def __init__(self, beam_size, dependency=NotSpecified, name=None, _next_frame=None):
'\n :param int beam_size:\n :param SearchBeam|NotSpecified|None dependency:\n :param str|None name:\n :param SearchBeam|None _next_frame:\n '
if isinstance(dependency, SearchBeam):
assert (name and dependency.name and (name != dependency.name))
if (name and os.path.basename(name).startswith('prev:')):
assert _next_frame
self.beam_size = beam_size
self.dependency = dependency
self.name = name
self._next_frame = _next_frame
def copy_as_prev_frame(self):
'\n :rtype: SearchBeam\n '
if self._next_frame:
return self
assert self.name
name = ('%s/prev:%s' % (os.path.dirname(self.name), os.path.basename(self.name)))
return SearchBeam(beam_size=self.beam_size, name=name, _next_frame=self)
def __repr__(self):
keys = ['name', 'beam_size']
if (self.dependency is not NotSpecified):
keys.append('dependency')
return ('%s(%s)' % (self.__class__.__name__, ', '.join([('%s=%r' % (key, getattr(self, key))) for key in keys])))
def __eq__(self, other):
'\n :param SearchBeam|object|None other:\n :rtype: bool\n '
if (self is other):
return True
if ((self is None) or (other is None)):
return False
if ((not isinstance(self, SearchBeam)) or (not isinstance(other, SearchBeam))):
return False
if ((self.name is None) or (other.name is None)):
return False
return (self.name == other.name)
def __ne__(self, other):
'\n :param SearchBeam|object|None other:\n :rtype: bool\n '
return (not (self == other))
def __hash__(self):
return hash(self.name)
def _get_dependency_list(self):
'\n :return: list as far as it is defined\n :rtype: list[SearchBeam]\n '
ls = [self]
while isinstance(ls[(- 1)].dependency, SearchBeam):
ls.append(ls[(- 1)].dependency)
return ls
@classmethod
def get_combined_beam(cls, beam1, beam2=None, *beams):
'\n Combines beams.\n This will throw an exception if they cannot be combined.\n Note that in beam search (see :class:`SearchChoices`),\n the logic to combine beams from different search choices\n happens in a generic way for all layers automatically\n via :func:`TFNetwork._create_layer_layer_desc`,\n so normally we already have the same beam.\n Unless we are at template construction.\n\n :param SearchBeam|None beam1:\n :param SearchBeam|None beam2:\n :param SearchBeam|None beams:\n :rtype: SearchBeam|None\n '
if beams:
beam12 = cls.get_combined_beam(beam1, beam2)
return cls.get_combined_beam(beam12, beams[0], *beams[1:])
if (beam2 is None):
return beam1
if (beam1 is None):
return beam2
if (beam1 == beam2):
if (beam2.dependency is NotSpecified):
return beam1
if (beam1.dependency is NotSpecified):
return beam2
return beam1
assert (beam1.name and beam2.name)
if (beam2._next_frame and (not beam1._next_frame)):
return beam1
if (beam1._next_frame and (not beam2._next_frame)):
return beam2
b1 = beam1
b2 = beam2
used_next_frame = False
if (b1._next_frame and b2._next_frame):
b1 = b1._next_frame
b2 = b2._next_frame
used_next_frame = True
l1 = b1._get_dependency_list()
l2 = b2._get_dependency_list()
if (b2 in l1):
return beam1
if (b1 in l2):
return beam2
if used_next_frame:
if (beam1 in l2):
return beam1
if (beam2 in l1):
return beam2
raise Exception('\n'.join(['Cannot combine beams:', (' 1: %s (deps: %s, next %s, next deps %s)' % (beam1, beam1._get_dependency_list(), beam1._next_frame, (beam1._next_frame._get_dependency_list() if beam1._next_frame else None))), (' 2: %s (deps: %s, next %s, next deps %s)' % (beam2, beam2._get_dependency_list(), beam2._next_frame, (beam2._next_frame._get_dependency_list() if beam2._next_frame else None)))]))
|
@contextlib.contextmanager
def gradient_checkpoint_scope():
'\n :return: context manager, where all tensors created inside the scope will be recomputed at backprop time,\n based on existing tensors which have been created earlier outside the scope.\n\n If prepare_gradient_checkpointing() is not called later, this does not have any effect.\n If no gradients are being calculated, this also does not have any effect.\n '
graph = tf_compat.v1.get_default_graph()
first_op_id = (graph._last_id + 1)
(yield)
last_op_id = (graph._last_id + 1)
assert (last_op_id > first_op_id)
for op_id in range(first_op_id, last_op_id):
op = graph._nodes_by_id[op_id]
if getattr(op, '_RETURNN_gradient_checkpoint_exclude', False):
continue
op._RETURNN_gradient_checkpoint_first_op_id = first_op_id
_grad_checkpoints.setdefault(graph, []).append((first_op_id, last_op_id))
|
@contextlib.contextmanager
def gradient_checkpoint_exclude_scope():
'\n :return: context manager, where all tensors created inside the scope will be excluded\n for recomputation at backprop time.\n '
graph = tf_compat.v1.get_default_graph()
first_op_id = (graph._last_id + 1)
(yield)
last_op_id = (graph._last_id + 1)
assert (last_op_id >= first_op_id)
for op_id in range(first_op_id, last_op_id):
op = graph._nodes_by_id[op_id]
if (getattr(op, '_RETURNN_gradient_checkpoint_first_op_id', None) is not None):
continue
op._RETURNN_gradient_checkpoint_exclude = True
|
def prepare_gradient_checkpointing():
'\n Call this after the computation graph for calculating the model + loss has been created,\n before the gradients are calculated (before tf.gradients is called).\n\n This will create a copy of all the ops from within the gradient_checkpoint_scope() scope.\n\n This patches the op._gradient_function of all consuming ops\n to use the copied ops instead.\n So effectively, for backpropagation, it will recalculate all such tensors.\n '
from tensorflow.python.framework import ops
copied_ops = {}
def _copy_op(op: tf.Operation) -> tf.Operation:
if (op in copied_ops):
return copied_ops[op]
new_inputs = []
for x in op.inputs:
x = _map_tensor(x)
new_inputs.append(x)
with tf_util.same_control_flow_ctx(op.outputs[0]), tf.name_scope(''):
new_op = tf_util.copy_op(op, inputs=new_inputs, name=op.name)
_set_wrapped_grad_func(new_op)
copied_ops[op] = new_op
return new_op
def _map_tensor(x: tf.Tensor) -> tf.Tensor:
assert isinstance(x, tf.Tensor)
if (getattr(x.op, '_RETURNN_gradient_checkpoint_first_op_id', None) is not None):
x_op_copy = _copy_op(x.op)
x = x_op_copy.outputs[x.value_index]
return x
def _set_wrapped_grad_func(op: tf.Operation):
if (getattr(op, '_RETURNN_gradient_checkpoint_wrapped_grad_func', None) is not None):
return
try:
orig_grad_func = ops.get_gradient_function(op)
except LookupError:
return
if (orig_grad_func is None):
return
class _WrappedOp():
def __init__(self):
self.op = op
self._inputs = tuple((_map_tensor(x) for x in op.inputs))
self._outputs = tuple((_map_tensor(x) for x in op.outputs))
@property
def inputs(self) -> Tuple[(tf.Tensor, ...)]:
'inputs'
return self._inputs
@property
def outputs(self) -> Tuple[(tf.Tensor, ...)]:
'outputs'
return self._outputs
def get_attr(self, name: str):
'get_attr'
return self.op.get_attr(name)
_wrapped_op = _WrappedOp()
def _wrapped_grad_func(op, *out_grads):
assert (op is _wrapped_op.op)
return orig_grad_func(_wrapped_op, *out_grads)
op._gradient_function = _wrapped_grad_func
op._RETURNN_gradient_checkpoint_wrapped_grad_func = _wrapped_grad_func
for (graph, ls) in _grad_checkpoints.items():
for (first_op_id, last_op_id) in ls:
for op_id in range(first_op_id, last_op_id):
op = graph._nodes_by_id[op_id]
assert isinstance(op, tf.Operation)
if getattr(op, '_RETURNN_gradient_checkpoint_exclude', False):
continue
_copy_op(op)
for op_in in op.inputs:
assert isinstance(op_in, tf.Tensor)
_set_wrapped_grad_func(op_in.op)
for op_out in op.outputs:
assert isinstance(op_out, tf.Tensor)
for op_ in op_out.consumers():
assert isinstance(op_, tf.Operation)
_set_wrapped_grad_func(op_)
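# Hedged usage sketch (comments only; expensive_intermediate, some_loss, x and params are
# made-up names): ops created inside gradient_checkpoint_scope() are recorded, and once the
# full forward graph is built, prepare_gradient_checkpointing() patches the gradient
# functions of the consuming ops so those tensors are recomputed during backprop instead of
# being kept in memory.
# with gradient_checkpoint_scope():
#     hidden = expensive_intermediate(x)   # recomputed at backprop time
# loss = some_loss(hidden)
# prepare_gradient_checkpointing()         # call before tf.gradients / minimize
# grads = tf.gradients(loss, params)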
|
def kenlm_checked_out():
'\n :return: whether the KenLM submodule is checked out\n :rtype: bool\n '
return os.path.exists(('%s/lm/test.arpa' % kenlm_dir))
|
def get_tf_mod(verbose=False):
'\n :param bool verbose:\n :return: module\n '
global _tf_mod
if _tf_mod:
return _tf_mod
import platform
from glob import glob
from returnn.tf.util.basic import OpCodeCompiler
assert kenlm_checked_out(), ('submodule in %r not checked out?' % kenlm_dir)
files = glob(('%s/util/*.cc' % kenlm_dir))
files += glob(('%s/lm/*.cc' % kenlm_dir))
files += glob(('%s/util/double-conversion/*.cc' % kenlm_dir))
files = [fn for fn in files if (not (fn.endswith('main.cc') or fn.endswith('test.cc')))]
assert files, ('submodule in %r not checked out?' % kenlm_dir)
libs = ['z']
if (platform.system() != 'Darwin'):
libs.append('rt')
src_code = ''
src_code += _kenlm_src_code_workarounds
for fn in files:
f_code = open(fn).read()
f_code = ''.join([x for x in f_code if (ord(x) < 128)])
fn_short = os.path.basename(fn).replace('.', '_')
for word in ['kConverter']:
f_code = f_code.replace(word, ('%s_%s' % (fn_short, word)))
src_code += ('\n// ------------ %s : BEGIN { ------------\n' % os.path.basename(fn))
src_code += ('#line 1 "%s"\n' % os.path.basename(fn))
src_code += f_code
src_code += ('\n// ------------ %s : END } --------------\n\n' % os.path.basename(fn))
src_code += '\n\n// ------------ our code now: ------------\n\n'
src_code += '#line 1 "<our code>"\n'
src_code += _src_code
compiler = OpCodeCompiler(base_name='KenLM', code_version=1, code=src_code, include_paths=(kenlm_dir, (kenlm_dir + '/util/double-conversion')), c_macro_defines={'NDEBUG': 1, 'KENLM_MAX_ORDER': 6, 'HAVE_ZLIB': 1}, ld_flags=[('-l%s' % lib) for lib in libs], is_cpp=True, use_cuda_if_available=False, verbose=verbose)
tf_mod = compiler.load_tf_module()
assert hasattr(tf_mod, 'ken_lm_abs_score_strings'), ('content of mod: %r' % (dir(tf_mod),))
_tf_mod = tf_mod
return tf_mod
|
def ken_lm_load(filename):
'\n :param str filename:\n :return: TF resource handle\n :rtype: tf.Tensor\n '
return get_tf_mod().ken_lm_load_model(filename=filename)
|
def ken_lm_abs_score_strings(handle, strings):
'\n :param tf.Tensor handle: TF resource handle returned by :func:`ken_lm_load`\n :param tf.Tensor strings: strings which are being scored. White-space delimited words.\n :return: same shape as `strings`, float32\n :rtype: tf.Tensor\n '
return get_tf_mod().ken_lm_abs_score_strings(handle=handle, strings=strings)
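# Hedged usage sketch (comments only; requires the KenLM submodule to be checked out so
# get_tf_mod() can compile the native op; the ARPA file path is a made-up placeholder):
# lm = ken_lm_load('my_lm.arpa')                  # TF resource handle
# scores = ken_lm_abs_score_strings(lm, tf.constant(['hello world', 'foo bar']))
# with tf.compat.v1.Session() as session:
#     print(session.run(scores))                  # float32, same shape as the strings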
|
def ken_lm_abs_score_bpe_strings(handle, bpe_merge_symbol, strings):
'\n :param tf.Tensor handle: TF resource handle returned by :func:`ken_lm_load`\n :param str bpe_merge_symbol: e.g. "@@"\n :param tf.Tensor strings: strings which are being scored. White-space delimited words.\n :return: same shape as `strings`, float32\n :rtype: tf.Tensor\n '
return get_tf_mod().ken_lm_abs_score_bpe_strings(handle=handle, bpe_merge_symbol=bpe_merge_symbol, strings=strings)
|
def ken_lm_abs_score_bpe_strings_dense(handle, bpe_merge_symbol, strings, labels):
'\n :param tf.Tensor handle: TF resource handle returned by :func:`ken_lm_load`\n :param str bpe_merge_symbol: e.g. "@@"\n :param tf.Tensor strings: strings which are being scored. White-space delimited words.\n :param tf.Tensor|tf.Variable labels:\n :return: same shape as `strings`, float32\n :rtype: tf.Tensor\n '
return get_tf_mod().ken_lm_abs_score_bpe_strings_dense(handle=handle, bpe_merge_symbol=bpe_merge_symbol, strings=strings, labels=labels)
|
def get_fst(filename):
'\n :param str filename: to OpenFst file\n :return: TF resource handle representing the FST\n :rtype: tf.Tensor\n '
return get_tf_mod().open_fst_load(filename=filename)
|
def fst_transition(fst_handle, states, inputs):
'\n :param tf.Tensor fst_handle: via :func:`get_fst`\n :param tf.Tensor states: [batch], int32\n :param tf.Tensor inputs: [batch], int32\n :return: (next_states, output_labels, weights). next_states can be -1 if invalid. all are shape [batch].\n :rtype: (tf.Tensor,tf.Tensor,tf.Tensor)\n '
return get_tf_mod().open_fst_transition(handle=fst_handle, states=states, inputs=inputs)
|
def openfst_checked_out():
'\n :return: whether the Git submodule is checked out\n :rtype: bool\n '
return os.path.exists(('%s/src/include/fst/fst.h' % openfst_dir))
|
def get_tf_mod(verbose=False):
'\n :param bool verbose:\n :return: module\n '
global _tf_mod
if _tf_mod:
return _tf_mod
import platform
from glob import glob
from returnn.tf.util.basic import OpCodeCompiler
assert openfst_checked_out(), ('submodule in %r not checked out?' % openfst_dir)
files = glob(('%s/src/lib/*.cc' % openfst_dir))
assert files, ('submodule in %r not checked out?' % openfst_dir)
files = sorted(files)
libs = []
if (platform.system() != 'Darwin'):
libs.append('rt')
src_code = ''
for fn in files:
f_code = open(fn).read()
f_code = ''.join([x for x in f_code if (ord(x) < 128)])
src_code += ('\n// ------------ %s : BEGIN { ------------\n' % os.path.basename(fn))
src_code += ('#line 1 "%s"\n' % os.path.basename(fn))
src_code += f_code
src_code += ('\n// ------------ %s : END } --------------\n\n' % os.path.basename(fn))
src_code += '\n\n// ------------ our code now: ------------\n\n'
src_code += '#line 1 "returnn/tf/util/open_fst.py:_src_code"\n'
src_code += _src_code
compiler = OpCodeCompiler(base_name='OpenFst', code_version=1, code=src_code, include_paths=(('%s/src/include' % openfst_dir),), c_macro_defines={'NDEBUG': 1}, ld_flags=[('-l%s' % lib) for lib in libs], is_cpp=True, use_cuda_if_available=False, verbose=verbose)
tf_mod = compiler.load_tf_module()
assert hasattr(tf_mod, 'open_fst_transition'), ('content of mod: %r' % (dir(tf_mod),))
_tf_mod = tf_mod
return tf_mod
|
def _demo():
def _make_int_list(s):
'\n :param str s:\n :rtype: list[int]\n '
return [int(s_) for s_ in s.split(',')]
from returnn.util import better_exchook
better_exchook.install()
from argparse import ArgumentParser
arg_parser = ArgumentParser()
arg_parser.add_argument('--states', type=_make_int_list, default=[0])
arg_parser.add_argument('--inputs', type=_make_int_list, default=[0])
arg_parser.add_argument('--fst', default=(returnn_dir + '/tests/lexicon_opt.fst'))
args = arg_parser.parse_args()
get_tf_mod(verbose=True)
assert os.path.exists(args.fst)
fst_tf = get_fst(filename=args.fst)
states_tf = tf.compat.v1.placeholder(tf.int32, [None])
inputs_tf = tf.compat.v1.placeholder(tf.int32, [None])
output_tf = fst_transition(fst_handle=fst_tf, states=states_tf, inputs=inputs_tf)
with tf.compat.v1.Session() as session:
(out_next_states, out_labels, out_scores) = session.run(output_tf, feed_dict={states_tf: args.states, inputs_tf: args.inputs})
print('states:', args.states)
print('inputs:', args.inputs)
print('output next states:', out_next_states)
print('output labels:', out_labels)
print('output scores:', out_scores)
|
def extern_data_template_from_config_opts(extern_data_dict: Dict[(str, Any)]) -> TensorDict:
'\n :param extern_data_dict: as you would specify in the config\n :return: extern data tensor dict\n '
extern_data = TensorDict()
extern_data.update(extern_data_dict, auto_convert=True)
if ('seq_tag' not in extern_data.data):
batch_dim = get_batch_dim_from_extern_data(extern_data)
extern_data.data['seq_tag'] = Tensor(name='seq_tag', dtype='string', dims=[batch_dim])
return extern_data
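# Illustrative usage sketch (hypothetical dims and keys, assuming the dims-based
# extern_data config style); shows that a "seq_tag" entry is added automatically:
def _demo_extern_data_template():
    batch_dim_ = Dim(None, name='batch')
    time_dim_ = Dim(None, name='time')
    feature_dim_ = Dim(40, name='feature')
    extern_data_dict = {
        'data': {'dims': [batch_dim_, time_dim_, feature_dim_], 'dtype': 'float32'},
        'classes': {'dims': [batch_dim_, time_dim_], 'dtype': 'int32'},
    }
    template = extern_data_template_from_config_opts(extern_data_dict)
    assert 'seq_tag' in template.data and template.data['seq_tag'].dims == (batch_dim_,)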
|
def raw_dict_to_extern_data(extern_data_raw: Dict[(str, Union[(torch.Tensor, numpy.ndarray)])], *, extern_data_template: TensorDict, device: Union[(str, torch.device)]) -> TensorDict:
'\n :param extern_data_raw: This comes out of the DataLoader.\n :param extern_data_template: Specified via `extern_data` in the config.\n :param device: E.g. the GPU.\n :return: tensor dict, like extern_data_template, but with raw tensors set to Torch tensors, on the right device.\n '
assert (isinstance(extern_data_raw, dict) and extern_data_raw)
batch_dim = get_batch_dim_from_extern_data(extern_data_template)
for dim in _get_dyn_dims_from_extern_data(extern_data_template):
dim.reset_eager()
if ((batch_dim.size is None) and (batch_dim.dyn_size_ext is None)):
batch_dim.dyn_size_ext = Tensor((batch_dim.name or 'batch'), dims=[], dtype='int32')
extern_data = TensorDict()
for (k, data) in extern_data_template.data.items():
data = data.copy_template()
raw_tensor = extern_data_raw[k]
assert (len(raw_tensor.shape) == data.batch_ndim), f'ndim mismatch for {k}: {raw_tensor.shape} vs {data}'
for (i, dim) in enumerate(data.dims):
if (dim.dimension is not None):
assert (dim.dimension == raw_tensor.shape[i]), f'shape mismatch for {k}: {raw_tensor.shape} vs {data.batch_shape}'
if isinstance(raw_tensor, torch.Tensor):
data.dtype = str(raw_tensor.dtype).split('.')[(- 1)]
data.raw_tensor = raw_tensor.to(device)
elif isinstance(raw_tensor, numpy.ndarray):
data.raw_tensor = raw_tensor
else:
raise TypeError(f'Unexpected type {type(raw_tensor)} for {k} in extern_data_raw.')
if (batch_dim.dyn_size_ext and (batch_dim.dyn_size_ext.raw_tensor is None)):
batch_dim.dyn_size_ext.raw_tensor = torch.tensor(extern_data_raw[k].shape[0], dtype=torch.int32)
if ((len(data.dims) >= 2) and (data.dims[1].size is None) and ((not data.dims[1].dyn_size_ext) or (data.dims[1].dyn_size_ext.raw_tensor is None))):
assert ((k + ':seq_len') in extern_data_raw), f'extern_data {data}, dyn spatial dim, missing {k}:seq_len in raw dict, check dataset or collate_batch'
size = extern_data_raw[(k + ':seq_len')]
assert (size.device.type == 'cpu')
size_dtype = str(size.dtype).split('.')[(- 1)]
if (data.dims[1].dyn_size_ext is None):
data.dims[1].dyn_size_ext = Tensor((data.dims[1].name or 'time'), dims=[batch_dim], dtype=size_dtype)
data.dims[1].dyn_size_ext.dtype = size_dtype
data.dims[1].dyn_size_ext.raw_tensor = size
extern_data.data[k] = data
return extern_data
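# Illustrative sketch (hypothetical shapes): the raw dict is what collate_batch()
# produces, i.e. padded torch tensors plus "<key>:seq_len" length tensors, and it is
# mapped onto the extern_data template, filling in the dynamic dim sizes:
def _demo_raw_dict_to_extern_data():
    batch_dim_ = Dim(None, name='batch')
    time_dim_ = Dim(None, name='time')
    feature_dim_ = Dim(3, name='feature')
    template = TensorDict()
    template.update({'data': {'dims': [batch_dim_, time_dim_, feature_dim_], 'dtype': 'float32'}}, auto_convert=True)
    raw = {
        'data': torch.zeros(2, 5, 3),  # [batch, time, feature], already padded
        'data:seq_len': torch.tensor([5, 3], dtype=torch.int32),  # per-sequence lengths, on CPU
    }
    extern_data = raw_dict_to_extern_data(raw, extern_data_template=template, device='cpu')
    assert extern_data.data['data'].dims[1].dyn_size_ext.raw_tensor.tolist() == [5, 3]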
|
def _get_dyn_dims_from_extern_data(extern_data: TensorDict) -> List[Dim]:
visited = set()
res = []
for (k, v) in extern_data.data.items():
for dim in v.dims:
if ((dim not in visited) and (dim.size is None)):
visited.add(dim)
res.append(dim)
return res
|
def get_batch_dim_from_extern_data(extern_data: TensorDict) -> Dim:
'\n We expect that the batch dim is the first dim in any of the tensors.\n See collate_batch.\n\n We allow that the batch dim is not necessarily the global batch_dim object.\n We also allow that this is not even marked as batch dim (is_batch_dim() can be False).\n '
batch_dim = next(iter(extern_data.data.values())).dims[0]
return batch_dim
|
def create_tensor(array: numpy.ndarray) -> Union[(torch.Tensor, numpy.ndarray)]:
'\n Adjust non-supported dtypes\n\n :param array: numpy array to be converted\n '
if (array.dtype.kind in 'UO'):
return array
if (array.dtype == numpy.uint32):
array = numpy.asarray(array, dtype=numpy.int64)
return torch.tensor(array)
|
def collate_batch(batch: List[Dict[(str, numpy.ndarray)]]) -> Dict[(str, Union[(torch.Tensor, numpy.ndarray)])]:
'\n :param batch:\n '
assert isinstance(batch, list)
assert batch, 'batch is empty?'
assert isinstance(batch[0], dict)
data_keys = list(batch[0].keys())
res = {}
for key in data_keys:
ls = [create_tensor(sample[key]) for sample in batch]
if (not ls):
raise ValueError('batch is empty?')
if isinstance(ls[0], torch.Tensor):
if (ls[0].ndim > 0):
padded = torch.nn.utils.rnn.pad_sequence(ls, batch_first=True, padding_value=0)
res[key] = padded
res[('%s:seq_len' % key)] = torch.tensor([v.shape[0] for v in ls], dtype=torch.int32)
else:
res[key] = torch.stack(ls, dim=0)
elif isinstance(ls[0], numpy.ndarray):
padded = numpy.stack(ls, axis=0)
res[key] = padded
return res
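# Illustrative sketch of the collate behavior (hypothetical data): variable-length
# sequences are zero-padded along the first axis and a "<key>:seq_len" int32 tensor
# with the original lengths is added per key:
def _demo_collate_batch():
    batch = [
        {'data': numpy.ones((5, 3), dtype=numpy.float32), 'classes': numpy.array([1, 2, 3])},
        {'data': numpy.ones((3, 3), dtype=numpy.float32), 'classes': numpy.array([4])},
    ]
    res = collate_batch(batch)
    assert res['data'].shape == (2, 5, 3)  # padded to the longest sequence in the batch
    assert res['data:seq_len'].tolist() == [5, 3]
    assert res['classes'].shape == (2, 3) and res['classes:seq_len'].tolist() == [3, 1]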
|
class ChunkingIterDataPipe(torch.utils.data.IterDataPipe):
"\n Splits each sequence in the given dataset into chunks according to the 'chunking' config option.\n So it transforms one sequences into multiple sequences.\n "
def __init__(self, dataset: torch.utils.data.IterableDataset, chunking, *, min_chunk_size=0):
'\n :param dataset: dataset to apply chunking to\n :param None|int|(int,int)|dict|(dict,dict) chunking: tuple (chunk_size, chunk_step).\n If given as a single value,\n the value will be used for both.\n Both chunk_size and chunk_step can be given as a dict data_key -> size/step.\n This can be used to apply chunking to only a subset of all data keys,\n or to use different chunking for different\n data keys.\n (The number of resulting chunks has to match though for all given data keys, i.e. sequence lengths\n have to be considered.)\n '
super().__init__()
from returnn.datasets.basic import Dataset as ReturnnDataset
self._dataset = dataset
(self._chunk_size, self._chunk_step, custom_chunk_func) = ReturnnDataset._parse_chunking(chunking)
self._min_chunk_size = NumbersDict(min_chunk_size)
assert (not custom_chunk_func), f'Custom chunking function not supported, {chunking!r}'
def __iter__(self):
'\n :return: generator providing chunks in the form of a dict data_key -> data chunk\n :rtype: Iterable[dict[str, numpy.ndarray]]\n '
chunking_data_keys = list(self._chunk_size.keys())
for data_dict in self._dataset:
if (not chunking_data_keys):
chunking_data_keys = list(data_dict.keys())
chunking_data_key_black_list = ['seq_tag']
for key in chunking_data_key_black_list:
if (key in chunking_data_keys):
chunking_data_keys.remove(key)
assert chunking_data_keys, 'Dataset produced sequence without any data.'
data_chunks = {}
num_chunks = None
for data_key in chunking_data_keys:
chunk_size = self._chunk_size[data_key]
chunk_step = self._chunk_step[data_key]
min_chunk_size = self._min_chunk_size[data_key]
data = data_dict[data_key]
chunks = [data[start_index:(start_index + chunk_size)] for start_index in range(0, len(data), chunk_step) if (len(data[start_index:(start_index + chunk_size)]) >= min_chunk_size)]
if (num_chunks is None):
num_chunks = len(chunks)
else:
assert (num_chunks == len(chunks)), 'Chunking resulted in different number of chunks for different data keys.'
data_chunks[data_key] = chunks
if (num_chunks == 0):
continue
assert num_chunks, 'Bug: no chunk produced from current sequence.'
for chunk_index in range(num_chunks):
chunk_data = {data_key: data_chunks[data_key][chunk_index] for data_key in data_chunks.keys()}
non_chunked_data = {data_key: data for (data_key, data) in data_dict.items() if (data_key not in chunk_data)}
if non_chunked_data:
chunk_data.update(deepcopy(non_chunked_data))
(yield chunk_data)
def __getitem__(self, index):
raise Exception(f'{self.__class__.__name__}.__getitem__ not supported')
@staticmethod
def _parse_chunking(chunking):
'\n Similar to returnn.datasets.basic.Dataset._parse_chunking().\n\n :param None|int|(int,int)|dict|(dict,dict) chunking: see __init__()\n :return: chunk_size, chunk_step\n :rtype: (NumbersDict,NumbersDict)\n '
if (not isinstance(chunking, (tuple, list))):
chunking = (chunking, None)
(chunk_size, chunk_step) = chunking
if (chunk_size is None):
chunk_size = 0
assert isinstance(chunk_size, (int, dict))
chunk_size = NumbersDict(chunk_size)
assert (chunk_size.min_value() > 0), 'chunk size must be positive'
if (chunk_step in (None, 0)):
chunk_step = chunk_size
assert isinstance(chunk_step, (int, dict, NumbersDict))
chunk_step = NumbersDict(chunk_step)
assert (sorted(chunk_step.keys()) == sorted(chunk_size.keys()))
assert (chunk_step.min_value() > 0), 'chunking step must be positive'
return (chunk_size, chunk_step)
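# Illustrative sketch (hypothetical numbers) of the accepted `chunking` forms,
# demonstrated via the static _parse_chunking helper:
def _demo_chunking_options():
    size, step = ChunkingIterDataPipe._parse_chunking(50)  # single value: size == step
    assert size['data'] == 50 and step['data'] == 50
    size, step = ChunkingIterDataPipe._parse_chunking((50, 25))  # 50-frame chunks, 25-frame step
    assert size['data'] == 50 and step['data'] == 25
    # per-data-key settings; the resulting number of chunks must match across keys:
    size, step = ChunkingIterDataPipe._parse_chunking(({'data': 200, 'classes': 50}, {'data': 100, 'classes': 25}))
    assert size['classes'] == 50 and step['classes'] == 25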
|
class BatchingIterDataPipe(torch.utils.data.IterDataPipe):
"\n Converts a dataset yielding sequences (dict data_key -> array per sequence) into a dataset yielding lists of\n these sequences, i.e. batches.\n Sequences are grouped in-order according to the 'max_tokens' and 'max_seqs' batch size\n limits.\n Note, that batches are not yet merged into a single (padded) data array here, this happens in 'collate_batch()'.\n "
def __init__(self, dataset: torch.utils.data.IterableDataset, batch_size=1, max_seqs=None):
'\n :param dataset: dataset to apply batching to\n :param int|dict[str,int]|None batch_size: Maximum number of time steps (e.g. audio frames / words) in one\n batch (padding included).\n If given as a dict data_key -> value, sets different individual limits per data key.\n If None, no limit.\n :param int|None max_seqs: maximum number of sequences in a batch,\n None means unlimited (also -1 to match TF backend)\n '
super().__init__()
self._dataset = dataset
self._max_batch_size = NumbersDict((sys.maxsize if (batch_size is None) else batch_size))
self._max_seqs = (sys.maxsize if ((max_seqs is None) or (max_seqs == (- 1))) else max_seqs)
assert (self._max_batch_size.min_value() > 0)
assert (self._max_seqs > 0)
def __iter__(self):
'\n :return: generator providing batches in the form of lists of sequences, where each sequence is a dict\n data_key -> data_array.\n :rtype: Iterable[list[dict[str, numpy.ndarray]]]\n '
current_batch = []
current_max_sequence_lengths = NumbersDict(0)
for data_dict in self._dataset:
if (len(current_batch) == self._max_seqs):
(yield current_batch)
current_batch = []
current_max_sequence_lengths = NumbersDict(0)
sequence_lengths = NumbersDict({data_key: data.shape[0] for (data_key, data) in data_dict.items() if data.shape})
max_sequence_lengths_if_included = NumbersDict.max([current_max_sequence_lengths, sequence_lengths])
batch_size_if_included = (max_sequence_lengths_if_included * (len(current_batch) + 1))
if (current_batch and batch_size_if_included.any_compare(self._max_batch_size, (lambda a, b: (a > b)))):
(yield current_batch)
current_batch = [data_dict]
current_max_sequence_lengths = sequence_lengths
else:
current_batch.append(data_dict)
current_max_sequence_lengths = max_sequence_lengths_if_included
if current_batch:
(yield current_batch)
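# Illustrative sketch (hypothetical lengths, plain list in place of a dataset):
# batch_size limits the padded size (max length in batch * number of seqs),
# max_seqs limits the number of sequences per batch:
def _demo_batching():
    seqs = [{'data': numpy.zeros((length, 2))} for length in [7, 5, 6, 2]]
    batches = list(BatchingIterDataPipe(seqs, batch_size=15, max_seqs=3))
    # first batch [7, 5]: 7 * 2 = 14 <= 15, but adding the 6-frame seq would give 7 * 3 = 21 > 15
    assert [len(b) for b in batches] == [2, 2]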
|
class LenFilterDataPipe(torch.utils.data.IterDataPipe):
'\n Removes sequences which are either too long or too short from a dataset.\n Yields only those sequences whose data lengths are within the defined range.\n '
def __init__(self, dataset: torch.utils.data.IterableDataset, min_seq_length: Union[(int, NumbersDict)]=None, max_seq_length: Union[(int, NumbersDict)]=None):
'\n :param dataset: dataset to apply the filter to\n :param min_seq_length: minimum sequence length either in general or per data_key via dict\n :param max_seq_length: maximum sequence length either in general or per data_key via dict\n '
super().__init__()
self._dataset = dataset
self._min_seq_length = NumbersDict((0 if (min_seq_length is None) else min_seq_length))
self._max_seq_length = NumbersDict((sys.maxsize if (max_seq_length is None) else max_seq_length))
def __iter__(self):
'\n :return: generator providing filtered data where each sequence is a dict\n data_key -> data_array.\n :rtype: Iterable[dict[str, numpy.ndarray]]\n '
for data_dict in self._dataset:
sequence_lengths = NumbersDict({data_key: data.shape[0] for (data_key, data) in data_dict.items() if data.shape})
if sequence_lengths.any_compare(self._min_seq_length, (lambda a, b: (a < b))):
continue
if sequence_lengths.any_compare(self._max_seq_length, (lambda a, b: (a > b))):
continue
(yield data_dict)
def __getitem__(self, index):
raise Exception(f'{self.__class__.__name__}.__getitem__ not supported')
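# Illustrative sketch (hypothetical lengths, plain list in place of a dataset):
# only sequences whose length lies within [min_seq_length, max_seq_length] are kept:
def _demo_len_filter():
    seqs = [{'data': numpy.zeros((length, 2))} for length in [1, 5, 50]]
    kept = list(LenFilterDataPipe(seqs, min_seq_length=2, max_seq_length=20))
    assert [s['data'].shape[0] for s in kept] == [5]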
|
class ReturnnDatasetResetDefaultEpochCounterCallback():
'\n Default for reset_callback.\n Has an internal counter for the epoch, starting at epoch 1 (RETURNN convention).\n '
def __init__(self, dataset: ReturnnDataset):
self.dataset = dataset
self.epoch = 0
def __call__(self):
self.epoch += 1
self.dataset.init_seq_order(epoch=self.epoch)
|
class ReturnnDatasetResetMpSharedEpochCallback():
'\n Can be used as reset_callback.\n '
def __init__(self, dataset: ReturnnDataset, epoch_mp_shared: torch.multiprocessing.Value):
self.dataset = dataset
self.epoch_mp_shared = epoch_mp_shared
def __call__(self):
epoch = self.epoch_mp_shared.value
self.dataset.init_seq_order(epoch=epoch)
|
class ReturnnDatasetIterDataPipe(torch.utils.data.IterDataPipe):
'\n Converts a RETURNN dataset into a PyTorch IterableDataset.\n '
def __init__(self, returnn_dataset: ReturnnDataset, *, reset_callback: Optional[ResetCallbackT]=None):
'\n :param returnn_dataset: dataset to be wrapped\n :param reset_callback: callback function to be called when the dataset is reset, e.g. to init the epoch.\n ReturnnDatasetResetDefaultEpochCounterCallback(returnn_dataset) is the default.\n '
self._dataset = returnn_dataset
if (not reset_callback):
reset_callback = ReturnnDatasetResetDefaultEpochCounterCallback(returnn_dataset)
self._reset_callback = reset_callback
def reset(self):
'\n :return:\n '
self._reset_callback()
def __iter__(self) -> Iterable[Dict[(str, numpy.ndarray)]]:
'\n :return: generator providing data samples in the form of a dict data_key -> data\n '
data_keys = self._dataset.get_data_keys()
seq_index = 0
while self._dataset.is_less_than_num_seqs(seq_index):
self._dataset.load_seqs(seq_index, (seq_index + 1))
data = {data_key: self._dataset.get_data(seq_index, data_key) for data_key in data_keys}
data['seq_tag'] = str_to_numpy_array(self._dataset.get_tag(seq_index))
(yield data)
seq_index += 1
def __getitem__(self, index):
raise Exception(f'{self.__class__.__name__}.__getitem__ not supported')
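# Usage sketch mirroring what the engine does in _create_data_loader(): wrap a RETURNN
# dataset, batch it, and collate it into padded tensors. The Task12AXDataset toy dataset,
# its options, and the availability of BatchingIterDataPipe / collate_batch from the data
# pipeline module in this scope are assumptions for illustration:
def _demo_returnn_dataset_pipeline():
    from returnn.datasets.basic import init_dataset
    dataset = init_dataset({'class': 'Task12AXDataset', 'num_seqs': 10})
    pipe = ReturnnDatasetIterDataPipe(dataset)  # default reset callback inits epoch 1 on iteration
    pipe = BatchingIterDataPipe(pipe, batch_size=1000, max_seqs=3)
    loader = torch.utils.data.DataLoader(pipe, collate_fn=collate_batch, batch_size=None)
    for batch in loader:
        print('data:', batch['data'].shape, 'seq lens:', batch['data:seq_len'])
        break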
|
class ReturnnDatasetPerEpochMapDataPipe(torch.utils.data.MapDataPipe):
'\n Converts a RETURNN dataset into a PyTorch map-style Dataset.\n '
def __init__(self, returnn_dataset: ReturnnDataset, *, reset_callback: Optional[ResetCallbackT]=None):
'\n :param returnn_dataset: dataset to be wrapped\n :param reset_callback: callback function to be called when the dataset is reset, e.g. to init the epoch.\n ReturnnDatasetResetDefaultEpochCounterCallback(returnn_dataset) is the default.\n '
assert (returnn_dataset.have_corpus_seq_idx() and returnn_dataset.have_get_corpus_seq())
self._dataset = returnn_dataset
if (not reset_callback):
reset_callback = ReturnnDatasetResetDefaultEpochCounterCallback(returnn_dataset)
self._reset_callback = reset_callback
def reset(self):
'\n :return:\n '
self._reset_callback()
def __len__(self):
'\n :return: number of data samples in the dataset\n :rtype: int\n '
return self._dataset.num_seqs
def __getitem__(self, index):
'\n :param int index:\n :return: data sample in the form of a dict data_key -> data\n :rtype: dict[str, numpy.ndarray]\n '
corpus_seq_idx = self._dataset.get_corpus_seq_idx(index)
seq = self._dataset.get_corpus_seq(corpus_seq_idx)
return seq.features
|
class ReturnnDatasetFullMapDataPipe(torch.utils.data.MapDataPipe):
'\n Converts a RETURNN dataset into a PyTorch map-style Dataset.\n This is over the full dataset, using the default ordering.\n RETURNN-dataset-side sorting/shuffling is not supported here.\n Sorting/shuffling is intended to be done in the further PyTorch data pipeline.\n '
def __init__(self, returnn_dataset: ReturnnDataset):
'\n :param returnn_dataset: dataset to be wrapped\n '
assert returnn_dataset.have_get_corpus_seq()
self._dataset = returnn_dataset
def __len__(self):
'\n :return: number of data samples in the dataset\n :rtype: int\n '
return self._dataset.get_total_num_seqs()
def __getitem__(self, index):
'\n :param int index:\n :return: data sample in the form of a dict data_key -> data\n :rtype: dict[str, numpy.ndarray]\n '
seq = self._dataset.get_corpus_seq(index)
return seq.features
|
def tensor_dict_numpy_to_torch_(x: TensorDict):
'\n :func:`tensor_numpy_to_torch_` on all values\n '
for v in x.data.values():
tensor_numpy_to_torch_(v)
|
def tensor_numpy_to_torch_(x: Tensor[numpy.ndarray]):
'\n torch.from_numpy() on Tensor, including dims\n '
if ((x.raw_tensor is None) or isinstance(x.raw_tensor, torch.Tensor)):
pass
else:
assert isinstance(x.raw_tensor, numpy.ndarray)
x.raw_tensor = torch.from_numpy(x.raw_tensor)
for dim in x.dims:
dim.transform_tensors(tensor_numpy_to_torch_)
|
def tensor_dict_torch_to_numpy_(x: TensorDict):
'\n :func:`tensor_torch_to_numpy_` on all values\n '
for v in x.data.values():
tensor_torch_to_numpy_(v)
|
def tensor_torch_to_numpy_(x: Tensor[torch.Tensor]):
'\n .numpy() on Tensor, including dims\n '
if ((x.raw_tensor is None) or isinstance(x.raw_tensor, numpy.ndarray)):
pass
else:
assert isinstance(x.raw_tensor, torch.Tensor)
x.raw_tensor = x.raw_tensor.detach().cpu().numpy()
for dim in x.dims:
dim.transform_tensors(tensor_torch_to_numpy_)
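# Illustrative round-trip sketch (hypothetical dims): converts all raw tensors of a
# TensorDict, including the dyn_size_ext tensors attached to the dims, in place:
def _demo_tensor_dict_conversion():
    td = TensorDict()
    td.update({'data': {'dims': [Dim(3, name='rows'), Dim(2, name='cols')], 'dtype': 'float32'}}, auto_convert=True)
    td.data['data'].raw_tensor = numpy.zeros((3, 2), dtype=numpy.float32)
    tensor_dict_numpy_to_torch_(td)
    assert isinstance(td.data['data'].raw_tensor, torch.Tensor)
    tensor_dict_torch_to_numpy_(td)
    assert isinstance(td.data['data'].raw_tensor, numpy.ndarray)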
|
class DistributedContext():
'\n This class sets up some helper functions for torch distributed training.\n '
def __init__(self, options: Dict[(str, Any)]):
import torch.distributed as dist
self._opts = options
dist.init_process_group(backend=self._opts.get('backend', None))
self._local_rank = int(os.environ['LOCAL_RANK'])
self._local_size = int(os.environ['LOCAL_WORLD_SIZE'])
self._rank = dist.get_rank()
self._size = dist.get_world_size()
_logger.info(('Torch distributed initialized. Hostname %s, pid %i, rank %i / size %i, local rank %s / local size %s.' % (socket.gethostname(), os.getpid(), self._rank, self._size, self._local_rank, self._local_size)))
self._reduce_type = self._opts.get('reduce_type', 'grad')
self._param_sync_step: Optional[int] = self._opts.get('param_sync_step', None)
if (self._reduce_type == 'param'):
assert (isinstance(self._param_sync_step, int) and (self._param_sync_step > 0)), f'reduce_type param: param_sync_step must be a positive int, got {self._param_sync_step!r} ({type(self._param_sync_step).__name__})'
_logger.info(f'reduce_type param: param_sync_step {self._param_sync_step}')
elif (self._reduce_type == 'grad'):
_logger.info('reduce_type grad')
else:
raise ValueError(f'invalid reduce_type {self._reduce_type!r}')
def local_rank(self) -> int:
'local rank'
return self._local_rank
def local_size(self) -> int:
'local size'
return self._local_size
def rank(self) -> int:
'global rank'
return self._rank
def size(self) -> int:
'global size'
return self._size
def get_param_sync_step(self) -> Optional[int]:
'param sync step'
return self._param_sync_step
def maybe_make_distributed_module(self, module: torch.nn.Module) -> Optional[DistributedDataParallel]:
'\n Maybe make a wrapped distributed module.\n\n :param module: original module\n :return: potentially wrapped module\n '
if (self._reduce_type == 'param'):
return None
cls = self._opts.get('class', DistributedDataParallel)
if (cls is not DistributedDataParallel):
_logger.warning(f'Using custom class {cls} instead of DistributedDataParallel, might be unsupported.')
kwargs = self._opts.get('options', {})
return cls(module=module, device_ids=[self.local_rank()], **kwargs)
def step_after_param_update(self, *, module: torch.nn.Module, epoch_step_idx: int):
'one train step'
if ((self._reduce_type == 'param') and ((epoch_step_idx % self._param_sync_step) == (self._param_sync_step - 1))):
_sync_params_avg(module=module, sync_on_cpu=self._opts.get('sync_on_cpu', False))
|
def get_ctx(config=None) -> Optional[DistributedContext]:
'\n :param Config|None config:\n :returns: the global context if Torch distributed is enabled, or None otherwise.\n If we did not set up the context yet, this will automatically create it.\n '
global _is_set_up, _ctx
if _is_set_up:
return _ctx
if (not config):
from returnn.config import get_global_config
config = get_global_config(raise_exception=False)
if (not config):
return None
_is_set_up = True
opts = config.typed_value('torch_distributed')
if (opts is None):
return None
assert isinstance(opts, dict)
_ctx = DistributedContext(opts)
if _ctx.get_param_sync_step():
accum_grad_multiple_step = config.int('accum_grad_multiple_step', 1)
assert ((_ctx.get_param_sync_step() % accum_grad_multiple_step) == 0), f'{_ctx}: param_sync_step {_ctx.get_param_sync_step()} must be a multiple of accum_grad_multiple_step {accum_grad_multiple_step}'
return _ctx
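# Config sketch (hypothetical values) for the `torch_distributed` entry read by get_ctx().
# reduce_type "grad" (the default) reduces gradients every step via DistributedDataParallel;
# reduce_type "param" instead averages the parameters themselves every param_sync_step steps:
_example_torch_distributed_config = {
    'reduce_type': 'param',
    'param_sync_step': 100,  # must be a multiple of accum_grad_multiple_step
    'sync_on_cpu': False,  # move params to CPU for the all_reduce to save GPU memory
    # 'backend': 'nccl',  # passed to torch.distributed.init_process_group
}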
|
def _sync_params_avg(*, module: torch.nn.Module, sync_on_cpu: bool=False):
import torch.distributed as dist
if (dist.get_backend() == 'gloo'):
reduce_op = dist.ReduceOp.SUM
elif hasattr(dist.ReduceOp, 'AVG'):
reduce_op = dist.ReduceOp.AVG
else:
reduce_op = dist.ReduceOp.SUM
old_dev = None
if sync_on_cpu:
old_dev = next(iter(module.parameters())).device
module.to(torch.device('cpu'))
reduce_op = dist.ReduceOp.SUM
try:
for param in module.parameters():
dist.all_reduce(param.data, op=reduce_op)
if (reduce_op == dist.ReduceOp.SUM):
param.data /= dist.get_world_size()
finally:
if old_dev:
module.to(old_dev)
|
class Engine(EngineBase):
'\n PyTorch engine\n '
def __init__(self, config: Config):
'\n :param config:\n '
super(Engine, self).__init__(config=config)
rf.select_backend_torch()
if (util.BackendEngine.selected_engine is None):
util.BackendEngine.select_engine(default_fallback_engine=util.BackendEngine.Torch, config=self.config)
self.model_filename = self.config.value('model', None)
self._mp_manager = torch.multiprocessing.Manager()
self._epoch_mp_shared = self._mp_manager.Value('i', 0)
self.train_dataset = None
self.eval_datasets = {}
self.extern_data = None
self._train_dataloader = None
self._eval_dataloaders = {}
self._start_epoch = None
self._final_epoch = None
self._min_seq_length = (config.typed_value('min_seq_length', None) or config.int('min_seq_length', None))
self._max_seq_length = (config.typed_value('max_seq_length', None) or config.int('max_seq_length', None))
self._orig_model = None
self._pt_model = None
self._train_step_func = None
self._forward_step_func = self.config.typed_value('forward_step')
self._forward_step_expected_outputs = None
if (self.config.typed_value('model_outputs') is not None):
self._forward_step_expected_outputs = TensorDict()
self._forward_step_expected_outputs.update(self.config.typed_value('model_outputs'), auto_convert=True)
self._save_model_epoch_interval = 1
self._updater = None
self._use_autocast = False
self._autocast_dtype = None
self._grad_scaler = None
dev_ = get_device_from_config_opt(config.value('device', None))
self._device = dev_.result
print('Using device:', self._device, f"({(dev_.reason or '?')})", file=log.v2)
self._torch_distributed_ctx = None
self._ddp_pt_model = None
self._accum_grad_multiple_step = config.int('accum_grad_multiple_step', 1)
if (config.typed_value('torch_distributed') is not None):
self._torch_distributed_ctx = dist_get_ctx(config=config)
local_rank = self._torch_distributed_ctx.local_rank()
print(f'Start running torch distributed training on local rank {local_rank}.', file=log.v2)
assert (self._device == 'cuda'), f'torch distributed: unexpected device {self._device!r}'
self._device = f'cuda:{local_rank}'
if ((self._device == 'cuda') or self._device.startswith('cuda:')):
diagnose_gpu.print_using_cuda_device_report(self._device, file=log.v2)
if self._device.startswith('cuda:'):
torch.cuda.set_device(self._device)
self._log_memory_usage = config.bool('torch_log_memory_usage', False)
self._reset_dev_memory_caches = config.bool('reset_dev_memory_caches', False)
amp_options = self.config.opt_typed_value('torch_amp')
grad_scaler_opts = self.config.typed_value('grad_scaler', NotSpecified)
if (amp_options is not None):
self._use_autocast = True
if isinstance(amp_options, dict):
amp_options = util.CollectionReadCheckCovered(amp_options)
dtype = amp_options.get('dtype', None)
grad_scaler_opts = amp_options.get('grad_scaler', grad_scaler_opts)
amp_options.assert_all_read()
elif isinstance(amp_options, str):
dtype = amp_options
else:
raise TypeError(f'Invalid type for torch_amp: {type(amp_options)}')
if isinstance(dtype, str):
dtype = getattr(torch, dtype)
assert (isinstance(dtype, torch.dtype) or (dtype is None))
print(f'Using autocast (automatic mixed precision (AMP)) with dtype {dtype}', file=log.v2)
self._autocast_dtype = dtype
if (grad_scaler_opts is NotSpecified):
grad_scaler_opts = ({} if self._use_autocast else None)
if (grad_scaler_opts is not None):
assert isinstance(grad_scaler_opts, dict)
print('Using GradScaler with options:', grad_scaler_opts, file=log.v2)
self._grad_scaler = amp.GradScaler(**grad_scaler_opts)
def init_network_from_config(self, config: Optional[Config]=None):
'init model'
assert ((config is self.config) or (not config))
super().init_network_from_config(config=config)
extern_data_dict = self.config.typed_value('extern_data')
assert extern_data_dict, 'extern_data is not specified in config'
self.extern_data = extern_data_util.extern_data_template_from_config_opts(extern_data_dict)
self._load_model()
def init_train_from_config(self, config: Optional[Config]=None, train_data: Optional[Dataset]=None, dev_data: Optional[Dataset]=None, eval_data: Optional[Dataset]=None):
'\n :param config:\n :param train_data:\n :param dev_data:\n :param eval_data:\n '
assert ((config is self.config) or (not config))
config = self.config
super().init_train_from_config(config=config)
self.train_dataset = train_data
self.eval_datasets.clear()
if dev_data:
self.eval_datasets['dev'] = dev_data
if eval_data:
self.eval_datasets['eval'] = eval_data
if config.has('eval_datasets'):
for (dataset_name, dataset_opts) in config.typed_value('eval_datasets', {}).items():
self.eval_datasets[dataset_name] = init_dataset(dataset_opts, default_kwargs={'name': dataset_name})
self._train_dataloader = (self._create_data_loader(train_data) if train_data else None)
for (dataset_name, dataset) in self.eval_datasets.items():
self._eval_dataloaders[dataset_name] = self._create_data_loader(dataset)
self._start_epoch = self.get_train_start_epoch(self.config)
self._final_epoch = self.config_get_final_epoch(self.config)
self.init_network_from_config(config=config)
self._save_model_epoch_interval = config.int('save_interval', 1)
if self._torch_distributed_ctx:
self._ddp_pt_model = self._torch_distributed_ctx.maybe_make_distributed_module(module=_WrappedModuleRunStep(module=self._pt_model, engine=self))
self._updater = Updater(config=self.config, network=self._pt_model, device=self._device, initial_learning_rate=self.learning_rate)
self._updater.create_optimizer()
if (self._start_epoch > 1):
self._load_optimizer(epoch=(self._start_epoch - 1))
self._train_step_func = self.config.typed_value('train_step')
assert self._train_step_func, 'train_step not defined'
def train(self):
'\n Main training loop.\n '
assert (self._pt_model is not None), 'Model not initialized, call init_train_from_config().'
assert (self.train_dataset is not None), 'Train dataset missing, call init_train_from_config() with train_data.'
if (self._start_epoch > self._final_epoch):
print(f'Already trained until final epoch {self._final_epoch}, nothing to do.', file=log.v3)
return
print(f'Starting training at epoch {self._start_epoch}, global train step {self.global_train_step}', file=log.v3)
start_epoch = self._start_epoch
self.epoch = (start_epoch - 1)
self._check_epoch_missing_eval()
while ((self.epoch + 1) <= self._final_epoch):
self.epoch += 1
self._epoch_mp_shared.value = self.epoch
epoch_start_global_train_step = self.global_train_step
epoch_num_tries = 0
epoch = self.epoch
while True:
epoch_num_tries += 1
if (epoch_num_tries > 1):
print(f'Retry of train epoch {self.epoch}, global train step {self.global_train_step}', file=log.v3)
self.init_train_epoch()
try:
self.train_epoch()
except Exception as exc:
if self._handle_train_exception(exc, epoch_num_tries=epoch_num_tries, epoch_start_global_train_step=epoch_start_global_train_step, start_epoch=start_epoch):
self._maybe_reset_dev_memory_caches(force=True)
self._reset_train_epoch()
assert ((self.epoch == epoch) and (self.global_train_step == epoch_start_global_train_step))
continue
raise
break
print(f'Finished training at epoch {self.epoch}, global train step {self.global_train_step}', file=log.v3)
def _handle_train_exception(self, exc: Exception, *, epoch_num_tries: int, epoch_start_global_train_step: int, start_epoch: int) -> bool:
'\n :return: True if we should retry, False if reraise\n '
from returnn.util.better_exchook import get_func_from_code_object, iter_traceback
print(f'{type(exc).__name__}: {exc}', file=log.v1)
module_names_by_id = {}
for (name, mod) in self._orig_model.named_modules():
if (id(mod) not in module_names_by_id):
module_names_by_id[id(mod)] = (name or '(root)')
exc_ext = []
for frame in iter_traceback(exc.__traceback__):
frame_self = frame.f_locals.get('self')
if isinstance(frame_self, (torch.nn.Module, rf.Module)):
func = get_func_from_code_object(frame.f_code, frame=frame)
if (func and func.__name__ and func.__name__.startswith('_')):
continue
func_name = ((func and func.__qualname__) or type(frame_self).__name__)
exc_ext.append(f"({func_name}) {module_names_by_id.get(id(frame_self), '(unknown)')}")
if (not exc_ext):
exc_ext.append('(No module call frames.)')
if ((len(exc.args) == 1) and isinstance(exc.args[0], str)):
exc.args = ('\n'.join(([exc.args[0], '', 'Module call stack:'] + exc_ext)),)
else:
print('Module call stack:\n', '\n'.join(exc_ext), file=log.v3)
if isinstance(exc, torch.cuda.OutOfMemoryError):
if ((epoch_num_tries <= 3) and (self.epoch > start_epoch) and (self.global_train_step > epoch_start_global_train_step) and (self._save_model_epoch_interval == 1) and (not self._torch_distributed_ctx)):
print('Retry after OOM', file=log.v3)
return True
return False
def init_train_epoch(self):
'\n init train (sub)epoch. LR etc\n '
self.learning_rate = self.learning_rate_control.get_learning_rate_for_epoch(self.epoch)
self._updater.set_learning_rate(self.learning_rate)
self._updater.set_current_train_step(global_train_step=self.global_train_step, epoch=self.epoch)
self.learning_rate_control.epoch_data[self.epoch].meta.update({'global_train_step': self.global_train_step, 'effective_learning_rate': self._updater.get_effective_learning_rate()})
def _reset_train_epoch(self):
epoch = self.epoch
self._load_model()
assert (epoch == self.epoch)
self._load_optimizer(epoch=(epoch - 1))
def _maybe_reset_dev_memory_caches(self, *, force: bool=False):
if ((not force) and (not self._reset_dev_memory_caches)):
return
gc.collect()
torch.cuda.empty_cache()
def _reset_dev_memory_stats(self):
dev = torch.device(self._device)
if (dev.type == 'cuda'):
torch.cuda.reset_peak_memory_stats(dev)
self._maybe_report_dev_memory_stats()
def _maybe_report_dev_memory_stats(self):
if (not self._log_memory_usage):
return
dev = torch.device(self._device)
if (dev.type == 'cuda'):
stats = [f'alloc cur {util.human_bytes_size(torch.cuda.memory_allocated(dev))}', f'alloc peak {util.human_bytes_size(torch.cuda.max_memory_allocated(dev))}', f'reserved cur {util.human_bytes_size(torch.cuda.memory_reserved(dev))}', f'reserved peak {util.human_bytes_size(torch.cuda.max_memory_reserved(dev))}']
print(f'Memory usage ({self._device}):', ' '.join(stats), file=log.v1)
def train_epoch(self):
'\n train one (sub)epoch\n '
print('start', self.get_epoch_str(), 'global train step', self.global_train_step, 'with effective learning rate', self._updater.get_effective_learning_rate(), '...', file=log.v3)
accumulated_losses_dict = NumbersDict()
accumulated_inv_norm_factors_dict = NumbersDict()
step_idx = 0
epoch_start_time = time.time()
data_iter = iter(self._train_dataloader)
elapsed_computation_time = 0
self._pt_model.train()
self._maybe_reset_dev_memory_caches()
self._reset_dev_memory_stats()
if self.config.bool('debug_shell_before_train_loop', False):
print('debug_shell_before_train_loop', file=log.v1)
debug_shell(user_ns=locals(), user_global_ns=globals(), exit_afterwards=False)
while True:
with torch.no_grad():
extern_data_raw = next(data_iter, None)
step_begin_time = time.time()
_has_data = torch.tensor([(extern_data_raw is not None)], dtype=torch.int8)
if self._torch_distributed_ctx:
torch.distributed.all_reduce(_has_data, op=torch.distributed.ReduceOp.MIN)
if (not _has_data[0]):
break
if ((step_idx % self._accum_grad_multiple_step) == 0):
self._updater.get_optimizer().zero_grad()
extern_data = extern_data_util.raw_dict_to_extern_data(extern_data_raw, extern_data_template=self.extern_data, device=self._device)
self._run_step(extern_data, train_flag=True, train_func=True)
train_ctx = rf.get_run_ctx()
total_loss = train_ctx.total_loss()
losses_dict = NumbersDict({name: (float(loss.get_summed_loss().raw_tensor.detach().cpu().numpy()) if (self._device != 'meta') else float('nan')) for (name, loss) in train_ctx.losses.items()})
inv_norm_factors_dict = NumbersDict({name: float(_to_raw(loss.get_inv_norm_factor())) for (name, loss) in train_ctx.losses.items()})
with (self._ddp_pt_model.no_sync() if ((self._ddp_pt_model is not None) and ((step_idx % self._accum_grad_multiple_step) != (self._accum_grad_multiple_step - 1))) else nullcontext()):
if (self._grad_scaler is not None):
self._grad_scaler.scale(total_loss.raw_tensor).backward()
else:
total_loss.raw_tensor.backward()
if ((step_idx % self._accum_grad_multiple_step) == (self._accum_grad_multiple_step - 1)):
self._updater.step(grad_scaler=self._grad_scaler)
if self._torch_distributed_ctx:
self._torch_distributed_ctx.step_after_param_update(module=self._pt_model, epoch_step_idx=step_idx)
step_duration = (time.time() - step_begin_time)
elapsed_computation_time += step_duration
accumulated_losses_dict += losses_dict
accumulated_inv_norm_factors_dict += inv_norm_factors_dict
_print_process(f'ep {self.epoch} train', step=step_idx, eval_info=dict((losses_dict / inv_norm_factors_dict)), step_duration=step_duration, log_memory_usage_device=(self._device if self._log_memory_usage else None))
step_idx += 1
self.global_train_step += 1
self._updater.set_current_train_step(global_train_step=self.global_train_step, epoch=self.epoch)
elapsed = (time.time() - epoch_start_time)
elapsed_computation_percentage = (elapsed_computation_time / elapsed)
print(('Trained %i steps, %s elapsed (%.1f%% computing time)' % (step_idx, hms(elapsed), (elapsed_computation_percentage * 100.0))), file=log.v3)
self.learning_rate_control.epoch_data[self.epoch].meta.update({'epoch_num_train_steps': step_idx, 'epoch_train_time_secs': round(elapsed)})
accumulated_losses_dict = (accumulated_losses_dict / accumulated_inv_norm_factors_dict)
self.learning_rate_control.set_epoch_error(self.epoch, {f'train_loss_{k}': v for (k, v) in accumulated_losses_dict.items()})
if self._do_save():
self.learning_rate_control.save()
print(f'Total train loss:', _format_score(dict(accumulated_losses_dict)), file=log.v3)
self._maybe_report_dev_memory_stats()
if (((self.epoch % self._save_model_epoch_interval) == 0) or (self.epoch == self._final_epoch)):
if self.model_filename:
self._save_model()
self._save_optimizer()
else:
print('Not saving model, `model` not specified.', file=log.v3)
self.eval_model()
if self.config.bool_or_other('cleanup_old_models', None):
self.cleanup_old_models()
def _do_save(self):
if (self._device == 'meta'):
return False
if (not super()._do_save()):
return False
return True
def eval_model(self, *, skip_already_evaluated: bool=False):
'\n Runs model on all eval datasets and calculates the loss.\n '
self._pt_model.eval()
self._maybe_reset_dev_memory_caches()
self._reset_dev_memory_stats()
eval_dump_str = []
score_keys = None
error_keys = None
for (dataset_name, dataset) in self.eval_datasets.items():
if (skip_already_evaluated and self._is_dataset_evaluated(name=dataset_name)):
continue
data_loader = self._eval_dataloaders[dataset_name]
if (self._torch_distributed_ctx and (self._torch_distributed_ctx.rank() != 0)):
iter(data_loader)
continue
print(f'Evaluating dataset {dataset_name!r}', file=log.v3)
accumulated_losses_dict = NumbersDict()
accumulated_inv_norm_factors_dict = NumbersDict()
step_idx = 0
with torch.no_grad():
for extern_data_raw in data_loader:
extern_data = extern_data_util.raw_dict_to_extern_data(extern_data_raw, extern_data_template=self.extern_data, device=self._device)
self._run_step(extern_data, train_func=True)
train_ctx = rf.get_run_ctx()
if (score_keys is None):
score_keys = [name for (name, loss) in train_ctx.losses.items() if (not loss.as_error)]
error_keys = [name for (name, loss) in train_ctx.losses.items() if loss.as_error]
losses_dict = NumbersDict({name: (float(loss.get_summed_loss().raw_tensor.detach().cpu().numpy()) if (self._device != 'meta') else float('nan')) for (name, loss) in train_ctx.losses.items()})
inv_norm_factors_dict = NumbersDict({name: float(_to_raw(loss.get_inv_norm_factor())) for (name, loss) in train_ctx.losses.items()})
accumulated_losses_dict += losses_dict
accumulated_inv_norm_factors_dict += inv_norm_factors_dict
_print_process(f'ep {self.epoch} {dataset_name} eval', step=step_idx, eval_info=dict((losses_dict / inv_norm_factors_dict)), log_memory_usage_device=(self._device if self._log_memory_usage else None))
step_idx += 1
assert (step_idx > 0), f'No data in dataset {dataset_name!r}.'
accumulated_losses_dict = (accumulated_losses_dict / accumulated_inv_norm_factors_dict)
self.learning_rate_control.set_epoch_error(self.epoch, {f'{dataset_name}_loss_{k}': v for (k, v) in accumulated_losses_dict.items()})
if self._do_save():
self.learning_rate_control.save()
eval_dump_str += [('%s: score %s error %s' % (dataset_name, _format_score({name: accumulated_losses_dict[name] for name in score_keys}), _format_score({name: accumulated_losses_dict[name] for name in error_keys})))]
if ((not self._torch_distributed_ctx) or (self._torch_distributed_ctx.rank() == 0)):
print((' '.join(eval_dump_str) if eval_dump_str else '(No evaluations.)'), file=log.v1)
self._maybe_report_dev_memory_stats()
def _create_data_loader(self, dataset: Dataset) -> DataLoader:
'\n :param dataset: RETURNN dataset\n :return: PyTorch data loader created from given RETURNN dataset\n '
dataset_reset = returnn_dataset_wrapper.ReturnnDatasetResetMpSharedEpochCallback(dataset=dataset, epoch_mp_shared=self._epoch_mp_shared)
wrapped_dataset = returnn_dataset_wrapper.ReturnnDatasetIterDataPipe(dataset, reset_callback=dataset_reset)
if ((self._min_seq_length is not None) or (self._max_seq_length is not None)):
wrapped_dataset = data_pipeline.LenFilterDataPipe(wrapped_dataset, min_seq_length=self._min_seq_length, max_seq_length=self._max_seq_length)
chunking = self.config.typed_value('chunking', None)
min_chunk_size = self.config.typed_value('min_chunk_size', 0)
if chunking:
wrapped_dataset = data_pipeline.ChunkingIterDataPipe(wrapped_dataset, chunking, min_chunk_size=min_chunk_size)
assert (self.config.typed_value('batch_size') is not None), 'batch_size not defined in config'
batch_size = self.config.typed_value('batch_size', 1)
max_seqs = self.config.int('max_seqs', (- 1))
batches_dataset = data_pipeline.BatchingIterDataPipe(wrapped_dataset, batch_size=batch_size, max_seqs=max_seqs)
loader_opts = (self.config.typed_value('torch_dataloader_opts') or {})
assert isinstance(loader_opts, dict), f'config torch_dataloader_opts, expected dict, got {type(loader_opts)}'
if loader_opts.get('num_workers'):
loader_opts.setdefault('persistent_workers', True)
loader_opts.setdefault('worker_init_fn', _data_loader_worker_init_func)
return DataLoader(batches_dataset, collate_fn=data_pipeline.collate_batch, batch_size=None, shuffle=None, sampler=None, batch_sampler=None, **loader_opts)
def _run_step(self, extern_data: TensorDict, *, train_flag: bool=False, train_func: bool, _inside_wrapped: bool=False):
'\n :param extern_data: model inputs for the step\n :return: Nothing, all outputs are written to the run context (:func:`rf.get_run_ctx`).\n '
if ((self._ddp_pt_model is not None) and (not _inside_wrapped)):
self._ddp_pt_model(extern_data=extern_data, train_flag=train_flag, train_func=train_func)
return
if train_func:
assert (self._train_step_func is not None)
rf.init_train_step_run_ctx(train_flag=train_flag, step=self.global_train_step, epoch=self.epoch)
else:
assert (self._forward_step_func is not None), 'define forward_step in the config'
rf.init_forward_step_run_ctx(expected_outputs=self._forward_step_expected_outputs, step=self.global_train_step, epoch=self.epoch)
with (autocast(device_type=self._device.split(':')[0], dtype=self._autocast_dtype) if self._use_autocast else nullcontext()), rf.set_default_device_ctx(self._device):
sentinel_kw = {('__fwd_compatible_random_arg_%i' % int((random() * 100))): None}
if train_func:
self._train_step_func(model=self._orig_model, extern_data=extern_data, **sentinel_kw)
else:
self._forward_step_func(model=self._orig_model, extern_data=extern_data, **sentinel_kw)
def _load_model(self):
'\n Sets self._model to a torch.nn.Module.\n '
(epoch, model_epoch_filename) = self.get_epoch_model(self.config)
step = None
filename = None
checkpoint_state = None
if model_epoch_filename:
filename = (model_epoch_filename + util.get_model_filename_postfix())
print(('Load model %s' % (filename,)), file=log.v4)
checkpoint_state = torch.load(filename, map_location=self._device)
if (epoch is None):
epoch = checkpoint_state['epoch']
step = checkpoint_state['step']
print(f' epoch {epoch}, global train step {step}', file=log.v4)
step -= 1
is_training = (self.config.value('task', 'train') == 'train')
is_first_train_epoch = ((not epoch) and (is_training or (self.config.value('task', 'train') == 'initialize_model')))
if (not model_epoch_filename):
step = 0
epoch = (self._start_epoch or 1)
self._create_model(epoch=epoch, step=step)
if (checkpoint_state is not None):
(missing_keys, unexpected_keys) = self._pt_model.load_state_dict(checkpoint_state['model'], strict=False)
if missing_keys:
raise Exception('\n'.join([f'While loading model {filename}:', ('Unexpected key(s) in state_dict: ' + ', '.join(map(repr, unexpected_keys))), ('Missing key(s) in state_dict: ' + ', '.join(map(repr, missing_keys))), 'Any missing key is an error.']))
if unexpected_keys:
print((f'Note: While loading {filename}, unexpected key(s) in state_dict: ' + ', '.join(map(repr, unexpected_keys))), file=log.v4)
preload_from_files = self.config.typed_value('preload_from_files', {})
if preload_from_files:
for (preload_key, opts) in reversed(sorted(preload_from_files.items())):
assert (isinstance(opts, dict) and ('filename' in opts))
if opts.get('init_for_train', False):
if (not is_first_train_epoch):
continue
elif is_training:
continue
print(f"Pre-load weights for key '{preload_key}' from {opts['filename']}", file=log.v3)
preload_model_state = torch.load(opts['filename'])
if (opts.get('checkpoint_key', 'model') is not None):
preload_model_state = preload_model_state[opts.get('checkpoint_key', 'model')]
if opts.get('prefix', ''):
preload_model_state = {(opts['prefix'] + key): value for (key, value) in preload_model_state.items()}
ignore_params = opts.get('ignore_params', [])
ignore_params_prefixes = opts.get('ignore_params_prefixes', [])
for key in list(preload_model_state.keys()):
if ((key in ignore_params) or any([key.startswith(ignore_key) for ignore_key in ignore_params_prefixes])):
print(f'Ignoring variable {key}', file=log.v3)
preload_model_state.pop(key)
for (new_name, name_in_checkpoint) in opts.get('var_name_mapping', {}).items():
preload_model_state[new_name] = preload_model_state.pop(name_in_checkpoint)
(missing_keys, _) = self._pt_model.load_state_dict(preload_model_state, strict=False)
if (missing_keys and (not opts.get('ignore_missing', False))):
prefix_keys = [key for key in self._pt_model.state_dict() if key.startswith(opts.get('prefix', ''))]
missing_prefix_keys = set(prefix_keys).intersection(set(missing_keys))
assert (not missing_prefix_keys), f'Missing keys and ignore_missing=False: {missing_prefix_keys}'
print(f'Missing keys: {missing_keys}', file=log.v4)
del checkpoint_state
gc.collect()
self._pt_model.to(self._device)
if (model_epoch_filename and is_training):
epoch += 1
step += 1
self.epoch = epoch
self.global_train_step = step
def _create_model(self, *, epoch: int, step: int):
"\n Set up self._pt_model and self._orig_model\n by calling get_model from the config.\n\n Note on the `epoch` and `step` args:\n In case we are loading a model:\n This is the epoch and step of the model we are loading.\n In case we are initializing a model:\n Epoch starts at 1, step starts at 0.\n The step is the global train step, i.e. the number of train steps we have done so far over all epochs,\n so it does not reset to 0 at each epoch.\n In a checkpoint, we stored the epoch of the most recent epoch we just finished.\n We stored the global train step after we already incremented it (that's why you have step -= 1 above).\n The checkpoint is always stored when we just have finished the epoch.\n\n :param epoch:\n :param step:\n "
random_seed = self.config.int('random_seed', 42)
random_seed = (((((epoch * 193939) + (step * 19937)) + (random_seed * 27644437)) + 479001599) % (2 ** 31))
rf.set_random_seed(random_seed)
get_model_func = self.config.typed_value('get_model')
assert get_model_func, 'get_model not defined in config'
sentinel_kw = {('__fwd_compatible_random_arg_%i' % int((random() * 100))): None}
model = get_model_func(epoch=epoch, step=step, **sentinel_kw)
self._orig_model = model
if isinstance(model, rf.Module):
self._pt_model = rf_module_to_pt_module(model)
elif isinstance(model, torch.nn.Module):
self._pt_model = model
else:
raise TypeError(f'get_model returned {model} of type {type(model)}, expected rf.Module or torch.nn.Module')
assert isinstance(self._pt_model, torch.nn.Module)
print('Model:', self._pt_model, file=log.v4)
num_params = sum([parameter.numel() for parameter in self._pt_model.parameters()])
print(f'net params #: {num_params}', file=log.v2)
def get_pt_model(self) -> Optional[torch.nn.Module]:
'\n :return: PyTorch Module. in case this is using RF, it will return the wrapped module\n '
return self._pt_model
def _save_model(self):
'\n Saves the state of self._model to file.\n '
if (not self._do_save()):
return
filename = (self.get_epoch_model_filename() + util.get_model_filename_postfix())
directory = os.path.dirname(filename)
if (not os.path.exists(directory)):
os.makedirs(directory, exist_ok=True)
print(('Save model under %s' % (filename,)), file=log.v4)
tmp_filename = (filename + '.tmp_write')
if os.path.exists(tmp_filename):
os.unlink(tmp_filename)
torch.save({'model': self._pt_model.state_dict(), 'epoch': self.epoch, 'step': self.global_train_step, 'effective_learning_rate': (self._updater.get_effective_learning_rate() if self._updater else None), 'returnn_version': returnn.__long_version__}, tmp_filename)
os.rename(tmp_filename, filename)
def get_pt_optimizer(self) -> Optional[torch.optim.Optimizer]:
'\n :return: PyTorch optimizer\n '
if (not self._updater):
return None
return self._updater.get_optimizer()
def _load_optimizer(self, *, epoch: int):
'\n Loads a torch.optim.Optimizer from disk and uses it as the optimizer.\n This function is a wrapper to Updater.load_optimizer().\n '
filename = ((self.get_epoch_model_filename(epoch=epoch) + '.opt') + util.get_model_filename_postfix())
self._updater.load_optimizer(filename)
def _save_optimizer(self):
'\n Saves the optimizer state to a file.\n This function is a wrapper to Updater.save_optimizer().\n '
if (not self._do_save()):
return
filename = ((self.get_epoch_model_filename() + '.opt') + util.get_model_filename_postfix())
self._updater.save_optimizer(filename)
clean_epoch = (self.epoch - 2)
if (clean_epoch > 0):
filename = ((self.get_epoch_model_filename(epoch=clean_epoch) + '.opt') + util.get_model_filename_postfix())
if os.path.isfile(filename):
os.unlink(filename)
def forward_with_callback(self, *, dataset: Dataset, callback: ForwardCallbackIface):
'forward'
assert isinstance(dataset, Dataset)
assert isinstance(callback, ForwardCallbackIface)
epoch_start_time = time.time()
elapsed_computation_time = 0.0
self._pt_model.eval()
self._maybe_reset_dev_memory_caches()
self._reset_dev_memory_stats()
if dataset.supports_seq_order_sorting():
print('Dataset supports sorting, i.e. it will be sorted for optimal performance.', file=log.v3)
dataset.seq_ordering = 'sorted_reverse'
else:
print('Dataset does not support sorting, i.e. it will not be sorted for optimal performance.', file=log.v3)
assert ((self._min_seq_length is None) and (self._max_seq_length is None)), f'min_seq_length {self._min_seq_length}, max_seq_length {self._max_seq_length} not allowed, we want to keep all source sentences.'
data_loader = self._create_data_loader(dataset)
batch_dim = extern_data_util.get_batch_dim_from_extern_data(self.extern_data)
def _get_tensor_wo_batch_numpy(x: Tensor) -> Tensor:
if (batch_dim not in x.dims):
raise Exception(f'Expected {batch_dim} in {x}.')
if (x.dims.index(batch_dim) != 0):
x = x.copy_move_axis(x.dims.index(batch_dim), 0)
y_kwargs = x.copy_template_excluding_axis(0).get_kwargs()
y_kwargs['dims'] = [_get_dim_tag_wo_batch(dim) for dim in y_kwargs['dims']]
y = Tensor(**y_kwargs)
if (x.batch_ndim > 1):
raw = x.raw_tensor[batch_idx]
else:
raw = x.raw_tensor[batch_idx:(batch_idx + 1)].reshape(())
if isinstance(raw, torch.Tensor):
raw = raw.detach().cpu().numpy()
y.raw_tensor = raw
return y
def _get_dim_tag_wo_batch(dim: Dim) -> Dim:
'\n This is for dim tags with dyn_size_ext which include the batch_dim,\n e.g. the standard [batch] sizes.\n In the callback, we pass each sequence without the batch dim,\n so we must adapt the dim tags.\n '
if (not dim.dyn_size_ext):
return dim
if (batch_dim not in dim.dyn_size_ext.dims):
return dim
new_dim = dim.copy()
new_dim.dyn_size_ext = _get_tensor_wo_batch_numpy(dim.dyn_size_ext)
return new_dim
with torch.no_grad():
callback.init(model=self._orig_model)
step_idx = 0
for extern_data_raw in data_loader:
step_begin_time = time.time()
extern_data = extern_data_util.raw_dict_to_extern_data(extern_data_raw, extern_data_template=self.extern_data, device=self._device)
self._run_step(extern_data, train_func=False)
ctx = rf.get_run_ctx()
ctx.check_outputs_complete()
model_outputs = ctx.outputs
for batch_idx in range(batch_dim.get_dim_value()):
seq_tag = extern_data['seq_tag'].raw_tensor[batch_idx].item()
model_outputs_per_batch = TensorDict()
for (k, v) in model_outputs.data.items():
model_outputs_per_batch.data[k] = _get_tensor_wo_batch_numpy(v)
callback.process_seq(seq_tag=seq_tag, outputs=model_outputs_per_batch)
elapsed_computation_time += (time.time() - step_begin_time)
_print_process(f'ep {self.epoch} {dataset.name} forward', step=step_idx, eval_info=None, log_memory_usage_device=(self._device if self._log_memory_usage else None))
step_idx += 1
callback.finish()
elapsed = (time.time() - epoch_start_time)
elapsed_computation_percentage = (elapsed_computation_time / elapsed)
print(('Forward %i steps, %s elapsed (%.1f%% computing time)' % (step_idx, hms(elapsed), (elapsed_computation_percentage * 100.0))), file=log.v3)
self._maybe_report_dev_memory_stats()
@staticmethod
def delete_model(filename):
'\n :param str filename:\n :return: accumulated file-size in bytes of deleted files\n :rtype: int\n '
count_bytes = 0
assert os.path.exists((filename + '.pt'))
count_bytes += os.stat((filename + '.pt')).st_size
os.remove((filename + '.pt'))
if os.path.exists((filename + '.opt.pt')):
count_bytes += os.stat((filename + '.opt.pt')).st_size
os.remove((filename + '.opt.pt'))
assert (count_bytes > 0)
return count_bytes
def _check_epoch_missing_eval(self):
'\n Checks if there are outstanding tasks (eval_model) for the epoch,\n and executes them.\n '
if (not self.epoch):
return
if self.learning_rate_control.filename:
for (name, dataset) in self.eval_datasets.items():
if (not self._is_dataset_evaluated(name=name)):
print(f'Last epoch model not yet evaluated on {name}. Doing that now.', file=log.v3)
self.eval_model(skip_already_evaluated=True)
break
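# Config sketch (hypothetical model, loss and extern_data keys) of the entry points this
# engine reads from the config: get_model(epoch, step) builds the model,
# train_step(model, extern_data) computes losses and registers them on the run context,
# forward_step(model, extern_data) registers outputs. The mark_as_loss / mark_as_output
# calls below are assumptions about the rf run-ctx API, not taken from this file.
def get_model(*, epoch: int, step: int, **_kwargs) -> torch.nn.Module:
    return torch.nn.Linear(40, 10)  # hypothetical model

def train_step(*, model: torch.nn.Module, extern_data: TensorDict, **_kwargs):
    feat = extern_data['data']  # assumes extern_data entries "data" [B,T,40] and "classes" [B,T]
    targets = extern_data['classes']
    logits = model(feat.raw_tensor)
    loss = torch.nn.functional.cross_entropy(logits.transpose(1, 2), targets.raw_tensor.long(), reduction='sum')
    rf.get_run_ctx().mark_as_loss(loss, 'ce')  # assumed API

def forward_step(*, model: torch.nn.Module, extern_data: TensorDict, **_kwargs):
    logits = model(extern_data['data'].raw_tensor)
    rf.get_run_ctx().mark_as_output(logits, 'logits')  # assumed API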
|
def _to_raw(n: Union[(int, float, Tensor)]):
if isinstance(n, (int, float)):
return n
if isinstance(n, Tensor):
return n.raw_tensor.detach().cpu().numpy()
raise TypeError(f'Unexpected {n} of type {type(n)}')
|
def _print_process(report_prefix: str, step: int, eval_info: Optional[Dict[(str, Any)]]=None, step_duration: Optional[float]=None, log_memory_usage_device: Optional[str]=None):
'\n Similar but simplified from TF engine _print_process.\n\n :param report_prefix:\n :param step:\n :param eval_info:\n :param step_duration:\n :param log_memory_usage_device: if given, will log memory usage (peak allocated memory)\n :return: nothing, will be printed to log\n '
if log.verbose[5]:
info = [report_prefix, ('step %i' % step)]
if eval_info:
info += [('%s %s' % (k, _format_value(v))) for (k, v) in sorted(eval_info.items())]
if log_memory_usage_device:
dev = torch.device(log_memory_usage_device)
if (dev.type == 'cuda'):
info += [f'mem_usage:{log_memory_usage_device} {util.human_bytes_size(torch.cuda.max_memory_allocated(dev))}']
if (step_duration is not None):
info += [('%.3f sec/step' % step_duration)]
print(', '.join(filter(None, info)), file=log.v5)
|
def _format_score(score: Dict[(str, float)]) -> str:
'\n Like the TF engine format_score.\n\n :param score:\n :return: score(s) as str\n '
if (not score):
return 'None'
if (len(score) == 1):
return _format_value(list(score.values())[0])
return ' '.join([('%s %s' % (key.split(':', 2)[(- 1)], _format_value(score[key]))) for key in sorted(score.keys())])
|
def _format_value(v: Any) -> str:
if isinstance(v, float):
if ((abs(v) > 1000.0) or (abs(v) < 0.001)):
return f'{v:.3e}'
else:
return f'{v:.3f}'
return str(v)
|
def _get_gpu_device() -> Optional[str]:
if torch.cuda.is_available():
return 'cuda'
if (hasattr(torch.backends, 'mps') and torch.backends.mps.is_available() and torch.backends.mps.is_built()):
return 'mps'
return None
|
def get_device_from_config_opt(device: Optional[str]) -> ResultWithReason[str]:
'\n :param device: as in config\n :return: resolved device\n '
if os.environ.get('PT_DEVICE'):
return ResultWithReason(os.environ['PT_DEVICE'], 'PT_DEVICE env var')
if (not device):
device = _get_gpu_device()
if device:
return ResultWithReason(device, 'GPU automatically selected')
return ResultWithReason('cpu', 'no GPU found')
reason = 'config'
if (device == 'gpu'):
device = _get_gpu_device()
if (not device):
reasons = diagnose_gpu.diagnose_no_gpu()
raise Exception(("No GPU device found, but config requested 'gpu' device.\n" + '\n'.join(reasons)))
reason = "'gpu' in config"
return ResultWithReason(device, reason)
|
def _data_loader_worker_init_func(worker_id: int):
if (sys.platform == 'linux'):
with open('/proc/self/comm', 'w') as f:
f.write(f'TDL worker {worker_id}')
|
class _WrappedModuleRunStep(torch.nn.Module):
'\n Wraps any Torch module (pure or RF),\n and the `forward` function calls the run step function (train_step or forward_step)\n and returns all produced raw tensors via the run context (losses or outputs) (:func:`rf.get_run_ctx`).\n This is useful to use the API of DistributedDataParallel and potentially other PyTorch modules.\n '
def __init__(self, *, module: torch.nn.Module, engine: Engine):
super().__init__()
self.module = module
self.engine = engine
def forward(self, *args, **kwargs):
'\n Call run step function (train_step or forward_step).\n\n :return: all produced raw tensors via the run context (:func:`rf.get_run_ctx`).\n '
self.engine._run_step(*args, **kwargs, _inside_wrapped=True)
res = {}
ctx = rf.get_run_ctx()
for (name, out) in ctx.outputs.data.items():
res[('output/' + name)] = out.raw_tensor
for (name, loss) in ctx.losses.items():
res[('loss/' + name)] = loss.loss.raw_tensor
return res
|
class TorchBackend(Backend[torch.Tensor]):
'\n PyTorch backend\n '
RawTensorType = torch.Tensor
@staticmethod
def executing_eagerly() -> bool:
'\n :return: whether we are executing eagerly\n '
return True
@staticmethod
def set_random_seed(seed: int):
'\n :param seed:\n '
torch.random.manual_seed(seed)
@staticmethod
def get_random_state() -> Dict[(str, bytes)]:
'\n :return: random state\n '
res = {'cpu': torch.random.get_rng_state().detach().cpu().numpy().tobytes()}
cuda_states = [state.detach().cpu().numpy().tobytes() for state in torch.cuda.get_rng_state_all()]
if (len(cuda_states) == 1):
res['cuda'] = cuda_states[0]
elif (len(cuda_states) > 1):
for (i, state) in enumerate(cuda_states):
res[f'cuda{i}'] = state
return res
@staticmethod
def set_random_state(state: Dict[(str, bytes)]):
'\n :param state: as returned by :func:`get_random_state`.\n This might not always be successful (e.g. different hardware, different backend version),\n so the calling code should always have called set_random_seed before to have the random generators\n in a reasonable fallback state.\n '
if ('cpu' in state):
torch.random.set_rng_state(torch.from_numpy(numpy.frombuffer(state['cpu'], dtype='uint8')))
if ('cuda' in state):
# single-GPU case: 'cuda' holds one state; set_rng_state_all expects a list of per-device states
torch.cuda.set_rng_state_all([torch.from_numpy(numpy.frombuffer(state['cuda'], dtype='uint8'))])
for (k, v) in state.items():
if (k.startswith('cuda') and (k != 'cuda')):
i = int(k[4:])
torch.cuda.set_rng_state(torch.from_numpy(numpy.frombuffer(v, dtype='uint8')), i)
@staticmethod
def get_dtype_name_raw(raw_tensor: torch.Tensor) -> str:
'\n :return: dtype of raw tensor, as string\n '
return str(raw_tensor.dtype).replace('torch.', '')
@staticmethod
def as_dtype_raw(dtype_name: str) -> torch.dtype:
'\n :param dtype_name: e.g. "float32"\n :return: dtype object\n '
dtype = getattr(torch, dtype_name)
assert isinstance(dtype, torch.dtype)
return dtype
@staticmethod
def get_ndim_raw(raw_tensor: torch.Tensor) -> int:
'\n :return: ndim of raw tensor\n '
return raw_tensor.dim()
@staticmethod
def get_shape_raw(raw_tensor: torch.Tensor) -> Tuple[int, ...]:
'shape'
return tuple(raw_tensor.shape)
@staticmethod
def get_shape_tuple_raw(raw_tensor: torch.Tensor) -> Tuple[int, ...]:
'\n :return: shape of raw tensor\n '
return tuple(raw_tensor.shape)
@staticmethod
def get_known_shape_raw(raw_tensor: torch.Tensor) -> Tuple[Optional[int], ...]:
'\n :return: shape of raw tensor; here for PyTorch the full shape is always known\n '
return tuple(raw_tensor.size())
@staticmethod
def get_new_dim_raw(raw_tensor: torch.Tensor, axis: int, *, name: str) -> Dim:
'\n :param raw_tensor:\n :param axis:\n :param name:\n :return: new Dim object\n '
return Dim(int(raw_tensor.size(axis)), name=name)
@staticmethod
def get_device(x: Tensor[torch.Tensor]) -> Optional[str]:
'device'
raw_tensor: torch.Tensor = x.raw_tensor
if (raw_tensor is None):
return None
dev = raw_tensor.device
if (dev.index is None):
return dev.type
return f'{dev.type}:{dev.index}'
@staticmethod
def copy_to_device(x: Tensor, device: Optional[str]) -> Tensor:
'\n :param x:\n :param device:\n '
if (not device):
return x
x = x.copy()
x.raw_tensor = x.raw_tensor.to(device)
return x
@staticmethod
def expand_dims_raw(raw_tensor: torch.Tensor, axis: int) -> torch.Tensor:
'\n :param raw_tensor:\n :param axis: e.g. 1\n :return: raw tensor with new axis\n '
return raw_tensor.unsqueeze(axis)
@staticmethod
def expand_raw(raw_tensor: torch.Tensor, axis: int, dim: Union[(int, torch.Tensor)]) -> torch.Tensor:
'\n :param raw_tensor:\n :param axis: shape[axis] must be 1\n :param dim: the new dim for shape[axis]\n :return: shape[axis] expands to dim.\n in PyTorch or other frameworks which support custom strides,\n this is an efficient view and not a copy.\n '
return raw_tensor.expand(*[((- 1) if (i != axis) else dim) for i in range(raw_tensor.dim())])
@staticmethod
def copy(tensor: Tensor[torch.Tensor]) -> Tensor[torch.Tensor]:
'copy'
out = tensor.copy_template()
out.raw_tensor = tensor.raw_tensor.clone()
return out
@staticmethod
def cast_raw(raw_tensor: torch.Tensor, dtype: str) -> torch.Tensor:
'cast'
return raw_tensor.to(dtype=TorchBackend.as_dtype_raw(dtype))
@staticmethod
def set_requires_gradient(tensor: Tensor[torch.Tensor]):
'set requires grad'
tensor.raw_tensor.requires_grad = True
@staticmethod
def gradient(y: Tensor, x: Tensor) -> Tensor:
'gradient'
out = x.copy_template(name='gradient')
out.raw_tensor = torch.autograd.grad(y.raw_tensor, x.raw_tensor, create_graph=True)[0]
return out
@staticmethod
def stop_gradient(tensor: Tensor) -> Tensor:
'stop grad'
out = tensor.copy()
out.raw_tensor = out.raw_tensor.detach()
return out
@staticmethod
def scaled_gradient(tensor: Tensor, scale: Union[(float, Tensor)]) -> Tensor:
'scaled gradient'
from returnn.torch.util.scaled_gradient import scaled_gradient
out = tensor.copy()
out.raw_tensor = scaled_gradient(out.raw_tensor, scale=scale)
return out
@staticmethod
def scaled_gradient_ext(x: Tensor, *, scale: Union[(float, Tensor)]=1.0, shift: Optional[Union[(float, Tensor)]]=None, scale_shift_by_sum_over_axis: Optional[Dim]=None):
'scaled gradient ext'
from returnn.torch.util.scaled_gradient import scaled_gradient_ext
out = x.copy()
out.raw_tensor = scaled_gradient_ext(out.raw_tensor, scale=(scale.raw_tensor if isinstance(scale, Tensor) else scale), shift=(shift.raw_tensor if isinstance(shift, Tensor) else shift), scale_shift_by_sum_over_axis=(x.get_axis_from_description(scale_shift_by_sum_over_axis, allow_int=False) if (scale_shift_by_sum_over_axis is not None) else None))
return out
@staticmethod
def merge_dims(source: Tensor, *, dims: Sequence[Dim], out_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]:
'\n Merges a list of axes into a single one. (Flatten the dims.)\n E.g. input is (batch, width, height, dim) and dims=(width,height), then we get (batch, width*height, dim).\n Or input is (batch, time, height, dim) and axes=(height,dim), then we get (batch, time, height*dim).\n\n :param source:\n :param dims:\n :param out_dim:\n :return: tensor, out_dim\n '
assert dims
if (len(dims) == 1):
return (source, dims[0])
first_axis = min((source.dims.index(d) for d in dims))
pre_dims = source.dims[:first_axis]
post_dims = [d for d in source.dims if ((d not in dims) and (d not in pre_dims))]
if (out_dim is None):
out_dim = dims[0]
for d in dims[1:]:
out_dim = (out_dim * d)
source = source.copy_transpose(((tuple(pre_dims) + tuple(dims)) + tuple(post_dims)), allow_int=False)
out = Tensor('merge_dims', dims=((pre_dims + (out_dim,)) + tuple(post_dims)), dtype=source.dtype, sparse_dim=source.sparse_dim)
out_shape = [d.get_dim_value() for d in out.dims]
out.raw_tensor = torch.reshape(source.raw_tensor, out_shape)
if (source.feature_dim and (source.feature_dim in dims)):
out.feature_dim = out_dim
return (out, out_dim)
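# Hedged usage sketch (hypothetical dims): flatten (time, feat) of a [batch, time, feat] tensor.
# batch, time, feat = Dim(2, name='batch'), Dim(3, name='time'), Dim(4, name='feat')
# x = Tensor('x', dims=(batch, time, feat), dtype='float32', raw_tensor=torch.zeros(2, 3, 4))
# y, merged = TorchBackend.merge_dims(x, dims=(time, feat))
# y.dims == (batch, merged) and tuple(y.raw_tensor.shape) == (2, 12)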
@staticmethod
def split_dims(source: Tensor, *, axis: Dim, dims: Sequence[Dim], pad_to_multiples: Optional[bool]=None, pad_value: Union[(None, int, float)]=None) -> Tensor:
'split dims'
assert (not axis.need_masking())
assert (pad_to_multiples in (None, False))
axis_ = source.get_axis_from_description(axis)
out_dims = ((source.dims[:axis_] + tuple(dims)) + source.dims[(axis_ + 1):])
out_shape = [d.get_dim_value() for d in out_dims]
out_raw = torch.reshape(source.raw_tensor, out_shape)
return Tensor('split_dims', dims=out_dims, dtype=source.dtype, sparse_dim=source.sparse_dim, raw_tensor=out_raw)
@staticmethod
def reshape(source: Tensor, in_dims: Sequence[Dim], out_dims: Sequence[Dim]) -> Tensor:
'reshape'
in_dims_axes = [source.get_axis_from_description(d, allow_int=False) for d in in_dims]
assert (sorted(set(in_dims_axes)) == sorted(in_dims_axes)), f'reshape {source}: invalid in_dims {in_dims}'
insert_axis = min(in_dims_axes)
dims = list(source.dim_tags)
permute = list(range(source.batch_ndim))
for axis in sorted(set(in_dims_axes), reverse=True):
dims.pop(axis)
permute.pop(axis)
permute = ((permute[:insert_axis] + in_dims_axes) + permute[insert_axis:])
source = source.copy_transpose(permute)
dims = ((dims[:insert_axis] + list(out_dims)) + dims[insert_axis:])
out = Tensor('reshape', dims=dims, dtype=source.dtype, sparse_dim=source.sparse_dim)
if (source.feature_dim and (source.feature_dim not in in_dims)):
out.feature_dim = source.feature_dim
out.raw_tensor = torch.reshape(source.placeholder, [d.get_dim_value() for d in dims])
return out
@staticmethod
def split(source: Tensor, *, axis: Dim, out_dims: Sequence[Dim]) -> Tuple[(Tensor, ...)]:
'split'
src_axis_int = source.get_axis_from_description(axis)
out_raw_list = torch.split(source.raw_tensor, split_size_or_sections=[d.get_dim_value() for d in out_dims], dim=src_axis_int)
out_tuple = tuple((source.copy_template_replace_dim_tag(axis=src_axis_int, new_dim_tag=dim, name=f'split{i}') for (i, dim) in enumerate(out_dims)))
for (i, out) in enumerate(out_tuple):
out.raw_tensor = out_raw_list[i]
return out_tuple
@staticmethod
def expand_dim(source: Tensor, dim: Dim) -> Tensor:
'expand dim'
assert (dim not in source.dims)
axis = len(source.dims)
if dim.is_static():
if source.have_feature_axis():
axis = source.feature_dim_axis
if dim.is_dynamic():
for (i, d) in reversed(list(enumerate(source.dims))):
assert isinstance(d, Dim)
if d.is_dynamic():
axis = (i + 1)
break
new_dim_tags = list(source.dims)
new_dim_tags.insert(axis, dim)
out = source.copy_template_new_dim_tags(new_dim_tags)
if source.feature_dim:
out.feature_dim = source.feature_dim
out_raw = torch.unsqueeze(source.raw_tensor, axis)
if (dim.is_dynamic() or (dim.dimension != 1)):
out_raw = torch.tile(out_raw, [(dim.get_dim_value() if (d == dim) else 1) for d in out.dims])
out.raw_tensor = out_raw
return out
@staticmethod
def squeeze(source: Tensor, axis: Dim) -> Tensor:
'squeeze'
axis = source.get_axis_from_description(axis)
out = source.copy_template_excluding_axis(axis)
out.raw_tensor = torch.squeeze(source.raw_tensor, axis)
return out
@staticmethod
def concat(*sources: Tuple[(Tensor, Dim)], allow_broadcast: bool=False, out_dim: Dim) -> Tensor:
'concat'
axis = sources[0][0].get_axis_from_description(sources[0][1])
other_dims = list(sources[0][0].dims)
other_dims.remove(sources[0][1])
need_broadcast = False
if allow_broadcast:
for (source, dim) in sources[1:]:
assert (dim in source.dims)
for dim_ in source.dims:
if (dim_ == dim):
continue
if (dim_ not in other_dims):
other_dims.append(dim_)
need_broadcast = True
sources_raw = []
if (allow_broadcast and need_broadcast):
for (source, dim) in sources:
templ = Tensor(source.name, dims=((other_dims[:axis] + [dim]) + other_dims[axis:]), dtype=source.dtype, sparse_dim=source.sparse_dim)
source_ = source.copy_compatible_to(templ, unbroadcast=True)
sources_raw.append(source_.raw_tensor)
else:
for (source, dim) in sources:
templ_dims = ((other_dims[:axis] + [dim]) + other_dims[axis:])
assert (set(templ_dims) == set(source.dims)), f'concat {source} {dim} not allowed with allow_broadcast=False'
source_ = source.copy_transpose(templ_dims)
sources_raw.append(source_.raw_tensor)
out = Tensor('concat', dims=((other_dims[:axis] + [out_dim]) + other_dims[axis:]), dtype=sources[0][0].dtype, sparse_dim=sources[0][0].sparse_dim)
if (sources[0][0].feature_dim and (sources[0][0].feature_dim != sources[0][1])):
out.feature_dim = sources[0][0].feature_dim
out.raw_tensor = torch.cat([s for s in sources_raw], dim=axis)
return out
@staticmethod
def pad(source: Tensor, *, axes: Sequence[Dim], padding: Sequence[Tuple[(Union[(Dim, int)], Union[(Dim, int)])]], out_dims: Sequence[Dim], mode: str='constant', value: Optional[Union[(rf.RawTensorTypes, Tensor)]]=None) -> Tensor:
'pad'
assert (len(out_dims) == len(axes) == len(padding))
out = source.copy_template_new_dim_tags([(out_dims[axes.index(dim)] if (dim in axes) else dim) for dim in source.dim_tags], keep_special_axes=True)
remaining_dims = set(axes)
raw_pad = []
for dim in reversed(source.dims):
if (dim not in remaining_dims):
raw_pad += [0, 0]
continue
remaining_dims.remove(dim)
pad_ = padding[axes.index(dim)]
raw_pad += [(pad_[0].get_dim_value() if isinstance(pad_[0], Dim) else pad_[0]), (pad_[1].get_dim_value() if isinstance(pad_[1], Dim) else pad_[1])]
if (not remaining_dims):
break
if isinstance(value, Tensor):
assert (value.dims == ()), f'value {value} must be a scalar'
value = value.raw_tensor
out.raw_tensor = torch.nn.functional.pad(source.raw_tensor, pad=raw_pad, mode=mode, value=value)
return out
@staticmethod
def cum_concat_step(source: Tensor, *, prev_accum: Tensor, axis: Dim, out_spatial_dim: Dim) -> Tensor:
'cum concat step'
out = prev_accum.copy_template_replace_dim_tag(axis=prev_accum.get_axis_from_description(axis), new_dim_tag=out_spatial_dim, name=f'{source.name}/cum_concat_step')
source_raw = source.copy_compatible_to_dims_raw(prev_accum.dims)
out.raw_tensor = torch.cat((prev_accum.raw_tensor, source_raw), dim=prev_accum.get_axis_from_description(axis))
return out
@staticmethod
def activation_raw(raw_tensor: torch.Tensor, func: str) -> torch.Tensor:
'\n :param raw_tensor:\n :param func: e.g. "tanh"\n :return: raw tensor after activation\n '
assert (func in Backend._AllowedActivationFuncs)
if hasattr(torch, func):
f = getattr(torch, func)
elif hasattr(torch.nn.functional, func):
f = getattr(torch.nn.functional, func)
else:
raise ValueError(f'unknown activation function {func!r}')
return f(raw_tensor)
@staticmethod
def softmax(tensor: Tensor, *, axis: Dim, use_mask: bool=True) -> Tensor:
'\n :param tensor:\n :param axis:\n :param use_mask:\n :return: softmax over axis\n '
out = tensor.copy_template('softmax')
if (use_mask and axis.need_masking()):
tensor = tensor.copy()
mask = tensor.get_sequence_mask_broadcast(axis=axis)
inf_value = get_global_inf_value()
tensor.raw_tensor = torch.where(mask, tensor.raw_tensor, (- inf_value))
out_raw = torch.softmax(tensor.raw_tensor, dim=tensor.dims.index(axis))
out.dtype = TorchBackend.get_dtype_name_raw(out_raw)
out.raw_tensor = out_raw
return out
@staticmethod
def log_softmax(tensor: Tensor, *, axis: Dim, use_mask: bool=True) -> Tensor:
'\n :param tensor:\n :param axis:\n :param use_mask:\n :return: log_softmax over axis\n '
out = tensor.copy_template('log_softmax')
if (use_mask and axis.need_masking()):
tensor = tensor.copy()
mask = tensor.get_sequence_mask_broadcast(axis=axis)
inf_value = get_global_inf_value()
tensor.raw_tensor = torch.where(mask, tensor.raw_tensor, (- inf_value))
out_raw = torch.log_softmax(tensor.raw_tensor, dim=tensor.dims.index(axis))
out.dtype = TorchBackend.get_dtype_name_raw(out_raw)
out.raw_tensor = out_raw
return out
@staticmethod
def softmax_cross_entropy_with_logits(*, logits: Tensor, targets: Tensor, axis: Dim):
"\n Efficient cross entropy. For PyTorch this is actually the default cross entropy function.\n (torch.nn.functional.cross_entropy)\n\n :param logits: target estimates given as inputs to softmax (i.e. unnormalized)\n :param targets: probabilities, i.e. normalized, can also be sparse\n :param axis: class labels dim over which softmax is computed\n :return: cross entropy (same Dims as 'logits' but without 'axis')\n "
assert (axis in logits.dims), 'Specified axis not present in logits.'
if (axis == targets.sparse_dim):
assert ((logits.dims_set - {axis}) == targets.dims_set), 'logits Dims and target Dims have to match (except for implicit sparse_dim).'
logits_dim_order = list(targets.dims)
if (len(logits_dim_order) > 0):
logits_dim_order.insert(1, axis)
else:
logits_dim_order = [axis]
if (targets.dtype != 'int64'):
targets = targets.copy()
targets.dtype = 'int64'
targets.raw_tensor = targets.raw_tensor.long()
else:
assert (not targets.sparse_dim), 'We expect that cross entropy would always be calculated along the sparse dim, if there is one.'
assert (logits.dims_set == targets.dims_set), 'logits Dims and target Dims have to match.'
assert (axis in targets.dims), 'Specified axis not present in targets.'
if (len(targets.dims) > 1):
targets = targets.copy_move_axis(targets.dims.index(axis), 1)
logits_dim_order = targets.dims
logits_axes_permutation = [logits_dim_order.index(dim) for dim in logits.dims]
logits = logits.copy_transpose(logits_axes_permutation)
raw_cross_entropy = torch.nn.functional.cross_entropy(input=logits.raw_tensor, target=targets.raw_tensor, reduction='none')
out_dims = list(logits.dims)
out_dims.remove(axis)
cross_entropy = Tensor(name='cross_entropy', dims=out_dims, raw_tensor=raw_cross_entropy, dtype=logits.dtype)
return cross_entropy
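# Hedged usage sketch (hypothetical dims): sparse integer targets, classes as the softmax axis.
# batch, classes = Dim(4, name='batch'), Dim(10, name='classes')
# logits = Tensor('logits', dims=(batch, classes), dtype='float32', raw_tensor=torch.randn(4, 10))
# targets = Tensor('targets', dims=(batch,), dtype='int64', sparse_dim=classes,
#     raw_tensor=torch.randint(0, 10, (4,)))
# ce = TorchBackend.softmax_cross_entropy_with_logits(logits=logits, targets=targets, axis=classes)
# ce.dims == (batch,)  # one cross-entropy value per batch entry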
@staticmethod
def ctc_loss(*, logits: Tensor, targets: Tensor, input_spatial_dim: Dim, targets_spatial_dim: Dim, blank_index: int, max_approx: bool=False) -> Tensor:
'CTC'
if max_approx:
raise NotImplementedError('ctc_loss: max_approx not implemented for PyTorch')
assert (targets.sparse_dim and (targets.sparse_dim.dimension <= logits.feature_dim.dimension))
batch_dims = logits.remaining_dims((input_spatial_dim, logits.feature_dim))
logits = logits.copy_transpose((([input_spatial_dim] + batch_dims) + [logits.feature_dim]))
logits_raw: torch.Tensor = logits.raw_tensor
input_lengths = input_spatial_dim.dyn_size_ext.copy_transpose(batch_dims).raw_tensor
logits_raw_shape = logits_raw.shape
if (len(batch_dims) != 1):
logits_raw = torch.reshape(logits_raw, ((logits_raw.shape[:1] + ((- 1),)) + logits_raw.shape[(- 1):]))
input_lengths = torch.reshape(input_lengths, ((- 1),))
log_probs = torch.nn.functional.log_softmax(logits_raw, dim=(- 1))
targets = targets.copy_transpose((batch_dims + [targets_spatial_dim]))
targets_raw = targets.raw_tensor
targets_lengths = targets_spatial_dim.dyn_size_ext.copy_transpose(batch_dims).raw_tensor
if (len(batch_dims) != 1):
targets_raw = torch.reshape(targets_raw, ((- 1), targets_raw.shape[(- 1)]))
targets_lengths = torch.reshape(targets_lengths, ((- 1),))
loss_raw = torch.nn.functional.ctc_loss(log_probs=log_probs, targets=targets_raw, input_lengths=input_lengths, target_lengths=targets_lengths, blank=blank_index, zero_infinity=True, reduction='none')
if (len(batch_dims) != 1):
loss_raw = torch.reshape(loss_raw, logits_raw_shape[1:(- 1)])
loss = Tensor(name='ctc_loss', dims=batch_dims, raw_tensor=loss_raw, dtype=logits.dtype)
return loss
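# Hedged usage sketch: logits must have feature_dim set to the label dim (incl. blank), and targets
# must be sparse over the label vocabulary. With hypothetical dims batch/time/target_time:
# loss = TorchBackend.ctc_loss(logits=logits, targets=targets, input_spatial_dim=time,
#     targets_spatial_dim=target_time, blank_index=0)
# loss.dims == (batch,)  # per-sequence CTC loss; zero_infinity=True is used internally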
@staticmethod
def create_parameter_raw(tensor: rf.Parameter, *, device: Optional[str]=None) -> torch.nn.Parameter:
'\n :return: parameter\n '
assert all((d.is_static() for d in tensor.dims))
data = torch.zeros([d.dimension for d in tensor.dims], dtype=TorchBackend.as_dtype_raw(tensor.dtype), device=(device or rf.get_default_device()))
if (tensor.dtype.startswith('int') or (tensor.dtype == 'bool')):
requires_grad = False
else:
requires_grad = True
return torch.nn.Parameter(data, requires_grad=requires_grad)
@staticmethod
def set_parameter_initial_value(param: rf.Parameter, value: Union[(None, Tensor, rf.RawTensorTypes)]) -> None:
'\n :param param: parameter\n :param value: initial value\n '
if (value is None):
value = 0
raw_param = param.raw_tensor
assert isinstance(raw_param, torch.nn.Parameter)
with torch.no_grad():
if isinstance(value, Tensor):
value_raw = value.copy_compatible_to_dims_raw(param.dims)
raw_param.copy_(value_raw)
elif isinstance(value, numpy.ndarray):
raw_param.copy_(torch.from_numpy(value))
else:
raw_param.copy_(value)
@staticmethod
def set_parameter_trainable(param: rf.Parameter, trainable: bool) -> None:
'set trainable'
raw_param = param.raw_tensor
assert isinstance(raw_param, torch.nn.Parameter)
raw_param.requires_grad = trainable
@staticmethod
def parameter_assign(param: rf.Parameter, value: Tensor, *, op: str='assign') -> None:
'param assign'
raw_param = param.raw_tensor
assert isinstance(raw_param, torch.nn.Parameter)
value_raw = value.copy_compatible_to_dims_raw(param.dims)
with torch.no_grad():
if (op == 'assign'):
raw_param.copy_(value_raw)
elif (op == 'add'):
raw_param.add_(value_raw)
else:
raise ValueError(f'Parameter {param} assign: Unsupported op: {op}')
@staticmethod
def parameter_assign_key(param: rf.Parameter, key: rf.ItemKeyType, value: Tensor, *, op: str='assign', axis: Optional[Union[(Dim, Sequence[Dim])]]=None, key_dim: Optional[Union[(Dim, Sequence[Dim])]]=None) -> None:
'param assign'
raw_param = param.raw_tensor
assert isinstance(raw_param, torch.nn.Parameter)
(key_raw, res_dims) = _utils.strided_slice_raw_key(param, axis, key, key_dim)
value_raw = value.copy_compatible_to_dims_raw(res_dims)
with torch.no_grad():
if (op == 'assign'):
raw_param[key_raw] = value_raw
elif (op == 'add'):
raw_param[key_raw] += value_raw
else:
raise ValueError(f'Parameter {param} assign: Unsupported op: {op}')
@staticmethod
def compare_raw(a: torch.Tensor, kind: str, b: torch.Tensor) -> torch.Tensor:
'\n :param a:\n :param kind: "equal", "less", "less_equal", "greater", "greater_equal", "not_equal"\n :param b:\n :return: a `kind` b\n '
assert ((a.dim() == b.dim()) or (a.dim() == 0) or (b.dim() == 0))
if (kind == 'equal'):
kind = 'eq'
op = getattr(torch, kind)
return op(a, b)
@staticmethod
def combine_raw(a: torch.Tensor, kind: str, b: torch.Tensor) -> torch.Tensor:
'\n :param a:\n :param kind: "add", "sub", "mul", "truediv", "floordiv", "mod", "pow",\n "maximum", "minimum", "logical_and", "logical_or", "squared_difference"\n :param b:\n :return: a `kind` b\n '
assert ((a.dim() == b.dim()) or (a.dim() == 0) or (b.dim() == 0))
if (kind == 'squared_difference'):
return raw_ops.squared_difference(a, b)
kind = {'truediv': 'true_divide', 'floordiv': 'floor_divide', 'mod': 'remainder'}.get(kind, kind)
op = getattr(torch, kind)
return op(a, b)
@staticmethod
def reshape_raw(raw_tensor: torch.Tensor, shape: Union[(Sequence[Union[(int, torch.Tensor)]], torch.Tensor)]) -> torch.Tensor:
'\n :param raw_tensor:\n :param shape:\n :return: reshaped raw tensor; wraps torch.reshape\n '
return torch.reshape(raw_tensor, shape)
@classmethod
def squeeze_raw(cls, raw_tensor: torch.Tensor, axes: Sequence[int]) -> torch.Tensor:
'squeeze'
if (len(axes) == 1):
return raw_tensor.squeeze(dim=axes[0])
elif (len(axes) == 0):
return raw_tensor
else:
return super().squeeze_raw(raw_tensor, axes=axes)
@staticmethod
def transpose_raw(raw_tensor: torch.Tensor, perm: Sequence[int]) -> torch.Tensor:
'\n :param raw_tensor:\n :param perm: e.g. [0, 2, 1]\n :return: permuted (transposed) raw tensor; wraps torch.permute\n '
if all(((p == i) for (i, p) in enumerate(perm))):
return raw_tensor
return torch.permute(raw_tensor, tuple(perm))
@staticmethod
def convert_to_tensor(value: Union[(Tensor, torch.Tensor, RawTensorTypes)], *, dims: Sequence[Dim], dtype: str, sparse_dim: Optional[Dim]=None, device: Optional[str]=None, name: Optional[str]=None) -> Tensor[torch.Tensor]:
'\n :param value:\n :param dims:\n :param dtype:\n :param sparse_dim:\n :param device:\n :param name:\n :return: tensor\n '
if isinstance(value, Tensor):
return value
if isinstance(value, torch.Tensor):
name = (name or 'raw_tensor')
else:
name = (name or 'const')
value = torch.tensor(value, dtype=TorchBackend.as_dtype_raw(dtype), device=(device or rf.get_default_device()))
assert isinstance(value, torch.Tensor)
return Tensor(name, dims=dims, dtype=dtype, sparse_dim=sparse_dim, raw_tensor=value)
@staticmethod
def full(dims: Sequence[Dim], fill_value: Union[(RawTensorTypes, Tensor)], *, dtype: str, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None) -> Tensor:
'full'
shape = [dim.get_dim_value() for dim in dims]
if isinstance(fill_value, Tensor):
fill_value = fill_value.raw_tensor
if torch.onnx.is_in_onnx_export():
shape = [(dim.long() if isinstance(dim, torch.Tensor) else dim) for dim in shape]
raw_tensor = torch.full(shape, fill_value, dtype=TorchBackend.as_dtype_raw(dtype), device=(device or rf.get_default_device()))
return Tensor('full', dims=dims, sparse_dim=sparse_dim, feature_dim=feature_dim, dtype=dtype, raw_tensor=raw_tensor)
@staticmethod
def gather(source: Tensor, *, indices: Union[(Tensor, int)], axis: Dim, clip_to_valid: bool=False) -> Tensor:
'\n Gather.\n\n There are a few options in PyTorch, all having somewhat different semantics\n and different advantages or disadvantages and different limitations.\n\n - torch.gather, most generic\n - torch.index_select, similar as tf.gather, but does not support batch axes\n - Tensor.__getitem__\n - torch.embedding\n '
if isinstance(indices, Tensor):
indices: Tensor[torch.Tensor]
elif isinstance(indices, int):
indices = Tensor('indices_int', dims=(), dtype=rf.get_default_array_index_dtype(), raw_tensor=torch.tensor(indices, dtype=TorchBackend.as_dtype_raw(rf.get_default_array_index_dtype())))
else:
raise TypeError(f'Unsupported type for indices: {type(indices)}')
axis_int = source.get_axis_from_description(axis, allow_int=False)
if clip_to_valid:
indices = indices.copy()
dim: Dim = source.dims[axis_int]
if dim.dyn_size_ext:
assert dim.dyn_size_ext.dims_set.issubset(indices.dims_set), f'gather with clip_to_valid: indices ({indices}) dims must be a superset of {dim} dyn-size'
size = dim.dyn_size_ext.copy_compatible_to(indices, check_sparse=False)
indices.raw_tensor = torch.clamp(indices.raw_tensor, torch.tensor(0, device=indices.raw_tensor.device), (size.raw_tensor - 1).to(indices.raw_tensor.device))
else:
indices.raw_tensor = torch.clamp(indices.raw_tensor, 0, (source.raw_tensor.shape[axis_int] - 1))
index_own_dims = [dim for dim in indices.dims if ((dim not in source.dims) or (dim == axis))]
out = Tensor('gather', dims=((list(source.dims[:axis_int]) + index_own_dims) + list(source.dims[(axis_int + 1):])), dtype=source.dtype, sparse_dim=source.sparse_dim)
if (source.feature_dim and (source.feature_dim in out.dims)):
out.feature_dim = source.feature_dim
if indices.dims_set.intersection((source.dims_set - {axis})):
indices = indices.copy_compatible_to(out, check_dtype=False, check_sparse=False, unbroadcast=True)
if (len(index_own_dims) == 1):
index_own_dims_flat = index_own_dims[0]
elif (len(index_own_dims) == 0):
index_own_dims_flat = Dim(1, name='dummy')
indices = indices.copy_add_dim_by_tag(index_own_dims_flat, unbroadcast=True, axis=axis_int)
else:
(indices, index_own_dims_flat) = rf.merge_dims(indices, dims=index_own_dims)
index_ext_dims = list(source.dims)
index_ext_dims[axis_int] = index_own_dims_flat
assert (indices.dims == tuple(index_ext_dims))
out_raw = torch.gather(source.raw_tensor, dim=axis_int, index=indices.raw_tensor.type(torch.int64))
if (len(index_own_dims) == 1):
pass
elif (len(index_own_dims) == 0):
out_raw = out_raw.squeeze(axis_int)
else:
out_raw = out_raw.reshape([d.get_dim_value() for d in out.dims])
out.raw_tensor = out_raw
elif ((axis_int == 0) and (indices.batch_ndim == 0)):
out.raw_tensor = source.raw_tensor[indices.raw_tensor]
elif ((axis_int == 0) and (source.batch_ndim == 2)):
out.raw_tensor = torch.embedding(source.raw_tensor, indices.raw_tensor)
else:
out_raw = torch.index_select(source.raw_tensor, dim=axis_int, index=indices.raw_tensor.flatten())
out_shape = ((source.raw_tensor.shape[:axis_int] + indices.raw_tensor.shape) + source.raw_tensor.shape[(axis_int + 1):])
out.raw_tensor = out_raw.reshape(out_shape)
return out
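# Hedged usage sketch (hypothetical dims): pick one time frame per batch entry.
# batch, time, feat = Dim(2, name='batch'), Dim(5, name='time'), Dim(3, name='feat')
# x = Tensor('x', dims=(batch, time, feat), dtype='float32', raw_tensor=torch.randn(2, 5, 3))
# idx = Tensor('idx', dims=(batch,), dtype='int64', sparse_dim=time, raw_tensor=torch.tensor([0, 4]))
# y = TorchBackend.gather(x, indices=idx, axis=time)
# y.dims == (batch, feat)  # the gathered axis is consumed, batch dims are kept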
@staticmethod
def scatter(source: Tensor, *, indices: Tensor, indices_dim: Union[(Dim, Sequence[Dim])], out_dim: Union[(Dim, Sequence[Dim])]) -> Tensor:
'\n Scatters into new zero-tensor.\n If entries in indices are duplicated, the corresponding values in source will be added together\n (scatter_add in PyTorch).\n (TF segment_sum can be implemented via this.)\n\n :param source: [batch_dims..., indices_dim(s)..., feature_dims...]\n :param indices: [batch_dims..., indices_dim(s)...] -> out_dim\n :param indices_dim:\n :param out_dim:\n :return: [batch_dims..., out_dim, feature_dims...]\n '
if isinstance(indices_dim, Dim):
indices_dim = [indices_dim]
else:
assert (len(indices_dim) >= 1)
indices_dim = list(indices_dim)
assert indices.dtype.startswith('int')
if isinstance(out_dim, Dim):
out_flat_dim = out_dim
out_dim = [out_dim]
elif (len(out_dim) == 1):
out_flat_dim = out_dim[0]
out_dim = [out_flat_dim]
else:
assert (len(out_dim) > 1)
out_flat_dim = out_dim[0]
for dim in out_dim[1:]:
out_flat_dim = (out_flat_dim * dim)
out_dim = list(out_dim)
batch_dims = indices.remaining_dims(indices_dim)
feature_dims = source.remaining_dims((batch_dims + indices_dim))
if (len(indices_dim) > 1):
(indices, indices_flat_dim) = rf.merge_dims(indices, dims=indices_dim)
(source, _) = rf.merge_dims(source, dims=indices_dim, out_dim=indices_flat_dim)
else:
indices_flat_dim = indices_dim[0]
source = source.copy_transpose(((batch_dims + [indices_flat_dim]) + feature_dims))
indices = indices.copy_compatible_to(source, unbroadcast=True, add_dims=True, check_sparse=False, check_dtype=False)
out_dims = ((batch_dims + [out_flat_dim]) + feature_dims)
out_shape = [d.get_dim_value() for d in out_dims]
out_raw = torch.zeros(out_shape, dtype=source.raw_tensor.dtype, device=source.raw_tensor.device)
out_raw.scatter_add_(dim=len(batch_dims), index=indices.raw_tensor.to(torch.int64), src=source.raw_tensor)
res = Tensor('scatter', dims=out_dims, dtype=source.dtype, sparse_dim=source.sparse_dim, raw_tensor=out_raw)
if (len(out_dim) > 1):
res = rf.split_dims(res, axis=out_flat_dim, dims=out_dim)
return res
@staticmethod
def slice(source: Tensor, *, axis: Dim, start: Optional[Union[(int, Tensor)]]=None, end: Optional[Union[(int, Tensor)]]=None, step: Optional[Union[(int, Tensor)]]=None, size: Optional[Union[(int, Tensor, Dim)]]=None, out_dim: Dim) -> Tensor:
'slice'
axis_int = source.get_axis_from_description(axis, allow_int=False)
out = source.copy_template_replace_dim_tag(axis=axis_int, new_dim_tag=out_dim)
if isinstance(start, Tensor):
assert (start.dims == ())
start = start.raw_tensor
if (start is None):
start = 0
if isinstance(size, Dim):
size = size.get_dim_value()
if (size is not None):
assert (end is None)
out.raw_tensor = torch.narrow(source.raw_tensor, dim=axis_int, start=start, length=size)
else:
if isinstance(end, Tensor):
assert (end.dims == ())
end = end.raw_tensor
if (end is None):
end = axis.get_dim_value()
out.raw_tensor = torch.narrow(source.raw_tensor, dim=axis_int, start=start, length=(end - start))
return out
@staticmethod
def where(cond: Tensor, true_: Union[(Tensor, rf.RawTensorTypes)], false_: Union[(Tensor, rf.RawTensorTypes)], *, allow_broadcast_all_sources: bool=False) -> Tensor:
'where'
true_ = rf.convert_to_tensor(true_, _backend=TorchBackend, device=cond.device)
false_ = rf.convert_to_tensor(false_, _backend=TorchBackend, device=cond.device)
out = Tensor.get_common_data([true_, false_, cond], allow_broadcast_all_sources=allow_broadcast_all_sources, name='where')
out.dtype = true_.dtype
out.sparse_dim = (true_.sparse_dim or false_.sparse_dim)
out.feature_dim = (true_.feature_dim or false_.feature_dim)
cond_bc_raw = cond.copy_compatible_to_dims_raw(out.dims)
true_bc_raw = true_.copy_compatible_to_dims_raw(out.dims)
false_bc_raw = false_.copy_compatible_to_dims_raw(out.dims)
out.raw_tensor = torch.where(cond_bc_raw, true_bc_raw, false_bc_raw)
return out
@staticmethod
def clip_by_value(x: Tensor, clip_value_min: Union[(Tensor, rf.RawTensorTypes)], clip_value_max: Union[(Tensor, rf.RawTensorTypes)], *, allow_broadcast_all_sources: bool=False) -> Tensor:
'clip by value'
clip_value_min = rf.convert_to_tensor(clip_value_min, _backend=TorchBackend, device=x.device)
clip_value_max = rf.convert_to_tensor(clip_value_max, _backend=TorchBackend, device=x.device)
out = Tensor.get_common_data([x, clip_value_min, clip_value_max], allow_broadcast_all_sources=allow_broadcast_all_sources, name='clip_by_value')
out.dtype = x.dtype
out.sparse_dim = x.sparse_dim
out.feature_dim = x.feature_dim
x_bc_raw = x.copy_compatible_to_dims_raw(out.dims)
min_bc_raw = clip_value_min.copy_compatible_to_dims_raw(out.dims)
max_bc_raw = clip_value_max.copy_compatible_to_dims_raw(out.dims)
out.raw_tensor = torch.clamp(x_bc_raw, min_bc_raw, max_bc_raw)
return out
@staticmethod
def matmul(a: _TT, b: _TT, *, reduce: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> _TT:
'\n batched matmul of a and b, see base class doc string\n '
if isinstance(reduce, Dim):
reduce = [reduce]
if (use_mask and any((dim.dyn_size_ext for dim in reduce))):
raise NotImplementedError('masking in matmul reduce not yet implemented')
a_dims = a.dims
b_dims = b.dims
assert all(((dim in a_dims) for dim in reduce)), f"'a' does not have the specified reduce dim(s) {reduce} (a dims: {a_dims})"
assert all(((dim in b_dims) for dim in reduce)), f"'b' does not have the specified reduce dim(s) {reduce} (b dims: {b_dims})"
if (len(reduce) > 1):
reduce = list(reduce)
reduce.sort(key=(lambda dim: a_dims.index(dim)))
a_reduce_axes = [a.get_axis_from_description(reduce_dim) for reduce_dim in reduce]
b_reduce_axes = [b.get_axis_from_description(reduce_dim) for reduce_dim in reduce]
common_dims = [dim for (i, dim) in enumerate(a_dims) if ((dim in b_dims) and (i not in a_reduce_axes))]
a_common_axes = [a_dims.index(common_dim) for common_dim in common_dims]
b_common_axes = [b_dims.index(common_dim) for common_dim in common_dims]
a_unique_axes = [i for i in range(len(a_dims)) if ((i not in a_reduce_axes) and (i not in a_common_axes))]
b_unique_axes = [i for i in range(len(b_dims)) if ((i not in b_reduce_axes) and (i not in b_common_axes))]
a_raw = a.raw_tensor
b_raw = b.raw_tensor
a_shape = a_raw.shape
b_shape = b_raw.shape
common_axes_shape = tuple((a_shape[i] for i in a_common_axes))
b_common_axes_shape = tuple((b_shape[i] for i in b_common_axes))
assert (common_axes_shape == b_common_axes_shape), 'Tensor shape for common Dims of a and b does not match.'
common_axes_total_dimension = prod(common_axes_shape)
a_unique_axes_shape = tuple((a_shape[i] for i in a_unique_axes))
b_unique_axes_shape = tuple((b_shape[i] for i in b_unique_axes))
a_unique_axes_total_dimension = prod(a_unique_axes_shape)
b_unique_axes_total_dimension = prod(b_unique_axes_shape)
reduce_axes_shape = tuple((a_shape[i] for i in a_reduce_axes))
b_reduce_axes_shape = tuple((b_shape[i] for i in b_reduce_axes))
assert (reduce_axes_shape == b_reduce_axes_shape), 'Tensor shape for reduce Dims does not match between a and b.'
reduce_axes_total_dimension = prod(reduce_axes_shape)
if ((len(b_common_axes) == 0) and (len(b_reduce_axes) == 1) and (len(b_unique_axes) == 1)):
a_raw = torch.permute(a_raw, (a_unique_axes + a_reduce_axes))
b_raw = torch.permute(b_raw, (b_unique_axes + b_reduce_axes))
raw_result = torch.nn.functional.linear(a_raw, b_raw)
else:
a_raw = torch.permute(a_raw, ((a_common_axes + a_unique_axes) + a_reduce_axes))
b_raw = torch.permute(b_raw, ((b_common_axes + b_reduce_axes) + b_unique_axes))
if (common_axes_total_dimension == 1):
a_raw = torch.reshape(a_raw, (a_unique_axes_total_dimension, reduce_axes_total_dimension))
b_raw = torch.reshape(b_raw, (reduce_axes_total_dimension, b_unique_axes_total_dimension))
raw_result = torch.mm(a_raw, b_raw)
else:
a_raw = torch.reshape(a_raw, (common_axes_total_dimension, a_unique_axes_total_dimension, reduce_axes_total_dimension))
b_raw = torch.reshape(b_raw, (common_axes_total_dimension, reduce_axes_total_dimension, b_unique_axes_total_dimension))
raw_result = torch.bmm(a_raw, b_raw)
raw_result = torch.reshape(raw_result, ((common_axes_shape + a_unique_axes_shape) + b_unique_axes_shape))
a_unique_dims = [a_dims[i] for i in a_unique_axes]
b_unique_dims = [b_dims[i] for i in b_unique_axes]
result_dims = ((common_dims + a_unique_dims) + b_unique_dims)
result_tensor = Tensor(name='dot', dims=result_dims, raw_tensor=raw_result, dtype=TorchBackend.get_dtype_name_raw(raw_result))
return result_tensor
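# Hedged usage sketch (hypothetical dims): a plain projection, reducing over the input feature dim.
# batch, in_, out_ = Dim(3, name='batch'), Dim(5, name='in'), Dim(7, name='out')
# x = Tensor('x', dims=(batch, in_), dtype='float32', raw_tensor=torch.randn(3, 5))
# w = Tensor('w', dims=(in_, out_), dtype='float32', raw_tensor=torch.randn(5, 7))
# y = TorchBackend.matmul(x, w, reduce=in_)
# y.dims == (batch, out_)  # result dims: common dims, then a-unique dims, then b-unique dims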
@staticmethod
def range_over_dim(dim: Dim, *, dtype: Optional[str]=None, device: Optional[str]=None) -> Tensor[torch.Tensor]:
'\n :param dim:\n :param dtype:\n :param device:\n :return: tensor with shape [dim]\n '
if ((not dtype) and dim.dyn_size_ext):
dtype = dim.dyn_size_ext.dtype
if (not dtype):
dtype = rf.get_default_array_index_dtype()
out = Tensor('range', dims=[dim], sparse_dim=(dim if (dtype.startswith('int') or dtype.startswith('uint')) else None), dtype=dtype)
out.raw_tensor = torch.arange(dim.get_dim_value(), dtype=TorchBackend.as_dtype_raw(out.dtype), device=(device or rf.get_default_device()))
return out
@staticmethod
def reduce(source: Tensor[torch.Tensor], *, mode: str, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[torch.Tensor]:
'reduce'
assert (mode in Backend._AllowedReduceModes)
if isinstance(axis, Dim):
axis = [axis]
assert all((isinstance(dim, Dim) for dim in axis))
raw_dims = [source.get_axis_from_description(dim) for dim in axis]
res_dims = [dim for (i, dim) in enumerate(source.dims) if (i not in raw_dims)]
correction_factor: Optional[torch.Tensor] = None
if (use_mask and any((dim.need_masking() for dim in axis))):
source = source.copy()
dtype = source.raw_tensor.dtype
if (mode == 'max'):
mask_value = (torch.finfo(dtype).min if dtype.is_floating_point else torch.iinfo(dtype).min)
elif (mode == 'min'):
mask_value = (torch.finfo(dtype).max if dtype.is_floating_point else torch.iinfo(dtype).max)
elif (mode == 'sum'):
mask_value = 0
elif (mode == 'mean'):
mask_value = 0
for dim in axis:
if dim.need_masking():
total_num_el = dim.get_dim_value_tensor()
actual_num_el = dim.get_size_tensor()
num_el_reduce_dims = [dim_ for dim_ in axis if (dim_ in actual_num_el.dims)]
if num_el_reduce_dims:
actual_num_el = rf.reduce_sum(actual_num_el, axis=num_el_reduce_dims)
for dim_ in num_el_reduce_dims:
total_num_el *= dim_.get_dim_value_tensor()
correction_factor_ = (rf.cast(total_num_el, source.dtype) / rf.cast(actual_num_el, source.dtype))
correction_factor__ = correction_factor_.copy_compatible_to_dims_raw(res_dims)
if (correction_factor is None):
correction_factor = correction_factor__
else:
correction_factor *= correction_factor__
else:
raise NotImplementedError(f'reduce_{mode} not implemented with masking on tensor {source!r}.')
for dim in axis:
if dim.need_masking():
mask = source.get_sequence_mask_broadcast(dim)
source.raw_tensor = torch.where(mask, source.raw_tensor, mask_value)
func = getattr(torch, mode)
if (not res_dims):
raw_result = func(source.raw_tensor)
elif (len(raw_dims) == 1):
raw_result = func(source.raw_tensor, dim=raw_dims[0])
if (mode in ['max', 'min']):
(raw_result, _) = raw_result
else:
raw_result = func(source.raw_tensor, dim=raw_dims)
if (correction_factor is not None):
raw_result *= correction_factor.to(raw_result.device)
res = Tensor(name=f'reduce_{mode}', raw_tensor=raw_result, dims=res_dims, dtype=TorchBackend.get_dtype_name_raw(raw_result), sparse_dim=(axis[0] if mode.startswith('arg') else source.sparse_dim))
return res
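# Hedged note on the masking logic above: for mode='mean' over a dynamic (padded) axis,
# padded positions are first set to 0 and torch.mean then divides by the padded length,
# so the result is rescaled by total_len/actual_len per sequence to recover the true masked mean.
# Sketch: TorchBackend.reduce(x, mode='mean', axis=time)  # x: [batch, time, feat] -> [batch, feat]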
@staticmethod
def top_k(source: Tensor[torch.Tensor], *, axis: Union[(Dim, Sequence[Dim])], k: Union[(int, Tensor)], k_dim: Optional[Dim]=None, sorted: bool=True) -> Tuple[(Tensor, Union[(Tensor, Sequence[Tensor])], Dim)]:
'top_k'
if (not k_dim):
k_dim = Dim(k, name='top-k-dim')
axes = ([axis] if isinstance(axis, Dim) else axis)
if any((a.need_masking() for a in axes)):
mask_value = (torch.finfo(source.raw_tensor.dtype).min if source.raw_tensor.dtype.is_floating_point else torch.iinfo(source.raw_tensor.dtype).min)
source = source.copy()
for a in axes:
if a.need_masking():
source = rf.where(a.get_mask(dim_order=source.dims, device=source.device), source, mask_value)
if isinstance(axis, (list, tuple)):
source = source.copy_transpose(([d for d in source.dims if (d not in axis)] + list(axis)))
source_raw_flat = source.raw_tensor.flatten(start_dim=(source.batch_ndim - len(axis)))
(values_raw, indices_raw) = torch.topk(source_raw_flat, k=k_dim.get_dim_value(), dim=(- 1), largest=True, sorted=sorted)
values = source.copy_template_new_dim_tags(new_dim_tags=(source.dims[:(- len(axis))] + (k_dim,)), name='top_k_values')
if (source.feature_dim and (source.feature_dim in values.dims)):
values.feature_dim = source.feature_dim
values.raw_tensor = values_raw
indices_out = []
for (i, a) in reversed(list(enumerate(axis))):
assert isinstance(a, Dim)
indices_out_raw = (indices_raw % a.dimension)
indices_raw = (indices_raw // a.dimension)
indices = values.copy_template(name=f'top_k_indices_{(a.name or i)}')
indices.dtype = TorchBackend.get_dtype_name_raw(indices_out_raw)
indices.sparse_dim = a
indices.raw_tensor = indices_out_raw
indices_out.insert(0, indices)
return (values, indices_out, k_dim)
assert isinstance(axis, Dim)
axis_int = source.get_axis_from_description(axis, allow_int=False)
(values_raw, indices_raw) = torch.topk(source.raw_tensor, k=k_dim.get_dim_value(), dim=axis_int, largest=True, sorted=sorted)
values = source.copy_template_replace_dim_tag(axis=axis_int, new_dim_tag=k_dim, name='top_k_values')
values.raw_tensor = values_raw
indices = source.copy_template_replace_dim_tag(axis=axis_int, new_dim_tag=k_dim, name='top_k_indices')
indices.dtype = TorchBackend.get_dtype_name_raw(indices_raw)
indices.sparse_dim = axis
indices.raw_tensor = indices_raw
return (values, indices, k_dim)
@staticmethod
@contextlib.contextmanager
def random_journal_record() -> Generator[_random_journal.RandomJournal]:
'\n :return: the journal\n '
prev_journal = TorchBackend._random_journal
try:
TorchBackend._random_journal = _random_journal.RandomJournal()
(yield TorchBackend._random_journal)
finally:
TorchBackend._random_journal = prev_journal
_random_journal = None
@staticmethod
def random(*, dims: Sequence[Dim], dtype: str, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None, distribution: str, mean: Optional[Union[(int, float, Tensor)]]=None, stddev: Optional[Union[(int, float, Tensor)]]=None, bound: Optional[Union[(int, float, Tensor)]]=None, minval: Optional[Union[(int, float, Tensor)]]=None, maxval: Optional[Union[(int, float, Tensor)]]=None, seed: Optional[Union[(int, Sequence[int], numpy.ndarray)]]=None, algorithm: Optional[str]=None, explicit_state: Optional[Tensor]=None, auto_update_state: Optional[bool]=None, static: Optional[bool]=None, out: Optional[Tensor[torch.Tensor]]=None) -> Tensor:
'\n random. See `rf.random` for details.\n '
shape = [d.get_dim_value() for d in dims]
dtype_ = TorchBackend.as_dtype_raw(dtype)
if (out is None):
out = Tensor(name=f'random_{distribution}', dims=dims, dtype=dtype, sparse_dim=sparse_dim, feature_dim=feature_dim)
out.raw_tensor = torch.empty(shape, dtype=dtype_, device=(device or rf.get_default_device()))
assert (explicit_state is None)
generator = None
assert isinstance(static, bool)
if static:
assert (seed is not None)
generator = torch.Generator()
generator.manual_seed(seed)
else:
assert (seed is None)
assert (auto_update_state is None)
if (distribution == 'uniform'):
assert ((mean is None) and (stddev is None))
if dtype_.is_floating_point:
if (minval is None):
minval = 0
if (maxval is None):
maxval = 1
if isinstance(minval, Tensor):
assert (minval.dims == ()), f'only scalar minval supported, got {minval}'
minval = minval.raw_tensor
if isinstance(maxval, Tensor):
assert (maxval.dims == ()), f'only scalar maxval supported, got {maxval}'
maxval = maxval.raw_tensor
with torch.no_grad():
out.raw_tensor.uniform_(minval, maxval, generator=generator)
else:
if (minval is None):
minval = 0
assert (maxval is not None), 'maxval must be specified for integer random uniform'
if isinstance(minval, Tensor):
assert (minval.dims == ()), f'only scalar minval supported, got {minval}'
minval = minval.raw_tensor
if isinstance(maxval, Tensor):
assert (maxval.dims == ()), f'only scalar maxval supported, got {maxval}'
maxval = maxval.raw_tensor
with torch.no_grad():
out.raw_tensor.random_(minval, maxval, generator=generator)
elif (distribution == 'normal'):
assert ((minval is None) and (maxval is None))
if (mean is None):
mean = 0
if (stddev is None):
stddev = 1
if isinstance(mean, Tensor):
assert (mean.dims == ()), f'only scalar mean supported, got {mean}'
mean = mean.raw_tensor
if isinstance(stddev, Tensor):
assert (stddev.dims == ()), f'only scalar stddev supported, got {stddev}'
stddev = stddev.raw_tensor
with torch.no_grad():
out.raw_tensor.normal_(mean, stddev, generator=generator)
elif (distribution == 'truncated_normal'):
if (mean is None):
mean = 0
if (stddev is None):
stddev = 1
if (minval is None):
minval = (mean - (2 * stddev))
if (maxval is None):
maxval = (mean + (2 * stddev))
from . import _rand
_rand.no_grad_trunc_normal_(out.raw_tensor, mean=mean, std=stddev, a=minval, b=maxval, generator=generator)
else:
raise NotImplementedError(f'random distribution {distribution} not implemented')
if TorchBackend._random_journal:
out_ = out.copy()
out_.raw_tensor = out_.raw_tensor.detach().cpu().numpy()
TorchBackend._random_journal.append(distribution=distribution, mean=mean, stddev=stddev, bound=bound, minval=minval, maxval=maxval, seed=seed, static=static, out=out_)
return out
@staticmethod
def masked_select(tensor: Tensor, *, mask: Tensor, dims: Sequence[Dim], out_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]:
'\n :param tensor:\n :param mask:\n :param dims: the order of the dims defines the format. those dims should be exactly the dims of the mask.\n :param out_dim:\n :return: tensor where all dims in mask/dims are removed and replaced by a new dim.\n the new dim is also returned.\n if mask==True for all elements, the returned tensor would be simply the flattened input tensor.\n '
assert (mask.dtype == 'bool')
assert (set(mask.dims) == set(dims))
remaining_dims = [d for d in tensor.dims if (d not in mask.dims)]
tensor_templ_dims = (tuple(dims) + tuple(remaining_dims))
in_raw = tensor.copy_compatible_to_dims_raw(tensor_templ_dims)
mask_raw = mask.copy_compatible_to_dims_raw(tensor_templ_dims)
in_raw = in_raw.clone(memory_format=torch.contiguous_format)
if (mask_raw.device.type == 'meta'):
out_raw = in_raw.flatten()
else:
out_raw = torch.masked_select(in_raw, mask_raw)
remaining_shape = [d.get_dim_value() for d in remaining_dims]
remaining_num_elements = (numpy.prod(remaining_shape) if remaining_shape else 1)
assert ((out_raw.numel() % remaining_num_elements) == 0)
flattened_num_elements = (out_raw.numel() // remaining_num_elements)
out_raw = torch.reshape(out_raw, ([flattened_num_elements] + remaining_shape))
if (not out_dim):
out_dim = Dim(None, name='masked_select')
if (not out_dim.dyn_size_ext):
out_dim.dyn_size_ext = Tensor('masked_select_size', dims=(), dtype='int64')
if (out_dim.dyn_size_ext.raw_tensor is None):
out_dim.dyn_size_ext.raw_tensor = torch.tensor(flattened_num_elements, dtype=torch.int64)
out = Tensor('masked_select', dims=((out_dim,) + tuple(remaining_dims)), dtype=tensor.dtype, sparse_dim=tensor.sparse_dim, raw_tensor=out_raw)
return (out, out_dim)
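# Hedged usage sketch: masked_select packs the valid (mask==True) positions of dims into one new dim;
# masked_scatter below is its inverse, writing the packed values back and leaving zeros elsewhere.
# packed, packed_dim = TorchBackend.masked_select(x, mask=mask, dims=(batch, time))  # -> [packed_dim, feat]
# restored = TorchBackend.masked_scatter(packed, mask=mask, dims=(batch, time), in_dim=packed_dim)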
@staticmethod
def masked_scatter(source: Tensor, *, mask: Tensor, dims: Sequence[Dim], in_dim: Dim) -> Tensor:
'masked scatter'
assert (mask.dtype == 'bool')
assert (set(mask.dims) == set(dims))
assert (in_dim in source.dims)
remaining_dims = [d for d in source.dims if ((d not in mask.dims) and (d != in_dim))]
source_templ_dims = ((in_dim,) + tuple(remaining_dims))
tensor_templ_dims = (tuple(dims) + tuple(remaining_dims))
source_raw = source.copy_compatible_to_dims_raw(source_templ_dims)
mask_raw = mask.copy_compatible_to_dims_raw(tensor_templ_dims)
out_shape = [d.get_dim_value() for d in tensor_templ_dims]
out_raw = torch.zeros(out_shape, dtype=source_raw.dtype, device=source_raw.device)
out_raw.masked_scatter_(mask_raw, source_raw)
return Tensor('masked_scatter', dims=tensor_templ_dims, dtype=source.dtype, sparse_dim=source.sparse_dim, raw_tensor=out_raw)
@staticmethod
def batch_norm(source: Tensor[torch.Tensor], *, in_dim: Union[(Dim, Sequence[Dim])], running_mean: Optional[Tensor], running_variance: Optional[Tensor], gamma: Optional[Tensor], beta: Optional[Tensor], epsilon: float, momentum: float, affine: bool, use_mask: bool) -> Tensor:
'batch norm'
if use_mask:
raise NotImplementedError('batch_norm with masking not implemented')
if ((running_mean is None) != (running_variance is None)):
raise ValueError('running_mean and running_variance must be both None or both not None')
assert isinstance(in_dim, Dim)
if affine:
if ((gamma is None) or (beta is None)):
raise ValueError('gamma and beta must be given if affine=True')
if (not (gamma.dims == beta.dims == (in_dim,))):
raise ValueError(f'gamma and beta must have shape [{in_dim}], got gamma {gamma} and beta {beta}')
if (running_mean is not None):
if (not (running_mean.dims == running_variance.dims == (in_dim,))):
raise ValueError(f'running_mean and running_variance must have shape [{in_dim}], got running_mean {running_mean} and running_variance {running_variance}')
feat_axis = source.get_axis_from_description(in_dim)
if (feat_axis == 0):
pre_dims = 1
else:
pre_dims = numpy.prod(source.raw_tensor.shape[:feat_axis])
src_raw = torch.reshape(source.raw_tensor, [pre_dims, in_dim.get_dim_value(), (- 1)])
out_raw = torch.nn.functional.batch_norm(src_raw, running_mean=(running_mean.raw_tensor if (running_mean is not None) else None), running_var=(running_variance.raw_tensor if (running_variance is not None) else None), weight=(gamma.raw_tensor if affine else None), bias=(beta.raw_tensor if affine else None), training=(rf.get_run_ctx().train_flag or (running_mean is None)), momentum=momentum, eps=epsilon)
out = source.copy_template()
out.raw_tensor = torch.reshape(out_raw, source.raw_tensor.shape)
out.feature_dim = in_dim
return out
@staticmethod
def conv(source: Tensor, *, in_dim: Dim, out_dim: Dim, in_spatial_dims: Sequence[Dim], out_spatial_dims: Optional[Sequence[Dim]]=None, filter: Tensor, filter_size: Sequence[Dim], padding: str, strides: Optional[Union[(int, Sequence[int])]]=None, dilation_rate: Optional[Union[(int, Sequence[int])]]=None, groups: Optional[int]=None, bias: Optional[Tensor]=None) -> Tuple[(Tensor, Sequence[Dim])]:
'conv'
if (not out_spatial_dims):
out_spatial_dims = rf.make_conv_out_spatial_dims(in_spatial_dims=in_spatial_dims, filter_size=[d.dimension for d in filter_size], strides=(strides or 1), dilation_rate=(dilation_rate or 1), padding=padding)
filter_in_dim = (in_dim if ((not groups) or (groups == 1)) else (in_dim // groups))
filter_dims = ((out_dim, filter_in_dim) + tuple(filter_size))
filter = filter.copy_transpose(filter_dims)
batch_dims = [d for d in source.dims if (d not in ((in_dim,) + tuple(in_spatial_dims)))]
source = source.copy_transpose(((batch_dims + [in_dim]) + list(in_spatial_dims)))
if (len(batch_dims) == 1):
src_raw = source.raw_tensor
else:
src_raw = torch.reshape(source.raw_tensor, ([(- 1), in_dim.get_dim_value()] + [d.get_dim_value() for d in in_spatial_dims]))
use_striding = (strides and ((strides > 1) if isinstance(strides, int) else any(((s > 1) for s in strides))))
if ((padding == 'same') and (not use_striding) and all((((d.dimension % 2) == 1) for d in filter_size))):
if all(((filter_size[0].dimension == d.dimension) for d in filter_size)):
padding = ((filter_size[0].dimension - 1) // 2)
else:
padding = tuple(((d.dimension // 2) for d in filter_size))
if ((padding == 'same') and (use_striding or torch.onnx.is_in_onnx_export())):
padding = 0
pads = []
for (i, s) in reversed(list(enumerate(filter_size))):
if use_striding:
stride_ = (strides[i] if isinstance(strides, (list, tuple)) else strides)
else:
stride_ = 1
pad = ((s.dimension - 1) - ((src_raw.shape[(2 + i)] - 1) % stride_))
pad_left = (pad // 2)
pad_right = (pad - pad_left)
pads.extend([pad_left, pad_right])
src_raw = torch.nn.functional.pad(src_raw, pads)
if (padding == 'valid'):
padding = 0
if (len(filter_size) == 1):
out_raw = torch.nn.functional.conv1d(src_raw, weight=filter.raw_tensor, bias=(bias.raw_tensor if (bias is not None) else None), stride=(strides or 1), padding=padding, dilation=(dilation_rate or 1), groups=(groups or 1))
elif (len(filter_size) == 2):
out_raw = torch.nn.functional.conv2d(src_raw, weight=filter.raw_tensor, bias=(bias.raw_tensor if (bias is not None) else None), stride=(strides or 1), padding=padding, dilation=(dilation_rate or 1), groups=(groups or 1))
elif (len(filter_size) == 3):
out_raw = torch.nn.functional.conv3d(src_raw, weight=filter.raw_tensor, bias=(bias.raw_tensor if (bias is not None) else None), stride=(strides or 1), padding=padding, dilation=(dilation_rate or 1), groups=(groups or 1))
else:
raise ValueError(f'invalid number of filter dims {filter_size}, expected 1, 2, or 3')
out = Tensor('conv', dims=((batch_dims + [out_dim]) + list(out_spatial_dims)), dtype=TorchBackend.get_dtype_name_raw(out_raw))
if (len(batch_dims) == 1):
out.raw_tensor = out_raw
else:
out.raw_tensor = torch.reshape(out_raw, [d.get_dim_value() for d in out.dims])
out.feature_dim = out_dim
return (out, out_spatial_dims)
@staticmethod
def pool(source: Tensor, *, mode: str, pool_size: Sequence[int], padding: str='valid', dilation_rate: Union[(Sequence[int], int)]=1, strides: Sequence[int], in_spatial_dims: Sequence[Dim], out_spatial_dims: Optional[Sequence[Dim]]=None) -> Tuple[(Tensor, Sequence[Dim])]:
'pool'
if (out_spatial_dims is None):
out_spatial_dims = rf.make_conv_out_spatial_dims(in_spatial_dims=in_spatial_dims, filter_size=pool_size, strides=strides, dilation_rate=dilation_rate, padding=padding)
batch_dims = [d for d in source.dims if (d not in tuple(in_spatial_dims))]
source = source.copy_transpose((batch_dims + list(in_spatial_dims)))
src_raw = torch.reshape(source.raw_tensor, ([(- 1), (batch_dims[(- 1)].get_dim_value() if batch_dims else 1)] + [d.get_dim_value() for d in in_spatial_dims]))
assert (isinstance(strides, (list, tuple)) and (len(strides) == len(in_spatial_dims) == len(pool_size)))
if (padding.lower() == 'same'):
padding = []
for (i, s) in enumerate(pool_size):
pad = ((s - 1) - ((src_raw.shape[(2 + i)] - 1) % strides[i]))
padding.append((pad // 2))
ceil_mode = True
elif (padding.lower() == 'valid'):
padding = 0
ceil_mode = False
else:
raise ValueError(f'invalid padding {padding!r}')
func_name = f'{mode}_pool{len(in_spatial_dims)}d'
func = getattr(torch.nn.functional, func_name)
kwargs = {}
if (dilation_rate and any(((d != 1) for d in ([dilation_rate] if isinstance(dilation_rate, int) else dilation_rate)))):
assert (mode == 'max'), 'dilation_rate only supported for max_pool'
kwargs['dilation'] = dilation_rate
if (mode == 'avg'):
kwargs['count_include_pad'] = False
out_raw = func(src_raw, kernel_size=pool_size, stride=strides, ceil_mode=ceil_mode, padding=padding, **kwargs)
out = Tensor('pool', dims=(batch_dims + list(out_spatial_dims)), dtype=source.dtype)
out.raw_tensor = torch.reshape(out_raw, [d.get_dim_value() for d in out.dims])
if (source.feature_dim and (source.feature_dim in out.dims)):
out.feature_dim = source.feature_dim
return (out, out_spatial_dims)
@staticmethod
def stft(x: Tensor, *, in_spatial_dim: Dim, frame_step: int, frame_length: int, fft_length: int, window_use_frame_length: bool=True, align_window_left: bool=True, window_enforce_even: bool=True, out_spatial_dim: Dim, out_dim: Dim) -> Tensor:
'stft'
batch_dims = [d for d in x.dims if (d != in_spatial_dim)]
x = x.copy_transpose((batch_dims + [in_spatial_dim]))
x_raw = torch.reshape(x.raw_tensor, [(- 1), in_spatial_dim.get_dim_value()])
if ((frame_length < fft_length) and window_use_frame_length):
x_raw = torch.nn.functional.pad(x_raw, (0, (fft_length - frame_length)))
if window_enforce_even:
frame_length -= (frame_length % 2)
window_pt = torch.hann_window(frame_length, device=x_raw.device)
if (frame_length < fft_length):
if align_window_left:
window_pt = torch.nn.functional.pad(window_pt, (0, (fft_length - frame_length)))
else:
pad_left = ((fft_length - frame_length) // 2)
pad_right = ((fft_length - frame_length) - pad_left)
window_pt = torch.nn.functional.pad(window_pt, (pad_left, pad_right))
y_raw = torch.stft(x_raw, n_fft=fft_length, hop_length=frame_step, win_length=fft_length, window=window_pt, center=False, return_complex=True)
y = Tensor('stft', dims=(batch_dims + [out_dim, out_spatial_dim]), dtype=TorchBackend.get_dtype_name_raw(y_raw))
y.feature_dim = out_dim
y.raw_tensor = torch.reshape(y_raw, [d.get_dim_value() for d in y.dims])
return y
@staticmethod
def lstm(source: _TT, *, state_h: _TT, state_c: _TT, ff_weight: _TT, rec_weight: _TT, bias: Optional[_TT], spatial_dim: Dim, in_dim: Dim, out_dim: Dim) -> Tuple[(_TT, Tuple[(_TT, _TT)])]:
'\n Wraps the functional LSTM from PyTorch.\n\n :return: Tuple consisting of two elements: the result as a :class:`Tensor`\n and the new state as a :class:`State` (different from the previous one).\n '
squeeze_spatial_dim = False
if (spatial_dim == single_step_dim):
spatial_dim = Dim(1, name='dummy-spatial-dim-single-step')
source = source.copy_add_dim_by_tag(spatial_dim, unbroadcast=True, axis=0)
squeeze_spatial_dim = True
if (bias is None):
lstm_params = (ff_weight.raw_tensor, rec_weight.raw_tensor)
has_biases = False
else:
bias_raw = (bias.raw_tensor * 0.5)
lstm_params = (ff_weight.raw_tensor, rec_weight.raw_tensor, bias_raw, bias_raw)
has_biases = True
batch_dims = [d for d in source.dims if ((d != spatial_dim) and (d != in_dim))]
source = source.copy_transpose((([spatial_dim] + batch_dims) + [in_dim]))
state_h = state_h.copy_transpose((batch_dims + [out_dim]))
state_c = state_c.copy_transpose((batch_dims + [out_dim]))
source_raw = source.raw_tensor
state_h_raw = state_h.raw_tensor
state_c_raw = state_c.raw_tensor
batch_dim = (torch.prod(torch.tensor([d.get_dim_value() for d in batch_dims])) if batch_dims else 1)
if (len(batch_dims) != 1):
source_raw = torch.reshape(source_raw, (([spatial_dim.get_dim_value()] + [batch_dim]) + [in_dim.get_dim_value()]))
state_h_raw = torch.reshape(state_h_raw, [1, batch_dim, out_dim.get_dim_value()])
state_c_raw = torch.reshape(state_c_raw, [1, batch_dim, out_dim.get_dim_value()])
sizes = spatial_dim.get_size_tensor()
sizes = sizes.copy_compatible_to(Tensor('batch_dims', batch_dims, dtype=sizes.dtype), unbroadcast=True, check_sparse=False)
sizes_raw = torch.reshape(sizes.raw_tensor, [batch_dim])
source_packed = torch.nn.utils.rnn.pack_padded_sequence(source_raw, sizes_raw, enforce_sorted=False)
state_h_raw = state_h_raw.index_select(dim=1, index=source_packed.sorted_indices)
state_c_raw = state_c_raw.index_select(dim=1, index=source_packed.sorted_indices)
(out_raw, new_state_h_raw, new_state_c_raw) = torch.lstm(source_packed.data, source_packed.batch_sizes, (state_h_raw, state_c_raw), lstm_params, has_biases=has_biases, num_layers=1, dropout=0.0, train=rf.get_run_ctx().train_flag, bidirectional=False)
new_state_h_raw = new_state_h_raw.index_select(dim=1, index=source_packed.unsorted_indices)
new_state_c_raw = new_state_c_raw.index_select(dim=1, index=source_packed.unsorted_indices)
output_packed = torch.nn.utils.rnn.PackedSequence(out_raw, batch_sizes=source_packed.batch_sizes, sorted_indices=source_packed.sorted_indices, unsorted_indices=source_packed.unsorted_indices)
out_raw = torch.nn.utils.rnn.pad_packed_sequence(output_packed)[0]
if (len(batch_dims) != 1):
out_raw = torch.reshape(out_raw, (([spatial_dim.get_dim_value()] + [d.get_dim_value() for d in batch_dims]) + [out_dim.get_dim_value()]))
new_state_h_raw = torch.reshape(new_state_h_raw, [d.get_dim_value() for d in state_h.dims])
new_state_c_raw = torch.reshape(new_state_c_raw, [d.get_dim_value() for d in state_c.dims])
out = source.copy_template_replace_dim_tag(axis=(- 1), new_dim_tag=out_dim, name='lstm')
out.feature_dim = out_dim
out.raw_tensor = out_raw
new_state_h = state_h.copy_template()
new_state_h.raw_tensor = new_state_h_raw
new_state_h.feature_dim = out_dim
new_state_c = state_c.copy_template()
new_state_c.raw_tensor = new_state_c_raw
new_state_c.feature_dim = out_dim
if squeeze_spatial_dim:
out = out.copy_squeeze_axes([out.get_axis_from_description(spatial_dim)])
return (out, (new_state_h, new_state_c))
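# Hedged sketch of how this primitive is typically reached from user code, via the rf.LSTM
# module. The rf-level names used here (rf.LSTM, default_initial_state, rf.random_normal)
# are assumptions, not taken from this file; assumes the torch backend is active.
import returnn.frontend as rf
from returnn.tensor import Dim

batch_dim = Dim(4, name="batch")
time_dim = Dim(20, name="time")
in_dim = Dim(32, name="in")
out_dim = Dim(64, name="out")

lstm = rf.LSTM(in_dim, out_dim)
x = rf.random_normal([batch_dim, time_dim, in_dim])  # assumed helper for a random Tensor
state = lstm.default_initial_state(batch_dims=[batch_dim])
out, new_state = lstm(x, state=state, spatial_dim=time_dim)
# out covers time, batch and out_dim (this backend returns it time-major);
# new_state.h / new_state.c keep the [batch, out] layout of the initial state.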
TensorArrayType = List[Tensor]
@staticmethod
def tensor_array_unstack(tensor: Tensor, *, axis: Dim) -> TensorArrayType:
'unstack'
axis_int = tensor.get_axis_from_description(axis)
out_tensors_raw = torch.unbind(tensor.raw_tensor, dim=axis_int)
out_tensor_template = tensor.copy_template().copy_template_excluding_axis(axis_int)
out_tensors = []
for out_tensor_raw in out_tensors_raw:
out_tensor = out_tensor_template.copy_template()
out_tensor.raw_tensor = out_tensor_raw
out_tensors.append(out_tensor)
return out_tensors
@staticmethod
def tensor_array_stack(tensor_array: TensorArrayType, *, axis: Dim, tensor_template: Tensor) -> Tensor:
'stack'
if tensor_array:
tensor_template = tensor_array[0].copy_template()
out_tensor = tensor_template.copy_add_dim_by_tag(axis, unbroadcast=True, axis=0)
if (not tensor_array):
return rf.zeros_like(out_tensor)
tensor_array_raw = [tensor.copy_transpose(tensor_template.dims).raw_tensor for tensor in tensor_array]
out_tensor_raw = torch.stack(tensor_array_raw, dim=0)
out_tensor.raw_tensor = out_tensor_raw
return out_tensor
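# Hypothetical round trip through the two helpers above: unstack along a dim, then stack back.
# Dim names are illustrative; assumes the torch backend is active and that TorchBackend is
# importable as below (path assumed).
import torch
from returnn.tensor import Tensor, Dim
from returnn.torch.frontend._backend import TorchBackend  # import path assumed

time_dim = Dim(5, name="time")
feat_dim = Dim(3, name="feature")
x = Tensor("x", dims=[time_dim, feat_dim], dtype="float32")
x.raw_tensor = torch.arange(15, dtype=torch.float32).reshape(5, 3)

ta = TorchBackend.tensor_array_unstack(x, axis=time_dim)  # list of 5 tensors with dims [feature]
y = TorchBackend.tensor_array_stack(
    ta, axis=time_dim, tensor_template=x.copy_template_excluding_axis(0))
assert y.dims == (time_dim, feat_dim)
assert torch.equal(x.raw_tensor, y.raw_tensor)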
|
def no_grad_trunc_normal_(tensor: torch.Tensor, mean, std, a, b, *, generator=None):
'\n    Code copied and adapted from torch.nn.init._no_grad_trunc_normal_,\n    to support the extra `generator` argument (https://github.com/pytorch/pytorch/issues/98200).\n\n    Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n\n    :param tensor:\n    :param mean:\n    :param std:\n    :param a:\n    :param b:\n    :param generator:\n    :return: tensor\n    '
def _norm_cdf(x):
return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0)
if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))):
warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2)
with torch.no_grad():
_lower = _norm_cdf(((a - mean) / std))
_upper = _norm_cdf(((b - mean) / std))
tensor.uniform_(((2 * _lower) - 1), ((2 * _upper) - 1), generator=generator)
tensor.erfinv_()
tensor.mul_((std * math.sqrt(2.0)))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
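# Minimal usage sketch for the helper above: fill a weight tensor with truncated-normal
# values while passing an explicit torch.Generator (the reason this copy exists).
import torch

gen = torch.Generator().manual_seed(42)
w = torch.empty(3, 4)
no_grad_trunc_normal_(w, mean=0.0, std=0.02, a=-0.04, b=0.04, generator=gen)
assert float(w.min()) >= -0.04 and float(w.max()) <= 0.04  # guaranteed by the final clamp_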
|
def pt_module_to_rf_module(pt_module: torch.nn.Module) -> rf.Module:
'\n :param pt_module: torch module\n :return: RF module\n '
assert isinstance(pt_module, torch.nn.Module)
if isinstance(pt_module, _RFModuleAsPTModule):
return pt_module.rf_module
return _PTModuleAsRFModule(pt_module=pt_module)
|
def pt_module_to_wrapped_rf_module(pt_module: torch.nn.Module) -> Optional[rf.Module]:
'\n :param pt_module: torch module\n :return: RF module if the torch module is a wrapped RF module, or None otherwise\n '
assert isinstance(pt_module, torch.nn.Module)
if isinstance(pt_module, _RFModuleAsPTModule):
return pt_module.rf_module
return None
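# Hypothetical illustration of the two converters above. Wrapping a plain torch module
# yields an RF module wrapper; asking whether it already wraps an RF module yields None.
import torch
import returnn.frontend as rf

pt_mod = torch.nn.Linear(4, 8)
rf_mod = pt_module_to_rf_module(pt_mod)
assert isinstance(rf_mod, rf.Module)
assert pt_module_to_wrapped_rf_module(pt_mod) is None  # plain torch module, nothing to unwrap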
|