code
stringlengths
17
6.64M
def _infer_dim_tags_tuple_from_shape(shape, batch_dim_axis, time_dim_axis, feature_dim_axis, sparse, batch, size_placeholder, name, extern_data):
    """
    Build the dim-tags tuple for a tensor given the legacy (shape/axes) description.

    :param tuple[int|None]|list[int|None] shape: this is without batch-dim-axis
    :param int|None batch_dim_axis:
    :param int|None time_dim_axis:
    :param int|None|NotSpecified feature_dim_axis:
    :param bool sparse:
    :param BatchInfo|None batch:
    :param dict[int,tf.Tensor]|None size_placeholder: key is axis without batch-dim
    :param bool extern_data:
    :param str name:
    :return: dim tags tuple
    :rtype: tuple[Dim]
    """
    assert isinstance(shape, (tuple, list))
    shape = tuple(shape)
    batch_shape = _batch_shape_from_shape(shape, batch_dim_axis=batch_dim_axis)
    if (feature_dim_axis is NotSpecified):
        feature_dim_axis = _default_feature_dim_axis(batch_dim_axis=batch_dim_axis, time_dim_axis=time_dim_axis, batch_shape=batch_shape, sparse=sparse)
    elif (feature_dim_axis is not None):
        # Normalize a negative feature axis to a positive index (counted with batch dim).
        if (feature_dim_axis < 0):
            feature_dim_axis += len(batch_shape)
        assert (0 <= feature_dim_axis < len(batch_shape))
    dim_tags = {}  # axis (with batch dim) -> Dim
    if ((batch_dim_axis is not None) and (batch_dim_axis not in dim_tags)):
        # Global batch uses the shared `batch_dim` singleton; otherwise make a dedicated batch tag.
        if ((batch is None) or batch.is_global_batch()):
            batch_dim_ = batch_dim
        else:
            batch_dim_ = Dim(kind=Dim.Types.Batch, description=('batch:%s' % name), batch=batch, dimension=None)
        dim_tags[batch_dim_axis] = batch_dim_
    if size_placeholder:
        # Reuse existing dim tags attached to the given size tensors, where available.
        for (axis_wo_b, size) in size_placeholder.items():
            axis = _get_axis_wb(axis_wo_b, batch_dim_axis=batch_dim_axis)
            if (axis in dim_tags):
                continue
            tag = Dim.get_tag_from_size_tensor(size)
            if tag:
                dim_tags[axis] = tag
    # Axes we will describe as spatial: everything except batch and the (static) feature axis.
    spatial_axes = [axis for axis in range(len(batch_shape)) if ((axis != batch_dim_axis) and ((axis != feature_dim_axis) or (axis == time_dim_axis) or (batch_shape[axis] is None)))]
    for axis in range(len(batch_shape)):
        tag = dim_tags.get(axis)
        axis_wo_b = _get_axis_wo_b(axis, batch_dim_axis=batch_dim_axis)
        dyn_size = (size_placeholder.get(axis_wo_b) if (size_placeholder and (axis_wo_b is not None)) else None)
        dim = batch_shape[axis]
        if (extern_data and (dim is None) and (dyn_size is None) and (axis != batch_dim_axis)):
            # extern_data case: a dynamic axis without a size tensor gets an auto-generated
            # tag with a scalar dyn_size_ext template (filled later).
            if (not tag):
                if (axis == time_dim_axis):
                    tag_name = 'time'
                else:
                    tag_name = ('spatial%i' % axis)
                tag = Dim(description=('%s:var:extern_data:%s' % (tag_name, name)), kind=Dim.Types.Spatial, batch=batch, auto_generated=True, dimension=None)
                # axis_wo_b is not None here because axis != batch_dim_axis.
                tag.dyn_size_ext = _t.Tensor(name=('%s_dim%i_size' % (name, axis_wo_b)), dtype=_t.Tensor.size_dtype, shape=(), batch=batch)
                dim_tags[axis] = tag
            dyn_size = tag.dyn_size
        if tag:
            # Consistency checks for a pre-existing tag; nothing more to create.
            assert isinstance(tag, Dim)
            assert (tag.dimension == dim)
            if (dyn_size is not None):
                assert tag.is_same_size_tensor(dyn_size)
            continue
        if ((axis == feature_dim_axis) and (dyn_size is None) and (axis != time_dim_axis)):
            tag = Dim(kind=Dim.Types.Feature, dimension=dim, description=('feature:%s' % name), batch=(batch if (dim is None) else None), undefined=(dim is None), auto_generated=True)
        else:
            assert (axis in spatial_axes)
            # Build a human-readable description encoding static/dynamic nature and origin.
            description = ('time' if (axis == time_dim_axis) else ('spatial%i' % spatial_axes.index(axis)))
            if (dyn_size is not None):
                description += (':var:%r' % dyn_size.name)
            elif (dim is None):
                description += ':var-unk'
            else:
                description += (':static%i' % dim)
            description += (':%s' % name)
            tag = Dim(kind=Dim.Types.Spatial, description=description, dimension=dim, dyn_size=dyn_size, batch=(batch if (dim is None) else None), undefined=((dim is None) and (dyn_size is None)), auto_generated=True)
        if ((dim is None) and (tag.dyn_size_ext is None)):
            # Dynamic dim without size info yet: attach a scalar size template,
            # and the raw size tensor if we have one.
            tag.dyn_size_ext = _t.Tensor(name=('%s_dim%i_size' % (name, axis_wo_b)), dtype=_t.Tensor.size_dtype, shape=(), batch=batch)
            if (dyn_size is not None):
                tag.dyn_size_ext.placeholder = dyn_size
        dim_tags[axis] = tag
    assert (sorted(dim_tags.keys()) == list(range(len(batch_shape))))
    return tuple((dim_tags[axis] for axis in range(len(batch_shape))))
def _auto_create_size_placeholders_on_dim_tags(name, dim_tags):
    """
    Create a size placeholder for every dynamic non-batch dim tag which has no dyn_size yet.

    :param str name:
    :param tuple[Dim] dim_tags:
    """
    batch_dim_axis = _batch_dim_axis_from_dim_tags_tuple(dim_tags)
    batch_tag = dim_tags[batch_dim_axis] if batch_dim_axis is not None else None
    if batch_tag:
        batch_tag._validate_in_current_graph()
    for axis, tag in enumerate(dim_tags):
        tag._validate_in_current_graph()
        # Only dynamic non-batch dims which do not already carry a size tensor need one.
        needs_placeholder = (not tag.is_batch_dim()) and tag.is_dynamic() and tag.dyn_size is None
        if not needs_placeholder:
            continue
        _create_size_placeholder(
            name=name,
            axis_wo_b=_get_axis_wo_b(axis, batch_dim_axis=batch_dim_axis),
            tag=tag,
            batch_dim=batch_tag,
        )
def _get_axis_wo_b(axis_wb, batch_dim_axis, batch_ndim=None): '\n :param int axis_wb: counted with batch-dim\n :param int|None batch_dim_axis:\n :param int|None batch_ndim: only used for axis_wb < 0. might be unknown (None)\n :return: axis counted without batch-dim\n :rtype: int|None\n ' if (axis_wb < 0): assert (batch_ndim is not None) assert ((axis_wb + batch_ndim) >= 0) axis_wb += batch_ndim assert (0 <= axis_wb < batch_ndim) if (batch_dim_axis is None): return axis_wb if (axis_wb == batch_dim_axis): return None if (axis_wb < batch_dim_axis): return axis_wb return (axis_wb - 1)
def _get_axis_wb(axis_wo_b, batch_dim_axis): '\n :param int axis_wo_b: counted without batch-dim\n :param int|None batch_dim_axis:\n :return: axis counted with batch-dim\n :rtype: int\n ' if (batch_dim_axis is None): return axis_wo_b if (axis_wo_b >= batch_dim_axis): return (axis_wo_b + 1) return axis_wo_b
def _infer_default_shape_and_time(batch_dim_axis, time_dim_axis, feature_dim_axis, sparse, dim):
    """
    This is the logic to infer some sensible/default shape when it is not specified.
    As this is somewhat adhoc, this is not recommended to be used anymore.

    :param int|None batch_dim_axis:
    :param int|None time_dim_axis:
    :param int|None|NotSpecified feature_dim_axis:
    :param bool sparse:
    :param int|None dim:
    :return: shape (without batch dim), time_dim_axis
    :rtype: (tuple[int|None],int|None)
    """
    if time_dim_axis is not None:
        assert time_dim_axis != batch_dim_axis
        # Enough leading dynamic (None) axes so that the time axis exists.
        shape = (None,) * (_get_axis_wo_b(time_dim_axis, batch_dim_axis=batch_dim_axis) + 1)
    else:
        shape = ()
    if (not sparse) and (feature_dim_axis is not None):
        assert dim is not NotSpecified, "no shape specified, not sparse, feature_dim_axis existing -> need dim"
        if feature_dim_axis is NotSpecified or feature_dim_axis == -1:
            shape = shape + (dim,)
        else:
            assert 0 <= feature_dim_axis != batch_dim_axis
            feature_dim_axis_wo_batch = _get_axis_wo_b(feature_dim_axis, batch_dim_axis=batch_dim_axis)
            if feature_dim_axis_wo_batch < len(shape):
                # Bug fix: replace the entry *at* the feature axis. The previous code sliced
                # with a negative index (shape[:-feature_dim_axis_wo_batch]), which produced
                # a shape of the wrong length whenever this branch was taken.
                shape = shape[:feature_dim_axis_wo_batch] + (dim,) + shape[feature_dim_axis_wo_batch + 1:]
            else:
                # Pad with dynamic axes up to the feature axis, then append the feature dim.
                shape = shape + (None,) * (feature_dim_axis_wo_batch - len(shape)) + (dim,)
                assert len(shape) == (feature_dim_axis_wo_batch + 1)
    return shape, time_dim_axis
def _default_time_dim_axis(batch_dim_axis, shape):
    """
    :param int|None batch_dim_axis:
    :param Sequence[int|None] shape: without batch-dim
    :return: time dim axis, counted with batch-dim
    :rtype: int|None
    """
    if batch_dim_axis is None:
        return None
    batch_shape = _batch_shape_from_shape(shape, batch_dim_axis=batch_dim_axis)
    # The default time axis is the first dynamic (None-sized) non-batch axis, if any.
    candidates = (i for i, d in enumerate(batch_shape) if i != batch_dim_axis and d is None)
    return next(candidates, None)
def _default_time_dim_axis_no_shape(batch_dim_axis, feature_dim_axis): '\n :param int|None batch_dim_axis:\n :param int|None|NotSpecified feature_dim_axis:\n :return: time dim axis, counted with batch-dim\n :rtype: int|None\n ' if (batch_dim_axis is None): time_dim_axis = None else: taken_axes = {batch_dim_axis} if isinstance(feature_dim_axis, int): taken_axes.add(feature_dim_axis) time_dim_axis = [i for i in range((max(taken_axes) + 2)) if (i not in taken_axes)][0] return time_dim_axis
def _default_time_dim_axis_dim_tags(dim_tags): '\n :param list[Dim]|tuple[Dim] dim_tags:\n :return: time dim axis, counted with batch-dim\n :rtype: int|None\n ' dim_tags_dyn_spatial = [i for (i, tag) in enumerate(dim_tags) if (tag.is_spatial_dim() and (tag.dimension is None))] if dim_tags_dyn_spatial: return dim_tags_dyn_spatial[0] dim_tags_dyn = [i for (i, tag) in enumerate(dim_tags) if ((not tag.is_batch_dim()) and (tag.dimension is None))] if dim_tags_dyn: return dim_tags_dyn[0] return None
def _default_feature_dim_axis(batch_dim_axis, time_dim_axis, batch_shape, sparse): '\n :param int|None batch_dim_axis:\n :param int|None time_dim_axis:\n :param tuple[int|None] batch_shape:\n :param bool sparse:\n :return: feature dim axis, counted with batch-dim\n :rtype: int|None\n ' if sparse: return None batch_ndim = len(batch_shape) ndim = (batch_ndim if (batch_dim_axis is None) else (batch_ndim - 1)) if (ndim == 0): return None axes = [i for i in range(batch_ndim) if (i not in [batch_dim_axis, time_dim_axis])] if (not axes): return None static_axes = [i for i in axes if (batch_shape[i] is not None)] if static_axes: return static_axes[(- 1)] return axes[(- 1)]
class _TensorMixinBase():
    """
    Declares the attributes which the Tensor mixin classes rely on.
    These mirror the slots/attributes defined on :class:`Tensor` itself,
    so that mixin methods type-check against them.
    """

    name: str
    _dims: Tuple[Dim, ...]  # the dim tags (shape)
    dtype: str
    sparse_dim: Optional[Dim]
    _feature_dim_axis: Optional[Union[int, NotSpecified]]  # NotSpecified -> infer (v1 behavior)
    _raw_tensor: Optional[_t.RawTensorType]  # backing raw tensor (numpy/TF/PyTorch/...)
    raw_tensor: Optional[_t.RawTensorType]
    version: int  # Tensor behavior version (1 or 2)
    _extra: Optional[_TensorExtra]  # lazily-created bag for rarely-used options
class _TensorOpOverloadsMixin(_TensorMixinBase):
    """
    Operator overloads for :class:`Tensor`.
    All binary operators delegate to the frontend (:func:`_rf`) ``compare``/``combine``
    functions, so the semantics (broadcasting over dims etc.) are defined there.
    """

    # NOTE(review): __eq__ is deliberately asymmetric to __ne__: it returns False
    # (instead of an elementwise compare) when there is no raw tensor or the other
    # operand has an unsupported type, so Tensors stay usable in e.g. `x in list`.
    def __eq__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Union[Tensor, bool]:
        if (self.raw_tensor is None):
            return False
        import returnn.frontend as rf

        valid_types = ((rf.Tensor, self._raw_backend.RawTensorType) + tuple(rf.RawTensorTypes.__args__))
        if isinstance(other, valid_types):
            return _rf().compare(self, '==', other)
        return False

    def __ne__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().compare(self, '!=', other)

    def __lt__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().compare(self, '<', other)

    def __le__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().compare(self, '<=', other)

    def __gt__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().compare(self, '>', other)

    def __ge__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().compare(self, '>=', other)

    # Arithmetic: each op and its reflected variant delegate to rf.combine.
    def __add__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(self, '+', other)

    def __radd__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(other, '+', self)

    def __sub__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(self, '-', other)

    def __rsub__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(other, '-', self)

    def __mul__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(self, '*', other)

    def __rmul__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(other, '*', self)

    def __truediv__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(self, '/', other)

    def __rtruediv__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(other, '/', self)

    def __floordiv__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(self, '//', other)

    def __rfloordiv__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(other, '//', self)

    def __mod__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(self, '%', other)

    def __rmod__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(other, '%', self)

    def __pow__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(self, '**', other)

    def __rpow__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(other, '**', self)

    # Bitwise operators are mapped to *logical* and/or (boolean tensors expected).
    def __and__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(self, 'logical_and', other)

    def __rand__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(other, 'logical_and', self)

    def __or__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(self, 'logical_or', other)

    def __ror__(self: Tensor, other: Union[_rf_types.RawTensorTypes, Tensor]) -> Tensor:
        return _rf().combine(other, 'logical_or', self)

    # Unary operators.
    def __neg__(self: Tensor):
        return _rf().neg(self)

    def __invert__(self: Tensor):
        # ~x means logical not (boolean tensors), not bitwise complement.
        return _rf().logical_not(self)

    def __abs__(self: Tensor):
        return _rf().abs(self)

    def __ceil__(self: Tensor):
        return _rf().ceil(self)

    def __floor__(self: Tensor):
        return _rf().floor(self)
def _rf():
    """
    Lazily import and return the ``returnn.frontend`` module.
    Imported on demand (not at module level) to avoid circular imports.
    """
    import returnn.frontend

    return returnn.frontend
class ControlFlowContext():
    """
    This represents the current control flow context, e.g. whether this runs in a loop or a conditional branch.

    In case of TF,
    this is a simple wrapper around the TF ControlFlowContext which comes from tf.while_loop or tf.cond.

    We have this wrapper to refer to a context which might not exist yet (e.g. at template construction time).
    Also, we might want to store additional information, such as the spatial dim tag of the loop.
    """

    class Types():
        """
        Possible types of context.
        """

        Loop = 'loop'
        CondTrue = 'cond-true'
        CondFalse = 'cond-false'

    def __init__(self, *, kind: str, identifier: str, outer_ctx: Optional[ControlFlowContext]=None):
        """
        :param kind: from ControlFlowContext.Types
        :param identifier: unique name for this context
        :param outer_ctx: enclosing context, if this one is nested
        """
        self.kind = kind
        self.identifier = identifier
        self._outer_ctx = outer_ctx
        self._tf_control_flow_ctx = None  # set later via the tf_control_flow_ctx setter
        self._loop_spatial_dim = None  # set later via the loop_spatial_dim setter (loops only)

    def __repr__(self):
        return ('ControlFlowContext{%s}' % self.repr_inner())

    def repr_inner(self):
        """
        :rtype: str
        :return: path-like repr of the whole context chain, outermost first
        """
        return '/'.join((ctx._repr_single() for ctx in self._abs_ctx_stack()))

    def _repr_single(self):
        """
        :rtype: str
        :return: repr of this context alone (without the outer chain)
        """
        s = self.kind
        if (self.is_loop() and self.loop_spatial_dim):
            # The dim's repr might recurse back into this context; guard against that.
            try:
                with util.guard_infinite_recursion(ControlFlowContext._repr_single, self):
                    s += ('(%s)' % self.loop_spatial_dim.short_repr())
            except util.InfiniteRecursionDetected as exc:
                s += ('(%s for loop_spatial_dim?)' % exc)
        return s

    def _abs_ctx_stack(self):
        """
        :rtype: list[ControlFlowContext]
        :return: chain of ctx, last is self
        """
        chain = []
        ctx = self
        while ctx:
            chain.append(ctx)
            ctx = ctx.outer_ctx
        chain.reverse()
        return chain

    @classmethod
    def abs_ctx_stack(cls, ctx):
        """
        :param ControlFlowContext|None ctx:
        :rtype: list[ControlFlowContext]
        :return: chain of ctx, last is ctx; empty list for None
        """
        if ctx:
            return ctx._abs_ctx_stack()
        return []

    @classmethod
    def abs_ctx_stack_with_root(cls, ctx):
        """
        :param ControlFlowContext|None ctx:
        :rtype: list[ControlFlowContext|None]
        :return: chain of ctx, last is self, first is None
        """
        ls = [None]
        if ctx:
            ls += ctx._abs_ctx_stack()
        return ls

    @classmethod
    def is_parent_or_same(cls, parent, child):
        """
        :param ControlFlowContext|None parent:
        :param ControlFlowContext|None child:
        :rtype: bool
        """
        if (parent == child):
            return True
        # None parent means the root context, which is a parent of everything.
        if (not parent):
            return True
        if (not child):
            return False
        while child:
            if (child == parent):
                return True
            child = child.outer_ctx
        return False

    @classmethod
    def collect_parent_dims(cls, ctx):
        """
        :param ControlFlowContext|None ctx:
        :rtype: list[Dim]
        :return: spatial dims of all loop contexts in the chain, outermost first
        """
        dims = []
        for ctx_ in ControlFlowContext.abs_ctx_stack(ctx):
            if (ctx_.is_loop() and ctx_.loop_spatial_dim):
                dims.append(ctx_.loop_spatial_dim)
        return dims

    def is_loop(self):
        """
        :rtype: bool
        """
        return (self.kind == self.Types.Loop)

    def is_cond(self):
        """
        :rtype: bool
        """
        return (self.kind in {self.Types.CondTrue, self.Types.CondFalse})

    @property
    def outer_ctx(self):
        """
        :rtype: ControlFlowContext|None
        """
        return self._outer_ctx

    @property
    def tf_control_flow_ctx(self):
        """
        :rtype: tensorflow.python.ops.control_flow_ops.ControlFlowContext|None
        """
        return self._tf_control_flow_ctx

    @tf_control_flow_ctx.setter
    def tf_control_flow_ctx(self, ctx):
        """
        :param tensorflow.python.ops.control_flow_ops.ControlFlowContext ctx:
        """
        # Sanity-check that the TF context type matches our kind.
        if self.is_loop():
            assert ctx.IsWhileContext()
        if self.is_cond():
            assert ctx.IsCondContext()
        self._tf_control_flow_ctx = ctx

    @property
    def loop_spatial_dim(self):
        """
        :rtype: Dim|None
        """
        assert self.is_loop()
        return self._loop_spatial_dim

    @loop_spatial_dim.setter
    def loop_spatial_dim(self, dim):
        """
        :param Dim dim:
        """
        assert self.is_loop()
        self._loop_spatial_dim = dim
class Dim(_DimMixin):
    """
    Represents a dimension of a tensor.
    This potentially comes with further information such as individual sequence lengths.
    See the module docstring.
    """

    Types = DimTypes
    __slots__ = ('name', 'capacity', 'size', 'dyn_size_ext', '_dyn_size_max_value', '_extra')

    name: Optional[str]
    capacity: Optional[int]  # static size, or upper bound for dynamic dims (None if unknown)
    size: Optional[int]  # static size; None for dynamic dims
    dyn_size_ext: Optional[_t.Tensor]  # per-entry sizes for dynamic dims
    _dyn_size_max_value: Optional[_t.Tensor]  # cached max of dyn_size_ext
    _extra: Optional[_DimExtra]  # lazily-created bag for rarely-used options

    def __init__(self, dimension: Optional[Union[int, _t.Tensor]], *, name: Optional[str]=None, capacity: Optional[int]=None, dyn_size_ext: Optional[_t.Tensor]=None, description: Optional[str]=None, **kwargs):
        """
        :param dimension: static size (int), dynamic sizes (Tensor), or None (dynamic, sizes unknown/given separately)
        :param name:
        :param capacity: upper bound for dynamic dims; defaults to the static size for static dims
        :param dyn_size_ext: dynamic sizes, only used when dimension is None
        :param description: legacy alias for name
        :param kwargs: see :func:`_handle_extra_kwargs`
        """
        if (dimension is None):
            # Dynamic dim; sizes optionally given via dyn_size_ext (copied, not shared).
            self.capacity = capacity
            self.size = None
            self.dyn_size_ext = (dyn_size_ext.copy() if dyn_size_ext else None)
        elif isinstance(dimension, int):
            # Static dim; capacity defaults to the size itself.
            self.capacity = (capacity or dimension)
            self.size = dimension
            self.dyn_size_ext = None
        elif isinstance(dimension, _t.Tensor):
            # Dynamic dim with the sizes given directly as the `dimension` argument.
            self.capacity = capacity
            self.size = None
            self.dyn_size_ext = dimension.copy()
        else:
            raise TypeError(f'unexpected dimension type: {type(dimension)}')
        if ((not name) and (not description) and self.dyn_size_ext):
            # Fall back to the size tensor's name.
            name = self.dyn_size_ext.name
        self.name = (name or description)
        self._dyn_size_max_value = None
        self._extra = None
        if kwargs:
            self._handle_extra_kwargs(**kwargs)

    def __repr__(self):
        return ('Dim{%s}' % self.short_repr())
class VerifyOutShapeException(Exception):
    """
    Exception via :func:`Tensor.verify_out_shape`,
    raised when a tensor's dims do not match the expected out_shape.
    """
class MarkedDim(): '\n Base class for marked dims, e.g. optional dims, or implicit (virtual) dims.\n ' def __init__(self, tag: _d.Dim): '\n :param tag:\n ' self.tag = tag def __repr__(self): return ('%s(%r)' % (self.__class__.__name__, self.tag)) def _eq_tuple(self): return (self.__class__, self.tag) def __hash__(self): return hash(self._eq_tuple()) def __eq__(self, other): if isinstance(other, MarkedDim): return (self._eq_tuple() == other._eq_tuple()) return False def __ne__(self, other): return (not (self == other)) def __lt__(self, other): '\n See :func:`Dim.__lt__`.\n ' if (not isinstance(other, (_d.Dim, MarkedDim))): raise TypeError(('cannot compare %r with %r' % (self, other))) if (self == other): return False return (_dim_extra.dim_cmp_value(self) < _dim_extra.dim_cmp_value(other)) def __gt__(self, other): return (other < self) def __ge__(self, other): return (not (self < other)) def __le__(self, other): return (not (self > other))
class ImplicitDim(MarkedDim):
    """
    Represents an implicit dim (dim tag) in :class:`Data`.
    https://github.com/rwth-i6/returnn/issues/706
    """
class ImplicitSparseDim(ImplicitDim):
    """
    Represents an implicit dim via Data.sparse_dim.
    """
class ImplicitDynSizeDim(ImplicitDim):
    """
    Represents an implicit dim via dynamic dim sizes.
    https://github.com/rwth-i6/returnn/issues/706
    (For example via :class:`CumConcatLayer`.)
    """
class OptionalDim(MarkedDim):
    """
    Represents a dim which might exist or not.
    """
class Tensor(_TensorMixin, _TensorOpOverloadsMixin, Generic[RawTensorType]):
    """
    Represents a tensor, in a frame-agnostic way. See the module docstring.
    """

    size_dtype = 'int32'  # dtype used for dim size tensors

    __slots__ = ('name', '_dims', 'dtype', 'sparse_dim', '_raw_tensor', '_feature_dim_axis', 'version', '_extra')

    name: str
    _dims: Tuple[Dim, ...]
    dtype: str
    sparse_dim: Optional[Dim]
    _raw_tensor: Optional[RawTensorType]
    _feature_dim_axis: Optional[Union[int, NotSpecified]]  # NotSpecified -> infer (v1 only)
    version: int
    _extra: Optional[_TensorExtra]

    def __init__(self, name: str, dims: Optional[Sequence[Dim]]=None, dtype: Optional[str]=None, *, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None, feature_dim_axis: Optional[Union[int, NotSpecified]]=NotSpecified, raw_tensor: Optional[RawTensorType]=None, version: Optional[int]=None, **kwargs):
        """
        :param name:
        :param dims: the shape, where each dimension is described by a :class:`Dim`.
        :param dtype: e.g. "float32" or "int64"
        :param sparse_dim: when the values are indices into some dimension, this is the dimension.
            You can also interpret the whole tensor as a sparse representation of a dense one-hot tensor,
            where this sparse_dim becomes the additional dense dimension.
        :param feature_dim: convenience: sets the feature dim via the feature_dim setter
        :param feature_dim_axis: explicit feature axis index; NotSpecified -> infer (v1) or None (v2)
        :param raw_tensor: the raw tensor, e.g. numpy array, TF tensor, or PyTorch tensor
        :param version: behavior version just for Tensor. If not specified, and `dims` is None (old code),
            it uses version 1.
            - v1: the old behavior of Data. Specifically, time_dim_axis and feature_dim_axis are used
              and automatically inferred when not specified.
            - v2: time_dim_axis, feature_dim_axis are None by default.
        :param kwargs: see :func:`_handle_extra_kwargs`, :func:`infer_dim_tags`
        """
        if (('sparse' in kwargs) and (sparse_dim is None)):
            # Legacy `sparse=True` kwarg: derive the sparse dim from the old-style args.
            sparse_dim = _tensor_extra.infer_sparse_dim(name=name, sparse_dim=sparse_dim, **kwargs)
        if (dims is not None):
            # New-style API: dims given directly. Old-style shape args must not also be given.
            assert (('shape' not in kwargs) and ('dim_tags' not in kwargs))
            if (version is None):
                version = 2
        else:
            # Old-style API: infer the dim tags from shape/axes kwargs.
            dims = _tensor_extra.infer_dim_tags(name=name, sparse_dim=sparse_dim, feature_dim_axis=feature_dim_axis, **kwargs)
            if (version is None):
                version = 1
        if (dtype is None):
            if (version == 1):
                # v1 default dtype depends on sparseness (legacy Data behavior).
                dtype = ('int32' if sparse_dim else 'float32')
            else:
                raise ValueError('Tensor dtype needs to be specified')
        self.name = name
        self._dims = tuple(dims)
        self.dtype = dtype
        self.sparse_dim = sparse_dim
        self._raw_tensor = None  # assigned last, via the property setter (does checks)
        self.version = version
        self._extra = None
        if (feature_dim_axis is NotSpecified):
            if (version >= 2):
                # v2: no implicit feature axis; keep NotSpecified for v1 (inferred lazily).
                feature_dim_axis = None
        elif (feature_dim_axis is None):
            pass
        elif isinstance(feature_dim_axis, int):
            assert (not self.sparse_dim), 'cannot have feature_dim_axis when sparse'
            if (feature_dim_axis < 0):
                feature_dim_axis += self.batch_ndim
            assert (0 <= feature_dim_axis < self.batch_ndim)
        else:
            raise TypeError(f'unexpected feature_dim_axis type {type(feature_dim_axis)}')
        self._feature_dim_axis = feature_dim_axis
        if feature_dim:
            self.feature_dim = feature_dim
        if kwargs:
            self._handle_extra_kwargs(**kwargs)
        if (raw_tensor is not None):
            self.raw_tensor = raw_tensor

    @property
    def dims(self) -> Tuple[Dim, ...]:
        """
        :return: dim tags
        """
        return self._dims

    @property
    def dims_set(self) -> Set[Dim]:
        """
        :return: set of dim tags. in all high-level code, the order of dims is irrelevant.
            The order must not play a role
            (RETURNN principles: https://github.com/rwth-i6/returnn/wiki/RETURNN-principles).
            Note that we do not include any implicit dims here.
            Also see :func:`verify_out_shape` and https://github.com/rwth-i6/returnn/issues/1153.
        """
        return set(self._dims)

    @property
    def raw_tensor(self) -> Optional[RawTensorType]:
        """
        :return: raw tensor
        """
        return self._raw_tensor

    @raw_tensor.setter
    def raw_tensor(self, value: Optional[RawTensorType]):
        """
        :param value: raw tensor; validated against this Tensor's dims and dtype
        """
        if (value is not None):
            import returnn.frontend._backend as _backend_api

            backend = _backend_api.get_backend_by_raw_tensor_type(type(value))
            raw_shape = backend.get_known_shape_raw(value)
            assert (len(raw_shape) == len(self._dims)), f'Mismatching shape ndim: Raw tensor {raw_shape} vs Tensor {self}'
            for (i, dim) in enumerate(self._dims):
                if (dim.dimension is None):
                    continue  # dynamic dims cannot be checked against the static raw shape
                if (raw_shape[i] != dim.dimension):
                    raise Exception((f'''Mismatching shape: Raw tensor {raw_shape} vs Tensor {self}; ''' + backend.format_graph_output(value, max_depth=3)))
            if (not backend.executing_eagerly()):
                # Graph mode: propagate the known static shape onto the raw tensor.
                backend.set_known_shape_raw(value, self.batch_shape)
            assert (backend.get_dtype_name_raw(value) == self.dtype), f'{self} dtype {self.dtype} does not match raw tensor dtype {backend.get_dtype_name_raw(value)}'
        self._raw_tensor = value

    @property
    def feature_dim(self) -> Optional[Dim]:
        """
        :return: self.dims[self.feature_dim_axis] or None.
            See https://github.com/rwth-i6/returnn/issues/1273 for some discussion.
        """
        if (self._feature_dim_axis is None):
            return None
        if isinstance(self._feature_dim_axis, int):
            return self._dims[self._feature_dim_axis]
        # _feature_dim_axis is NotSpecified here (v1): use the lazily-inferred property.
        if (self.feature_dim_axis is None):
            return None
        return self._dims[self.feature_dim_axis]

    @feature_dim.setter
    def feature_dim(self, value: Optional[Dim]):
        """
        :param value: dim to use as feature dim; None clears the feature axis
        """
        if (value is None):
            self._feature_dim_axis = None
            return
        assert (not self.sparse_dim), 'cannot have feature_dim_axis when sparse'
        self._feature_dim_axis = self.get_axis_from_description(value, allow_int=False)

    @property
    def device(self) -> Optional[str]:
        """
        :return: device of the raw tensor, or None if there is no raw tensor
        """
        if (self.raw_tensor is None):
            return None
        return self._raw_backend.get_device(self)
class TensorDict():
    """dict of tensors, keyed by name, with helpers to convert to/from flat raw-tensor dicts"""

    def __init__(self, data: Optional[_DataStrictT]=None):
        """
        :param data: initial content, see :func:`update`
        """
        self.data = {}
        if data:
            self.update(data)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.data})'

    def update(self, data: Union[_DataAutoConvertT, TensorDict], *, auto_convert: bool=False):
        """
        Update from another TensorDict, a dict, or a list/tuple of tensors.
        Values are always copied (via Tensor.copy()).

        :param data:
        :param auto_convert: if True, allow dict entries which are kwargs-dicts for Tensor
        """
        if isinstance(data, TensorDict):
            for (key, value) in data.data.items():
                self.data[key] = value.copy()
        elif isinstance(data, dict):
            for (key, value) in data.items():
                if auto_convert:
                    value = _convert_to_tensor(value, name=key)
                else:
                    assert isinstance(value, Tensor)
                self.data[key] = value.copy()
        elif isinstance(data, (list, tuple)):
            for value in data:
                if auto_convert:
                    value = _convert_to_tensor(value)
                else:
                    assert isinstance(value, Tensor)
                self.data[value.name] = value.copy()
        else:
            raise TypeError(f'invalid `data` type: {type(data)}')

    def __getitem__(self, item: str) -> Tensor:
        return self.data[item]

    def reset_content(self):
        """reset content, i.e. all raw_tensor's to None, including dyn_size_ext of dim tags"""
        dims = []
        dims_set = set()
        for value in self.data.values():
            value.reset()
            for dim in value.dims:
                # Collect each dim once, preserving first-seen order.
                if (dim not in dims_set):
                    dims_set.add(dim)
                    dims.append(dim)
        for dim in dims:
            dim.reset_batch_and_raw()

    def copy_template(self) -> TensorDict:
        """copy template (no raw tensors)"""
        return TensorDict({k: v.copy_template() for (k, v) in self.data.items()})

    def as_raw_tensor_dict(self, *, include_const_sizes: bool=False, include_scalar_dyn_sizes: bool=True, exclude_duplicate_dims: bool=False, expected_value_type: Union[Type, Sequence[Type]]=object) -> Dict[str, Any]:
        """
        :param include_const_sizes: also emit static dim sizes (requires include_scalar_dyn_sizes)
        :param include_scalar_dyn_sizes: emit scalar (0-dim) dynamic sizes
        :param exclude_duplicate_dims: emit each dim's size only once, for its first occurrence
        :param expected_value_type: type(s) each emitted value must have
        :return: dict of raw tensors, including any sequence lengths / dynamic sizes
        """
        assert (not (include_const_sizes and (not include_scalar_dyn_sizes)))
        visited_dims = set()
        out = {}
        for (key, value) in self.data.items():
            assert (key not in out)
            assert isinstance(value.raw_tensor, expected_value_type), f'key {key} {value}: unexpected {type(value.raw_tensor)}, expected {expected_value_type}'
            out[key] = value.raw_tensor
            for (i, dim) in enumerate(value.dims):
                if (exclude_duplicate_dims and (dim in visited_dims)):
                    continue
                key_ = f'{key}:size{i}'
                assert (key_ not in out)
                if (dim.is_batch_dim() and ((not dim.dyn_size_ext) or (dim.dyn_size_ext.raw_tensor is None))):
                    # Batch dim without materialized sizes: emit the scalar dim value.
                    if include_scalar_dyn_sizes:
                        dim_value = dim.get_dim_value()
                        assert isinstance(dim_value, expected_value_type), f'key {key_} {dim}: unexpected {type(dim_value)}, expected {expected_value_type}'
                        out[key_] = dim_value
                elif dim.dyn_size_ext:
                    if (include_scalar_dyn_sizes or dim.dyn_size_ext.dims):
                        assert isinstance(dim.dyn_size_ext.raw_tensor, expected_value_type), f'key {key_} {dim} {dim.dyn_size_ext}: unexpected {type(dim.dyn_size_ext.raw_tensor)}, expected {expected_value_type}'
                        out[key_] = dim.dyn_size_ext.raw_tensor
                elif (dim.size is not None):
                    # Static dim: only emitted when explicitly requested.
                    if (include_scalar_dyn_sizes and include_const_sizes):
                        assert isinstance(dim.size, expected_value_type), f'key {key_} {dim}: unexpected {type(dim.size)}, expected {expected_value_type}'
                        out[key_] = dim.size
                else:
                    raise Exception(f'cannot handle dim: {dim}')
                visited_dims.add(dim)
        return out

    def assign_from_raw_tensor_dict_(self, raw_tensor_dict: Dict[str, Any], *, with_scalar_dyn_sizes: bool=True, duplicate_dims_are_excluded: bool=False):
        """
        Inverse of :func:`as_raw_tensor_dict`: assign raw tensors (and dim sizes) in place.

        :param raw_tensor_dict: dict of raw tensors, including any sequence lengths / dynamic sizes
        :param with_scalar_dyn_sizes: `include_scalar_dyn_sizes` was used in :func:`as_raw_tensor_dict`
        :param duplicate_dims_are_excluded: `exclude_duplicate_dims` was used in :func:`as_raw_tensor_dict`
        """
        visited_dims = set()
        for (key, value) in self.data.items():
            assert (key in raw_tensor_dict)
            value.raw_tensor = raw_tensor_dict[key]
            for (i, dim) in enumerate(value.dims):
                dim: Dim
                if (duplicate_dims_are_excluded and (dim in visited_dims)):
                    continue
                key_ = f'{key}:size{i}'
                dim.reset_raw(only_self=True)
                if (dim.is_batch_dim() and (not dim.dyn_size_ext)):
                    # Create a scalar size template for the batch dim so it can receive a value.
                    dim.dyn_size_ext = Tensor('batch', [], dtype='int32')
                if dim.dyn_size_ext:
                    if ((not with_scalar_dyn_sizes) and (not dim.dyn_size_ext.dims)):
                        # Scalar sizes were not serialized; nothing to assign.
                        pass
                    else:
                        # NOTE(review): the stray "f" before the braces looks like a typo in the
                        # message template; kept byte-identical here since it is runtime output.
                        assert (key_ in raw_tensor_dict), f'keys: f{raw_tensor_dict.keys()}'
                        dim.dyn_size_ext.raw_tensor = raw_tensor_dict[key_]
                elif (key_ in raw_tensor_dict):
                    # Static dim size was serialized; just verify consistency.
                    assert (dim.size == raw_tensor_dict[key_])
                visited_dims.add(dim)
def _convert_to_tensor(opts: _TensorT, *, name: Optional[str]=None) -> Tensor:
    """
    Coerce `opts` into a :class:`Tensor`.

    :param opts: an existing :class:`Tensor` (returned as-is) or a kwargs dict for the Tensor constructor
    :param name: if given, overrides/provides the tensor name
    """
    if isinstance(opts, Tensor):
        return opts
    assert isinstance(opts, dict)
    opts = dict(opts)  # do not mutate the caller's dict
    if name:
        opts['name'] = name
    else:
        assert ('name' in opts), f'missing `name` in Tensor {opts!r}'
    return Tensor(**opts)
def tensor_dict_fill_random_numpy_(tensor_dict: TensorDict, *, rnd: Union[int, numpy.random.RandomState]=42, dyn_dim_max_sizes: Optional[Dict[Dim, int]]=None, dyn_dim_min_sizes: Optional[Dict[Dim, int]]=None):
    """
    Random fill with NumPy arrays.

    :param tensor_dict:
    :param rnd: RandomState, or an int seed to construct one
    :param dyn_dim_max_sizes: you can specify max sizes for dim tags with dynamic sizes.
        The fill random code makes sure that there is at least one entry where we reach the max size,
        so that the dim value will be the max size.
    :param dyn_dim_min_sizes:
    """
    # Accept either a seed or a ready-made RandomState.
    rng = rnd if isinstance(rnd, numpy.random.RandomState) else numpy.random.RandomState(rnd)
    for tensor in tensor_dict.data.values():
        tensor_fill_random_numpy_(
            tensor, rnd=rng, dyn_dim_max_sizes=dyn_dim_max_sizes, dyn_dim_min_sizes=dyn_dim_min_sizes
        )
def tensor_fill_random_numpy_(
    x: Tensor,
    *,
    min_val: int = 0,
    max_val: Optional[int] = None,
    rnd: numpy.random.RandomState,
    dyn_dim_max_sizes: Optional[Dict[Dim, int]] = None,
    dyn_dim_min_sizes: Optional[Dict[Dim, int]] = None,
) -> bool:
    """
    Fill the tensor `x` in-place with random NumPy data,
    first recursively filling the dynamic sizes of its dims.

    :param x: tensor to fill; its `raw_tensor` is replaced unless it is already a NumPy array
    :param min_val: lower bound (inclusive) for random int values
    :param max_val: upper bound (exclusive) for random int values; random in [5,20) if not given
    :param rnd: random state to draw from
    :param dyn_dim_max_sizes: max sizes for dyn dims; one random entry is forced to the max,
        so the dim value will be the max size
    :param dyn_dim_min_sizes: min sizes for dyn dims; such a dim must also be in dyn_dim_max_sizes
    :return: whether sth was filled
    """
    if (dyn_dim_max_sizes is None):
        dyn_dim_max_sizes = {}
    if (dyn_dim_min_sizes is None):
        dyn_dim_min_sizes = {}
    filled = False
    # Fixpoint loop: repeat until the dyn sizes of all dims are materialized NumPy arrays.
    # Multiple rounds can be needed because dyn sizes are tensors themselves,
    # whose dims can again have dyn sizes.
    while True:
        have_unfilled = False
        filled_this_round = False
        for dim in x.dims:
            if (dim.is_batch_dim() and (not dim.dyn_size_ext)):
                dim.dyn_size_ext = Tensor('batch', [], dtype='int32')
            if (dim.is_dynamic() and (not dim.dyn_size_ext)):
                # NOTE(review): assumes the global batch_dim for new dyn sizes -- confirm intended.
                dim.dyn_size_ext = Tensor((dim.name or 'time'), dims=[batch_dim], dtype='int32')
            if (not dim.dyn_size_ext):
                continue  # static dim, nothing to fill
            if tensor_fill_random_numpy_(
                dim.dyn_size_ext,
                min_val=dyn_dim_min_sizes.get(dim, 2),
                max_val=dyn_dim_max_sizes.get(dim, None),
                rnd=rnd,
                dyn_dim_max_sizes=dyn_dim_max_sizes,
            ):
                if (dim in dyn_dim_max_sizes):
                    # Force one random entry to the max size, so the dim value equals the max.
                    i = rnd.randint(0, dim.dyn_size_ext.raw_tensor.size)
                    dim.dyn_size_ext.raw_tensor.flat[i] = dyn_dim_max_sizes[dim]
                    if (dim in dyn_dim_min_sizes):
                        # Force one other entry (j != i) to the min size.
                        # NOTE(review): rnd.randint raises if size == 1 here -- presumably callers
                        # only combine min+max sizes with dyn sizes of more than one entry; verify.
                        j = rnd.randint(0, (dim.dyn_size_ext.raw_tensor.size - 1))
                        if (j >= i):
                            j += 1
                        dim.dyn_size_ext.raw_tensor.flat[j] = dyn_dim_min_sizes[dim]
                elif (dim in dyn_dim_min_sizes):
                    raise Exception(f'also define {dim} in dyn_dim_max_sizes, not just dyn_dim_min_sizes')
                filled = True
                filled_this_round = True
            # Track whether this dim's sizes are still not a NumPy array.
            if (dim.dyn_size_ext.raw_tensor is None):
                have_unfilled = True
            elif (not isinstance(dim.dyn_size_ext.raw_tensor, numpy.ndarray)):
                have_unfilled = True
        if have_unfilled:
            # Guard against an infinite loop: every round must make progress.
            assert filled_this_round, f'should have filled something, {x}'
        if (not have_unfilled):
            break
    if (x.raw_tensor is not None):
        if (not isinstance(x.raw_tensor, numpy.ndarray)):
            x.raw_tensor = None  # discard non-NumPy data; refilled below
    if (x.raw_tensor is None):
        shape = [d.get_dim_value() for d in x.dims]
        if x.dtype.startswith('int'):
            if (max_val is None):
                max_val = rnd.randint(5, 20)
            if (x.sparse_dim and (x.sparse_dim.dimension is not None)):
                # Sparse values are indices into the sparse dim, so bound them by its size.
                max_val = x.sparse_dim.dimension
            x.raw_tensor = rnd.randint(min_val, max_val, size=shape, dtype=x.dtype)
        elif (x.dtype == 'bool'):
            x.raw_tensor = rnd.randint(0, 2, size=shape, dtype=x.dtype)
        elif x.dtype.startswith('float'):
            x.raw_tensor = rnd.normal(0.0, 1.0, size=shape).astype(x.dtype)
        elif x.dtype.startswith('complex'):
            real = rnd.normal(0.0, 1.0, size=shape)
            imag = rnd.normal(0.0, 1.0, size=shape)
            x.raw_tensor = (real + (1j * imag)).astype(x.dtype)
        else:
            raise NotImplementedError(f'not implemented for {x} dtype {x.dtype}')
        filled = True
    assert isinstance(x.raw_tensor, numpy.ndarray)
    return filled
def executing_eagerly():
    """
    :return: whether TF is currently executing eagerly
        (False on old TF versions lacking the `executing_eagerly` API)
    :rtype: bool
    """
    return tf.executing_eagerly() if hasattr(tf, 'executing_eagerly') else False
class MPIClusterResolver(tf.distribute.cluster_resolver.ClusterResolver):
    """
    ClusterResolver which derives the TF cluster spec via MPI (using `mpi4py`).

    Distributed TF is in general totally independent of MPI.
    MPI is used here only to discover the peer processes and exchange host/port
    information; once set up, TF handles all communication itself via gRPC
    (unlike e.g. Horovod, which would keep using MPI).

    Intended e.g. for Sun Grid Engine (SGE) / Oracle Grid Engine with the
    parallel environment (PE) feature, where subprocesses (slots) are started
    via ``mpirun`` -- Open MPI detects SGE and launches/kills processes through
    the SGE mechanisms, potentially on remote hosts. Inside such a process,
    MPI collectives are the only reliable way to learn about the peers,
    since env vars like ``PE_HOSTFILE`` may not be available remotely.

    Somewhat similar to :class:`SlurmClusterResolver`.
    See also the MPIClusterResolver PR:
    https://github.com/tensorflow/tensorflow/issues/38356
    """

    def __init__(self):
        from mpi4py import MPI

        world = MPI.COMM_WORLD
        self._host = MPI.Get_processor_name()
        self._port = _get_open_port()
        self._rank = world.Get_rank()
        self._size = world.Get_size()
        # Exchange (rank, host, port) across all MPI processes.
        gathered = world.allgather((self._rank, self._host, self._port))
        addr_by_rank = {rank: '%s:%i' % (host, port) for rank, host, port in gathered}
        assert len(addr_by_rank) == self._size and 0 in addr_by_rank and (self._size - 1) in addr_by_rank
        self._peers = [addr_by_rank[rank] for rank in range(self._size)]
        self.task_type = 'worker'
        self.task_id = self._rank
        self.rpc_layer = 'grpc'

    def cluster_spec(self):
        """
        :return: ClusterSpec with all peer worker addresses,
            freshly constructed on every call (per the ClusterResolver contract)
        :rtype: ClusterSpec
        """
        return ClusterSpec({'worker': self._peers})

    def master(self, task_type=None, task_id=None, rpc_layer=None):
        """
        :param task_type: (optional) TF task type of the master; defaults to our own
        :param task_id: (optional) TF task index of the master; defaults to our own
        :param rpc_layer: (optional) RPC protocol; defaults to our own
        :return: name or URL of the session master, or "" if unknown
        :rtype: str
        """
        if task_type is None:
            task_type = self.task_type
        if task_id is None:
            task_id = self.task_id
        if task_type is None or task_id is None:
            return ''
        return format_master_url(self.cluster_spec().task_address(task_type, task_id), rpc_layer or self.rpc_layer)

    def num_accelerators(self, task_type=None, task_id=None, config_proto=None):
        """
        :param task_type: (optional) task type of the machine to query
        :param task_id: (optional) task index of the machine to query
        :param config_proto: (optional) session config used for the query
        :return: map of accelerator types (GPU/TPU) to number of cores per worker
        """
        return super().num_accelerators(task_type=task_type, task_id=task_id, config_proto=config_proto)
def _get_open_port(): '\n https://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python\n\n :rtype: int\n ' import socket s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', 0)) s.listen(1) port = s.getsockname()[1] s.close() return port
class LocalOnlyClusterResolver(tf.distribute.cluster_resolver.ClusterResolver):
    """
    Cluster resolver describing a single local worker instance (no real cluster).
    """

    def __init__(self):
        self._port = _get_open_port()
        self._host = 'localhost:%i' % self._port
        self.task_type = 'worker'
        self.task_id = 0
        self.rpc_layer = 'grpc'

    def cluster_spec(self):
        """
        :return: spec with only our own local worker address
        :rtype: ClusterSpec
        """
        return ClusterSpec({'worker': [self._host]})

    def master(self, task_type=None, task_id=None, rpc_layer=None):
        """
        :param task_type: (optional) TF task type of the master; defaults to our own
        :param task_id: (optional) TF task index of the master; defaults to our own
        :param rpc_layer: (optional) RPC protocol; defaults to our own
        :return: name or URL of the session master, or "" if unknown
        :rtype: str
        """
        if task_type is None:
            task_type = self.task_type
        if task_id is None:
            task_id = self.task_id
        if task_type is None or task_id is None:
            return ''
        return format_master_url(self.cluster_spec().task_address(task_type, task_id), rpc_layer or self.rpc_layer)
class _Controller():
    """
    The controller encapsulates the logic needed for distributed TF in RETURNN.
    It would be setup via :func:`init_distributed_tf`.

    We currently check for the TF_CONFIG env var,
    and if set, use the :class:`TFConfigClusterResolver`.
    Otherwise we assume a MPI setup, and use :class:`MPIClusterResolver`.

    If we are using in-graph replication, and this is not the chief,
    this would just run a TF server and wait for commands.
    """

    def __init__(self, config):
        """
        :param Config config:
        """
        print('Initialize distributed TensorFlow', file=log.v2)
        self.config = config
        opts = config.get_of_type('distributed_tf', dict, {})
        # Wrapped so assert_all_read() at the end can verify every option was consumed.
        opts = CollectionReadCheckCovered(opts)
        self.opts = opts
        # Select the cluster resolver: explicit local-only, TF_CONFIG env var, or MPI fallback.
        if opts.get('local_only', False):
            cluster_resolver = LocalOnlyClusterResolver()
            print('Use local-only cluster resolver,', file=log.v4, end=' ')
        elif os.environ.get('TF_CONFIG', ''):
            cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
            print(('Use TF_CONFIG %s,' % os.environ['TF_CONFIG']), file=log.v4, end=' ')
        else:
            cluster_resolver = MPIClusterResolver()
            print('Use MPI cluster resolver,', file=log.v4, end=' ')
        print(('cluster spec %s, master %s' % (cluster_resolver.cluster_spec(), cluster_resolver.master())), file=log.v4)
        self.cluster_resolver = cluster_resolver
        cluster_spec = cluster_resolver.cluster_spec()
        self.cluster_spec = cluster_spec
        tf_session_opts = config.typed_dict.get('tf_session_opts', {})
        server_config = tf.compat.v1.ConfigProto(**tf_session_opts)
        # Start the TF server for this task; peers connect to it via gRPC.
        server = tf.distribute.Server(cluster_spec, job_name=cluster_resolver.task_type, task_index=cluster_resolver.task_id, config=server_config)
        self.server = server
        self.strategy = ReturnnDefaultStrategy()
        self.opts.assert_all_read()

    def is_chief(self):
        """
        :return: whether we are the chief (worker 0)
        :rtype: bool
        """
        # Prefer an explicit "chief"/"master" job if the cluster spec defines one;
        # otherwise worker 0 counts as the chief.
        if ('chief' in self.cluster_spec.jobs):
            return (self.cluster_resolver.task_type == 'chief')
        if ('master' in self.cluster_spec.jobs):
            return (self.cluster_resolver.task_type == 'master')
        if ('worker' in self.cluster_spec.jobs):
            if (self.cluster_resolver.task_type != 'worker'):
                return False
            return (self.cluster_resolver.task_id == 0)
        raise NotImplementedError(('is_chief unknown for cluster spec %r' % self.cluster_spec))
class ReturnnDefaultStrategy(tf.distribute.Strategy):
    """
    RETURNN default distribution strategy,
    backed by :class:`ReturnnDefaultStrategyExtended`.
    """

    def __init__(self):
        extended = ReturnnDefaultStrategyExtended(self)
        super().__init__(extended=extended)
class ReturnnDefaultStrategyExtended(DefaultDistributionExtended):
    """
    Extended part of :class:`ReturnnDefaultStrategy`;
    inherits all behavior from TF's default distribution extended implementation.
    """
def init_distributed_tf(config):
    """
    Set up distributed TensorFlow. Called early in the startup of RETURNN.
    Must only be called once per process.

    :param Config config:
    """
    global _controller
    assert not _controller, 'init_distributed_tf called twice?'
    controller = _Controller(config=config)
    _controller = controller
def is_enabled():
    """
    :return: whether distributed TF was set up via :func:`init_distributed_tf`
    :rtype: bool
    """
    return True if _controller else False
def get_session_target():
    """
    Address of the local TF server. Use this when you have a local custom graph
    in the current process (replica) and want to execute parts of it,
    e.g. for between-graph replication: after creating the graph,
    create a session which connects to the server returned here.

    :return: URL of the TF server, where the local session should connect to
    :rtype: str
    """
    controller = _controller
    assert controller, 'init_distributed_tf not called?'
    return controller.server.target
@contextlib.contextmanager
def _temporary_init_distributed_tf(config):
    """
    This is useful for tests.

    :param config:
    :return: scope where we have initialized distributed TF;
        leaving the scope (also via exception) uninits again
    """
    global _controller
    init_distributed_tf(config=config)
    try:
        yield
    finally:
        # Bug fix: previously the controller was only reset on the non-exception
        # path, so a raising test body left stale global state behind.
        assert _controller
        _controller = None
class ReturnnLayersBackend(Backend[Layer]): '\n RETURNN layers backend (using TF), where raw_tensor represents a RETURNN layer\n ' RawTensorType = Layer is_tensorflow = True is_backend_raw_tensor_dim_tag_independent = False @staticmethod def executing_eagerly() -> bool: 'executing eagerly' return False @staticmethod def get_tensor_dependencies(x: Tensor[Layer]) -> Sequence[Tensor]: 'get tensor inputs' deps: List[Tensor] = [] visited = set() for dep in x.raw_tensor.get_tensor_dependencies(): if ((not dep.tensor) or (dep.tensor in visited)): continue visited.add(dep.tensor) deps.append(dep.tensor) return deps @staticmethod def get_tensor_consumers(x: Tensor[Layer]) -> Sequence[Tensor]: 'get tensor consumers' usages: List[Tensor] = [] visited = set() for use in x.raw_tensor.usages: if ((not use.tensor) or (use.tensor in visited)): continue visited.add(use.tensor) usages.append(use.tensor) return usages @staticmethod def cond(pred: Tensor, true_fn: Callable, false_fn: Callable): 'cond' with rfl.Cond(pred) as cond: cond.true = true_fn() cond.false = false_fn() return cond.result @staticmethod def while_loop(cond: Callable[([S], Union[(bool, Tensor)])], body: Callable[([S], S)], initial: S) -> S: 'while loop' def _loop(): loop = rfl.Loop(max_seq_len=rf.constant(((2 ** 31) - 1), dims=())) loop.state.state = initial with loop: loop.state.state = body(loop.state.state) loop.end(rf.logical_not(cond(loop.state.state)), include_eos=True) return loop.state.state return ReturnnLayersBackend.cond(cond(initial), _loop, (lambda : initial)) @staticmethod def set_random_seed(seed: int): '\n :param seed:\n ' tf_compat.v1.set_random_seed(seed) @staticmethod def get_random_state() -> Dict[(str, bytes)]: '\n :return: random state\n ' return {} @staticmethod def set_random_state(state: Dict[(str, bytes)]): '\n :param state: as returned by :func:`get_random_state`.\n This might not always be successful (e.g. 
different hardware, different backend version),\n so the calling code should always have called set_random_seed before to have the random generators\n in a reasonable fallback state.\n ' assert (not state) @staticmethod def get_dtype_name_raw(raw_tensor: Layer) -> str: 'dtype' return raw_tensor.tensor.dtype @staticmethod def as_dtype_raw(dtype_name: str) -> str: 'dtype' return dtype_name @staticmethod def get_ndim_raw(raw_tensor: Layer) -> int: 'ndim' return raw_tensor.tensor.batch_ndim @staticmethod def get_shape_raw(raw_tensor: Layer) -> Layer: 'shape' raise NotImplementedError @staticmethod def get_shape_tuple_raw(raw_tensor: Layer) -> Tuple[Union[(int, Layer)]]: 'shape' raise NotImplementedError @staticmethod def get_known_shape_raw(raw_tensor: Layer) -> Tuple[Optional[int]]: 'known shape' return raw_tensor.tensor.batch_shape @staticmethod def fill_raw(shape: Union[(Sequence[Union[(int, Layer)]], Layer)], value: Union[(Any, Layer)]) -> Layer: 'fill raw' raise Exception('fill_raw not supported in layers backend because dim tags would be unknown') @staticmethod def compare_raw(a: Layer, kind: str, b: Layer) -> Layer: 'compare' raise Exception('compare_raw should not get called') @staticmethod def combine_raw(a: Layer, kind: str, b: Layer) -> Layer: 'combine' raise Exception('combine_raw should not get called') @staticmethod def reshape_raw(raw_tensor: Layer, shape: Union[(Sequence[Union[(int, Layer)]], Layer)]) -> Layer: 'reshape_raw' raise Exception('reshape_raw not supported in layers backend because dim tags would be unknown') @staticmethod def make_output_tensor(tensor: Tensor, dims: Sequence[Dim], *, name: str) -> Tensor: 'only func where we have explicitly defined dim order in the output' return rfl.make_layer({'class': 'transpose', 'from': tensor, 'perm': dims}, name=name) @staticmethod def copy(tensor: Tensor) -> Tensor: 'copy' return rfl.make_layer({'class': 'identity', 'from': tensor}, name='copy') @staticmethod def cast(tensor: Tensor, dtype: str) -> 
Tensor: 'cast' return rfl.make_layer({'class': 'cast', 'from': tensor, 'dtype': dtype}, name='cast') @staticmethod def set_requires_gradient(tensor: Tensor): '\n set requires gradient; not needed for TensorFlow, will always calculate whatever is needed\n ' @staticmethod def gradient(y: Tensor, x: Tensor) -> Tensor: 'gradient' return rfl.make_layer({'class': 'gradient', 'y': y, 'x': x}, name='gradient') @staticmethod def stop_gradient(tensor: Tensor) -> Tensor: 'stop grad' return rfl.make_layer({'class': 'scaled_grad', 'from': tensor, 'scale': 0}, name='stop_gradient') @staticmethod def scaled_gradient(tensor: Tensor, scale: Union[(float, Tensor)]) -> Tensor: 'scaled gradient' return rfl.make_layer({'class': 'scaled_grad', 'from': tensor, 'scale': scale}, name='scaled_gradient') @staticmethod def scaled_gradient_ext(x: Tensor, *, scale: Union[(float, Tensor)]=1.0, shift: Optional[Union[(float, Tensor)]]=None, scale_shift_by_sum_over_axis: Optional[Dim]=None): 'scaled gradient ext' return rfl.make_layer({'class': 'scaled_grad', 'from': x, 'scale': scale, 'shift': shift, 'scale_shift_by_sum_over_axis': scale_shift_by_sum_over_axis}, name='scaled_gradient_ext') @staticmethod def merge_dims(source: Tensor, *, dims: Sequence[Dim], out_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]: '\n Merges a list of axes into a single one. (Flatten the dims.)\n E.g. 
input is (batch, width, height, dim) and dims=(width,height), then we get (batch, width*height, dim).\n Or input is (batch, time, height, dim) and axes=(height,dim), then we get (batch, time, height*dim).\n\n :param source:\n :param dims:\n :param out_dim:\n :return: tensor, out_dim\n ' if (not isinstance(source, Tensor)): raise TypeError(f'merge_dims: unexpected type for source {source!r}, need tensor') if (out_dim is None): out_dim = dims[0] for d in dims[1:]: out_dim = (out_dim * d) layer = rfl.make_layer({'class': 'merge_dims', 'from': source, 'axes': dims, 'out_dim': out_dim}, name='merge_dims') return (layer, out_dim) @staticmethod def split_dims(source: Tensor, *, axis: Dim, dims: Sequence[Dim], pad_to_multiples: Optional[bool]=None, pad_value: Union[(None, int, float)]=None) -> Tensor: 'split dims' args = {} if ((pad_to_multiples is not None) or (pad_value is not None)): args['pad_to_multiples'] = pad_to_multiples args['pad_value'] = pad_value args = {key: value for (key, value) in args.items() if (value is not NotSpecified)} return rfl.make_layer({'class': 'split_dims', 'from': source, 'axis': axis, 'dims': dims, **args}, name='split_dims') @staticmethod def reshape(source: Tensor, in_dims: Sequence[Dim], out_dims: Sequence[Dim]) -> Tensor: 'reshape' return rfl.make_layer({'class': 'reshape', 'from': source, 'in_dims': in_dims, 'out_dims': out_dims, 'extra_deps': rfl.get_dim_deps(out_dims)}, name='reshape') @staticmethod def split(source: Tensor, *, axis: Dim, out_dims: Sequence[Dim]) -> Tuple[(Tensor, ...)]: 'split' res = rfl.make_layer({'class': 'split', 'from': source, 'axis': axis, 'out_dims': out_dims}, name='split') src_axis_int = source.get_axis_from_description(axis) return tuple((rfl._get_sub_layer(layer=res, name=str(i), data=source.copy_template_replace_dim_tag(axis=src_axis_int, new_dim_tag=dim, name=f'{source.name}/split:{i}:{dim.description}')) for (i, dim) in enumerate(out_dims))) @staticmethod def expand_dim(source: Tensor, dim: Dim) -> 
Tensor: 'expand dim' if source.have_feature_axis(): axis = 'spatial' elif (dim.dimension is not None): axis = 'feature' else: axis = 'spatial' return rfl.make_layer({'class': 'expand_dims', 'from': source, 'axis': axis, 'dim': dim}, name='expand_dims') @staticmethod def squeeze(source: Tensor, axis: Dim) -> Tensor: 'squeeze' return rfl.make_layer({'class': 'squeeze', 'from': source, 'axis': axis}, name='squeeze') @staticmethod def concat(*sources: Tuple[(Tensor, Dim)], allow_broadcast: bool=False, out_dim: Dim) -> Tensor: 'concat' opts = {} if allow_broadcast: opts['allow_broadcast'] = True out_dim = sum((d for (_, d) in sources)) return rfl.make_layer({'class': 'concat', 'from': sources, 'out_dim': out_dim, **opts}, name='concat') @staticmethod def pad(source: Tensor, *, axes: Sequence[Dim], padding: Sequence[Tuple[(Union[(Dim, int)], Union[(Dim, int)])]], out_dims: Sequence[Dim], mode: str='constant', value: Union[(rf.RawTensorTypes, Tensor)]=None) -> Tensor: 'pad' assert isinstance(value, (int, float, type(None))) return rfl.make_layer({'class': 'pad', 'from': source, 'axes': axes, 'padding': padding, 'out_dims': out_dims, 'mode': mode, 'value': value}, name='pad') @staticmethod def cum_concat_step(source: Tensor, *, prev_accum: Tensor, axis: Dim, out_spatial_dim: Dim) -> Tensor: 'cum_concat_step' return rfl.make_layer({'class': 'cum_concat', 'from': source, 'state': {'state': prev_accum}, 'out_spatial_dim': out_spatial_dim, 'axis': axis}, name='cum_concat') @staticmethod def activation(tensor: Tensor, func: str) -> Tensor: 'activation' return rfl.make_layer({'class': 'activation', 'activation': func, 'from': tensor}, name=func) @staticmethod def activation_raw(raw_tensor: Layer, func: str) -> Layer: 'activation' return rfl.make_layer({'class': 'activation', 'activation': func, 'from': raw_tensor.tensor}, name=func).raw_tensor @staticmethod def safe_log(tensor: Tensor, *, eps: float) -> Tensor: 'safe log' return rfl.make_layer({'class': 'activation', 
'activation': 'safe_log', 'eps': eps, 'from': tensor}, name='safe_log') @staticmethod def softmax(tensor: Tensor, *, axis: Dim, use_mask: bool=True) -> Tensor: 'softmax' args = {} if (not use_mask): args['use_time_mask'] = False return rfl.make_layer({'class': 'softmax_over_spatial', 'axis': axis, 'from': tensor, **args}, name='softmax') @staticmethod def log_softmax(tensor: Tensor, *, axis: Dim, use_mask: bool=True) -> Tensor: 'log softmax' args = {} if (not use_mask): args['use_time_mask'] = False return rfl.make_layer({'class': 'softmax_over_spatial', 'axis': axis, 'from': tensor, 'log_space': True, **args}, name='log_softmax') @staticmethod def softmax_cross_entropy_with_logits(*, logits: Tensor, targets: Tensor, axis: Dim): "\n Efficient cross entropy.\n\n :param logits: target estimates given as inputs to softmax (i.e. unnormalized)\n :param targets: probabilities, i.e. normalized, can also be sparse\n :param axis: class labels dim over which softmax is computed\n :return: cross entropy (same Dims as 'logits' but without 'axis')\n " if targets.sparse_dim: assert ((axis == targets.sparse_dim) and (axis in logits.dims)) return rfl.make_layer({'class': 'sparse_softmax_cross_entropy_with_logits', 'logits': logits, 'targets': targets, 'axis': axis}, name='sparse_softmax_cross_entropy_with_logits') else: assert ((axis in targets.dims) and (axis in logits.dims)) log_probs = rf.log_softmax(logits, axis=axis) return (- rf.matmul(targets, log_probs, reduce=axis)) @staticmethod def ctc_loss(*, logits: Tensor, targets: Tensor, input_spatial_dim: Dim, targets_spatial_dim: Dim, blank_index: int, max_approx: bool=False) -> Tensor: 'CTC' assert (targets.sparse_dim and (targets.sparse_dim.dimension <= logits.feature_dim.dimension)) logits = rfl.make_layer({'class': 'reinterpret_data', 'from': logits, 'set_axes': {'T': input_spatial_dim}}, name='logits') targets = rfl.make_layer({'class': 'reinterpret_data', 'from': targets, 'set_axes': {'T': targets_spatial_dim}}, 
name='targets') return rfl.make_layer({'class': 'ctc_loss', 'logits': logits, 'targets': targets, 'blank_index': blank_index, 'max_approx': max_approx}, name='ctc_loss') @staticmethod def create_parameter_raw(tensor: rf.Parameter, *, device: Optional[str]=None) -> Layer: 'create parameter' return rfl.make_layer({'class': 'variable', 'shape': tensor.dims, 'dtype': tensor.dtype, 'param_name': 'param', 'param_device': device}, name=tensor.name, out=tensor).raw_tensor @staticmethod def set_parameter_initial_value(param: rf.Parameter[Layer], value: Union[(None, Tensor, rf.RawTensorTypes)]) -> None: 'set parameter initial value' if (value is None): param.raw_tensor.layer_dict.pop('init', None) param.raw_tensor.layer_dict.pop('init_by_layer', None) elif isinstance(value, Tensor): param.raw_tensor.layer_dict.pop('init', None) if (not value.raw_tensor.parent.can_access_children_from_root): accessible_parent = value.raw_tensor.parent while (not accessible_parent.can_access_children_from_root): accessible_parent = accessible_parent.parent value.raw_tensor.assign_parent(accessible_parent) for dep in value.raw_tensor.get_tensor_dependencies(): assert dep.parent.can_access_children_from_root, f'dep {dep} of moved value {value} is not accessible' param.raw_tensor.layer_dict['init_by_layer'] = value else: param.raw_tensor.layer_dict.pop('init_by_layer', None) param.raw_tensor.layer_dict['init'] = value if rfl.is_debug_eager_mode_enabled(): shape = [d.get_dim_value() for d in param.dims] if isinstance(value, Tensor): assert (value.placeholder is not None) value_tf = value.placeholder else: value_tf = tf.broadcast_to(tf.convert_to_tensor(value), shape) if (param.raw_tensor.debug_layer.output.placeholder is None): var = tf.Variable(value_tf, shape=[d.get_dim_value() for d in param.dims], dtype=param.dtype) param.raw_tensor.debug_layer.output.placeholder = var else: var = param.raw_tensor.debug_layer.output.placeholder assert isinstance(var, tf.Variable) var.assign(value_tf) 
    @staticmethod
    def set_parameter_trainable(param: rf.Parameter, trainable: bool) -> None:
        """Mark the parameter's layer as (non-)trainable; trainable is the RETURNN default, so pop the key then."""
        if trainable:
            param.raw_tensor.layer_dict.pop('trainable', None)
        else:
            param.raw_tensor.layer_dict['trainable'] = False

    @staticmethod
    def parameter_assign(param: rf.Parameter, value: Tensor, *, op: str = 'assign') -> None:
        """Assign (or accumulate into) a parameter; delegates to the parameter_assign module."""
        from .parameter_assign import parameter_assign
        parameter_assign(param=param, value=value, op=op)

    @staticmethod
    def convert_to_tensor(value: Union[(Tensor, Layer, RawTensorTypes)], *, dims: Sequence[Dim], dtype: str, sparse_dim: Optional[Dim] = None, device: Optional[str] = None, name: Optional[str] = None) -> Tensor[Layer]:
        """Wrap a raw value into a ConstantLayer tensor; a Tensor passes through unchanged. `device` is ignored here."""
        if isinstance(value, Tensor):
            return value
        kwargs = {}
        if sparse_dim:
            kwargs['sparse_dim'] = sparse_dim
        # shape_deps make sure dyn-dim source layers are constructed before this one.
        dim_deps = _dims.get_dim_deps(dims)
        if dim_deps:
            kwargs['shape_deps'] = dim_deps
        return rfl.make_layer({'class': 'constant', 'value': value, 'shape': dims, 'dtype': dtype, **kwargs}, name=(name or 'convert_to_tensor'))

    @staticmethod
    def full(dims: Sequence[Dim], fill_value: Union[(RawTensorTypes, Tensor)], *, dtype: str, device: Optional[str] = None, sparse_dim: Optional[Dim] = None, feature_dim: Optional[Dim] = None) -> Tensor:
        """Tensor of shape `dims` filled with `fill_value` (ConstantLayer); a Tensor fill broadcasts via + zeros_like."""
        if isinstance(fill_value, Tensor):
            return (fill_value + rf.zeros_like(fill_value))
        kwargs = {}
        if sparse_dim:
            kwargs['sparse_dim'] = sparse_dim
            # NOTE(review): feature_dim is only forwarded when sparse_dim is set — confirm this is intended
            # (cf. convert_to_tensor above, which never forwards feature_dim).
            kwargs['feature_dim'] = feature_dim
        dim_deps = _dims.get_dim_deps(dims)
        if dim_deps:
            kwargs['shape_deps'] = dim_deps
        return rfl.make_layer({'class': 'constant', 'value': fill_value, 'shape': dims, 'dtype': dtype, **kwargs}, name='full')

    @classmethod
    def compare(cls, a: Union[(Tensor, RawTensorTypes)], kind: str, b: Union[(Tensor, RawTensorTypes)], *, allow_broadcast_all_sources: Optional[bool] = None, dim_order: Optional[Sequence[Dim]] = None) -> Tensor:
        """Elementwise comparison (CompareLayer). NOTE(review): dim_order is accepted but unused here."""
        kwargs = {}
        if (allow_broadcast_all_sources is not None):
            kwargs['allow_broadcast_all_sources'] = allow_broadcast_all_sources
        a = rf.convert_to_tensor(a, _backend=cls)
        b = rf.convert_to_tensor(b, _backend=cls)
        return rfl.make_layer({'class': 'compare', 'from': [a, b], 'kind': kind, **kwargs}, name=kind)

    @classmethod
    def combine(cls, a: Union[(Tensor, RawTensorTypes)], kind: str, b: Union[(Tensor, RawTensorTypes)], *, allow_broadcast_all_sources: Optional[bool] = None, dim_order: Optional[Sequence[Dim]] = None) -> Tensor:
        """Elementwise binary op (CombineLayer). NOTE(review): dim_order is accepted but unused here."""
        kwargs = {}
        if (allow_broadcast_all_sources is not None):
            kwargs['allow_broadcast_all_sources'] = allow_broadcast_all_sources
        a = rf.convert_to_tensor(a, _backend=cls)
        b = rf.convert_to_tensor(b, _backend=cls)
        return rfl.make_layer({'class': 'combine', 'from': [a, b], 'kind': kind, **kwargs}, name=kind)

    @staticmethod
    def gather(source: Tensor, *, indices: Union[(Tensor, int)], axis: Dim, clip_to_valid: bool = False) -> Tensor:
        """Gather `indices` out of `axis` of `source` (GatherLayer)."""
        args = {}
        if clip_to_valid:
            args['clip_to_valid'] = clip_to_valid
        return rfl.make_layer({'class': 'gather', 'from': source, 'position': indices, 'axis': axis, **args}, name='gather')

    @staticmethod
    def slice(source: Tensor, *, axis: Dim, start: Optional[Union[(int, Tensor)]] = None, end: Optional[Union[(int, Tensor)]] = None, step: Optional[Union[(int, Tensor)]] = None, size: Optional[Union[(int, Tensor, Dim)]] = None, out_dim: Dim) -> Tensor:
        """Slice `axis`: with `size` -> SliceNdLayer (dynamic start), else static SliceLayer."""
        if (size is not None):
            assert (end is None)
            assert (step is None)
            assert (size is not None)  # NOTE(review): redundant (guarded by the `if` above)
            return rfl.make_layer({'class': 'slice_nd', 'from': source, 'axis': axis, 'start': start, 'size': size, 'out_spatial_dim': out_dim}, name='slice_nd')
        # Static slice path: only int (or None) boundaries are supported by SliceLayer.
        assert (size is None)
        assert isinstance(start, (int, type(None)))
        assert isinstance(end, (int, type(None)))
        assert isinstance(step, (int, type(None)))
        args = {}
        if (start is not None):
            args['slice_start'] = start
        if (end is not None):
            args['slice_end'] = end
        if (step is not None):
            args['slice_step'] = step
        return rfl.make_layer({'class': 'slice', 'from': source, 'axis': axis, 'out_dim': out_dim, **args}, name='slice')

    @staticmethod
    def where(cond: Tensor, true_: Union[(Tensor, rf.RawTensorTypes)], false_: Union[(Tensor, rf.RawTensorTypes)], *, allow_broadcast_all_sources: bool = False) -> Tensor:
        """Elementwise select via SwitchLayer."""
        # NOTE(review): bare expression below intentionally marks the parameter as consumed;
        # SwitchLayer does not take this option — confirm broadcasting behavior matches expectations.
        allow_broadcast_all_sources
        return rfl.make_layer({'class': 'switch', 'condition': cond, 'true_from': true_, 'false_from': false_}, name='where')

    @staticmethod
    def clip_by_value(x: Tensor, clip_value_min: Union[(Tensor, rf.RawTensorTypes)], clip_value_max: Union[(Tensor, rf.RawTensorTypes)], *, allow_broadcast_all_sources: bool = False) -> Tensor:
        """Clip x into [min, max], implemented via an EvalLayer calling tf.clip_by_value."""
        clip_value_min = rf.convert_to_tensor(clip_value_min, _backend=ReturnnLayersBackend)
        clip_value_max = rf.convert_to_tensor(clip_value_max, _backend=ReturnnLayersBackend)
        return rfl.make_layer({'class': 'eval', 'eval': 'tf.clip_by_value(source(0), source(1), source(2))', 'from': [x, clip_value_min, clip_value_max], 'allow_broadcast_all_sources': allow_broadcast_all_sources}, name='clip_by_value')

    @staticmethod
    def matmul(a: Tensor, b: Tensor, *, reduce: Union[(Dim, Sequence[Dim])], use_mask: bool = True) -> Tensor:
        """Generalized dot product reducing over `reduce` dims (DotLayer)."""
        args = {}
        if (not use_mask):
            args['use_mask'] = False
        return rfl.make_layer({'class': 'dot', 'from': [a, b], 'reduce': reduce, **args}, name='matmul')

    @staticmethod
    def range_over_dim(dim: Dim, *, dtype: Optional[str] = None, device: Optional[str] = None) -> Tensor:
        """[0, 1, ..., dim-1] over `dim` (RangeInAxisLayer); dtype defaults from the dim's dyn sizes."""
        if ((not dtype) and dim.dyn_size_ext):
            dtype = dim.dyn_size_ext.dtype
        if (not dtype):
            dtype = rf.get_default_array_index_dtype()
        return rfl.make_layer({'class': 'range_in_axis', 'from': _dims.get_dim_deps(dim), 'axis': dim, 'dtype': dtype, 'sparse': (dtype.startswith('int') or dtype.startswith('uint'))}, name='range_over_dim')

    @staticmethod
    def replace_dim(source: Tensor, *, in_dim: Dim, out_dim: Dim) -> Tensor:
        """
        :param source:
        :param in_dim:
        :param out_dim:
        :return: source with in_dim replaced by out_dim.
        """
        return rfl.make_layer({'class': 'reinterpret_data', 'set_dim_tags': {in_dim: out_dim}, 'from': source}, name='new_dim')

    @staticmethod
    def reduce(source: Tensor, *, mode: str, axis: Union[(Dim, Sequence[Dim])], use_mask: bool = True) -> Tensor:
        """Reduce over `axis` with the given mode (sum/max/...); see Backend._AllowedReduceModes."""
        assert (mode in Backend._AllowedReduceModes)
        kwargs = {}
        if (not use_mask):
            kwargs['use_time_mask'] = False
        return rfl.make_layer({'class': 'reduce', 'from': source, 'mode': mode, 'axis': axis, **kwargs}, name=f'reduce_{mode}')

    @staticmethod
    def top_k(source: Tensor, *, axis: Union[(Dim, Sequence[Dim])], k: Union[(int, Tensor)], k_dim: Optional[Dim] = None, sorted: bool = True) -> Tuple[(Tensor, Union[(Tensor, Sequence[Tensor])], Dim)]:
        """Top-k values and indices over one or multiple axes (TopKLayer)."""
        if (not k_dim):
            k_dim = Dim(k, name='top-k-dim')
        values = rfl.make_layer({'class': 'top_k', 'from': source, 'axis': axis, 'k': k, 'k_dim': k_dim, 'sorted': sorted}, name='top_k')
        if isinstance(axis, (tuple, list)):
            axes = axis
            single_axis = False
        else:
            assert isinstance(axis, Dim)
            axes = [axis]
            single_axis = True
        indices = []
        for (i, a) in enumerate(axes):
            assert isinstance(a, Dim)
            # TopKLayer exposes sub-outputs "indices" (single axis) or "indices0", "indices1", ...
            sub_name = ('indices' if single_axis else f'indices{i}')
            indices_data = values.copy_template(name=f'{values.name}_{sub_name}_{a.description}')
            indices_data.dtype = 'int32'
            indices_data.sparse_dim = a
            indices.append(rfl._get_sub_layer(values, sub_name, data=indices_data))
        if single_axis:
            indices = indices[0]
        return (values, indices, k_dim)

    @staticmethod
    @contextlib.contextmanager
    def random_journal_replay(journal: _random_journal.RandomJournal):
        """
        Replays the journal.
        At exit, the journal is cleared, and we check that we replayed everything.
        """
        try:
            ReturnnLayersBackend._random_journal = journal
            (yield)
        finally:
            ReturnnLayersBackend._random_journal = None

    # Journal used by random() for deterministic replay; set via random_journal_replay().
    _random_journal = None

    @staticmethod
    def random(*, dims: Sequence[Dim], dtype: str, device: Optional[str] = None, sparse_dim: Optional[Dim] = None, feature_dim: Optional[Dim] = None, distribution: str, mean: Optional[Union[(int, float, Tensor)]] = None, stddev: Optional[Union[(int, float, Tensor)]] = None, bound: Optional[Union[(int, float, Tensor)]] = None, minval: Optional[Union[(int, float, Tensor)]] = None, maxval: Optional[Union[(int, float, Tensor)]] = None, seed: Optional[Union[(int, Sequence[int], numpy.ndarray)]] = None, algorithm: Optional[str] = None, explicit_state: Optional[Tensor] = None, auto_update_state: Optional[bool] = None, static: Optional[bool] = None, out: Optional[Tensor] = None) -> Tensor:
        """Random tensor (RandomLayer), or a journal-replay EvalLayer when a random journal is active."""
        if ReturnnLayersBackend._random_journal:
            # Replay mode: emit an eval layer which pulls the recorded value from the journal.
            recent = ReturnnLayersBackend._random_journal.get_recent_graph_reader_node_in_accessible_ctx()
            out = rfl.make_layer({'class': 'eval', 'from': ([recent] if recent else []), 'eval': _random_replay_eval, 'eval_locals': {'idx': ReturnnLayersBackend._random_journal.get_graph_reader_idx()}, 'out_type': {'dims': dims, 'dtype': dtype, 'sparse_dim': sparse_dim, 'feature_dim': feature_dim}}, name='random_replay')
            if (out.control_flow_ctx and out.control_flow_ctx.is_loop()):
                # Inside a loop, chain via the prev iteration so replay order stays deterministic.
                out.raw_tensor.layer_dict['from'].append(rfl.PrevTensorRef.get_prev_ref(cur_layer_name_ctx=out.raw_tensor, initial=rf.zeros(dims, dtype=dtype, sparse_dim=sparse_dim, feature_dim=feature_dim)))
            ReturnnLayersBackend._random_journal.add_graph_reader_node(out)
            return out
        kwargs = {'mean': mean, 'stddev': stddev, 'bound': bound, 'minval': minval, 'maxval': maxval, 'seed': seed, 'algorithm': algorithm, 'explicit_state': explicit_state, 'auto_update_state': auto_update_state, 'static': static}
        kwargs = {k: v for (k, v) in kwargs.items() if (v is not None)}
        return rfl.make_layer({'class': 'random', 'shape': dims, 'shape_deps': _dims.get_dim_deps(dims), 'dtype': dtype, 'sparse_dim': sparse_dim, 'feature_dim': feature_dim, 'distribution': distribution, 'stop_grad': True, **kwargs}, name='random')

    @staticmethod
    def masked_select(tensor: Tensor, *, mask: Tensor, dims: Sequence[Dim], out_dim: Optional[Dim] = None) -> Tuple[(Tensor, Dim)]:
        """
        :param tensor:
        :param mask:
        :param dims: the order of the dims defines the format. those dims should be exactly the dims of the mask.
        :param out_dim:
        :return: tensor where all dims in mask/dims are removed and replaced by a new dim.
            the new dim is also returned.
            if mask==True for all elements, the returned tensor would be simply the flattened input tensor.
        """
        assert (mask.dtype == 'bool')
        assert (set(mask.dims) == set(dims))
        assert set(mask.dims).issubset(set(tensor.dims))
        if (not out_dim):
            out_dim = Dim(None, name='mask')
        return (rfl.make_layer({'class': 'boolean_mask', 'from': tensor, 'mask': mask, 'dims': dims, 'out_dim': out_dim}, name='boolean_mask'), out_dim)

    @staticmethod
    def batch_norm(source: Tensor, *, in_dim: Union[(Dim, Sequence[Dim])], running_mean: Tensor, running_variance: Tensor, gamma: Optional[Tensor], beta: Optional[Tensor], epsilon: float, momentum: float, affine: bool, use_mask: bool) -> Tensor:
        """Batch norm (BatchNormLayer, param_version 2), wiring external stats/affine params via reuse_params."""
        reuse_params = {'batch_norm/v2_mean': running_mean, 'batch_norm/v2_variance': running_variance}
        if affine:
            reuse_params['batch_norm/v2_gamma'] = gamma
            reuse_params['batch_norm/v2_beta'] = beta
        reuse_params = {'map': {k: {'layer_output': v} for (k, v) in reuse_params.items()}}
        return rfl.make_layer({'class': 'batch_norm', 'from': source, 'in_dim': in_dim, 'use_std': affine, 'use_shift': affine, 'param_version': 2, 'reuse_params': reuse_params, 'momentum': momentum, 'epsilon': epsilon, 'masked_time': use_mask}, name='batch_norm')

    @staticmethod
    def conv(source: Tensor, *, in_dim: Dim, out_dim: Dim, in_spatial_dims: Sequence[Dim], out_spatial_dims: Optional[Sequence[Dim]] = None, filter: Tensor, filter_size: Sequence[Dim], padding: str, strides: Optional[Union[(int, Sequence[int])]] = None, dilation_rate: Optional[Union[(int, Sequence[int])]] = None, groups: Optional[int] = None, bias: Optional[Tensor] = None) -> Tuple[(Tensor, Sequence[Dim])]:
        """Convolution (ConvLayer) with externally provided filter (and optional bias)."""
        if (not out_spatial_dims):
            out_spatial_dims = rf.make_conv_out_spatial_dims(description_prefix='conv', in_spatial_dims=in_spatial_dims, filter_size=[d.dimension for d in filter_size], strides=(strides or 1), dilation_rate=(dilation_rate or 1), padding=padding)
        layer_dict = {'class': 'conv', 'from': source, 'in_dim': in_dim, 'in_spatial_dims': in_spatial_dims, 'out_dim': out_dim, 'out_spatial_dims': out_spatial_dims, 'filter_size': filter_size, 'padding': padding}
        if strides:
            layer_dict['strides'] = strides
        if dilation_rate:
            layer_dict['dilation_rate'] = dilation_rate
        if groups:
            layer_dict['groups'] = groups
        layer_dict.update({'filter': filter, 'with_bias': (bias is not None)})
        if (bias is not None):
            layer_dict['bias'] = bias
        out = rfl.make_layer(layer_dict, name='conv')
        return (out, out_spatial_dims)

    @staticmethod
    def transposed_conv(source: Tensor, *, in_dim: Dim, out_dim: Dim, in_spatial_dims: Sequence[Dim], out_spatial_dims: Optional[Sequence[Dim]] = None, filter: Tensor, filter_size: Sequence[Dim], padding: str, remove_padding: Union[(Sequence[int], int)] = 0, output_padding: Optional[Union[(Sequence[Optional[int]], int)]] = None, strides: Optional[Sequence[int]] = None, bias: Optional[Tensor] = None) -> Tuple[(Tensor, Sequence[Dim])]:
        """Transposed convolution (TransposedConvLayer) with externally provided filter/bias."""
        if (not out_spatial_dims):
            out_spatial_dims = [Dim(None, name=f'out-spatial-dim{i}') for (i, s) in enumerate(filter_size)]
            for i in range(len(filter_size)):
                # When stride (default: filter size) is 1 and it cannot change the length,
                # the spatial dim is unchanged, so reuse the input dim tag.
                s = (filter_size[i].dimension if (not strides) else strides[i])
                if ((filter_size[i].dimension == s == 1) or ((s == 1) and (padding.lower() == 'same'))):
                    out_spatial_dims[i] = in_spatial_dims[i]
        layer_dict = {'class': 'transposed_conv', 'from': source, 'in_dim': in_dim, 'in_spatial_dims': in_spatial_dims, 'out_dim': out_dim, 'out_spatial_dims': out_spatial_dims, 'filter_size': filter_size, 'padding': padding}
        if remove_padding:
            layer_dict['remove_padding'] = remove_padding
        if output_padding:
            layer_dict['output_padding'] = output_padding
        if strides:
            layer_dict['strides'] = strides
        layer_dict.update({'filter': filter, 'with_bias': (bias is not None)})
        if (bias is not None):
            layer_dict['bias'] = bias
        out = rfl.make_layer(layer_dict, name='transposed_conv')
        return (out, out_spatial_dims)

    @staticmethod
    def pool(source: Tensor, *, mode: str, pool_size: Sequence[int], padding: str = 'valid', dilation_rate: Union[(Sequence[int], int)] = 1, strides: Sequence[int], in_spatial_dims: Sequence[Dim], out_spatial_dims: Optional[Sequence[Dim]] = None) -> Tuple[(Tensor, Sequence[Dim])]:
        """Pooling (PoolLayer). PoolLayer needs some in_dim; pick the feature dim, any leftover dim, or add a dummy."""
        if (out_spatial_dims is None):
            out_spatial_dims = rf.make_conv_out_spatial_dims(description_prefix='pool', in_spatial_dims=in_spatial_dims, filter_size=pool_size, strides=strides, dilation_rate=dilation_rate, padding=padding)
        other_dims = [d for d in source.dims if ((d not in in_spatial_dims) and (not d.is_batch_dim()))]
        dummy_in_dim = None
        if (source.feature_dim and (source.feature_dim in other_dims)):
            in_dim = source.feature_dim
        elif other_dims:
            in_dim = other_dims[(- 1)]
        else:
            # No non-spatial non-batch dim available: add a dummy size-1 channel dim.
            dummy_in_dim = Dim(1, name='dummy_in')
            in_dim = dummy_in_dim
            source = rf.expand_dim(source, dim=dummy_in_dim)
        assert source.have_batch_axis(), 'PoolLayer without batch dim not implemented'
        args = {'mode': mode, 'pool_size': pool_size, 'padding': padding, 'dilation_rate': dilation_rate, 'strides': strides, 'in_spatial_dims': in_spatial_dims, 'out_spatial_dims': out_spatial_dims, 'in_dim': in_dim}
        layer = rfl.make_layer({'class': 'pool', 'from': source, **args}, name='pool')
        if dummy_in_dim:
            layer = rf.squeeze(layer, axis=dummy_in_dim)
        if (source.feature_dim != in_dim):
            # Restore the original feature-dim marker which PoolLayer may have changed.
            layer = rfl.make_layer({'class': 'reinterpret_data', 'from': layer, 'set_axes': {'F': source.feature_dim}}, name='pool_reset_feature')
        return (layer, out_spatial_dims)

    @staticmethod
    def stft(x: Tensor, *, in_spatial_dim: Dim, frame_step: int, frame_length: int, fft_length: int, window_use_frame_length: bool = True, align_window_left: bool = True, window_enforce_even: bool = True, out_spatial_dim: Dim, out_dim: Dim) -> Tensor:
        """STFT (StftLayer); only the layer's default windowing conventions are supported."""
        if (frame_length < fft_length):
            assert window_use_frame_length, 'not implemented otherwise'
            assert align_window_left, 'not implemented otherwise'
        if ((fft_length % 2) != 0):
            assert window_enforce_even, 'not implemented otherwise'
        return rfl.make_layer({'class': 'stft', 'from': x, 'in_spatial_dims': [in_spatial_dim], 'out_spatial_dims': [out_spatial_dim], 'out_dim': out_dim, 'frame_shift': frame_step, 'frame_size': frame_length, 'fft_size': fft_length})

    @staticmethod
    def lstm(source: Tensor, *, state_h: Tensor, state_c: Tensor, ff_weight: Tensor, rec_weight: Tensor, bias: Tensor, spatial_dim: Dim, in_dim: Dim, out_dim: Dim) -> Tuple[(Tensor, Tuple[(Tensor, Tensor)])]:
        """
        :return: output, (h, c)
        """
        from ._utils import get_last_hidden_state
        # The RF gate order differs from RETURNN's native "lstm" unit; split the packed
        # 4*out_dim params into the 4 gates and re-concatenate in the layer's expected order.
        out_dim_ = out_dim.copy(same_as_self=False, description='(out-dim)')
        rec_weight_ = rf.split(rec_weight, axis=(4 * out_dim), out_dims=([out_dim_] * 4))
        ff_weight_ = rf.split(ff_weight, axis=(4 * out_dim), out_dims=([out_dim_] * 4))
        bias_ = rf.split(bias, axis=(4 * out_dim), out_dims=([out_dim_] * 4))
        (rec_weight, _) = rf.concat((rec_weight_[2], out_dim_), (rec_weight_[0], out_dim_), (rec_weight_[1], out_dim_), (rec_weight_[3], out_dim_))
        (ff_weight, _) = rf.concat((ff_weight_[2], out_dim_), (ff_weight_[0], out_dim_), (ff_weight_[1], out_dim_), (ff_weight_[3], out_dim_))
        (bias, _) = rf.concat((bias_[2], out_dim_), (bias_[0], out_dim_), (bias_[1], out_dim_), (bias_[3], out_dim_))
        (rec_weight, _) = rf.replace_dim(rec_weight, in_dim=(4 * out_dim_), out_dim=(4 * out_dim))
        (ff_weight, _) = rf.replace_dim(ff_weight, in_dim=(4 * out_dim_), out_dim=(4 * out_dim))
        (bias, _) = rf.replace_dim(bias, in_dim=(4 * out_dim_), out_dim=(4 * out_dim))
        output = rfl.make_layer({'class': 'rec', 'from': source, 'in_dim': in_dim, 'axis': spatial_dim, 'out_dim': out_dim, 'unit': 'lstm', 'reuse_params': {'map': {'W_re': {'layer_output': rec_weight, 'shape': (out_dim, (4 * out_dim))}, 'W': {'layer_output': ff_weight, 'shape': (in_dim, (4 * out_dim))}, 'b': {'layer_output': bias, 'shape': ((4 * out_dim),)}}}, 'initial_state': {'h': state_h, 'c': state_c}}, name='lstm')
        h = get_last_hidden_state(output, out_dim=out_dim, key='h')
        c = get_last_hidden_state(output, out_dim=out_dim, key='c')
        return (output, (h, c))
def _random_replay_eval(*, self, source, idx: int, **_kwargs): from returnn.tf.layers.basic import LayerBase assert isinstance(self, LayerBase) idx def _py_func() -> numpy.ndarray: elem = ReturnnLayersBackend._random_journal.get_next(new_out_template=self.output) assert isinstance(elem.out, Tensor) assert isinstance(elem.out.raw_tensor, numpy.ndarray) return elem.out.raw_tensor def _func() -> tf.Tensor: (out,) = tf.numpy_function(_py_func, [], [self.output.dtype]) assert isinstance(out, tf.Tensor) out.set_shape(self.output.batch_shape) return out with (tf.control_dependencies([source(i, auto_convert=False) for i in range(len(self.sources))]) if self.sources else contextlib.nullcontext()): return _func()
def unique_tensor_list(tensors: Iterable[Tensor]) -> List[Tensor]: '\n :param list[Tensor] tensors:\n :return: list with unique tensors\n :rtype: list[Tensor]\n ' seen = set() out = [] for tensor in tensors: if (RefIdEq(tensor) not in seen): out.append(tensor) seen.add(RefIdEq(tensor)) return out
def copy(tensor: Tensor[rfl.Layer], *, name: Union[(rfl.Layer, str)]) -> Tensor[rfl.Layer]: 'copy' return rfl.make_layer({'class': 'copy', 'from': tensor}, name=name)
def identity_with_control_deps(tensor: Tensor[rfl.Layer], control_deps: Sequence[Tensor[rfl.Layer]], *, name: Optional[Union[(str, rfl.Layer)]]=None) -> Tensor[rfl.Layer]: '\n :param tensor:\n :param control_deps:\n :param name:\n :return: tensor with control deps\n ' return rfl.make_layer({'class': 'identity', 'from': tensor, 'control_dependencies': control_deps}, name=name)
def constant(value: Union[(int, float)], *, name: Union[(str, rfl.Layer)]): 'constant' return rfl.make_layer({'class': 'constant', 'value': value, 'is_output_layer': True}, name=name)
def constant_value(x: Tensor[rfl.Layer]) -> Optional[Union[(int, float, complex, bool, str)]]: '\n If the tensor is a constant, return its value.\n ' if (x.raw_tensor.layer_dict and (x.raw_tensor.layer_dict['class'] == 'constant')): return x.raw_tensor.layer_dict['value'] return None
def zeros_like_as_output_in_scope(tensor: Tensor, *, name: rfl.Layer): '\n :param tensor:\n :param name:\n :return:\n ' args = {} if tensor.sparse_dim: args['sparse_dim'] = tensor.sparse_dim shape_deps = rfl.get_dim_deps(tensor.dims) if shape_deps: args['shape_deps'] = shape_deps res = rfl.make_layer({'class': 'constant', 'value': 0, 'shape': tensor.dims, 'dtype': tensor.dtype, **args, 'is_output_layer': True}, name=name) name.parent.marked_outputs.append(res) return res
def mark_as_output_in_scope(tensor: Tensor, scope: rfl.Layer) -> Tensor: '\n Mark this as an output.\n ' assert tensor.raw_tensor.layer_dict, f'mark_as_output can only be called on a layer, not a layer-ref {tensor}.' res = tensor if (tensor.raw_tensor is scope.children.get('output')): pass elif (tensor.raw_tensor.parent is not scope): res = copy(tensor, name=scope.get_new_child(suggested_name=tensor.raw_tensor.get_abs_name(join_str='_'))) res.raw_tensor.layer_dict['is_output_layer'] = True else: assert (tensor.raw_tensor.parent is scope) assert tensor.raw_tensor.layer_dict tensor.raw_tensor.layer_dict['is_output_layer'] = True scope.marked_outputs.append(res) return res
def get_last_hidden_state(source: Tensor, *, out_dim: Optional[Dim]=NotSpecified, combine: str=NotSpecified, key: Optional[Union[(str, int)]]=NotSpecified) -> Tensor: '\n Will combine (concat or add or so) all the last hidden states from all sources.\n\n :param nn.Tensor source:\n :param nn.Dim|None out_dim:\n :param str combine: "concat" or "add"\n :param str|int|None key: for the state, which could be a namedtuple. see :func:`RnnCellLayer.get_state_by_key`\n :return: layer\n ' if (not isinstance(source, Tensor)): raise TypeError(f'_get_last_hidden_state: unexpected type for source {source!r}, need tensor') args = {'out_dim': out_dim, 'combine': combine, 'key': key} args = {key: value for (key, value) in args.items() if (value is not NotSpecified)} return rfl.make_layer({'class': 'get_last_hidden_state', 'from': source, **args}, name='get_last_hidden_state')
class Cond(Generic[T]):
    """
    Conditional branching. Basically behaves like ``if ... else ...``.
    Only one branch will be executed, and the condition needs to be a bool scalar.
    This wraps to :class:`CondLayer` in RETURNN and to ``tf.cond`` in TensorFlow.

    Example::

        with Cond(cond) as cond_obj:
            cond_obj.true = mod_true_case(x)
            cond_obj.false = mod_false_case(x)
            y = cond_obj.result

    Corresponds to::

        if cond:
            y = mod_true_case(x)
        else:
            y = mod_false_case(x)

    The context scope has two states corresponding to the True and False computation branch.
    The initial state is the True branch.
    Assigning ``cond_obj.true`` has the side effect of switching the computation to the False branch.
    """

    def __init__(self, condition: Tensor, *, name: str = 'cond'):
        """
        :param condition: bool scalar tensor selecting the branch
        :param name: suggested layer name for the resulting CondLayer
        """
        self.condition = condition
        self._entered = False
        self._entered_state = True  # True -> currently building the True branch
        self._true_value = None
        self._true_value_set = False
        self._false_value = None
        self._false_value_set = False
        self._result_value = None
        self.layer_module = CondModule(cond=self)
        # Main name ctx for the CondLayer itself; branch subnets live below it.
        self.name_ctx = rfl.Layer(module=self.layer_module, suggested_name=name, parent=rfl.Layer.current_ctx(), can_access_children=False)
        self.name_ctx.custom_layer_name_scope = ''
        self.true_branch_control_flow_ctx = ControlFlowContext(kind=ControlFlowContext.Types.CondTrue, outer_ctx=self.name_ctx.control_flow_ctx(), identifier=self.name_ctx.get_abs_name())
        self.true_branch_name_ctx = rfl.Layer(module=self.layer_module, suggested_name='true', parent=self.name_ctx, virtual=True, can_access_children=False, new_control_flow_ctx=self.true_branch_control_flow_ctx)
        self.true_branch_name_ctx.is_subnet = True
        self.true_branch_name_ctx.extend_reserved_names({'output'})
        # Both branches share the same control-flow identifier (same cond scope).
        self.false_branch_control_flow_ctx = ControlFlowContext(kind=ControlFlowContext.Types.CondFalse, outer_ctx=self.name_ctx.control_flow_ctx(), identifier=self.true_branch_control_flow_ctx.identifier)
        self.false_branch_name_ctx = rfl.Layer(module=self.layer_module, suggested_name='false', parent=self.name_ctx, virtual=True, can_access_children=False, new_control_flow_ctx=self.false_branch_control_flow_ctx)
        self.false_branch_name_ctx.is_subnet = True
        self.false_branch_name_ctx.extend_reserved_names({'output'})
        # Extra side-effect ops (e.g. assigns) to be forced as outputs of each branch.
        self._extra_ops_true_branch: List[Tensor] = []
        self._extra_ops_false_branch: List[Tensor] = []
        # Hooks to run right after entering / right after finishing the False branch.
        self._false_branch_prehooks: List[Callable[[], Any]] = []
        self._false_branch_posthooks: List[Callable[[], Any]] = []

    def __repr__(self):
        return f'Cond{self.name_ctx}'

    def __enter__(self):
        assert (not self._entered), f'{self} cannot enter twice'
        self._entered = True
        self._entered_state = True
        # Start building inside the True branch subnet.
        self.true_branch_name_ctx.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._entered:
            # If we abort mid-way, make sure whichever branch ctx is still open gets closed.
            if (self._true_value is None):
                self.true_branch_name_ctx.__exit__(exc_type, exc_val, exc_tb)
            elif (self._false_value is None):
                self.false_branch_name_ctx.__exit__(exc_type, exc_val, exc_tb)
        if (not exc_type):
            assert self._entered
            assert (self._true_value is not None), f'{self} you need to call else_()'
            assert (self._false_value is not None), f'{self} you need to call end()'
        self._entered = False

    @property
    def true(self) -> T:
        """
        The getter usually would not be used.
        """
        return self._true_value

    @true.setter
    def true(self, true_value: T):
        """
        Defines the True branch value.
        Enter the False branch.
        Assign self.false afterwards.
        """
        assert self._entered, f'{self} you need to be in the context scope'
        assert (self._entered_state is True), f'{self} you cannot enter the else branch twice'
        assert (not self._true_value_set)
        if isinstance(true_value, Tensor):
            true_value = _utils.copy(true_value, name=self.true_branch_name_ctx.get_child('output'))
        else:
            # Arbitrary nested structure: first element becomes the subnet "output",
            # the rest become extra marked outputs.
            values_flat = nest.flatten(true_value)
            assert values_flat
            for (i, v) in enumerate(values_flat):
                if (v is None):
                    v = rf.zeros((), dtype='int32')  # placeholder for None entries
                assert isinstance(v, Tensor), f'unexpected {true_value!r}, only expects tensors, got {type(v)}'
                if (i == 0):
                    values_flat[i] = _utils.copy(v, name=self.true_branch_name_ctx.get_child('output'))
                else:
                    values_flat[i] = _utils.mark_as_output_in_scope(v, scope=self.true_branch_name_ctx)
            if (len(values_flat) == 0):
                # Empty structure: subnet still needs an "output" layer.
                _utils.copy(rf.zeros((), dtype='int32'), name=self.true_branch_name_ctx.get_child('output'))
            true_value = nest.pack_sequence_as(true_value, values_flat)
        for op in self._extra_ops_true_branch:
            _utils.mark_as_output_in_scope(op, scope=self.true_branch_name_ctx)
        # Switch scopes: leave the True branch, enter the False branch.
        self.true_branch_name_ctx.__exit__(None, None, None)
        self.false_branch_name_ctx.__enter__()
        # Reserve the True-branch output names so the False branch uses matching names.
        self.false_branch_name_ctx.extend_reserved_names({v.raw_tensor.name for v in self.true_branch_name_ctx.marked_outputs})
        self._true_value = true_value
        self._true_value_set = True
        self._entered_state = False
        for hook in self._false_branch_prehooks:
            hook()

    @property
    def false(self) -> T:
        """
        The getter usually would not be used.
        """
        return self._false_value

    @false.setter
    def false(self, false_value: T):
        """
        Define the False branch value.
        After this, self.result is available.
        """
        assert self._entered, f'{self} you need to be in the context scope'
        assert (self._entered_state is False), f'{self} you need to be in the False branch, have assigned :func:`true` before'
        assert (not self._false_value_set)
        # Both branches must return the same nested structure.
        nest.assert_same_structure(self._true_value, false_value)
        if isinstance(false_value, Tensor):
            false_value = _utils.copy(false_value, name=self.false_branch_name_ctx.get_child('output'))
        else:
            true_values_flat = nest.flatten(self._true_value)
            false_values_flat = nest.flatten(false_value)
            assert (false_values_flat and (len(false_values_flat) == len(true_values_flat)))
            for (i, (true_v, false_v)) in enumerate(zip(true_values_flat, false_values_flat)):
                assert isinstance(true_v, Tensor)
                if (false_v is None):
                    false_v = rf.zeros((), dtype='int32')
                else:
                    assert isinstance(false_v, Tensor), f'unexpected {false_value!r}, only expects tensors, got {type(false_v)}'
                assert (true_v.raw_tensor.parent is self.true_branch_name_ctx)
                # Mirror the True-branch layer name in the False branch subnet.
                name = true_v.raw_tensor.name
                assert (name not in self.false_branch_name_ctx.children)
                false_values_flat[i] = _utils.copy(false_v, name=self.false_branch_name_ctx.get_child(name))
                if (name != 'output'):
                    false_values_flat[i].raw_tensor.layer_dict['is_output_layer'] = True
            if (len(false_values_flat) == 0):
                # Empty structure: the False subnet also needs its own "output" layer.
                # Fixed: this previously targeted true_branch_name_ctx, leaving the False
                # branch subnet without an output layer.
                _utils.copy(rf.zeros((), dtype='int32'), name=self.false_branch_name_ctx.get_child('output'))
            false_value = nest.pack_sequence_as(false_value, false_values_flat)
        # Any True-branch output without a False counterpart gets a zeros placeholder,
        # so both branch subnets expose the same set of outputs.
        for true_out in self.true_branch_name_ctx.marked_outputs:
            name = true_out.raw_tensor.name
            if (name in self.false_branch_name_ctx.children):
                continue
            _utils.zeros_like_as_output_in_scope(true_out, name=self.false_branch_name_ctx.get_child(name))
        for op in self._extra_ops_false_branch:
            op = _utils.mark_as_output_in_scope(op, scope=self.false_branch_name_ctx)
            name = op.raw_tensor.name
            assert (name not in self.true_branch_name_ctx.children)
            # Mirror the extra op with a zeros placeholder in the True branch.
            _utils.zeros_like_as_output_in_scope(op, name=self.true_branch_name_ctx.get_child(name))
        self.false_branch_name_ctx.__exit__(None, None, None)
        self._false_value = false_value
        self._false_value_set = True
        for cb in self._false_branch_posthooks:
            cb()
        # Both branches complete: build the actual CondLayer now.
        self._result_value = self.layer_module()

    @property
    def result(self) -> T:
        """
        :return: the result, after you assigned :func:`true` and :func:`false`.
        """
        assert self._true_value_set, f'{self} you need to have defined the true value'
        assert self._false_value_set, f'{self} you need to have defined the false value'
        return self._result_value

    def add_op_to_current_branch(self, op: Tensor):
        """
        :param op: like an assign_op. the value of the tensor is irrelevant, the underlying op is relevant
        """
        assert self._entered, f'{self} you need to be in the context scope'
        (self._extra_ops_true_branch if self._entered_state else self._extra_ops_false_branch).append(op)

    def add_other_branch_prehook(self, callback: Callable[[], Any]):
        """add prehook to the other branch"""
        assert self._entered, f'{self} you need to be in the context scope'
        if (not self._entered_state):
            # Currently in the False branch; the True branch has already been built.
            return
        self._false_branch_prehooks.insert(0, callback)

    def add_other_branch_posthook(self, callback: Callable[[], Any]):
        """add posthook to the other branch"""
        assert self._entered, f'{self} you need to be in the context scope'
        if (not self._entered_state):
            # Currently in the False branch; the True branch has already been built.
            return
        self._false_branch_posthooks.append(callback)
class CondModule(rf.Module):
    """
    This module is used internally by :class:`Cond` to create the RETURNN :class:`CondLayer` for the conditional code.
    This module would not be directly used by the user.
    """

    def __init__(self, cond: Cond):
        super(CondModule, self).__init__()
        self.cond = cond  # the Cond context object whose branches we serialize

    def __call__(self):
        """
        Makes layer dict for this loop, i.e. a RecLayer.

        Builds one RETURNN "cond" layer whose true/false branches are the two
        subnetworks recorded in the :class:`Cond` object, then maps each flat
        output of the true branch to a corresponding sub-layer of the cond layer.

        :return: structure like true_value/false_value
        """
        name_ctx = self.cond.name_ctx
        (true_value, false_value) = (self.cond._true_value, self.cond._false_value)
        # Both branches must yield the same nested structure (flattened pairwise below).
        true_values_flat = nest.flatten(true_value)
        false_values_flat = nest.flatten(false_value)
        assert (len(true_values_flat) == len(false_values_flat))
        res = rfl.make_layer(
            {
                'class': 'cond',
                'from': [],
                'condition': self.cond.condition,
                'true_layer': {'class': 'subnetwork', 'from': [],
                               'subnetwork': self.cond.true_branch_name_ctx.make_net()},
                'false_layer': {'class': 'subnetwork', 'from': [],
                                'subnetwork': self.cond.false_branch_name_ctx.make_net()},
            },
            name=name_ctx,
            # The cond layer's main output corresponds to the first flat value;
            # dummy scalar if the branches return nothing.
            predefined_out_data=(true_values_flat[0].copy_template()
                                 if true_values_flat else Tensor('dummy', (), 'int32')))
        if true_values_flat:
            results = []
            for (i, (true_v, false_v)) in enumerate(zip(true_values_flat, false_values_flat)):
                assert (isinstance(true_v, Tensor) and isinstance(false_v, Tensor))
                true_v: Tensor[rfl.Layer]
                false_v: Tensor[rfl.Layer]
                assert (true_v.raw_tensor.parent is self.cond.true_branch_name_ctx)
                name = true_v.raw_tensor.name
                if (i == 0):
                    # First flat value is the cond layer's own output.
                    results.append(res)
                else:
                    # Further values are accessed as sub-layers of the cond layer.
                    template = true_v.copy_template_set_ctx(name_ctx.control_flow_ctx())
                    results.append(rfl._get_sub_layer(res, name, data=template))
                # Keep explicit dependency/usage edges so dead-code cleanup sees them.
                results[(- 1)].raw_tensor.layer_extra_dependencies.extend(
                    (self.cond.condition.raw_tensor, true_v.raw_tensor, false_v.raw_tensor))
                true_v.raw_tensor.usages.append(results[(- 1)].raw_tensor)
                false_v.raw_tensor.usages.append(results[(- 1)].raw_tensor)
            res = nest.pack_sequence_as(true_value, results)
            if (not results):
                results = [res]
        else:
            results = [res]
        if (self.cond._extra_ops_true_branch or self.cond._extra_ops_false_branch):
            # Extra side-effect ops (e.g. assigns) exist: force the first result to be
            # a marked root output so the branch subnetworks are not pruned.
            assert (not name_ctx.inner_control_flow())
            out = results[0]
            with rf.control_flow_ctx(name_ctx.control_flow_ctx()):
                out = _utils.copy(out, name=name_ctx.root.get_new_child(out.name))
            out.raw_tensor.layer_dict['is_output_layer'] = True
            name_ctx.root.marked_outputs.append(out)
        return res
def get_net_dict(*, epoch: int, step: int) -> Tuple[Dict[str, Any], rf.Module]:
    """
    Called from the RETURNN config.

    Builds the model via the config's ``get_model``, runs the config's
    ``train_step``/``forward_step`` function to trace the computation,
    and converts the traced root name-ctx into a raw RETURNN net dict.

    :param epoch: current epoch (1-based in RETURNN)
    :param step: current global train step
    :return: (net dict, model)
    """
    BehaviorVersion.set_min_behavior_version(rfl.min_returnn_behavior_version)
    rf.select_backend_returnn_layers_tf()
    rfl.Layer.reset_default_root()
    config = get_global_config()
    random_seed = config.int('random_seed', 42)
    # Mix epoch/step into the seed so each (epoch, step) gets a distinct but deterministic seed.
    random_seed = (((((epoch * 193939) + (step * 19937)) + (random_seed * 27644437)) + 479001599) % (2 ** 31))
    rf.set_random_seed(random_seed)
    extern_data = TensorDict()
    extern_data_dict = config.typed_value('extern_data')
    extern_data.update(extern_data_dict, auto_convert=True)
    assert isinstance(extern_data, TensorDict)
    for data in extern_data.data.values():
        rfl.register_extern_data(data)
    get_model_func = config.typed_value('get_model')
    model = get_model_func(epoch=epoch, step=step)
    task = config.value('task', None)
    # Scalar tensors for step/epoch, available to the step functions via the run ctx.
    step_tensor = rfl.make_layer({'class': 'global_train_step'}, name='global_train_step')
    epoch_tensor = rfl.make_layer(
        {'class': 'eval', 'eval': _eval_func_get_epoch, 'from': (),
         'out_type': {'dims': (), 'dtype': 'int32'}}, name='epoch')
    if (task in {'train', 'eval'}):
        rf.init_train_step_run_ctx(
            train_flag=rfl.make_layer({'class': 'train_flag'}, name='train_flag'),
            step=step_tensor, epoch=epoch_tensor)
        train_step_func = get_global_config().typed_value('train_step')
        train_step_func(model=model, extern_data=extern_data)
    elif (task in {'forward', 'search'}):
        rf.init_forward_step_run_ctx(step=step_tensor, epoch=epoch_tensor)
        forward_step_func = get_global_config().typed_value('forward_step')
        forward_step_func(model=model, extern_data=extern_data)
    else:
        raise ValueError(f'invalid task {task!r}')
    root_scope = rfl.Layer.top().root
    # Translate losses marked in the run ctx into RETURNN layers with loss="as_is".
    for loss in rf.get_run_ctx().losses.values():
        loss_t = _utils.copy(loss.loss, name=root_scope.get_new_child(suggested_name=loss.name))
        loss_t.raw_tensor.layer_dict['loss'] = 'as_is'
        loss_opts = {}
        if (loss.scale != 1):
            assert ('loss_scale' not in loss_t.raw_tensor.layer_dict)
            loss_opts['scale'] = loss.scale
        if loss.as_error:
            loss_opts['as_error'] = True
        if loss.use_normalized_loss:
            loss_opts['use_normalized_loss'] = True
        if (not loss.use_flatten_frames):
            loss_opts['use_flatten_frames'] = False
        if (loss.custom_inv_norm_factor is not None):
            loss_opts['custom_inv_norm_factor'] = loss.custom_inv_norm_factor
        if loss_opts:
            loss_t.raw_tensor.layer_dict['loss_opts'] = loss_opts
        root_scope.marked_losses.append(loss_t)
    # Translate marked outputs; the one named "output" becomes the net's default output.
    for out in rf.get_run_ctx().outputs.data.values():
        if ((out.name == 'output') and (out.name not in root_scope.children)):
            layer = root_scope.get_child(out.name)
        else:
            layer = root_scope.get_new_child(suggested_name=out.name)
        out_t = _utils.copy(out, name=layer)
        if (layer.name != 'output'):
            out_t.raw_tensor.layer_dict['is_output_layer'] = True
        root_scope.marked_outputs.append(out_t)
    net_dict = root_scope.get_returnn_config(root_module=model).get_net_dict_raw_dict()

    def _cleanup_net_dict_value(elem):
        # Dim tags may still carry batch info / raw placeholders from tracing;
        # reset so the serialized net dict is side-effect free.
        assert (not isinstance(elem, Tensor)), f'not expected to see Tensor {elem} in net dict'
        if isinstance(elem, Dim):
            elem.reset_batch_and_raw()
        return elem

    nest.map_structure(_cleanup_net_dict_value, net_dict)
    _cleanup_net_dict_value(batch_dim)
    for data in extern_data.data.values():
        for dim in data.dims:
            _cleanup_net_dict_value(dim)
    return (net_dict, model)
def _eval_func_get_epoch(self: LayerBase, **_kwargs) -> tf.Tensor:
    """
    Eval-layer function: expose the current epoch as a scalar int32 TF tensor.

    The epoch is read from the root network's run opts via a stateful py_func,
    so the Python-side value is looked up again on every session run instead of
    being folded into the graph as a constant.
    """
    opts = self.network.get_root_network().get_run_opts()

    def _read_epoch() -> int:
        # Late dict lookup: the run opts entry can change between runs.
        return opts['epoch']

    outputs = tf_compat.v1.py_func(_read_epoch, [], [tf.int32], stateful=True)
    epoch_t = outputs[0]
    assert isinstance(epoch_t, tf.Tensor)
    epoch_t.set_shape(())
    return epoch_t
def enable_debug_eager_mode():
    """
    Debugging helper: switch TensorFlow into eager execution.

    With eager mode on, every layer is created immediately and — because of
    eager execution — also evaluated immediately.
    """
    global _debug_eager_mode_enabled
    import tensorflow

    tensorflow.compat.v1.enable_eager_execution()
    _debug_eager_mode_enabled = True
def disable_debug_eager_mode():
    """
    For debugging.

    Disables TF eager mode again (counterpart of :func:`enable_debug_eager_mode`),
    restoring graph-mode layer creation.
    """
    global _debug_eager_mode_enabled
    import tensorflow as tf
    tf.compat.v1.disable_eager_execution()
    _debug_eager_mode_enabled = False
def is_debug_eager_mode_enabled() -> bool:
    """
    :return: whether :func:`enable_debug_eager_mode` was called
        (and not reverted via :func:`disable_debug_eager_mode`)
    """
    return _debug_eager_mode_enabled
def get_dim_deps(dim: Union[Dim, Sequence[Dim]]) -> List[Tensor]:
    """
    :return: the tensors the dim tag depends on.
        This is needed for some functions (layers) such as `nn.constant` or `nn.random_...`.
        https://github.com/rwth-i6/returnn/issues/1096
    """
    if isinstance(dim, (tuple, list, set)):
        # Collect deps over all dims, deduplicated while keeping order.
        return _utils.unique_tensor_list(itertools.chain(*(get_dim_deps(dim_) for dim_ in dim)))
    if (not isinstance(dim, Dim)):
        raise TypeError(f'expected nn.Dim, got {type(dim)}')
    dim = dim.get_same_base()
    if dim.is_static():
        # Static dims have no tensor dependencies.
        return []
    if dim.special:
        raise ValueError(f'{dim} deps not defined for special tags')
    # May register the dim via a range_from_length layer as a side effect.
    _register_dim_via_dyn_layer(dim)
    if (dim in _dim_deps):
        deps = _dim_deps[dim]
        if _deps_valid_in_cur_name_ctx(deps):
            return deps
        # Cached deps belong to a stale root name ctx; drop and recompute below.
        _dim_deps.pop(dim)
    if ((not dim.is_dim_known()) and (not dim.derived_from_op)):
        raise ValueError(f'{dim} is not defined yet')
    if dim.derived_from_op:
        # Derived dims inherit the deps of the op inputs; cache the result.
        deps = get_dim_deps(dim.derived_from_op.inputs)
        _dim_deps[dim] = deps
        return deps
    raise Exception(f'{dim} deps not defined (_register_dim_deps not called?)')
def _register_dim_deps_when_novel(dim: Dim, deps: List[Tensor]):
    # Register `deps` as the dependency tensors of `dim` in the global `_dim_deps`
    # registry, unless an equally-good or better registration already exists.
    if dim.derived_from_op:
        # Derived dims get their deps via derived_from_op (see get_dim_deps).
        return
    dim = dim.get_same_base()
    if (dim in _dim_deps):
        old_deps = _dim_deps[dim]
        if (not _deps_valid_in_cur_name_ctx(old_deps)):
            # Stale deps from an old root name ctx: replace.
            _dim_deps.pop(dim)
        elif (any(((not dep.available_for_inference) for dep in old_deps))
              and all((dep.available_for_inference for dep in deps))):
            # Prefer deps which are all available for inference.
            _dim_deps.pop(dim)
        else:
            return
    if _register_dim_via_dyn_layer(dim):
        return
    if (dim.dyn_size_ext and (not isinstance(dim.dyn_size_ext.raw_tensor, rfl.Layer))):
        # Dyn size has no layer yet: create a "length" layer producing it.
        assert (dim.dyn_size_ext.raw_tensor is None)
        rfl.make_layer(
            {'class': 'length', 'from': deps, 'axis': dim, 'dtype': dim.dyn_size_ext.dtype},
            name=dim.dyn_size_ext.name, out=dim.dyn_size_ext)
        assert isinstance(dim.dyn_size_ext.raw_tensor, rfl.Layer)
    _dim_deps[dim] = deps
def _deps_valid_in_cur_name_ctx(deps: List[Tensor]) -> bool:
    """
    :return: whether every dep tensor's layer still lives under the current
        root name ctx (cached deps from an older root are invalid)
    """
    root_now = rfl.Layer.top().root
    for dep_tensor in deps:
        assert isinstance(dep_tensor, Tensor)
        assert isinstance(dep_tensor.raw_tensor, rfl.Layer)
        if (dep_tensor.raw_tensor.root == root_now):
            continue
        return False
    return True
def _register_dim_via_dyn_layer(dim: Dim) -> bool:
    """
    Given is any custom length tensor (dyn layer),
    and we make a dim from that.
    We do that via the range_from_length layer.

    :param dim:
    :return: whether we registered the dim
    """
    if (dim.is_static() or dim.is_batch_dim()):
        return False
    if (dim in _dim_deps):
        # Already registered.
        return False
    assert dim.dyn_size_ext
    if (dim.dyn_size_ext.raw_tensor is None):
        # No layer behind the dyn size yet; nothing we can do here.
        return False
    assert isinstance(dim.dyn_size_ext.raw_tensor, rfl.Layer)
    dyn_size_ext: Tensor[rfl.Layer] = dim.dyn_size_ext
    if (dyn_size_ext.raw_tensor.layer_dict['class'] == 'range_from_length'):
        # A range_from_length layer for this dim exists, so it should have been registered.
        assert (dyn_size_ext.raw_tensor.layer_dict['out_spatial_dim'] == dim)
        raise Exception('why not in _dim_deps already?')
    # Pre-register with empty deps to break recursion while make_layer runs.
    _dim_deps[dim] = []
    layer_with_dim = rfl.make_layer(
        {'class': 'range_from_length', 'from': dyn_size_ext.copy(),
         'dtype': dyn_size_ext.dtype, 'out_spatial_dim': dim},
        name=(dim.name or dyn_size_ext.name or 'unnamed_dyn_dim'))
    _dim_deps[dim] = [layer_with_dim]
    return True
class Layer():
    """
    This is a helper class to keep track of the current name context when creating layers.
    Usually you do not need to access this directly
    except for creating the root name ctx
    and getting out the final RETURNN config or net dict.

    A name ctx represents one absolute layer name in the RETURNN layer hierarchy,
    except for the root name ctx.

    A name ctx thus can have a parent name ctx (if it is not the root),
    and potentially child name contexts.

    See the documentation on name hierarchies for RETURNN and RETURNN-common in the module docstring at the top.

    (Note: This class was previously called NameCtx in RETURNN-common.)
    """

    _stack = []  # class-level stack of entered name contexts; [0] is the default root
    _recent = None  # most recently created/entered Layer
    _ReservedNames = {'data', 'output'}  # child names with special RETURNN meaning

    @classmethod
    def reset_default_root(cls):
        """
        Resets the default root name ctx.
        """
        cls._stack[0:1] = [cls.new_root()]
        cls._recent = None

    @classmethod
    def _maybe_init_default_root(cls):
        """
        Initialize the default root name ctx.
        """
        if (not cls._stack):
            cls.reset_default_root()

    @classmethod
    def top(cls) -> Layer:
        """
        Return the top of the stack.
        Assumes that it exists.
        """
        cls._maybe_init_default_root()
        assert cls._stack
        return cls._stack[(- 1)]

    @classmethod
    def recent_subnet(cls) -> Layer:
        """
        Return the most recent subnet.
        """
        top = cls.top()
        recent = cls._recent
        if (recent and (recent.root is top.root)):
            # Walk up from the recent ctx until we hit a subnet.
            while (not recent.is_subnet):
                assert recent.parent
                recent = recent.parent
            assert recent.is_subnet
            return recent
        return top

    @classmethod
    def current_ctx(cls, *, ignore_top_stack_frames: int = 0) -> Layer:
        """
        Return the current context.
        This is the top from the stack with is_subnet_ctx,
        and additionally using the Python stack trace to automatically infer further subnets.

        :param int ignore_top_stack_frames:
        """
        return _auto_setup_parent_name_ctx(ignore_top_stack_frames=(ignore_top_stack_frames + 1))

    @classmethod
    def new_root(cls) -> Layer:
        """
        Create new root name context
        """
        ctx = Layer(parent=None)
        ctx.is_subnet = True
        return ctx

    @classmethod
    def inner_loop(cls) -> Optional[ControlFlowContext]:
        """
        :return: the most inner loop in the current context, if there is one
            E.g. you can use it to access the outer spatial dim.
        """
        layer = cls.top()
        while layer:
            ctx = layer.new_control_flow_ctx
            if (ctx and ctx.is_loop()):
                return ctx
            layer = layer.parent
        return None

    @classmethod
    def inner_control_flow(cls) -> Optional[ControlFlowContext]:
        """
        :return: the most inner control flow ctx in the current context, if there is one
            E.g. you can use it to access the outer spatial dim.
        """
        return cls.top().control_flow_ctx()

    def __init__(self, *,
                 module: Optional[rf.Module] = None,
                 suggested_name: Optional[str] = None,
                 name: Optional[str] = None,
                 virtual: bool = False,
                 can_access_children: bool = True,
                 new_control_flow_ctx: Optional[ControlFlowContext] = None,
                 parent: Optional[Layer] = NotSpecified):
        """
        You are not supposed to call this directly.
        Use :func:`NameCtx.new_root` or :func:`scoped`.
        """
        self.module = module  # rf.Module this ctx belongs to, if any
        self.tensor = None  # Tensor assigned to this ctx (its raw_tensor points back here)
        self.tensor_remove_unused_cleanup_hooks = []  # called when this ctx is pruned as unused
        self.layer_dict = None  # RETURNN layer dict, once the layer is created
        self.layer_extra_dependencies = []  # extra Layer deps, beyond what layer_dict references
        self.usages = []  # Layers which use this one as input
        self.debug_layer = None
        self._enter_stack_frames = None  # Python frames active at __enter__ (for auto parent inference)
        self.is_subnet = False
        self._subnet_main_output = None  # tensor which became the subnet "output", if any
        self.virtual = virtual  # virtual ctxs do not contribute to the RETURNN name path
        self.can_access_children = can_access_children
        self.require_global_access = False
        self.new_control_flow_ctx = new_control_flow_ctx
        self.children = {}  # name -> child Layer
        self.extern_data = {}
        self.global_batch = None
        self.extra_net_dict = {}
        self.marked_outputs = []
        self.marked_losses = []
        self.parent = (parent if (parent is not NotSpecified) else self.current_ctx())
        self.name = name
        if (not name):
            if suggested_name:
                # Sanitize: no "/" (path separator), valid TF scope name.
                suggested_name = suggested_name.replace('/', '_')
                suggested_name = tf_util.get_valid_scope_name_from_str(suggested_name)
                name = self._get_unique_name(suggested_name)
            elif self.parent:
                name = self._get_unique_name()
            self.name = name
        if self.parent:
            self.parent._add_child(self)
        self.custom_layer_name_scope = None
        self.__class__._recent = self

    def __repr__(self):
        parts = [self.get_abs_name_repr()]
        if self.tensor:
            parts.append(('[%s]' % ','.join(self.tensor.get_batch_axes_short_description())))
        return f"<{self.__class__.__name__} {' '.join(parts)}>"

    def __hash__(self):
        # Identity-based hashing: name ctxs are unique objects.
        return hash(id(self))

    def _sis_hash(self):
        # Sisyphus hashing hook: hash by layer dict if defined, else by absolute name.
        from sisyphus.hash import sis_hash_helper
        if (not self.layer_dict):
            return sis_hash_helper(self.get_abs_name())
        return sis_hash_helper(self.layer_dict)

    def __copy__(self):
        """
        Normally we would not want to get a new name ctx with ``ctx != copy(ctx)``.

        :return: self
        :rtype: NameCtx
        """
        return self

    def __deepcopy__(self, memo=None):
        """
        Normally we would not want to get a new name ctx with ``ctx != deepcopy(ctx)``.

        :return: self
        :rtype: NameCtx
        """
        return self

    def assign_parent(self, parent: Layer, suggested_name: Optional[str] = None):
        """
        Assign or reassign parent to this name context.
        """
        if self.parent:
            self_ = self.parent.children.pop(self.name)
            assert (self_ is self)
            self.parent = None
        self.parent = parent
        self.name = self._get_unique_name((suggested_name or self.name))
        self.parent._add_child(self)

    def move_tensor_here(self: Layer, tensor: Tensor):
        """
        Moves an existing layer ref (with assigned name ctx)
        to another name ctx (without assigned layer or layer ref).

        This assumes that there are no other references to tensor.raw_tensor
        because those would become invalid.
        References to tensor itself should be fine.
        """
        assert ((not self.layer_dict) and (not self.tensor))
        assert (tensor.raw_tensor is not None)
        assert isinstance(tensor.raw_tensor, Layer)
        if tensor.raw_tensor.parent:
            # Detach the old ctx from its parent.
            old_name_ctx = tensor.raw_tensor.parent.children.pop(tensor.raw_tensor.name)
            assert (old_name_ctx is tensor.raw_tensor)
        old_name_ctx: Layer = tensor.raw_tensor
        self.tensor = tensor
        tensor.raw_tensor = self
        # Take over all state from the old ctx.
        self.tensor_remove_unused_cleanup_hooks = old_name_ctx.tensor_remove_unused_cleanup_hooks
        self.layer_dict = old_name_ctx.layer_dict
        self.layer_extra_dependencies = old_name_ctx.layer_extra_dependencies
        self.usages = old_name_ctx.usages
        self.is_subnet = old_name_ctx.is_subnet
        self._subnet_main_output = old_name_ctx._subnet_main_output
        for (name, child) in old_name_ctx.children.items():
            child.parent = self
            if (name not in self.children):
                self.children[name] = child
            else:
                # Name clash: give the moved child a fresh unique name.
                name = child._get_unique_name(name)
                child.name = name
                self.children[name] = child
        old_name_ctx.children = self.children
        if old_name_ctx.layer_dict:

            def _check_layer_opt_value(v):
                # Subnetwork Net objects embedded in the layer dict must point to the new ctx.
                if isinstance(v, Net):
                    assert (v.name_ctx is old_name_ctx)
                    v.name_ctx = self

            nest.map_structure(_check_layer_opt_value, old_name_ctx.layer_dict)

    @property
    def root(self) -> Layer:
        """
        :return: root name ctx
        """
        root = self
        while root.parent:
            root = root.parent
        return root

    @property
    def is_root(self) -> bool:
        """
        :return: whether this is a root ctx
        """
        return (not self.parent)

    @property
    def can_access_children_from_root(self):
        """
        :return: whether can_access_children for self and all parents
        """
        name = self
        while name:
            if (not name.can_access_children):
                return False
            name = name.parent
        return True

    def control_flow_ctx(self) -> Optional[ControlFlowContext]:
        """
        :return: control flow context of this name ctx
        """
        ctx = self
        while ctx:
            if ctx.new_control_flow_ctx:
                return ctx.new_control_flow_ctx
            ctx = ctx.parent
        return None

    def extend_reserved_names(self, names: Set[str]):
        """
        Extend reserved child names.
        """
        # Creates a new set, so this does not mutate the class-level default.
        self._ReservedNames = (self._ReservedNames | names)

    def _assign_param_names(self, root_module: rf.Module):
        # Attach all parameters of the root module directly under the root ctx,
        # so every param gets a well-defined absolute name.
        root = self.root
        for (name, param) in root_module.named_parameters(recurse=True):
            param = _resolve_param_tensor(param)
            param_layer = param.raw_tensor
            assert isinstance(param_layer, Layer), f'param {param} has no layer'
            if ((not param_layer.parent) and (param_layer is not root)):
                param_layer.assign_parent(root, name)

    def _remove_unused_and_handle_subnets(self):
        # BFS from marked outputs/losses to find all used ctxs (creating subnetwork
        # layers along the way), then prune every ctx not reached.
        used_names = {self}
        root = self.root
        queue = [(tensor, []) for tensor in (self.marked_outputs + self.marked_losses)]
        while queue:
            (tensor, src) = queue.pop(0)
            if (tensor.raw_tensor is None):
                raise Exception(f'tensor {tensor} has no layer defined, via {src}')
            if ((not tensor.raw_tensor.parent) and (tensor.raw_tensor != root)):
                raise Exception(f'tensor {tensor} has no parent assigned, via {src}')
            if (tensor.raw_tensor in used_names):
                continue
            used_names.add(tensor.raw_tensor)
            src_ = (src + [tensor.raw_tensor])  # keep the path for error messages
            for dep in tensor.raw_tensor.get_tensor_dependencies():
                if ((dep.tensor is not None) and (dep not in used_names)):
                    queue.append((dep.tensor, src_))
            ctx = tensor.raw_tensor
            ctx.make_all_sub_networks_and_optimize()
            # All enclosing ctxs (and their tensors) are used as well.
            for ctx in tensor.raw_tensor.get_abs_name_ctx_list():
                if (ctx in used_names):
                    continue
                if ((ctx.tensor is not None) and (ctx.tensor is not tensor)):
                    queue.append((ctx.tensor, src_))
        visited = set()
        queue = [self]
        while queue:
            name_ctx = queue.pop(0)
            if (name_ctx in visited):
                continue
            visited.add(name_ctx)
            if (name_ctx not in used_names):
                # Prune unused subtree; run its cleanup hooks.
                assert name_ctx.parent
                name_ctx.parent.children.pop(name_ctx.name)
                if (name_ctx.tensor is not None):
                    for hook in name_ctx.tensor_remove_unused_cleanup_hooks:
                        hook(name_ctx.tensor)
            else:
                for (name, child) in name_ctx.children.items():
                    assert ((child.parent is name_ctx) and (child.name == name))
                    queue.append(child)

    def _prepare_for_config_serialization(self, *, root_module: rf.Module, name_path_cache: _NamePathCache):
        """
        Prepare the name ctx for RETURNN config serialization.
        This makes the root module maybe nicer and also removes unused entries.
        """
        assert (self.root is self)
        # If the root module itself was called, inline that call into the root ctx.
        root_module_calls = [child for child in self.root.children.values() if (child.module is root_module)]
        if root_module_calls:
            root_mod_call = root_module_calls[0]
            assert (root_mod_call.module is root_module)
            if (root_mod_call is not self):
                if (root_mod_call.tensor is not None):
                    assert (not self.tensor)
                    assert (root_mod_call.layer_dict['class'] == 'subnetwork')
                    # Replace the subnet "output" copy-layer by its source tensor.
                    sub_out = root_mod_call.children.pop('output')
                    assert (sub_out.layer_dict['class'] == 'copy')
                    sub_real_out = sub_out.layer_dict['from']
                    assert isinstance(sub_real_out, Tensor)
                    sub_out.tensor = sub_real_out
                    root_mod_call.tensor = sub_real_out
                for (name, child) in list(root_mod_call.children.items()):
                    child.assign_parent(parent=self, suggested_name=name)
        # BFS over the whole tree; rename/merge ctxs so names follow the module
        # attribute paths (via name_path_cache) where possible.
        queue = [self.root]
        visited: Set[Layer] = set()
        mod_in_layer = {}  # (parent ctx, module identity) -> canonical ctx for that module
        while queue:
            ctx = queue.pop(0)
            if (ctx in visited):
                continue
            visited.add(ctx)
            assert ((not ctx.parent) or (ctx.parent.children[ctx.name] is ctx))
            for child in ctx.children.values():
                queue.append(child)
            if (not ctx.parent):
                continue
            if ((ctx.module is not None) and (not ctx.layer_dict)):
                mod_path = name_path_cache.get_name_path(ctx.module, raise_exc=False)
                if ((mod_path is not None) and (len(mod_path) > 0) and (mod_path[(- 1)] != ctx.name)):
                    # Prefer an already-existing ctx for the same module under the same parent.
                    existing = None
                    if ((ctx.parent, RefIdEq(ctx.module)) in mod_in_layer):
                        existing = mod_in_layer[(ctx.parent, RefIdEq(ctx.module))]
                    elif ((mod_path[(- 1)] in ctx.parent.children)
                          and (ctx.parent.children[mod_path[(- 1)]].module is ctx.module)):
                        existing = ctx.parent.children[mod_path[(- 1)]]
                    else:
                        for other in ctx.parent.children.values():
                            if ((other.module is ctx.module) and other.name.startswith(mod_path[(- 1)])):
                                existing = other
                                break
                    if existing:
                        assert (existing.is_subnet and (not existing.tensor) and (not existing.layer_dict))
                        if (ctx is existing):
                            pass
                        else:
                            # Merge this ctx into the existing one.
                            if ((ctx.parent, RefIdEq(ctx.module)) not in mod_in_layer):
                                mod_in_layer[(ctx.parent, RefIdEq(ctx.module))] = existing
                            for child in list(ctx.children.values()):
                                child.assign_parent(existing)
                            ctx.parent.children.pop(ctx.name)
                    else:
                        # Rename this ctx to match the module path, unless it already does.
                        if (mod_path[(- 1)] not in ctx.parent.children):
                            ctx.assign_parent(parent=ctx.parent, suggested_name=mod_path[(- 1)])
                        elif ctx.name.startswith(mod_path[(- 1)]):
                            pass
                        else:
                            ctx.assign_parent(parent=ctx.parent, suggested_name=mod_path[(- 1)])
                        mod_in_layer[(ctx.parent, RefIdEq(ctx.module))] = ctx
            elif (ctx.tensor is not None):
                # Similarly rename tensor ctxs to match their attribute paths.
                tensor_path = name_path_cache.get_name_path(ctx.tensor, raise_exc=False)
                if ((tensor_path is not None) and (len(tensor_path) > 0) and (tensor_path[(- 1)] != ctx.name)):
                    if (tensor_path[(- 1)] not in ctx.parent.children):
                        ctx.assign_parent(parent=ctx.parent, suggested_name=tensor_path[(- 1)])
                    elif ctx.name.startswith(tensor_path[(- 1)]):
                        pass
                    else:
                        ctx.assign_parent(parent=ctx.parent, suggested_name=tensor_path[(- 1)])
        self._assign_param_names(root_module=root_module)
        self._remove_unused_and_handle_subnets()
        assert (not self.parent), f'{self} get_returnn_config only makes sense in the root name ctx'

    def get_returnn_config(self, *, root_module: rf.Module) -> _ReturnnConfigSerializer:
        """
        :param root_module: there must be one root module such that all params have a well-defined name
        :return: config serializer
        """
        serializer = _ReturnnConfigSerializer(name_ctx=self, root_module=root_module)
        self._prepare_for_config_serialization(root_module=root_module, name_path_cache=serializer.name_path_cache)
        return serializer

    def make_net(self) -> Net:
        """
        Create new (sub) net, an instance of :class:`Net`.
        """
        return Net(name_ctx=self)

    def make_default_output(self, ref: Tensor) -> Tensor:
        """
        Assume this is a subnet, or the root net, and make a default output.
        """
        assert self.is_subnet
        if (ref.raw_tensor is self.children.get('output', None)):
            return ref
        assert ('output' not in self.children)
        return _utils.copy(ref, name=self.get_child('output'))

    def get_abs_name_ctx_list(self) -> List[Layer]:
        """
        Return list [root name ctx, ..., self].
        """
        ls = []
        cur = self
        while cur:
            ls.append(cur)
            cur = cur.parent
        return list(reversed(ls))

    def get_abs_name(self, *, join_str: str = '/') -> str:
        """
        :return: absolute RETURNN layer name starting from root context.
        """
        ls = self.get_abs_name_ctx_list()
        if (len(ls) == 1):
            return ''
        assert ((len(ls) >= 2) and (not ls[0].name) and (ls[(- 1)] is self) and ls[(- 1)].name)
        return join_str.join((ctx.name for ctx in ls[1:]))

    def get_abs_name_repr(self) -> str:
        """
        :return: Some repr for our absolute name.
        """
        ls = self.get_abs_name_ctx_list()
        if (len(ls) == 0):
            debug_name = '???'
        elif ((len(ls) == 1) and (ls[0].name is None)):
            debug_name = '/'
        else:
            # Virtual ctx names are shown in parentheses.
            debug_name = '/'.join(
                (((repr(ctx.name) if (not ctx.virtual) else f'({ctx.name!r})')
                  if ((i > 0) or (ctx.name is not None)) else '')
                 for (i, ctx) in enumerate(ls)))
        return debug_name

    def get_name_in_ctx(self, ctx: Layer, *, middle_prefix: str = '', shorten_subnet: bool = True) -> str:
        """
        Get layer name valid in given scope.
        """
        assert ((not self.virtual) and (not self.is_root))
        if (self.parent is ctx):
            return (middle_prefix + self.name)
        if (self is ctx):
            # Refer to ourselves from inside: go via the parent scope ("base:").
            return ('base:' + self.get_name_in_ctx(ctx=ctx.parent, middle_prefix=middle_prefix,
                                                   shorten_subnet=shorten_subnet))
        if isinstance(self.tensor, rfl.PrevTensorRef):
            # Reference to the previous-iteration value ("prev:").
            return self.tensor.cur_layer_name_ctx.get_name_in_ctx(
                ctx, middle_prefix=('prev:' + middle_prefix), shorten_subnet=False)
        ctx_scope_abs = ctx.get_abs_name_ctx_list()
        self_name_abs = self.get_abs_name_ctx_list()
        assert (ctx_scope_abs[0] is self_name_abs[0])
        # Find the common ancestor prefix.
        common_len = 0
        max_common_len = min(len(ctx_scope_abs), len(self_name_abs))
        while ((common_len < max_common_len) and (ctx_scope_abs[common_len] is self_name_abs[common_len])):
            common_len += 1
        del ctx_scope_abs[:common_len]
        del self_name_abs[:common_len]
        # One "base:" per remaining (non-virtual) level of the target scope.
        prefix = ''.join(['base:' for ctx_ in reversed(ctx_scope_abs) if (not ctx_.virtual)])
        assert (len(self_name_abs) >= 1), f'{self} in ctx {ctx} invalid'
        assert (self_name_abs[(- 1)] is self)
        if (len(self_name_abs) == 1):
            return ((prefix + middle_prefix) + self.name)
        if ((self.tensor is None) or (not shorten_subnet)):
            postfix = '/'.join([ctx.name for ctx in self_name_abs if (not ctx.virtual)])
            return ((prefix + middle_prefix) + postfix)
        # Shorten "subnet/output" to just "subnet" where the output is the subnet's main output.
        while (len(self_name_abs) >= 2):
            (ctx_, ctx__) = self_name_abs[(- 2):]
            assert (isinstance(ctx_, Layer) and isinstance(ctx__, Layer))
            if (ctx_.layer_dict and (ctx_.layer_dict['class'] == 'subnetwork')):
                if ((ctx_._subnet_main_output is ctx__.tensor) or (ctx_.children.get('output') is ctx__)):
                    self_name_abs.pop((- 1))
                    continue
            break
        postfix = '/'.join([ctx_.name for ctx_ in self_name_abs if (not ctx_.virtual)])
        assert postfix, f'{self} in ctx {ctx} invalid, no postfix?'
        return ((prefix + middle_prefix) + postfix)

    def _add_child(self, child: Layer):
        # Register a child ctx under its (already unique) name.
        assert child.name
        assert (child.parent is self)
        assert (child.name not in self.children)
        self.children[child.name] = child

    def get_child(self, name: str) -> Layer:
        """
        Makes sure the child exists.
        """
        if (name in self.children):
            return self.children[name]
        else:
            return Layer(name=name, parent=self)

    def get_new_child(self, suggested_name: str) -> Layer:
        """
        New child.
        """
        return Layer(suggested_name=suggested_name, parent=self)

    def get_child_with_tensor(self, name: str, *, data: Tensor) -> Layer:
        """
        Makes sure the child exists, including a corresponding layer ref.
        Creates the child together with a layer ref if it does not exist yet.
        """
        child = self.get_child(name)
        if (not child.tensor):
            child.tensor = data
        assert (child.tensor is data)
        if (data.raw_tensor is None):
            data.raw_tensor = child
        assert (data.raw_tensor is child)
        return child

    def get_child_tensor(self, name: str, *, data: Tensor) -> Tensor[Layer]:
        """
        Get child layer ref. Makes sure it exists.
        """
        return self.get_child_with_tensor(name, data=data).tensor

    def get_recent_tensor(self, *, only_same_control_flow: bool = False) -> Optional[Tensor]:
        """
        Get recent tensor if it exists. Can go deeply through children.
        """
        queue = [self]
        while queue:
            ctx = queue.pop((- 1))  # depth-first
            if (only_same_control_flow and (ctx.control_flow_ctx() != self.control_flow_ctx())):
                continue
            if (ctx.tensor is not None):
                return ctx.tensor
            queue.extend(ctx.children.values())
        return None

    def __enter__(self):
        self._maybe_init_default_root()
        self._stack.append(self)
        # Remember all currently active Python frames; used to auto-infer
        # parent ctxs from the call stack later.
        from returnn.util.better_exchook import get_current_frame
        frame = get_current_frame()
        self._enter_stack_frames = set()
        while frame:
            self._enter_stack_frames.add(frame)
            frame = frame.f_back
        self.__class__._recent = self
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        assert (self._stack[(- 1)] is self), f'{self}.__exit__: stack {self._stack} top is not self'
        self._enter_stack_frames = None
        self._stack.pop((- 1))

    def _get_parent_module(self) -> Optional[rf.Module]:
        # Nearest ancestor ctx which has a module assigned.
        parent = self.parent
        while parent:
            if parent.module:
                return parent.module
            parent = parent.parent
        return None

    def _get_suggested_name(self) -> str:
        # Suggest a name for self.module: its attribute path in the parent module
        # if resolvable, else the module's default name.
        assert (self.module is not None)
        parent_module = self._get_parent_module()
        if parent_module:
            cache = _NamePathCache()
            cache.register_module(parent_module, [])
            path = cache.get_name_path(self.module, raise_exc=False)
            if (path is not None):
                return '.'.join(path)
        return self.module.get_default_name()

    def _get_unique_name(self, suggested_name: Optional[str] = None) -> str:
        name = (suggested_name or self._get_suggested_name())
        reserved_names = (set(self.parent.children.keys()) | self.parent._ReservedNames)
        if self.parent.module:
            # If the suggested name is exactly the attribute holding us, keep it.
            if (self.module and (name not in reserved_names)
                    and (getattr(self.parent.module, name, None) is self.module)):
                return name
            if (self.tensor and (name not in reserved_names)
                    and (getattr(self.parent.module, name, None) is self.tensor)):
                return name
            # Avoid clashing with other (non-primitive) attributes of the parent module.
            for (key, value) in vars(self.parent.module).items():
                if (not isinstance(value, (int, float, str, bool, type(None)))):
                    reserved_names.add(key)
        if (name not in reserved_names):
            return name
        # Append a numeric suffix until unique.
        i = 0
        while True:
            name_ = f'{name}_{i}'
            if (name_ not in reserved_names):
                return name_
            i += 1

    def get_tensor_dependencies(self, *, _extra_layer_dict=None) -> List[Layer]:
        """
        :return: list of layers this layer depends on (inputs referenced in the
            layer dict, the subnet output, the parent's tensor, and extra deps)
        """
        dep_list = []
        dep_name_set = set()

        def _maybe_add_dep(x):
            if isinstance(x, Layer):
                if (x in dep_name_set):
                    return
                dep_list.append(x)
                dep_name_set.add(x)
                return
            if isinstance(x, Tensor):
                return _maybe_add_dep(x.raw_tensor)
            if isinstance(x, Net):
                return _maybe_add_dep(x.name_ctx.children['output'].tensor)

        if _extra_layer_dict:
            nest.map_structure(_maybe_add_dep, _extra_layer_dict)
        if self.layer_dict:
            nest.map_structure(_maybe_add_dep, self.layer_dict)
        if (self.children and ('output' in self.children)):
            _maybe_add_dep(self.children['output'].tensor)
        if (self.parent and self.parent.tensor):
            _maybe_add_dep(self.parent.tensor)
        if self.layer_extra_dependencies:
            dep_list.extend(self.layer_extra_dependencies)
        return dep_list

    def make_all_sub_networks_and_optimize(self):
        """
        Go up all parents and create subnetworks which are not initialized yet.
        Also optimize by removing obsolete subnetworks (which just consist of one child).
        """
        ctx = self
        while True:
            if (ctx.tensor is not None):
                ctx.optimize_move_up()
            ctx_ = ctx
            ctx = ctx_.parent
            if ((not ctx) or ctx.is_root):
                break
            if (ctx.virtual or (ctx.tensor is not None) or (ctx_.tensor is None)):
                continue
            if ctx.new_control_flow_ctx:
                # Control-flow ctxs (loop/cond) create their own layer, not a plain subnetwork.
                continue
            ctx._make_sub_network_layer(ctx_.tensor)
            assert (ctx.tensor is not None)

    def optimize_move_up(self):
        """
        If the parent is a (non-initialized) subnet where we are the only child,
        move us up.
        """
        assert (self.tensor is not None)
        ctx = self.parent
        while ctx:
            assert isinstance(ctx, Layer)
            if (not ctx._is_obsolete_subnet()):
                break
            assert (set(ctx.children.values()) == {self})
            # Replace the obsolete parent by ourselves, taking over its name.
            ctx.parent.children[ctx.name] = self
            self.parent = ctx.parent
            self.name = ctx.name
            ctx = ctx.parent

    def _is_obsolete_subnet(self) -> bool:
        # A subnet is obsolete if it has at most one child, no tensor/layer of its
        # own, and (if a module) is a plain Functional wrapper.
        if (self.is_root or self.virtual or (len(self.children) > 1)):
            return False
        if (self.tensor is not None):
            return False
        if ((self.module is not None) and (not isinstance(self.module, rf.Functional))):
            return False
        return True

    def _make_sub_network_layer(self, sub_output: Tensor):
        # Wrap this ctx into a RETURNN "subnetwork" layer with `sub_output` as its output.
        assert ((self.tensor is None) and (self.layer_dict is None))
        assert (not self._is_obsolete_subnet())
        if ('output' in self.children):
            assert (self.children['output'].tensor is sub_output)
        else:
            if isinstance(sub_output, rfl.PrevTensorRef):
                # Use the current-iteration tensor, not the prev-iteration ref.
                assert (sub_output.cur_layer_name_ctx.tensor is not None)
                sub_output = sub_output.cur_layer_name_ctx.tensor
            _utils.copy(sub_output, name=self.get_child('output'))
        rfl.make_layer({'class': 'subnetwork', 'from': [], 'subnetwork': self.make_net()},
                       name=self, predefined_out_data=sub_output)
        assert (self.tensor is not None)
        assert (self.tensor.raw_tensor is self)
        self._subnet_main_output = sub_output
class _ReturnnConfigSerializer():
    """
    Serializes a RETURNN config to a string.

    The config consists of generic RETURNN settings (behavior_version and maybe others)
    generic imports (e.g. "from returnn.tf.util.data import Data, Dim, ..."),
    dim tags, extern_data and the net dict.

    It is possible to first serialize only the part for extern_data (e.g. for the root config)
    including needed dim tags and imports,
    and separately serialize the net dict and remaining needed dim tags.
    """

    def __init__(self, *, name_ctx: Layer, root_module: rf.Module):
        """
        :param name_ctx:
        :param root_module: there must be one root module such that all params have a well-defined name
        """
        self.name_ctx = name_ctx
        self.root_module = root_module
        self.name_path_cache = _NamePathCache()
        self.name_path_cache.register_module(root_module, [])
        self._behavior_version = rfl.min_returnn_behavior_version
        # Shared proxy: extern_data dim tags collected here are excluded from the net-dict part later.
        self._dim_tags_proxy = ReturnnDimTagsProxy()
        self._base_extern_data_dim_refs = None
        self._net_dict_builder = _NetDictBuilderCtx(root_module=self.root_module, name_path_cache=self.name_path_cache)

    def get_complete_py_code_str(self):
        """
        :return: complete combined config as Python code str.
          basically :func:`get_base_extern_data_py_code_str` + :func:`get_ext_net_dict_py_code_str`
        """
        return (self.get_base_extern_data_py_code_str() + self.get_ext_net_dict_py_code_str(with_imports=False, ref_extern_data_dims_via_global_config=False))

    # Common import header emitted at the top of serialized configs.
    ImportPyCodeStr = 'from returnn.tf.util.data import (\n Dim, batch_dim, single_step_dim, SpatialDim, FeatureDim, ImplicitDynSizeDim, ImplicitSparseDim)\n\n'

    def get_base_extern_data_py_code_str(self) -> str:
        """
        :return: serialized config (imports, behavior_version, dim tags, extern_data), i.e. Python code
        """
        # Must only be called once: remembers which dim refs belong to extern_data.
        assert (self._base_extern_data_dim_refs is None)
        from returnn.util.pprint import pformat
        extern_data_raw = self.get_extern_data_raw_dict()
        extern_data_raw = self._dim_tags_proxy.collect_dim_tags_and_transform_config(extern_data_raw)
        self._base_extern_data_dim_refs = list(self._dim_tags_proxy.dim_refs_by_tag.values())
        code_lines = [self.ImportPyCodeStr, 'use_tensorflow = True\n', f'''behavior_version = {self._behavior_version}
''', f'''{self._dim_tags_proxy.py_code_str()}
''', f'''extern_data = {pformat(extern_data_raw)}
''']
        return ''.join(code_lines)

    @classmethod
    def get_base_extern_data_py_code_str_direct(cls, extern_data: Dict[(str, Any)]) -> str:
        """
        directly get serialized Python code via extern data
        """
        dim_tags_proxy = ReturnnDimTagsProxy()
        from returnn.util.pprint import pformat
        extern_data = dim_tags_proxy.collect_dim_tags_and_transform_config(extern_data)
        code_lines = [cls.ImportPyCodeStr, f'''{dim_tags_proxy.py_code_str()}
''', f'''extern_data = {pformat(extern_data)}
''']
        return ''.join(code_lines)

    def get_ext_net_dict_py_code_str(self, *, with_imports: bool=True, ref_extern_data_dims_via_global_config: bool=True) -> str:
        """
        :param with_imports: whether to include imports
        :param ref_extern_data_dims_via_global_config: Add references to the definitions for the dimension tags
          written in `get_base_extern_data_py_code_str` via `returnn.config.get_global_config`.
        :return: serialized config (net dict part), i.e. Python code
        """
        from returnn.util.pprint import pformat
        # Work on a copy so the extern_data state in self._dim_tags_proxy stays untouched.
        dim_tags_proxy = self._dim_tags_proxy.copy()
        net_dict = self.get_net_dict_raw_dict()
        net_dict = dim_tags_proxy.collect_dim_tags_and_transform_config(net_dict)
        imports = {}
        net_dict = self._post_process_transform(net_dict, imports=imports)
        code_lines = []
        if with_imports:
            code_lines.append((self.ImportPyCodeStr + '\n'))
        # `imports` is used as an ordered set of import statements collected during transform.
        for import_str in imports:
            code_lines.append((import_str + '\n'))
        if ref_extern_data_dims_via_global_config:
            code_lines += ['from returnn.config import get_global_config\n', 'config = get_global_config()\n']
            for value in self._base_extern_data_dim_refs:
                code_lines.append(f'''{value.py_id_name()} = config.typed_dict[{value.py_id_name()!r}]
''')
        # Exclude dims already defined in the extern_data part to avoid duplicate definitions.
        code_lines += [f'''{dim_tags_proxy.py_code_str(exclude_dims=self._base_extern_data_dim_refs)}
''', f'''network = {pformat(net_dict)}
''']
        return ''.join(code_lines)

    def get_net_dict_raw_dict(self) -> Dict[(str, Any)]:
        """
        :return: raw net dict (still containing Dim objects etc., before proxy transform)
        """
        return self._net_dict_builder.make_net_dict_raw(self.name_ctx.make_net())

    def get_extern_data_raw_dict(self) -> Dict[(str, Any)]:
        """
        :return: raw extern_data dict; per data key the kwargs of the Data template,
          excluding 'name' and 'batch', plus 'available_for_inference'
        """
        return {data_key: {key: getattr(data, key) for key in [*data.get_kwargs(include_special_axes=False).keys(), 'available_for_inference'] if (key not in {'name', 'batch'})} for (data_key, data) in self.name_ctx.extern_data.items()}

    def get_config_raw_dict(self) -> Dict[(str, Any)]:
        """
        :return: raw dict with behavior_version, extern_data and network
        """
        return {'behavior_version': self._behavior_version, 'extern_data': self.get_extern_data_raw_dict(), 'network': self.get_net_dict_raw_dict()}

    @classmethod
    def _post_process_transform(cls, obj, *, imports: Dict[(str, None)]):
        """
        Recursively transform the (proxied) config for final serialization:
        collect needed import statements into ``imports`` (used as an ordered set)
        and wrap functions so they serialize as ``module.qualname`` references.
        """
        if isinstance(obj, (int, float, str, bool, type(None))):
            return obj
        if isinstance(obj, (Dim, ReturnnDimTagsProxy.DimRefProxy, ReturnnDimTagsProxy.SetProxy)):
            return obj
        if isinstance(obj, numpy.ndarray):
            imports['import numpy'] = None
            return obj
        import types
        if isinstance(obj, types.FunctionType):
            # Only functions from our own top-level package are allowed here.
            # NOTE(review): the error message interpolates __qualname__ after "unknown module";
            # presumably __module__ was intended — verify before relying on the message text.
            if (obj.__module__.split('.')[0] != __name__.split('.')[0]):
                raise ValueError(f'Function {obj} from unknown module {obj.__qualname__} cannot be serialized')
            imports[f'import {obj.__module__}'] = None
            return cls._CodeWrapper(f'{obj.__module__}.{obj.__qualname__}', obj)
        if isinstance(obj, dict):
            return {cls._post_process_transform(key, imports=imports): cls._post_process_transform(value, imports=imports) for (key, value) in obj.items()}
        if isinstance(obj, list):
            return [cls._post_process_transform(value, imports=imports) for value in obj]
        if (isinstance(obj, tuple) and (type(obj) is tuple)):
            return tuple((cls._post_process_transform(value, imports=imports) for value in obj))
        if (isinstance(obj, tuple) and (type(obj) is not tuple)):
            # namedtuple: rebuild via its fields.
            return type(obj)(*(cls._post_process_transform(getattr(obj, key), imports=imports) for key in obj._fields))
        # NOTE(review): unhandled types fall through and implicitly return None — confirm intended.

    class _CodeWrapper():
        # Wraps an object so that repr() emits the given code string verbatim
        # (used to serialize function references as `module.qualname`).
        def __init__(self, code: str, obj: Any):
            self.code = code
            self.obj = obj

        def __repr__(self):
            return self.code
class _NetDictBuilderCtx():
    """
    Context for building the net.
    """

    def __init__(self, *, root_module: rf.Module, name_path_cache: _NamePathCache):
        self.root_module = root_module
        self.cache = name_path_cache

    class _StackInfo():
        # One entry per (sub)network level while recursively building the raw net dict.
        def __init__(self, *, parent: Optional[_NetDictBuilderCtx._StackInfo]=None, net: Net, layer_abs_name_scope_effective: str):
            self.parent = parent
            self.net = net
            # The absolute TF name scope effective at this level (no trailing "/").
            self.layer_abs_name_scope_effective = layer_abs_name_scope_effective

        def add(self, *, net: Net, layer_abs_name_scope_effective: str) -> _NetDictBuilderCtx._StackInfo:
            """
            :return: new stack info with self as parent
            """
            return _NetDictBuilderCtx._StackInfo(parent=self, net=net, layer_abs_name_scope_effective=layer_abs_name_scope_effective)

        def get_parent_loop_axes(self) -> List[Dim]:
            """
            :return: loop spatial dims of all enclosing loop control-flow contexts, outermost first
            """
            dims = []
            parent = self
            while parent:
                ctx = parent.net.name_ctx.control_flow_ctx()
                if ctx:
                    if ctx.is_loop():
                        if ((ctx.loop_spatial_dim is not None) and (ctx.loop_spatial_dim not in dims)):
                            dims.append(ctx.loop_spatial_dim)
                parent = parent.parent
            # Collected innermost-first while walking up; reverse for outermost-first.
            return list(reversed(dims))

    def make_net_dict_raw(self, net: Net, *, _stack: Optional[_StackInfo]=None) -> NetDictRaw:
        """
        Create raw net dict, not containing any :class:`Tensor` or :class:`Net` instances anymore.

        :param net: (sub) network to serialize
        :param _stack: internal, tracks nesting and effective name scopes during recursion
        """
        import types
        if (_stack is None):
            _stack = self._StackInfo(net=net, layer_abs_name_scope_effective='')
        net_dict = {}
        for sub_name_ctx in net.name_ctx.children.values():
            if (not sub_name_ctx.layer_dict):
                continue
            layer_dict = sub_name_ctx.layer_dict.copy()
            assert ('class' in layer_dict)
            data_template = sub_name_ctx.tensor.copy_template()
            # Inside a loop, the loop axis is implicit per step: drop it from the out_shape template.
            for outer_dim in _stack.get_parent_loop_axes():
                if (outer_dim in data_template.dim_tags):
                    data_template = data_template.copy_template_excluding_axis(data_template.get_axis_from_description(outer_dim))
            dim_tags = list(data_template.dim_tags)
            for dim in dim_tags:
                if (dim.is_batch_dim() or dim.is_static()):
                    continue
                if (not dim.dyn_size_ext):
                    dim.complete_dyn_size()
                assert dim.dyn_size_ext, f'{sub_name_ctx}: need {dim} to be defined to be able to know about implicit dims'
            dim_tags.extend(data_template.dim_tags_set_implicit_only_wrapped)
            # Duplicates are only allowed when disambiguated via match_priority.
            assert (len(dim_tags) == len(set(((d, (d.match_priority if isinstance(d, Dim) else 0)) for d in dim_tags)))), f'duplicate dims in {sub_name_ctx} {sub_name_ctx.tensor}'
            if (len(dim_tags) == len(set(dim_tags))):
                # out_shape verification is skipped for layer classes where it is not meaningful.
                if (layer_dict['class'] not in {'constant', 'variable', 'random', 'subnetwork', 'transpose'}):
                    layer_dict['out_shape'] = set(dim_tags)
            assert ('name_scope' not in layer_dict)
            if (sub_name_ctx.custom_layer_name_scope is not None):
                sub_name_scope = sub_name_ctx.custom_layer_name_scope
                layer_dict['name_scope'] = sub_name_scope
                # Only the empty custom name scope is supported here.
                assert (sub_name_scope == '')
                sub_layer_abs_name_scope = _stack.layer_abs_name_scope_effective
            else:
                layer_abs_name_scope_parent = _stack.layer_abs_name_scope_effective
                if layer_abs_name_scope_parent:
                    layer_abs_name_scope_parent += '/'
                layer_abs_name_scope_default = (layer_abs_name_scope_parent + sub_name_ctx.name)
                # Expected scope derived from the module/param hierarchy; None if unknown.
                sub_layer_abs_name_scope = self._expected_layer_abs_name_scope(sub_name_ctx)
                if (sub_name_ctx.layer_dict['class'] == 'variable'):
                    assert sub_layer_abs_name_scope, f'VariableLayer {sub_name_ctx} must have a unique name in {self.root_module}'
                if (sub_layer_abs_name_scope is not None):
                    if (layer_abs_name_scope_default != sub_layer_abs_name_scope):
                        # Expected scope differs from the default: set name_scope explicitly,
                        # relative if possible, otherwise absolute (leading "/").
                        if (sub_layer_abs_name_scope == _stack.layer_abs_name_scope_effective):
                            layer_dict['name_scope'] = ''
                        elif sub_layer_abs_name_scope.startswith(layer_abs_name_scope_parent):
                            layer_dict['name_scope'] = sub_layer_abs_name_scope[len(layer_abs_name_scope_parent):]
                        else:
                            layer_dict['name_scope'] = ('/' + sub_layer_abs_name_scope)
                else:
                    sub_layer_abs_name_scope = layer_abs_name_scope_default

            def _map_elem_resolve(obj: Any) -> Any:
                # Resolve Tensors to layer-name strings and recurse into sub Nets.
                if isinstance(obj, Tensor):
                    assert isinstance(obj.raw_tensor, rfl.Layer), f'unexpected tensor {obj} with raw tensor type {type(obj.raw_tensor)}, expected rfl.Layer'
                    obj: Tensor[rfl.Layer]
                    assert (obj.raw_tensor.parent or (net.name_ctx == obj.raw_tensor))
                    return obj.raw_tensor.get_name_in_ctx(ctx=net.name_ctx)
                if isinstance(obj, Net):
                    return self.make_net_dict_raw(net=obj, _stack=_stack.add(net=obj, layer_abs_name_scope_effective=sub_layer_abs_name_scope))
                assert isinstance(obj, (int, float, str, bool, numpy.ndarray, set, Dim, type(None), types.FunctionType)), f'unexpected type {type(obj)}'
                # Normalize any batch dim to the canonical global batch_dim.
                if (isinstance(obj, Dim) and obj.is_batch_dim()):
                    return batch_dim
                return obj

            layer_dict = nest.map_structure(_map_elem_resolve, layer_dict)
            net_dict[sub_name_ctx.name] = layer_dict
        net_dict.update(net.name_ctx.extra_net_dict)
        return net_dict

    def _expected_layer_abs_name_scope(self, name_ctx: Layer) -> Optional[str]:
        """
        :param NameCtx name_ctx:
        :return: expected absolute name scope for this layer, or None if it cannot be determined
        """
        if (name_ctx.custom_layer_name_scope is not None):
            if (name_ctx.custom_layer_name_scope == ''):
                # Empty custom scope: inherit from the parent.
                if name_ctx.parent:
                    return self._expected_layer_abs_name_scope(name_ctx.parent)
                else:
                    return ''
            raise NotImplementedError(f'custom_layer_name_scope {name_ctx.custom_layer_name_scope!r} not supported yet')
        # Prefer the tensor's registered name path, fall back to the module's.
        if (name_ctx.tensor is not None):
            name_path_tensor = self.cache.get_name_path(name_ctx.tensor, raise_exc=False)
            if (name_path_tensor is not None):
                return '/'.join(name_path_tensor)
        if name_ctx.module:
            name_path_mod = self.cache.get_name_path(name_ctx.module, raise_exc=False)
            if (name_path_mod is not None):
                return '/'.join(name_path_mod)
        return None
class Net():
    """
    Represents a RETURNN (sub) network.

    Thin wrapper around the name context (:class:`Layer`) that owns the network.
    """

    def __init__(self, *, name_ctx: Layer):
        # The name context which defines this (sub) network.
        self.name_ctx = name_ctx

    def __repr__(self):
        return 'Net' + repr(self.name_ctx)
class ReturnnDimTagsProxy():
    """
    When serialized via __repr__, this represents a dict unique_name -> dim tag.
    All usages in the network and extern_data will also get proxies when serialized point to this dict.
    """

    class DimRefProxy():
        """
        This will be a reference to the global dim_tags __repr__.
        """

        def __init__(self, *, dim: Union[(Dim, _MarkedDim)], name: Optional[str], path: Tuple[(Any, ...)], parent: ReturnnDimTagsProxy):
            self._dim = dim
            self.name = name  # None means this ref is unnamed (e.g. special or derived dim)
            self.path = path  # path in the config structure where this dim was first seen
            self.parent = parent
            # Insertion index, used for deterministic sorting of sets.
            self.debug_idx = len(parent.dim_refs_by_name)

        def __repr__(self):
            return self.ref_repr()

        def _sis_hash(self):
            # Sisyphus hashing: hash by the config path, not by the dim object itself.
            from sisyphus.hash import sis_hash_helper
            return sis_hash_helper(self.path)

        @property
        def dim(self) -> Dim:
            """Dim (unwraps :class:`_MarkedDim`)"""
            if isinstance(self._dim, Dim):
                return self._dim
            elif isinstance(self._dim, _MarkedDim):
                return self._dim.tag
            else:
                raise TypeError(f'invalid {self._dim}')

        def ref_repr(self) -> str:
            """ref repr (Python code referring to this dim)"""
            return self.parent.dim_ref_repr(self._dim, brackets=False, prefer_ref=True)

        def py_id_name(self) -> str:
            """
            :return: valid Python identifier
            """
            assert self.name
            return (self.name + '_dim')

        def dim_repr(self):
            """
            Dim repr, used for serialization of all registered dim tags.
            Any derived dims or special dims will not be registered and instead be represented
            with the same derivation referencing other registered dim tags.
            See :func:`ReturnnDimTagsProxy.dim_ref_repr`.
            """
            dim = self._dim
            if isinstance(dim, _MarkedDim):
                return self.parent.dim_ref_repr(dim, brackets=False, prefer_ref=False)
            assert isinstance(dim, Dim)
            assert (not dim.is_batch_dim())
            assert dim.can_be_used_as_dim()
            if dim.derived_from_op:
                # Derived dims are expressed via their derivation, not defined directly.
                return self.parent.dim_ref_repr(dim, brackets=False, prefer_ref=False)
            assert (not dim.match_priority)
            if (dim.kind == Dim.Types.Feature):
                return f'FeatureDim({dim.description!r}, {dim.dimension})'
            if (dim.kind == Dim.Types.Spatial):
                if (dim.dimension is not None):
                    return f'SpatialDim({dim.description!r}, {dim.dimension})'
                else:
                    return f'SpatialDim({dim.description!r})'
            return f'Dim(kind={dim.kind}, description={dim.description!r}, dimension={dim.dimension})'

    class SetProxy():
        """
        This represents a set but with a predefined order.
        We want a deterministic order in the repr such that the generated code stays deterministic.
        """

        def __init__(self, values: Sequence[Any]):
            self.values = values

        def __repr__(self):
            return f"{{{', '.join(map(repr, self.values))}}}"

    def __init__(self, *, reserved_names: Optional[Set[str]]=None):
        self.dim_refs_by_name = {}  # unique name -> DimRefProxy
        self.dim_refs_by_tag = {}  # Dim -> DimRefProxy
        self.reserved_names = (reserved_names or set())
        # Names that would clash with the import header of the generated config.
        self.reserved_names.update({'batch_dim', 'single_step_dim', 'Data', 'Dim', 'FeatureDim', 'SpatialDim', 'ImplicitSparseDim', 'ImplicitDynSizeDim'})

    def __repr__(self):
        return '\n'.join([f'<{self.__class__.__name__}:', *(f' {value.py_id_name()} = {value.dim_repr()}' for (key, value) in self.dim_refs_by_name.items()), '>'])

    def copy(self) -> ReturnnDimTagsProxy:
        """
        :return: creates a shallow copy
        """
        new = ReturnnDimTagsProxy()
        new.dim_refs_by_name = self.dim_refs_by_name.copy()
        new.dim_refs_by_tag = self.dim_refs_by_tag.copy()
        new.reserved_names = self.reserved_names.copy()
        return new

    def py_code_str(self, exclude_dims: Collection[ReturnnDimTagsProxy.DimRefProxy]=()):
        """
        :param exclude_dims: dim tags to exclude from serializing
        :return: Python code defining all registered dim tags, dependencies first
        """
        visited = set()
        lines = []

        def _visit_tag_deps(tag: Dim):
            # Ensure all inputs of a derived dim are emitted before the dim itself.
            if tag.derived_from_op:
                for tag_ in tag.derived_from_op.inputs:
                    if (tag_ in self.dim_refs_by_tag):
                        _visit_ref(self.dim_refs_by_tag[tag_])
                    else:
                        _visit_tag_deps(tag_)

        def _visit_ref(ref: ReturnnDimTagsProxy.DimRefProxy):
            if (ref in exclude_dims):
                return
            _visit_tag_deps(ref.dim)
            if (ref.name in visited):
                return
            visited.add(ref.name)
            lines.append(f'''{ref.py_id_name()} = {ref.dim_repr()}
''')

        for (_, value) in self.dim_refs_by_name.items():
            _visit_ref(value)
        return ''.join(lines)

    def _sis_hash(self):
        # This proxy itself must never be hashed by Sisyphus; only DimRefProxy is.
        raise Exception('unexpected')

    def dim_ref_repr(self, dim: Union[(Dim, _MarkedDim)], *, brackets: bool=True, prefer_ref: bool=True) -> str:
        """
        :param dim: dim to serialize
        :param brackets: whether composite expressions should be parenthesized
        :param prefer_ref: prefer referring to a registered name over inlining the derivation
        :return: for the given dim, Python code which refers to it, via ``dim_tags``
        """
        if isinstance(dim, _MarkedDim):
            # e.g. ImplicitDynSizeDim(inner) / ImplicitSparseDim(inner)
            return f'{dim.__class__.__name__}({self.dim_ref_repr(dim.tag, brackets=False, prefer_ref=prefer_ref)})'
        assert isinstance(dim, Dim)
        if (dim == batch_dim):
            return 'batch_dim'
        if (dim == single_step_dim):
            return 'single_step_dim'
        if dim.match_priority:
            return f'{self.dim_ref_repr(dim.copy(match_priority=0))}.copy(match_priority={dim.match_priority})'
        if ((not dim.derived_from_op) and dim.get_same_base().derived_from_op):
            dim = dim.get_same_base()
        ref = self.dim_refs_by_tag.get(dim)
        if (prefer_ref and ref):
            return ref.py_id_name()
        if dim.derived_from_op:
            if (dim.derived_from_op.kind == 'constant'):
                v = dim.derived_from_op.attribs['value']
                # Parenthesize negative constants so they compose safely in expressions.
                if ((v < 0) and brackets):
                    return f'({v})'
                return str(v)
            # Ops without a Python operator equivalent: emit as method calls.
            func_map = {'truediv_left': 'div_left', 'ceildiv_left': 'ceildiv_left', 'ceildiv_right': 'ceildiv_right'}
            if (dim.derived_from_op.kind in func_map):
                assert (len(dim.derived_from_op.inputs) == 2)
                (a, b) = dim.derived_from_op.inputs
                return f'{self.dim_ref_repr(a)}.{func_map[dim.derived_from_op.kind]}({self.dim_ref_repr(b)})'
            op_str = {'add': '+', 'mul': '*', 'truediv_right': '//', 'floordiv_right': '//'}[dim.derived_from_op.kind]
            s = f' {op_str} '.join((self.dim_ref_repr(in_) for in_ in dim.derived_from_op.inputs))
            return (f'({s})' if brackets else s)
        assert ref, f'no ref for {dim}'
        return ref.py_id_name()

    def collect_dim_tags_and_transform_config(self, config: T) -> T:
        """
        Go through the config and collect all dim tags, replace them by proxies (DimRefProxy or SetProxy).

        :return: new config
        """
        import re

        def _sort_key(value):
            # Deterministic order for sets: batch dim first, then by registration index.
            if isinstance(value, ReturnnDimTagsProxy.DimRefProxy):
                if (value.dim.kind == Dim.Types.Batch):
                    return (- 1)
                return value.debug_idx
            return value

        def _unique_name(dim: Dim) -> str:
            # Derive a unique, valid Python identifier base from the dim description.
            assert (dim not in self.dim_refs_by_tag)
            name_ = dim.description
            name_ = re.sub('[^a-zA-Z0-9_]', '_', name_)
            if name_.endswith('_dim'):
                # py_id_name() appends "_dim" again; strip to avoid "..._dim_dim".
                name_ = name_[:(- len('_dim'))]
            if ((not name_) or name_[:1].isdigit()):
                name_ = ('_' + name_)
            if (name_ not in self.reserved_names):
                return name_
            i = 0
            while True:
                name__ = f'{name_}_{i}'
                if (name__ not in self.reserved_names):
                    return name__
                i += 1

        def _map(path, value, *, direct=True):
            # direct=False means we visit this dim only as an input of a derived-dim op.
            if isinstance(value, _MarkedDim):
                _map(path, value.tag)
                return ReturnnDimTagsProxy.DimRefProxy(dim=value, name=None, path=path, parent=self)
            if isinstance(value, Dim):
                if (value in {batch_dim, single_step_dim}):
                    # Special dims are never registered; serialized via their global names.
                    return ReturnnDimTagsProxy.DimRefProxy(dim=value, name=None, path=path, parent=self)
                if (value.match_priority != 0):
                    # Register the base dim; the match_priority variant refers to it.
                    _map(path, value.copy(match_priority=0))
                    return ReturnnDimTagsProxy.DimRefProxy(dim=value, name=None, path=path, parent=self)
                value = value.get_same_base()
                if value.derived_from_op:
                    for (i, child) in enumerate(value.derived_from_op.inputs):
                        _map((path + (value.derived_from_op.kind, i)), child, direct=False)
                    if (not direct):
                        return ReturnnDimTagsProxy.DimRefProxy(dim=value, name=None, path=path, parent=self)
                if (value in self.dim_refs_by_tag):
                    return self.dim_refs_by_tag[value]
                name = _unique_name(value)
                assert (name not in self.dim_refs_by_name)
                ref = ReturnnDimTagsProxy.DimRefProxy(dim=value, name=name, path=path, parent=self)
                self.dim_refs_by_name[name] = ref
                self.dim_refs_by_tag[value] = ref
                self.reserved_names.add(name)
                return ref
            if isinstance(value, dict):
                return {_map((path + (key, 'key')), key): _map((path + (key, 'value')), value_) for (key, value_) in value.items()}
            if isinstance(value, list):
                return [_map((path + (i,)), value_) for (i, value_) in enumerate(value)]
            if (isinstance(value, tuple) and (type(value) is tuple)):
                return tuple((_map((path + (i,)), value_) for (i, value_) in enumerate(value)))
            if (isinstance(value, tuple) and (type(value) is not tuple)):
                # namedtuple
                return type(value)(*(_map((path + (key,)), getattr(value, key)) for key in value._fields))
            if isinstance(value, set):
                values = [_map((path + (value,)), value_) for value_ in value]
                values.sort(key=_sort_key)
                return ReturnnDimTagsProxy.SetProxy(values)
            return value

        config = _map((), config)
        return config
class _NamePathCache():
    """
    Caches the absolute name path (tuple of names) for modules and parameter tensors,
    based on the module hierarchy rooted at the registered root module(s).
    """

    def __init__(self):
        # RefIdEq(module) -> name path; keyed by identity wrapper, not module equality.
        self.module_to_name_path = {}
        # raw tensor (rfl.Layer) -> name path, for parameters.
        self.tensor_to_name_path = {}
        self.name_path_to_module = {}

    def register_module(self, module: rf.Module, name_path: Sequence[str]):
        """
        Register some module (e.g. root module).
        """
        assert isinstance(module, rf.Module)
        assert isinstance(name_path, (tuple, list))
        assert (RefIdEq(module) not in self.module_to_name_path)
        self.module_to_name_path[RefIdEq(module)] = tuple(name_path)
        self.name_path_to_module[tuple(name_path)] = module
        # BFS through the module hierarchy; first-found path wins for shared submodules.
        queue = [module]
        while queue:
            parent = queue.pop(0)
            for (name, child) in parent.named_children():
                if (RefIdEq(child) not in self.module_to_name_path):
                    self.module_to_name_path[RefIdEq(child)] = (self.module_to_name_path[RefIdEq(parent)] + (name,))
                    self.name_path_to_module[self.module_to_name_path[RefIdEq(child)]] = child
                    queue.append(child)
            for (name, param) in parent.named_parameters(recurse=False):
                assert isinstance(param.raw_tensor, rfl.Layer)
                # Resolve through variable_read wrappers to the underlying VariableLayer tensor.
                param = _resolve_param_tensor(param)
                if (param.raw_tensor not in self.tensor_to_name_path):
                    self.tensor_to_name_path[param.raw_tensor] = (self.module_to_name_path[RefIdEq(parent)] + (name,))

    def get_name_path(self: _NamePathCache, child: Union[(rf.Module, Tensor)], *, raise_exc: bool=True) -> Optional[Tuple[(str, ...)]]:
        """
        :param child: module or (parameter) tensor
        :param raise_exc: raise KeyError if unknown; otherwise return None
        :return: unique absolute layer name for the module hierarchy.
          https://github.com/rwth-i6/returnn_common/issues/25
          https://github.com/rwth-i6/returnn_common/issues/125
        """
        assert self.module_to_name_path  # register_module must have been called before
        if isinstance(child, Tensor):
            if raise_exc:
                return self.tensor_to_name_path[child.raw_tensor]
            else:
                return self.tensor_to_name_path.get(child.raw_tensor)
        elif isinstance(child, rf.Module):
            if raise_exc:
                return self.module_to_name_path[RefIdEq(child)]
            else:
                return self.module_to_name_path.get(RefIdEq(child))
        else:
            raise TypeError(f'invalid type {type(child)}')
def _resolve_param_tensor(param: rf.Parameter[rfl.Layer]) -> rf.Tensor[rfl.Layer]: '\n Get the original tensor from a parameter, pointing to the VariableLayer.\n Via parameter_assign, the current param tensor might be some variable read,\n not the original VariableLayer.\n\n :param param:\n :return: tensor pointing to the VariableLayr\n ' while True: if (param.raw_tensor.layer_dict['class'] == 'variable'): return param if (param.raw_tensor.layer_dict['class'] == 'variable_read'): param = param.raw_tensor.layer_dict['var'] assert isinstance(param, rf.Tensor) continue raise Exception(f'unexpected param tensor {param} {param.raw_tensor} with opts {param.raw_tensor.layer_dict}')
def _auto_setup_parent_name_ctx(*, ignore_top_stack_frames: int=1) -> Layer:
    """
    Sets up a NameCtx corresponding to the Python call stack trace.

    From the call stack, we consider methods from modules (rf.Module subclasses)
    or global functions on tensors.

    There are some heuristics involved but this should not be critical.

    https://github.com/rwth-i6/returnn_common/issues/159

    :param ignore_top_stack_frames: how many of the innermost caller frames to skip
        (in addition to this function's own frame)
    :return: name ctx for the layer
    """
    global _AutoSetupNameCtxPrevTopFrame
    from returnn.util.better_exchook import get_current_frame
    frame = get_current_frame()
    assert frame
    # Frames whose code objects are blacklisted are treated as infrastructure and skipped.
    code_blacklist = {_auto_setup_parent_name_ctx.__code__, Layer.__init__.__code__, Layer.current_ctx.__code__, rfl.make_layer.__code__}
    code_blacklist.update(_AutoSetupNameCtxCodeBlacklist)
    ignore_top_stack_frames += 1  # also skip our own frame
    while (ignore_top_stack_frames > 0):
        assert frame.f_back
        frame = frame.f_back
        ignore_top_stack_frames -= 1
    while (frame.f_code in code_blacklist):
        assert frame.f_back
        frame = frame.f_back
    top_frame = frame
    # NOTE(review): prev_frames is filled from the previous top frame but never read below —
    # looks like dead code or a leftover; confirm before removing.
    prev_frames = set()
    frame = _AutoSetupNameCtxPrevTopFrame
    while frame:
        prev_frames.add(frame)
        frame = frame.f_back
    cur_ctx = Layer.recent_subnet()
    if (not cur_ctx.is_subnet):
        assert (cur_ctx.parent and cur_ctx.parent.is_subnet)
        cur_ctx = cur_ctx.parent
    assert cur_ctx.is_subnet
    cur_control_flow_ctx = cur_ctx.control_flow_ctx()
    cur_root_ctx = cur_ctx.root
    ctx = None
    module_ids = set()  # id() of modules already seen while walking down the stack
    module_frames = []  # modules found on the stack without an existing name ctx, innermost first
    frame = top_frame
    while frame:
        # Stop once we reach the frame(s) where the current subnet scope was entered.
        if (cur_ctx.parent and cur_ctx._enter_stack_frames and (frame in cur_ctx._enter_stack_frames)):
            break
        if (frame.f_code in code_blacklist):
            frame = frame.f_back
            continue
        mod = None
        # Heuristic: a method frame whose first argument (usually `self`) is an rf.Module.
        if (frame.f_code.co_varnames and isinstance(frame.f_locals.get(frame.f_code.co_varnames[0]), rf.Module)):
            mod = frame.f_locals[frame.f_code.co_varnames[0]]
        if ((mod is not None) and (id(mod) not in module_ids)):
            # If this module already has a name ctx in the current scope, reuse it and stop.
            calls = [layer for layer in (cur_ctx.get_abs_name_ctx_list() + list(cur_ctx.children.values())) if (layer.module is mod)]
            if calls:
                ctx = calls[0]
                break
            module_frames.append(mod)
            module_ids.add(id(mod))
        frame = frame.f_back
    if (ctx is None):
        # No existing ctx found: anchor inside the control flow ctx if any, else at the root.
        ctx = (cur_ctx if cur_control_flow_ctx else cur_root_ctx)
    # Create subnet ctxs for the not-yet-registered modules, outermost first.
    for module in reversed(module_frames):
        ctx = Layer(module=module, parent=ctx)
        ctx.is_subnet = True
    _AutoSetupNameCtxPrevTopFrame = top_frame
    return ctx
def auto_setup_name_ctx_ignore_func(func: Union[types.FunctionType, Callable]):
    """
    Registers the func in the blacklist.

    Frames running this function's code object will then be skipped
    by the automatic name-ctx stack-frame heuristics.
    """
    code = func.__code__
    _AutoSetupNameCtxCodeBlacklist.add(code)
class Loop():
    """
    This represents a RecLayer subnetwork in RETURNN,
    i.e. where the calculation per step is defined explicitly.

    (For RecLayer with a predefined unit, see :class:`Rec`.
    Or for example :class:`Lstm`.)

    To define a loop like this pseudo Python code::

      x  # given, shape (batch, time, dim)
      h = Zeros([batch,dim])()  # initial state, shape (batch,dim)
      out = []
      for t in range(x.max_seq_len):
        x_lin = Linear(dim)(x[t])
        h_prev = h
        h = Linear(dim)(x_lin + h_prev)
        out.append(h)

      h  # final state
      out  # shape (time, batch, h_dim)

    You would write::

      dim = nn.FeatureDim(...)
      loop = nn.Loop(axis=...)
      loop.state.h = nn.zeros([batch_dim,dim])  # initial state
      with loop:
        x_t = loop.unstack(x)
        x_lin = Linear(dim)(x_t)
        loop.state.h = Linear(dim)(x_lin + loop.state.h)
        out = loop.stack(loop.state.h)

    ``state`` is :class:`Loop._StateHolder` and manages the recurrent state.

    This code must be run within a :func:`Module.forward`
    or with some active global name context (:class:`NameCtx`).

    This API is currently in development, and might change.
    See: https://github.com/rwth-i6/returnn_common/issues/16
    """

    def __init__(self, *, max_seq_len: Optional[Tensor]=NotSpecified, optimize_move_layers_out: Optional[bool]=NotSpecified, unroll: bool=NotSpecified, axis: Optional[Dim]=NotSpecified, debug: Optional[bool]=NotSpecified, name: str='loop'):
        super(Loop, self).__init__()
        self._has_given_axis = True
        if ((not axis) or (axis is NotSpecified)):
            # No explicit axis: create a fresh dynamic dim for this loop.
            self._has_given_axis = False
            axis = Dim(None, name=f'{name}-dim')
        assert isinstance(axis, Dim)
        # Collect all explicitly given constructor options (from locals()) as RecLayer opts;
        # 'max_seq_len' is renamed to 'max_seq_len_via'.
        self.extra_opts = {{'max_seq_len': 'max_seq_len_via'}.get(key, key): value for (key, value) in locals().items() if ((value is not NotSpecified) and (value is not None) and (key not in {'self', '__class__', 'name'}))}
        self.layer_module = LoopModule(loop=self)
        parent_name_ctx = rfl.Layer.current_ctx()
        self.control_flow_ctx = ControlFlowContext(kind=ControlFlowContext.Types.Loop, outer_ctx=rfl.Layer.inner_control_flow(), identifier=parent_name_ctx.get_abs_name())
        self.control_flow_ctx.loop_spatial_dim = axis
        self.name_ctx = rfl.Layer(module=self.layer_module, suggested_name=name, parent=parent_name_ctx, new_control_flow_ctx=self.control_flow_ctx, can_access_children=False)
        self.name_ctx.custom_layer_name_scope = ''
        self.name_ctx.is_subnet = True
        # 'output' and 'end' have special meaning inside a RecLayer subnet.
        self.name_ctx.extend_reserved_names({'output', 'end'})
        self._entered_scope = False
        self._exited_scope = False
        self._exited_scope_with_exception = False
        self._state = _LoopStateHolder(loop=self)
        self.unstacked_refs = []  # type: List[Tensor]
        self.outputs = []  # type: List[Tensor]
        self._last_frames = {}  # cache for last(): raw source layer -> result tensor
        self.axis = axis
        self.end_ref = None
        self._iter_idx_ref = None

    def __repr__(self):
        return f'<{self.__class__.__name__} {self.name_ctx.get_abs_name_repr()}>'

    def __enter__(self) -> Loop:
        assert (not self._entered_scope), f'{self}: cannot enter twice'
        self._entered_scope = True
        self.name_ctx.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        assert (not self._exited_scope), f'{self}: cannot exit twice'
        try:
            if (not exc_type):
                # The loop length must be defined either via unstack() or via end().
                if ((self.end_ref is None) and (not self.unstacked_refs)):
                    raise Exception(f'{self}: call `unstack` or `end` at least once to define the loop length')
                if (not self.outputs):
                    # A RecLayer needs an 'output' sub layer; create a default one.
                    if (self.end_ref is not None):
                        self.stack(self.end_ref)
                    else:
                        assert self.unstacked_refs
                        self.stack(self.unstacked_refs[0])
                assert ('output' in self.name_ctx.children)
        finally:
            self._exited_scope_with_exception = bool(exc_type)
            self._exited_scope = True
            self.name_ctx.__exit__(exc_type, exc_val, exc_tb)
        if (not exc_type):
            # Actually create the RecLayer now that the subnet is complete.
            res = self.layer_module()
            if (self.end_ref is not None):
                res.raw_tensor.layer_extra_dependencies.append(self.end_ref.raw_tensor)

    @property
    def has_entered_scope(self) -> bool:
        """
        :return: whether we have entered the scope, i.e. we define the per-step calculation.
        """
        return self._entered_scope

    @property
    def state(self) -> Union[(_LoopStateHolder, rf.State)]:
        """state holder inside the loop; after a clean exit, the last state"""
        if (not self._exited_scope):
            return self._state
        if self._exited_scope_with_exception:
            # Cleanup path: do not resolve the last state after an exception.
            return self._state
        return self._state._get_last()

    @state.setter
    def state(self, initial_state: rf.State):
        assert (len(self._state) == 0), f'can only assign {self}.state once for the initial state'
        assert (not self._entered_scope)
        for (key, value) in initial_state.items():
            self._state[key] = value

    def unstack(self, source: Tensor, *, name: Optional[str]=None) -> Tensor:
        """
        Unrolls over the specified axis, and provides each frame in each loop iteration.
        The axis can be specified globally for the :class:`Loop` instance (recommended)
        or locally here (not recommended).
        """
        assert self._has_given_axis, ('%s: unstack() requires a given axis' % self)
        assert (self.axis in source.dims)
        res = _rec_unstack(source, axis=self.axis, name=name)
        self.unstacked_refs.append(res)
        return res

    def stack(self, source: Tensor, *, name: Optional[str]=None) -> Tensor:
        """
        Accumulates the frames of source within the loop,
        to make it accessible outside the loop.
        """
        # The first stacked tensor without explicit name becomes the RecLayer 'output'.
        if ((not name) and ('output' not in self.name_ctx.children)):
            name = self.name_ctx.get_child('output')
        if (isinstance(name, str) or (not name)):
            if (not name):
                name = source.name
            name = rfl.Layer(suggested_name=name, parent=self.name_ctx)
        assert isinstance(name, rfl.Layer)
        # Outside the loop, the result has the loop axis prepended (axis 0).
        data = source.copy_template().copy_add_dim_by_tag(dim_tag=self.axis, unbroadcast=True, axis=0)
        data.time_dim_axis = 0
        data.control_flow_ctx = self.control_flow_ctx.outer_ctx
        with self.name_ctx.parent:
            res = rfl.make_layer({'class': 'copy', 'from': source}, predefined_out_data=data, name=name)
        assert isinstance(res, Tensor)
        if (res.raw_tensor.name != 'output'):
            # Non-'output' accumulated layers must be marked so RETURNN keeps them accessible.
            res.raw_tensor.layer_dict['is_output_layer'] = True
        self.outputs.append(res)
        return res

    def last(self, source: Tensor, *, name: Optional[str]=None) -> Tensor:
        """
        Gets the last value from source.
        """
        assert isinstance(source, Tensor)
        if (source.raw_tensor in self._last_frames):
            return self._last_frames[source.raw_tensor]
        assert (self.name_ctx.tensor is not None), f'{self}.last(...): must call from outside'
        if (source.raw_tensor.parent is not self.name_ctx):
            # Source is in some deeper subnet: copy it up to a direct child of the loop.
            assert (self.name_ctx in source.raw_tensor.get_abs_name_ctx_list()), f'invalid {self}.last({source})'
            sub_layer_name = source.raw_tensor.get_name_in_ctx(self.name_ctx).replace('/', '.')
            source = _utils.copy(source, name=self.name_ctx.get_new_child(sub_layer_name))
            assert (source.raw_tensor.parent is self.name_ctx)
        source.raw_tensor.layer_dict['need_last'] = True
        sub_layer_name = source.raw_tensor.get_name_in_ctx(self.name_ctx)
        with self.name_ctx.parent:
            res = rfl.make_layer({'class': 'rec_last_output', 'rec_layer': self.name_ctx.tensor, 'sub_layer_name': sub_layer_name}, predefined_out_data=source, name=(name or sub_layer_name.replace('/', '_')))
        # Undo the 'need_last' marking if this result ends up unused.
        res.raw_tensor.tensor_remove_unused_cleanup_hooks.append((lambda _: source.raw_tensor.layer_dict.pop('need_last')))
        res.raw_tensor.layer_extra_dependencies.append(source.raw_tensor)
        source.raw_tensor.usages.append(res.raw_tensor)
        self._last_frames[source.raw_tensor] = res
        return res

    def end(self, source: Tensor, *, include_eos: bool) -> Tensor:
        """
        For loops with dynamic ending condition (which might not use unstack),
        this defines the ending condition.

        :param source: the ending condition
        :param include_eos: if True, the last() and stack() function include the current ending frame, otherwise not
        """
        assert (not self.end_ref), f'{self}.end() can only be called once'
        assert (source.dtype == 'bool'), f'{self}: end expects boolean condition, got {source}'
        if (not self.axis.dyn_size_ext):
            # Derive the dynamic seq-length template from the condition (outside the loop ctx).
            dyn_size_ext = source.copy_template()
            dyn_size_ext.dtype = 'int32'
            if dyn_size_ext.control_flow_ctx:
                dyn_size_ext.control_flow_ctx = dyn_size_ext.control_flow_ctx.outer_ctx
            self.axis.dyn_size_ext = dyn_size_ext
            self.axis.batch = dyn_size_ext.batch
            self.axis.control_flow_ctx = dyn_size_ext.control_flow_ctx
        self.extra_opts['include_eos'] = include_eos
        self.end_ref = _utils.copy(source, name=self.name_ctx.get_child('end'))
        return self.end_ref

    @property
    def max_seq_len(self) -> Optional[Tensor]:
        """max seq length in case the length is dynamic via :func:`end`"""
        return self.extra_opts.get('max_seq_len_via')

    @max_seq_len.setter
    def max_seq_len(self, value: Optional[Tensor]):
        if (value is None):
            self.extra_opts.pop('max_seq_len_via', None)
        else:
            self.extra_opts['max_seq_len_via'] = value

    @property
    def iter_idx(self) -> Tensor:
        """
        The index of the current iteration, inside the loop. This is a scalar. This always starts with 0.
        """
        # Only valid while inside the loop scope.
        assert (self._entered_scope and (not self._exited_scope))
        if (self._iter_idx_ref is not None):
            return self._iter_idx_ref
        # ':i' is the RETURNN-internal name for the RecLayer step index.
        self._iter_idx_ref = self.name_ctx.get_child_tensor(':i', data=Tensor(':i', dtype='int32', dim_tags=(), sparse_dim=self.axis, control_flow_ctx=self.control_flow_ctx))
        return self._iter_idx_ref
class LoopModule(rf.Module):
    """
    Internal helper used by :class:`Loop` to emit the RETURNN :class:`RecLayer`
    for the loop. Not meant to be used directly by user code.
    """

    def __init__(self, loop: Loop):
        super().__init__()
        self.loop = loop

    def __call__(self) -> Tensor:
        """
        Build the layer dict for this loop, i.e. a RecLayer.
        """
        ctx = self.loop.name_ctx
        out_tensor = ctx.children["output"].tensor
        # The first axis of the loop output must be the loop axis itself.
        assert out_tensor.dim_tags[0] == self.loop.axis
        layer_dict = {"class": "rec", "from": [], "unit": ctx.make_net()}
        layer_dict.update(self.loop.extra_opts)
        return rfl.make_layer(layer_dict, name=ctx, predefined_out_data=out_tensor)
class _LoopStateHolder:
    """
    Dict/attribute-style proxy for the recurrent state entries of a :class:`Loop`.
    Before the loop scope is entered, assignment defines the initial state;
    inside the loop scope, assignment updates the state for the next iteration.
    """

    def __init__(self, loop: Loop):
        self._loop = loop
        self._state = {}

    def __repr__(self):
        return f"{self._loop}.state"

    def _get_state(self, name: str) -> _LoopState:
        # Raise AttributeError (not KeyError) so that __getattr__ semantics work.
        if name not in self._state:
            raise AttributeError(f"{self}: Unknown state attrib {name!r}. Assign the initial state first.")
        return self._state[name]

    def _get_last(self) -> rf.State:
        return rf.State({key: value.get_last() for (key, value) in self._state.items()})

    def __getitem__(self, item):
        return self._get_state(item).get()

    def __setitem__(self, key, value):
        if self._loop.has_entered_scope:
            # Inside the loop: assign the per-iteration value.
            self._get_state(key).assign(value)
        else:
            # Outside (before) the loop: define the initial state.
            assert key not in self._state, f"{self} already has state {key!r}"
            self._state[key] = _LoopState(name=key, loop=self._loop, initial=value)

    def __getattr__(self, item):
        return self[item]

    def __setattr__(self, key, value):
        # The two private attrs are set normally; everything else is loop state.
        if key in ("_state", "_loop"):
            super().__setattr__(key, value)
        else:
            self[key] = value

    def keys(self) -> Iterable[str]:
        """keys"""
        return self._state.keys()

    def values(self) -> List[Any]:
        """values"""
        return [state.get() for state in self._state.values()]

    def __len__(self):
        return len(self._state)

    def deep_tensors(self) -> List[Tensor]:
        """See :func:`LayerState.cls_deep_tensors`."""
        return rf.State.cls_deep_tensors(self)
class _LoopState():
    """
    Represents some recurrent state, to be used with :class:`Loop`.
    It can also represent some nested hierarchy of states.

    Lifecycle: constructed with an initial value (outside the loop),
    :func:`assign`-ed once per loop body to define the next-iteration value,
    read via :func:`get` inside the loop, and via :func:`get_last` after the loop.
    """

    def __init__(self, *, name: str, loop: Loop, initial: Union[(Tensor, Any)]):
        """
        :param name: name of this state entry (used for layer naming)
        :param loop: the enclosing :class:`Loop`
        :param initial: some layer-ref, or any kind of nested structure of layers.
        """
        super(_LoopState, self).__init__()
        assert (initial is not None)
        initial = tree.map_structure(rf.convert_to_tensor, initial)
        self.initial = initial
        self.loop = loop
        self.name = name
        # Set by assign(); None means not yet assigned in this loop body.
        self.assigned_value = None
        # One rfl.Layer per leaf of the (possibly nested) initial structure,
        # named "state.<name>[.<path>...]" under the loop's name ctx.
        self.name_ctx = tree.map_structure_with_path(
            (lambda path, ref: rfl.Layer(
                suggested_name='.'.join((str(key) for key in (('state', name) + path))),
                parent=loop.name_ctx)),
            self.initial)

    def __repr__(self):
        return f'<{self.__class__.__name__} {self.name!r}>'

    def assign(self, value):
        """
        Assign the new value of the current iteration.
        This is called (only) inside the loop.
        This will define the value for the next iteration.
        """
        assert (self.name_ctx is not None)
        assert (value is not None)
        assert (self.assigned_value is None), (
            f'Cannot assign the rec state {self.loop}/{self.name} multiple times, '
            f'assigned previously to {self.assigned_value}, now to {value}')
        tree.assert_same_structure(self.initial, value)
        tree.assert_same_structure(self.name_ctx, value)
        self.assigned_value = value

        def _map_ref_to_name_ctx(tensor: Tensor, name_ctx: rfl.Layer, initial: Tensor):
            # Maps one leaf of the assigned structure onto its reserved name ctx,
            # rewriting the underlying layer dict so RETURNN's rec-state mechanism
            # picks up the initial value correctly.
            assert isinstance(tensor, Tensor)
            assert isinstance(name_ctx, rfl.Layer)
            assert (name_ctx.tensor is None), f'Loop state {name_ctx} already assigned'
            tensor.raw_tensor.make_all_sub_networks_and_optimize()
            layer_ctx_list = tensor.raw_tensor.get_abs_name_ctx_list()
            assert (self.loop.name_ctx in layer_ctx_list), (
                f'Loop state {name_ctx} should get a value inside the loop but got {tensor}')
            # If the value comes from inside a MaskedComputation, mark the layer
            # directly below it as an output layer so it stays accessible.
            for i in range((layer_ctx_list.index(self.loop.name_ctx) + 1), (len(layer_ctx_list) - 1)):
                (ctx, ctx_) = layer_ctx_list[i:(i + 2)]
                assert (isinstance(ctx, rfl.Layer) and isinstance(ctx_, rfl.Layer))
                if isinstance(ctx.module, rfl.MaskedComputationModule):
                    ctx_.layer_dict['is_output_layer'] = True
                    break
            if tensor.raw_tensor.layer_dict:
                # For some layer classes, a constant initial value can be folded in directly.
                _do_const_initial_value_opt = False
                _const_initial_value_opt_layer_white_list = {'cum_concat', 'rec'}
                if (tensor.raw_tensor.layer_dict['class'] in _const_initial_value_opt_layer_white_list):
                    _do_const_initial_value_opt = True
                elif (tensor.raw_tensor.layer_dict['class'] == 'get_last_hidden_state'):
                    src = tensor.raw_tensor.layer_dict['from']
                    assert isinstance(src, Tensor)
                    if src.raw_tensor.layer_dict:
                        if (src.raw_tensor.layer_dict['class'] in _const_initial_value_opt_layer_white_list):
                            _do_const_initial_value_opt = True
                if _do_const_initial_value_opt:
                    initial_const = _utils.constant_value(initial)
                    if (initial_const is not None):
                        initial = initial_const
                if (tensor.raw_tensor.layer_dict['class'] == 'get_last_hidden_state'):
                    # Try to eliminate the explicit 'state' arg of the source layer and
                    # move the initial value into its 'initial_state' instead.
                    used_state_eliminate_optimization = False
                    key = tensor.raw_tensor.layer_dict.get('key', 'state')
                    src = tensor.raw_tensor.layer_dict['from']
                    assert isinstance(src, Tensor)
                    src_state_opt = (src.raw_tensor.layer_dict.get('state') if src.raw_tensor.layer_dict else None)
                    if isinstance(src_state_opt, rf.State):
                        src_state_for_key = src_state_opt.get(key)
                        if isinstance(src_state_for_key, rfl.PrevTensorRef):
                            if (src_state_for_key.cur_layer_name_ctx is name_ctx):
                                # The source layer's state refers to "prev:..." of exactly
                                # this state entry, so the explicit 'state' can be dropped.
                                used_state_eliminate_optimization = True
                                src_state_opt[key] = None
                                if all(((opt is None) for opt in tree.flatten(src_state_opt))):
                                    del src.raw_tensor.layer_dict['state']
                                src_initial_state_opt = src.raw_tensor.layer_dict.setdefault(
                                    'initial_state', rf.State())
                                src_initial_state_opt[key] = initial
                    if (not used_state_eliminate_optimization):
                        raise NotImplementedError(
                            f'{self}.assign to {tensor} on {src}: We need '
                            f'https://github.com/rwth-i6/returnn_common/issues/31 '
                            f'and https://github.com/rwth-i6/returnn/issues/732.')
                else:
                    if (tensor.raw_tensor.layer_dict['class'] == 'cum_concat'):
                        # Drop a redundant self-referential 'state' on cum_concat.
                        layer_state_opt = tensor.raw_tensor.layer_dict.get('state')
                        if (isinstance(layer_state_opt, rf.State)
                                and (set(layer_state_opt.keys()) == {'state'})):
                            layer_state = layer_state_opt.state
                            if (isinstance(layer_state, rfl.PrevTensorRef)
                                    and (layer_state.cur_layer_name_ctx is name_ctx)):
                                tensor.raw_tensor.layer_dict.pop('state')
                    assert ('initial_state' not in tensor.raw_tensor.layer_dict)
                    assert ('initial_output' not in tensor.raw_tensor.layer_dict)
                    tensor.raw_tensor.layer_dict['initial_output'] = initial
            else:
                raise NotImplementedError(
                    f'{self}.assign to {tensor} (type {type(tensor)}) but Tensor expected')
            # If a "prev:" ref was already created for this state, wire it up to the
            # newly assigned tensor (possibly copying it into the loop ctx first).
            prev_name_ctx = name_ctx.parent.children.get(f'prev:{name_ctx.name}')
            if prev_name_ctx:
                prev_ref = prev_name_ctx.tensor
                assert isinstance(prev_ref, rfl.PrevTensorRef), f'{(name_ctx, prev_name_ctx)}'
                if (tensor.raw_tensor.parent != self.loop.name_ctx):
                    # Value lives in some sub-ctx: copy it to the reserved name ctx.
                    _utils.copy(tensor, name=name_ctx)
                    if tensor.raw_tensor.layer_dict:
                        assert ('initial_state' not in tensor.raw_tensor.layer_dict)
                        if ('initial_output' in tensor.raw_tensor.layer_dict):
                            name_ctx.layer_dict['initial_output'] = (
                                tensor.raw_tensor.layer_dict.pop('initial_output'))
                else:
                    prev_ref.assign_new_cur_tensor_name_ctx(tensor.raw_tensor)
            return tensor.raw_tensor

        # Replace the placeholder Layer structure by the actual assigned layers.
        self.name_ctx = tree.map_structure(_map_ref_to_name_ctx, value, self.name_ctx, self.initial)

    @staticmethod
    def _map_name_ctx_to_prev_tensor(name_ctx: rfl.Layer, initial: Tensor) -> rfl.PrevTensorRef:
        # Leaf mapper for get(): yields the "prev:" reference for one state entry.
        assert isinstance(name_ctx, rfl.Layer)
        return rfl.PrevTensorRef.get_prev_ref(cur_layer_name_ctx=name_ctx, initial=initial)

    def get(self):
        """
        Return prev or current value of the current loop iteration,
        depending on whether assign() already has been called or not.
        This is called (only) inside a loop.
        """
        assert (self.name_ctx is not None)
        if (not self.loop.has_entered_scope):
            return self.initial
        if (self.assigned_value is None):
            # Not assigned yet in this iteration: read the previous iteration's value.
            return tree.map_structure(self._map_name_ctx_to_prev_tensor, self.name_ctx, self.initial)
        return self.assigned_value

    def _map_name_ctx_to_last_tensor(self, name_ctx: rfl.Layer) -> Tensor:
        # Leaf mapper for get_last(): yields the after-loop value of one state entry.
        assert isinstance(name_ctx, rfl.Layer)
        assert name_ctx.tensor, f'{self.loop} state {name_ctx} not assigned?'
        assert self.loop.name_ctx.tensor, f'{self.loop} not yet exited?'
        return self.loop.last(name_ctx.tensor)

    def get_last(self):
        """
        Outside the loop, get the last instance.
        """
        assert (self.name_ctx is not None)
        assert (self.assigned_value is not None)
        return tree.map_structure(self._map_name_ctx_to_last_tensor, self.name_ctx)
def _rec_unstack(source: Tensor, *, axis: Dim, declare_rec_time: bool = NotSpecified,
                 name: Optional[Union[str, rfl.Layer]] = None) -> Tensor:
    """
    Wraps the RETURNN ``rec_unstack`` layer. This is supposed to be used inside a
    :class:`RecLayer`, with the input coming from outside the rec layer (``base:``),
    making it available per-frame. Effectively similar to a copy, except that it
    checks (or assigns) the loop dimension of the RecLayer; due to automatic
    optimization it is usually moved out of the loop anyway.

    It is allowed to leave both `axis` and `declare_rec_time` unset,
    in case `axis` is assigned to the rec layer and the source has the same dim tag.

    :param source: tensor to unstack per loop frame
    :param axis: the loop (time) dim
    :param declare_rec_time: whether this assigns the rec layer's time dim
    :param name: layer name or name ctx
    :return: layer
    """
    if not isinstance(source, Tensor):
        raise TypeError(f'rec_unstack: unexpected type for source {source!r}, need tensor')
    layer_dict = {'class': 'rec_unstack', 'from': source}
    # Only forward options that were explicitly specified.
    for opt_key, opt_value in (('axis', axis), ('declare_rec_time', declare_rec_time)):
        if opt_value is not NotSpecified:
            layer_dict[opt_key] = opt_value
    return rfl.make_layer(layer_dict, name=(name or 'rec_unstack'))
def make_layer(layer_dict: rfl.LayerDictRaw, *, name: Optional[Union[str, rfl.Layer]] = None,
               out: Optional[Tensor] = None, predefined_out_data: Optional[Tensor] = None,
               name_ctx_ignore_top_stack_frames: int = 0) -> Tensor[rfl.Layer]:
    """
    Creates the layer. This also registers the layer instance in the top name ctx.
    When no name is given, this assumes that the top name ctx corresponds to this module.

    If a layer has params, and you want the param sharing logic,
    you should instead derive a new class from :class:`Module`.
    Usually, you do not need either of these,
    as all standard layers should already be wrapped,
    and it should be possible to define any possible logic using that.
    (If this is not the case, please report an issue.)

    :param layer_dict: can contain :class:`Tensor` instances
    :param name:
        if str: (suggested) layer name. if given, will create a new :class:`NameCtx`
        if NameCtx, will use this.
    :param out: explicit output tensor to use instead of deriving one
    :param predefined_out_data: normally we can derive the out data automatically.
        If this should be skipped, you can pass this explicitly.
    :param name_ctx_ignore_top_stack_frames: for :func:`Layer.current_ctx`.
        If your calling function creates exactly one single layer, you might want to ignore its
        stack frame and set ignore_top_stack_frames=1 and also set a name for the layer.
        If you are potentially creating multiple layers in your calling function,
        leave the default ignore_top_stack_frames=0.
        Some postprocessing step might anyway simplify obsolete subnetworks, see :mod:`naming`.
    :return: the output tensor of the created layer
    :raises TypeError: on invalid ``name`` or invalid value types inside ``layer_dict``
    """
    # Resolve the name ctx (rfl.Layer) which will hold the new layer.
    if isinstance(name, str) or not name:
        parent_ctx = rfl.Layer.current_ctx(ignore_top_stack_frames=name_ctx_ignore_top_stack_frames + 1)
        if not name:
            name = layer_dict['class']  # fall back to the RETURNN layer class name
        layer = rfl.Layer(suggested_name=name, parent=parent_ctx)
        created_name_ctx = True
    elif isinstance(name, rfl.Layer):
        layer = name
        created_name_ctx = False
    else:
        raise TypeError(f'name must be str or Layer, not {type(name)}; or you should pass a module')
    assert not layer.tensor and not layer.layer_dict  # ctx must not be assigned yet

    layer_dict = layer_dict.copy()
    # Validate all (flattened) layer dict values early, before any state is mutated.
    _allowed_types = (Tensor, Dim, bool, int, float, str, numpy.ndarray, type(None), FunctionType, rfl.Net)
    for value in nest.flatten(layer_dict):
        if not isinstance(value, _allowed_types):
            raise TypeError(f'{layer}: unexpected type {type(value)} in layer_dict: {layer_dict}')

    try:
        # Determine the output template.
        if out is not None:
            assert isinstance(out, Tensor)
        elif predefined_out_data is not None:
            assert isinstance(predefined_out_data, Tensor)
            out = predefined_out_data.copy_template()
        else:
            out = _tensor_from_layer_dict(layer_dict, layer=layer)
        assert layer.tensor is None
        assert layer.layer_dict is None
        assert not layer.usages
        assert layer_dict is not None
        out.control_flow_ctx = rfl.Layer.inner_control_flow()
        if out.have_batch_axis() and not out.batch:
            # Infer batch info from the dependencies; fall back to the global batch.
            batches = []
            for dep in layer.get_tensor_dependencies(_extra_layer_dict=layer_dict):
                if dep.tensor is not None and dep.tensor.batch and dep.tensor.batch not in batches:
                    batches.append(dep.tensor.batch)
            if batches:
                out.batch = BatchInfo.get_common_batch_info(batches)
            elif layer.root.global_batch:
                out.batch = layer.root.global_batch
        # Register this new layer as a usage of every tensor referenced in the layer dict.
        for value in nest.flatten(layer_dict):
            if isinstance(value, Tensor) and value.raw_tensor is not None:
                value: Tensor[rfl.Layer]
                assert isinstance(value.raw_tensor, rfl.Layer)
                value.raw_tensor.usages.append(layer)
        layer.layer_dict = layer_dict
        layer.tensor = out
        out.raw_tensor = layer
    except Exception:
        # Keep the name ctx tree clean if construction fails.
        if created_name_ctx:
            assert layer.parent
            layer.parent.children.pop(layer.name)
        raise
    for tag in out.dim_tags:
        _dims._register_dim_deps_when_novel(tag, [out])
    # NOTE: removed a stray debug `print(out)` here; it spammed stdout for every created layer.
    return out
def _get_sub_layer(layer: Tensor[rfl.Layer], name: str, *, data: Tensor) -> Tensor:
    """
    Access a sub-layer, like the "{layer}/{name}" syntax in RETURNN.
    Normally this should only be needed for internal usage.
    """
    parent_layer = layer.raw_tensor
    out = parent_layer.get_child_tensor(name, data=data)
    if not rfl.is_debug_eager_mode_enabled():
        return out
    # Debug eager mode: resolve the actual sub-layer now and mirror its output.
    assert parent_layer.debug_layer
    import returnn.tf.layers.base

    assert isinstance(parent_layer.debug_layer, returnn.tf.layers.base.LayerBase)
    sub_layer = parent_layer.debug_layer.get_sub_layer(name)
    assert sub_layer and sub_layer.output.dim_tags == out.data.dim_tags
    out.raw_tensor.debug_layer = sub_layer
    out.data = sub_layer.output
    return out
def _tensor_from_layer_dict(layer_dict: rfl.LayerDictRaw, *, layer: rfl.Layer) -> Tensor[rfl.Layer]:
    """
    Use RETURNN layer_class.get_out_data_from_opts to get the :class:`Data`.
    For this function, we need to set up some dummy network and dummy source layers.
    """
    from returnn.tf.network import TFNetwork, ExternData
    from returnn.tf.layers.base import InternalLayer, LayerBase
    from returnn.config import get_global_config

    config = get_global_config()
    loop = rfl.Layer.inner_loop()
    # Dummy network, only used for template construction (get_out_data_from_opts).
    net = TFNetwork(
        config=config, extern_data=ExternData(), name='dummy_net', train_flag=True,
        inside_rec_time_dim=(loop.loop_spatial_dim if loop else None),
        control_flow_ctx=rfl.Layer.inner_control_flow())
    net.extern_data.set_batch_info(_init_global_batch(), init_batch_info=False)
    ref_to_layer_name = {}  # rfl.Layer -> unique layer name within the dummy net

    def _get_unique_name(name) -> str:
        # Make a TF-scope-safe name that is not yet taken in the dummy net.
        name = name.replace('/', '_')
        name = LayerBase.cls_get_tf_scope_name(name)
        reserved_names = (set(net.layers.keys()) | {'data'})
        if (name not in reserved_names):
            return name
        i = 0
        while True:
            name_ = f'{name}_{i}'
            if (name_ not in reserved_names):
                return name_
            i += 1

    def _get_layer_name(ref: Tensor) -> str:
        # Register a dummy InternalLayer for the referenced tensor and return its name.
        if (ref.raw_tensor in ref_to_layer_name):
            return ref_to_layer_name[ref.raw_tensor]
        name = _get_unique_name(ref.raw_tensor.name)
        ref_to_layer_name[ref.raw_tensor] = name
        assert (name not in net.layers)
        data = ref.copy_template()
        net.layers[name] = InternalLayer(name=name, network=net, output=data)
        return name

    def _map_layer_dict_elem(value):
        # Replace Tensor references by the names of their dummy layers.
        if isinstance(value, Tensor):
            return _get_layer_name(value)
        return value

    layer_dict = nest.map_structure(_map_layer_dict_elem, layer_dict)
    out_name = _get_unique_name(layer.name)
    # The "prev:" entry covers rec-state references during template construction.
    net_dict = {out_name: layer_dict, f'prev:{out_name}': {'class': 'constant', 'shape': ()}}

    if rfl.is_debug_eager_mode_enabled():
        # In debug eager mode, let construct_layer really create the layers.
        _add_layer = None
    else:
        def _add_layer(name: str, layer_class: Type[LayerBase], **layer_desc) -> LayerBase:
            # Template-only layer creation: derive the out Data without building TF ops.
            layer_desc = net._create_layer_layer_desc(name=name, layer_desc=layer_desc, template=True)
            try:
                out_data = layer_class.get_out_data_from_opts(**layer_desc)
                out_data = layer_class.fixup_out_data(out_data, **layer_desc)
            except Exception as exc:
                # Wrap the error with the full call for better reporting.
                msgs = ['The RETURNN call\n', f'  {layer_class.__name__}.get_out_data_from_opts(\n']
                for (key, v) in layer_desc.items():
                    msgs.append(f'    {key}={v!r},\n')
                msgs += ['  )\n', 'raised the exception:\n',
                         f'  {type(exc).__name__} {exc!s}\n',
                         '(See above for the RETURNN exception traceback.)']
                raise ReturnnConstructTemplateException(''.join(msgs)).with_traceback(exc.__traceback__) from exc
            layer_ = InternalLayer(name=name, network=net, output=out_data)
            net.layers[name] = layer_
            return layer_

    net_layer = net.construct_layer(net_dict=net_dict, name=out_name, add_layer=_add_layer)
    if rfl.is_debug_eager_mode_enabled():
        layer.debug_layer = net_layer
    return net_layer.output.copy_template()
class ReturnnConstructTemplateException(Exception):
    """
    Raised by :func:`_tensor_from_layer_dict` when the RETURNN call
    ``layer_class.get_out_data_from_opts`` fails (often due to user mistakes),
    wrapping the underlying error for better reporting.
    """
def _init_global_batch() -> BatchInfo:
    """
    Get the global :class:`BatchInfo` from the root name ctx, creating it lazily
    on first use.
    """
    root = rfl.Layer.top().root
    if not root.global_batch:
        if rfl.is_debug_eager_mode_enabled():
            # Eager debug mode: use some concrete dummy batch dim.
            root.global_batch = BatchInfo.make_global_batch_info(tf.constant(3, name='global_batch'))
        else:
            # Symbolic/unknown batch dim.
            root.global_batch = BatchInfo.make_global_batch_info(-1)
    return root.global_batch
def _get_raw_layer_by_name(name: str, *, scope: rfl.Layer, data: Tensor):
    """
    Register/create the child with the given tensor under ``scope``, as a side effect.
    Special layer can be "data:..." or whatever.
    """
    # NOTE(review): the created child is not returned; the caller shown in this file
    # (register_extern_data) only relies on the side effect — confirm no caller needs
    # the return value.
    scope.get_child_with_tensor(name, data=data)
def register_extern_data(data: Tensor[rfl.Layer]):
    """
    Register extern data from root ctx.
    As a side effect, it registers the given data as extern data,
    and this will be included when creating the RETURNN config,
    via :func:`NameCtx.get_returnn_config`.
    Also fills in default ``dyn_size_ext`` tensors for dynamic dims that lack one.
    """
    assert isinstance(data, Tensor)
    if (data.raw_tensor is not None):
        assert isinstance(data.raw_tensor, rfl.Layer)
    orig_dim_tags = data.dim_tags
    if (data.raw_tensor is None):
        # Not yet registered: attach global batch info and create the "data:..." child.
        data.batch = _init_global_batch()
        root_scope = rfl.Layer.top().root
        _get_raw_layer_by_name(f'data:{data.name}', scope=root_scope, data=data)
    for (i, (tag, orig_tag)) in enumerate(zip(data.dim_tags, orig_dim_tags)):
        # Dynamic (non-batch) dim without seq lengths: give it a default [batch]-shaped one.
        if ((not tag.is_batch_dim()) and tag.is_dynamic() and (not tag.dyn_size_ext)):
            orig_tag.dyn_size_ext = tag.dyn_size_ext = Tensor(
                name=f"{(tag.name or (data.name + f'[{i}]'))}_default_dyn_size_ext",
                dims=[batch_dim], dtype=data.size_dtype, batch=data.batch)
        # Batch dim without a size: give it a default scalar size tensor.
        if (tag.is_batch_dim() and (not tag.dyn_size_ext) and (tag.dimension is None)):
            batch_dim.dyn_size_ext = orig_tag.dyn_size_ext = tag.dyn_size_ext = Tensor(
                name=f'batch_dim_default_dyn_size_ext', dims=[], dtype=data.size_dtype,
                batch=data.batch)
        _dims._register_dim_deps_when_novel(tag, [data])
    if (rfl.is_debug_eager_mode_enabled() and (not data.raw_tensor.debug_layer)):
        # Debug eager mode: back the extern data by a random TF tensor.
        from returnn.tf.layers.basic import InternalLayer
        data_tf = data.copy_template()
        data_tf.raw_tensor = _make_random_tf_tensor_for_returnn_data(data)
        data.raw_tensor.debug_layer = rfl.make_layer(
            {'class': InternalLayer, 'output': data_tf, 'debug_type_name': f'data:{data.name}'})
def _make_random_tf_tensor_for_returnn_data(data: Tensor) -> tf.Tensor:
    """
    Create a random (dummy) tf.Tensor matching the given data template;
    used to back extern data in debug eager mode. Recurses into dynamic
    dim sizes to create their placeholders as needed.
    """
    shape = []
    for dim in data.dim_tags:
        if dim.is_batch_dim():
            assert data.batch
            shape.append(data.batch.dim)
        elif (dim.dimension is not None):
            # Static dim.
            shape.append(dim.dimension)
        else:
            # Dynamic dim: make sure there are (random) seq lengths, then use their max.
            dim.complete_dyn_size()
            if (dim.dyn_size_ext is None):
                assert data.batch
                dim.dyn_size_ext = Tensor(
                    name=f'{data.name}_dummy_dyn_size_ext', dim_tags=[batch_dim],
                    dtype=data.size_dtype, batch=data.batch)
            if (dim.dyn_size_ext.placeholder is None):
                dim.dyn_size_ext.placeholder = _make_random_tf_tensor_for_returnn_data(dim.dyn_size_ext)
            shape.append(tf.reduce_max(dim.dyn_size_ext.placeholder))
    dtype = tf.as_dtype(data.dtype)
    if dtype.is_integer:
        if data.sparse:
            # Sparse: uniform class indices in [0, dim).
            return tf.random.uniform(shape=shape, dtype=dtype, minval=0, maxval=data.dim)
        else:
            import binascii
            # Derive a small per-name constant c in [3, 23] from a CRC of the name,
            # then produce values in [1, c] (e.g. usable as random seq lengths).
            c = ((abs(binascii.crc32(data.name.encode('utf8'))) % 21) + 3)
            shape = tf.convert_to_tensor(shape)
            c_tf = tf.constant(c, name='dummy_random_const', dtype=dtype)
            rnd = tf.broadcast_to(c_tf, shape)
            rnd_diff = tf.random.uniform(shape=shape, minval=0, maxval=((2 ** 31) - 1), dtype=dtype)
            rnd_diff = (rnd_diff % tf.reshape(
                tf.minimum((tf.range(0, tf.size(rnd), dtype=dtype) + 1), (c_tf - 2)), shape))
            rnd = tf.clip_by_value((rnd - rnd_diff), 1, c_tf)
            return rnd
    assert dtype.is_floating
    return tf.random.normal(shape=shape, dtype=dtype)
class MaskedComputation:
    """
    Context manager for a masked computation inside a :class:`Loop`:
    the wrapped computation is only performed in frames where ``mask`` is true;
    in the other frames, the previous state/output is kept.

    Usage example::

        loop = nn.Loop(...)
        loop.state.y = ...  # some initial output
        loop.state.h = ...  # some initial state
        with loop:
            mask = ...  # dtype bool, shape [batch] or whatever, for the current frame
            with nn.MaskedComputation(mask=mask):
                loop.state.y, loop.state.h = slow_rnn(x, loop.state.h)
            y = loop.state.y  # access from outside

    This is semantically equivalent to computing ``slow_rnn`` in every frame and
    then selecting per frame via ``nn.where(cond=mask, ...)`` between the new and
    the previous values of ``loop.state.y`` / ``loop.state.h`` — or, in
    non-batched pseudocode: ``if mask: y, h = slow_rnn(x, h)``.
    """

    def __init__(self, mask: Tensor, *, name: str = 'masked_computation'):
        """
        :param mask: bool, shape [batch]
        """
        self.mask = mask
        self.name = name
        self.layer_module = MaskedComputationModule(masked_computation=self)
        self.name_ctx = rfl.Layer(
            module=self.layer_module, suggested_name=name, parent=rfl.Layer.current_ctx())
        self.name_ctx.custom_layer_name_scope = ''
        self.name_ctx.is_subnet = True

    def __enter__(self) -> MaskedComputation:
        self.name_ctx.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if (not exc_type) and ('output' not in self.name_ctx.children):
                # No explicit "output" sub-layer was created inside: derive one from
                # the most recent tensor, or fall back to a dummy constant.
                recent = self.name_ctx.get_recent_tensor(only_same_control_flow=True)
                if recent is None:
                    _utils.constant(value=0, name=self.name_ctx.get_child('output'))
                else:
                    _utils.copy(recent, name=self.name_ctx.get_child('output'))
        finally:
            self.name_ctx.__exit__(exc_type, exc_val, exc_tb)
        if not exc_type:
            # Only emit the masked_computation layer if the body succeeded.
            self.layer_module()
class MaskedComputationModule(rf.Module):
    """
    Internal helper for :class:`MaskedComputation`.
    """

    def __init__(self, masked_computation: MaskedComputation):
        super().__init__()
        self.masked_computation = masked_computation

    def __call__(self) -> Tensor:
        """
        Build the layer dict for the masked computation, i.e. a masked_computation layer.
        """
        ctx = self.masked_computation.name_ctx
        out_tensor = ctx.children['output'].tensor
        loop = rfl.Layer.inner_loop()
        assert loop, f'{self}: need to be inside loop'
        sub_net = {'class': 'subnetwork', 'from': [], 'subnetwork': ctx.make_net()}
        layer_dict = {
            'class': 'masked_computation',
            'mask': self.masked_computation.mask,
            'in_spatial_dim': loop.loop_spatial_dim,
            'unit': sub_net,
        }
        return rfl.make_layer(layer_dict, name=ctx, predefined_out_data=out_tensor)
def parameter_assign(param: rf.Parameter, value: Tensor, *, op: str = 'assign') -> None:
    """
    Parameter assign.

    Emits a RETURNN ``variable_assign`` layer and rewires all reads of ``param``
    so that subsequent reads see the updated value (via control dependencies).
    Handles the special case of being inside a :class:`Cond` branch.

    :param param: the parameter to update
    :param value: the value to assign/accumulate
    :param op: assign op, e.g. "assign" or "add" (forwarded to the layer)
    :return: None (side effects only)
    """
    if (param.raw_tensor.layer_dict['class'] == 'variable'):
        # First access: insert an initial read so that later reads are ordered correctly.
        assign_helper_initial_read = _AssignHelper(
            param=param, read_layer_name=f'{param.name}_initial_read', read_control_deps=None)
        assign_helper_initial_read.reassign_param_variable_read()
        param.raw_tensor.usages.extend(assign_helper_initial_read.old_param_raw.usages)
    # Find the innermost ctx introducing a new control flow ctx (e.g. a Cond branch), if any.
    layer_ctx = rfl.Layer.top()
    root_ctx = layer_ctx.root
    while (layer_ctx and (not layer_ctx.new_control_flow_ctx)):
        layer_ctx = layer_ctx.parent
    with (layer_ctx or root_ctx):
        op_ = rfl.make_layer(
            {'class': 'variable_assign', 'var': param, 'value': value, 'op': op},
            name=f'param_{op}')
    if layer_ctx:
        # Inside a conditional branch: register the op and hooks so the other branch
        # sees (and restores) a consistent view of the parameter.
        assert isinstance(layer_ctx.module, rfl.CondModule)
        layer_ctx.module.cond.add_op_to_current_branch(op_)

        def _other_branch_prehook():
            # `assign_helper` is bound below, before these hooks can run.
            assign_helper.reset_to_old_param()

        def _other_branch_posthook():
            assign_helper.map_param_usage_to_old_param_copy()
            if (param.raw_tensor is assign_helper.old_param_raw):
                assign_helper.reassign_param_variable_read()
            else:
                assert (param.raw_tensor.layer_dict['class'] == 'variable_read')
                param.raw_tensor.layer_dict['control_dependencies'].append(op_)

        layer_ctx.module.cond.add_other_branch_prehook(_other_branch_prehook)
        layer_ctx.module.cond.add_other_branch_posthook(_other_branch_posthook)
    # Make sure the assign op is not optimized away.
    _utils.mark_as_output_in_scope(op_, scope=root_ctx)
    # Redirect all further reads of the param through a read depending on the assign op.
    assign_helper = _AssignHelper(
        param=param, read_layer_name=f'{param.name}_after_{op}', read_control_deps=[op_])
    assign_helper.reassign_param_variable_read()
    assign_helper.map_param_usage_to_old_param_copy()
class _AssignHelper:
    """
    Internal helper for :func:`parameter_assign`: redirects a parameter to a new
    ``variable_read`` layer (with control dependencies), while keeping a copy of
    the old parameter tensor so existing usages can be remapped or restored.
    """

    def __init__(self, *, param: rf.Parameter[Layer], read_layer_name: str,
                 read_control_deps: Optional[Sequence[Tensor]]):
        self.param = param
        self.read_layer_name = read_layer_name
        self.read_control_deps = read_control_deps
        # Snapshot of the param tensor and its raw layer before reassignment.
        self.old_param_copy: Tensor[rfl.Layer] = param.copy()
        self.old_param_raw: rfl.Layer = param.raw_tensor

    def reassign_param_variable_read(self):
        """Point the param at a fresh variable_read layer (at root scope)."""
        self.old_param_raw.tensor = self.old_param_copy
        self.param.raw_tensor = None
        with rfl.Layer.top().root:
            rfl.make_layer(
                {'class': 'variable_read', 'var': self.old_param_copy,
                 'control_dependencies': self.read_control_deps},
                name=self.read_layer_name, out=self.param)

    def map_param_usage_to_old_param_copy(self):
        """Rewrite existing usages of the param to reference the old copy."""

        def _replace(v):
            if isinstance(v, Tensor) and v is self.param:
                return self.old_param_copy
            return v

        for usage in self.old_param_raw.usages:
            usage.layer_dict = nest.map_structure(_replace, usage.layer_dict)

    def reset_to_old_param(self):
        """Restore the param to its pre-reassignment raw layer."""
        self.param.raw_tensor = self.old_param_raw
        self.old_param_raw.tensor = self.param
class PrevTensorRef(Tensor):
    """
    Refers to a layer from the previous loop iteration
    (the "prev:..." mechanism of RETURNN rec layers).
    """

    @classmethod
    def get_prev_ref(cls, *, cur_layer_name_ctx: rfl.Layer, initial: Tensor) -> PrevTensorRef:
        """
        Create prev ref (or return the already existing one for this ctx).
        """
        parent_name_ctx = cur_layer_name_ctx.parent
        prev_tensor_name_ctx = parent_name_ctx.get_child(f'prev:{cur_layer_name_ctx.name}')
        if prev_tensor_name_ctx.tensor:
            # Already created before: reuse, but verify consistency.
            prev_tensor_ref = prev_tensor_name_ctx.tensor
            assert isinstance(prev_tensor_ref, PrevTensorRef)
            assert (prev_tensor_ref.cur_layer_name_ctx is cur_layer_name_ctx)
        else:
            prev_tensor_ref = PrevTensorRef(
                name_ctx=prev_tensor_name_ctx, cur_layer_name_ctx=cur_layer_name_ctx, data=initial)
            assert (prev_tensor_name_ctx.tensor is prev_tensor_ref)
        return prev_tensor_ref

    def __init__(self, *, name_ctx: rfl.Layer, cur_layer_name_ctx: rfl.Layer, data: Tensor):
        """
        :param name_ctx: the "prev:..." name ctx this ref lives in
        :param cur_layer_name_ctx: the current-iteration layer this refers back to
        :param data: tensor template (e.g. the initial value)
        """
        super().__init__(**data.get_kwargs())
        name_ctx.tensor = self
        self.raw_tensor = name_ctx
        self.cur_layer_name_ctx = cur_layer_name_ctx
        # The prev ref depends on the current-iteration layer.
        self.raw_tensor.layer_extra_dependencies.append(self.cur_layer_name_ctx)

    def assign_new_cur_tensor_name_ctx(self, cur_tensor_name_ctx: rfl.Layer):
        """
        Changes self.name_ctx to new name_ctx.
        """
        # Drop the dependency on the old current-iteration layer first.
        self.raw_tensor.layer_extra_dependencies.remove(self.cur_layer_name_ctx)
        prev_layer_name = f'prev:{cur_tensor_name_ctx.name}'
        assert (prev_layer_name not in cur_tensor_name_ctx.parent.children)
        prev_layer_name_ctx = cur_tensor_name_ctx.parent.get_child(prev_layer_name)
        prev_layer_name_ctx.move_tensor_here(self)
        assert (self.raw_tensor is prev_layer_name_ctx)
        self.cur_layer_name_ctx = cur_tensor_name_ctx
        # Re-register the dependency against the new current-iteration layer.
        self.raw_tensor.layer_extra_dependencies.append(self.cur_layer_name_ctx)
class TFBackend(Backend[tf.Tensor]):
    """
    TensorFlow low-level backend, operating on tf.Tensor.
    Most raw ops are wrapped in ``tf_util.same_control_flow_ctx`` —
    presumably to create new ops in the same loop/cond context as their inputs (TODO confirm).
    """

    RawTensorType = tf.Tensor
    is_tensorflow = True

    @staticmethod
    def executing_eagerly() -> bool:
        """
        :return: whether we are in eager execution mode
        """
        return tf.executing_eagerly()

    @staticmethod
    def get_dtype_name_raw(raw_tensor: tf.Tensor) -> str:
        """
        :return: dtype of raw tensor, as string (base dtype, i.e. without any ref-ness)
        """
        return raw_tensor.dtype.base_dtype.name

    @staticmethod
    def as_dtype_raw(dtype_name: str) -> tf.DType:
        """
        :param dtype_name: e.g. "float32"
        :return: dtype object
        """
        dtype = getattr(tf, dtype_name)
        assert isinstance(dtype, tf.DType)
        return dtype

    @staticmethod
    def get_ndim_raw(raw_tensor: tf.Tensor) -> int:
        """
        :return: ndim of raw tensor. assumes it is known (statically)
        """
        assert (raw_tensor.shape.ndims is not None)
        return raw_tensor.shape.ndims

    @staticmethod
    def get_shape_raw(raw_tensor: tf.Tensor) -> tf.Tensor:
        """
        :return: dynamic shape of raw tensor, as a tensor
        """
        with tf_util.same_control_flow_ctx(raw_tensor):
            return tf.shape(raw_tensor)

    @staticmethod
    def get_shape_tuple_raw(raw_tensor: tf.Tensor) -> Tuple[Union[int, tf.Tensor], ...]:
        """
        :return: shape of raw tensor; static ints where known, scalar tensors otherwise.
            assumes that ndim is known
        """
        shape = raw_tensor.shape.as_list()
        if all([(dim is not None) for dim in shape]):
            # Fully static: no graph ops needed.
            return tuple(shape)
        with tf_util.same_control_flow_ctx(raw_tensor):
            shape_dynamic = tf.shape(raw_tensor)
            # Fill only the unknown dims with dynamic values.
            for (axis, dim) in enumerate(shape):
                if (dim is None):
                    shape[axis] = shape_dynamic[axis]
            return tuple(shape)

    @staticmethod
    def get_known_shape_raw(raw_tensor: tf.Tensor) -> Tuple[Optional[int], ...]:
        """
        :return: shape of raw tensor, int for static known, None otherwise. assumes that ndim is known.
        """
        return tuple(raw_tensor.shape.as_list())

    @staticmethod
    def set_known_shape_raw(raw_tensor: tf.Tensor, shape: Tuple[Optional[int], ...]) -> None:
        """
        wrap tf.Tensor.set_shape
        """
        raw_tensor.set_shape(shape)

    @staticmethod
    def fill_raw(shape: Union[(Sequence[Union[(int, tf.Tensor)]], tf.Tensor)], value: Union[(Any, tf.Tensor)]) -> tf.Tensor:
        """
        :param shape: shape
        :param value: value to fill
        :return: raw tensor filled with value everywhere
        """
        with tf_util.same_control_flow_ctx([shape, value]):
            return tf.fill(shape, value)

    @staticmethod
    def compare_raw(a: tf.Tensor, kind: str, b: tf.Tensor) -> tf.Tensor:
        """
        :param a:
        :param kind: "equal", "less", "less_equal", "greater", "greater_equal", "not_equal"
        :param b:
        :return: a `kind` b
        """
        # Same rank, or one side scalar (broadcast).
        assert ((a.shape.ndims == b.shape.ndims) or (a.shape.ndims == 0) or (b.shape.ndims == 0))
        op = getattr(tf, kind)
        with tf_util.same_control_flow_ctx([a, b]):
            return op(a, b)

    @staticmethod
    def combine_raw(a: tf.Tensor, kind: str, b: tf.Tensor) -> tf.Tensor:
        """
        :param a:
        :param kind: "add", "sub", "mul", "truediv", "floordiv", "mod", "pow",
            "maximum", "minimum", "logical_and", "logical_or", "squared_difference"
        :param b:
        :return: a `kind` b
        """
        assert ((a.shape.ndims == b.shape.ndims) or (a.shape.ndims == 0) or (b.shape.ndims == 0))
        if ((kind == 'floordiv') and is_onnx_export_global()):
            # tf floordiv is not directly exportable to ONNX; use a compatible formulation.
            op = tf_util.onnx_compat_floor_div
        else:
            # Map our op names to the TF public names ("sub" -> "subtract", "mul" -> "multiply").
            kind = {'sub': 'subtract', 'mul': 'multiply'}.get(kind, kind)
            op = getattr(tf, kind, None)
            if (op is None):
                # Some ops only exist under tf.math (e.g. truediv variants).
                op = getattr(tf.math, kind)
        with tf_util.same_control_flow_ctx([a, b]):
            return op(a, b)

    @staticmethod
    def where(cond: Tensor, true_: Union[(Tensor, rf.RawTensorTypes)], false_: Union[(Tensor, rf.RawTensorTypes)], *, allow_broadcast_all_sources: bool=False) -> Tensor:
        """
        Elementwise select: ``true_`` where ``cond`` holds, else ``false_``,
        broadcasting all three to a common dim set.
        """
        # Infer a target dtype from whichever branch is already a Tensor.
        if isinstance(true_, Tensor):
            dtype = true_.dtype
        elif isinstance(false_, Tensor):
            dtype = false_.dtype
        else:
            dtype = None
        true_ = rf.convert_to_tensor(true_, _backend=TFBackend, device=cond.device, dtype=dtype)
        false_ = rf.convert_to_tensor(false_, _backend=TFBackend, device=cond.device, dtype=dtype)
        out = Tensor.get_common_data([true_, false_, cond], allow_broadcast_all_sources=allow_broadcast_all_sources, name='where')
        out.dtype = true_.dtype
        out.sparse_dim = (true_.sparse_dim or false_.sparse_dim)
        out.feature_dim = (true_.feature_dim or false_.feature_dim)
        cond_bc_raw = cond.copy_compatible_to_dims_raw(out.dims)
        true_bc_raw = true_.copy_compatible_to_dims_raw(out.dims)
        false_bc_raw = false_.copy_compatible_to_dims_raw(out.dims)
        out.raw_tensor = tf_util.where_bc(cond_bc_raw, true_bc_raw, false_bc_raw)
        return out

    @staticmethod
    def reshape_raw(raw_tensor: tf.Tensor, shape: Union[(Sequence[Union[(int, tf.Tensor)]], tf.Tensor)]) -> tf.Tensor:
        """
        :param raw_tensor: raw tensor
        :param shape: new shape
        :return: reshaped raw tensor
        """
        with tf_util.same_control_flow_ctx([raw_tensor, shape]):
            return tf.reshape(raw_tensor, shape)

    @classmethod
    def squeeze_raw(cls, raw_tensor: tf.Tensor, axes: Sequence[int]) -> tf.Tensor:
        """
        :param raw_tensor: raw tensor
        :param axes: axes to squeeze; each must be statically known to be 1
        :return: squeezed raw tensor
        """
        known_shape = raw_tensor.shape.as_list()
        assert all([(known_shape[axis] == 1) for axis in axes])
        with tf_util.same_control_flow_ctx(raw_tensor):
            return tf.squeeze(raw_tensor, axis=axes)

    @staticmethod
    def transpose_raw(raw_tensor: tf.Tensor, perm: Sequence[int]) -> tf.Tensor:
        """
        :param raw_tensor:
        :param perm: e.g. [0, 2, 1]
        :return: permuted (transposed) raw tensor; wraps tf.transpose
        """
        with tf_util.same_control_flow_ctx(raw_tensor):
            return tf.transpose(raw_tensor, perm)

    @staticmethod
    def expand_dims_raw(raw_tensor: tf.Tensor, axis: int) -> tf.Tensor:
        """
        :param raw_tensor:
        :param axis: e.g. 1
        :return: raw tensor with new axis; wraps tf.expand_dims
        """
        with tf_util.same_control_flow_ctx(raw_tensor):
            return tf.expand_dims(raw_tensor, axis=axis)

    @staticmethod
    def expand_raw(raw_tensor: tf.Tensor, axis: int, dim: Union[(int, tf.Tensor)]) -> tf.Tensor:
        """
        :param raw_tensor:
        :param axis: shape[axis] must be 1
        :param dim: the new dim for shape[axis]
        :return: shape[axis] expands to dim (via tf.tile)
        """
        assert (raw_tensor.shape.as_list()[axis] == 1)
        with tf_util.same_control_flow_ctx(raw_tensor):
            # Tile multiples: 1 everywhere except `dim` at `axis`.
            return tf.tile(raw_tensor, ((([1] * axis) + [dim]) + ([1] * ((raw_tensor.shape.ndims - axis) - 1))))

    @staticmethod
    def copy(tensor: Tensor) -> Tensor:
        """
        :return: copy of the tensor (new raw tf.identity op, same template)
        """
        out = tensor.copy_template()
        with tf_util.same_control_flow_ctx(tensor):
            out.raw_tensor = tf.identity(tensor.raw_tensor)
        return out

    @staticmethod
    def cast_raw(raw_tensor: tf.Tensor, dtype: str) -> tf.Tensor:
        """
        :return: raw tensor casted to dtype; wraps tf.cast
        """
        with tf_util.same_control_flow_ctx(raw_tensor):
            return tf.cast(raw_tensor, dtype)

    @staticmethod
    def activation_raw(raw_tensor: tf.Tensor, func: str) -> tf.Tensor:
        """
        :param raw_tensor:
        :param func: e.g. "tanh"; must be in :data:`Backend._AllowedActivationFuncs`
        :return: raw tensor after activation
        """
        assert (func in Backend._AllowedActivationFuncs)
        # Look up in tf.math first, then tf.nn, then tf top-level.
        if hasattr(tf.math, func):
            f = getattr(tf.math, func)
        elif hasattr(tf.nn, func):
            f = getattr(tf.nn, func)
        elif hasattr(tf, func):
            f = getattr(tf, func)
        else:
            raise ValueError(f'unknown activation function {func!r}')
        with tf_util.same_control_flow_ctx(raw_tensor):
            return f(raw_tensor)

    @staticmethod
    def have_sequence_mask_raw() -> bool:
        """
        :return: whether we have sequence_mask (yes for TF)
        """
        return True

    @staticmethod
    def sequence_mask_raw(lengths: tf.Tensor, *, batch_major: bool=True) -> tf.Tensor:
        """
        Wraps around tf.sequence_mask().
        It will cache the value inside the passed object so that we don't recompute it multiple times.

        :param lengths: shape (batch,)
        :param batch_major:
        :return: tensor mask of shape (batch,maxlen) if batch_major else (maxlen,batch) of type bool
        """
        if batch_major:
            return tf_util.sequence_mask(lengths)
        else:
            return tf_util.sequence_mask_time_major(lengths)

    @staticmethod
    @contextlib.contextmanager
    def name_scope_raw(name: str) -> Any:
        """
        :param name: name of scope
        :return: context manager wrapping tf.name_scope
        """
        with tf.name_scope(name):
            (yield)

    @staticmethod
    @contextlib.contextmanager
    def control_dependencies_raw(dependencies: Sequence[Union[(tf.Tensor, tf.Operation)]]) -> Any:
        """
        :param dependencies: list of tensors or operations
        :return: context manager wrapping tf.control_dependencies
        """
        with tf.control_dependencies(dependencies):
            (yield)

    @staticmethod
    def identity_with_control_dependencies_raw(raw_tensor: tf.Tensor, dependencies: Sequence[Any]) -> tf.Tensor:
        """
        :param raw_tensor:
        :param dependencies: list of tensors or operations
        :return: identity of tensor with control dependencies
        """
        with tf.control_dependencies(dependencies), tf_util.same_control_flow_ctx(raw_tensor):
            return tf.identity(raw_tensor)

    @staticmethod
    def create_placeholder_raw(tensor: _TT) -> tf.Tensor:
        """
        :return: tf.placeholder in TF (graph mode), under an extern-data name scope
        """
        with tf.name_scope(('extern_data/placeholders/%s/' % tensor.name)):
            return tf_compat.v1.placeholder(**tensor.get_placeholder_kwargs(with_batch=True))

    @staticmethod
    def runtime_sanity_checks(tensor: _TT) -> tf.Operation:
        """
        Build a grouped op of runtime assertions for the tensor:
        rank, batch dim, static dims, and consistency of dynamic sizes.
        """
        checks = []
        with tf.name_scope('runtime_sanity_check'), tf_util.same_control_flow_ctx(tensor):
            shape = tf.shape(tensor.placeholder)
            batch_dim = (shape[tensor.batch_dim_axis] if tensor.have_batch_axis() else 1)
            rank = tf.rank(tensor.placeholder)
            # `data` is the common payload printed by all tf.Assert ops below.
            data = ['Data.get_runtime_sanity_check_op:', str(tensor), 'shape', shape]
            for (i, tag) in enumerate(tensor.dim_tags):
                if (tag.dyn_size is not None):
                    data += [('dyn_size[%i] (%s)' % (i, tag)), tag.dyn_size, '.shape', tf.shape(tag.dyn_size)]
            checks += [tf.Assert(tf.equal(rank, tensor.batch_ndim), (data + ['-> invalid rank']))]
            if tensor.have_batch_axis():
                batch_dim_via_info = tensor.get_batch_dim()
                checks += [tf.Assert(tf.equal(batch_dim, batch_dim_via_info), (data + ['-> invalid batch dim info', batch_dim_via_info]))]
            for i in range(tensor.batch_ndim):
                if (tensor.batch_shape[i] is not None):
                    checks += [tf.Assert(tf.equal(shape[i], tensor.batch_shape[i]), (data + [('-> invalid shape[%i]' % i)]))]
                dyn_size_ext = tensor.dim_tags[i].dyn_size_ext
                if (dyn_size_ext and (dyn_size_ext.placeholder is not None)):
                    dyn_size = dyn_size_ext.placeholder
                    if (dyn_size_ext.have_batch_axis() and tensor.have_batch_axis()):
                        checks += [tf.Assert(tf.equal(tf.shape(dyn_size)[dyn_size_ext.batch_dim_axis], batch_dim), (data + [('-> invalid axis %i tag dyn size batch dim' % i)]))]
                    # max(dyn_size) must match shape[i] (allowing off-by-one slack, or shape[i]==1 broadcast).
                    checks += [tf.Assert(tf.logical_or(tf.logical_and(tf.less_equal(tf.reduce_max(dyn_size), shape[i]), tf.greater_equal(tf.reduce_max(dyn_size), (shape[i] - 1))), tf.equal(1, shape[i])), (data + [('-> invalid shape[%i] or max(dyn_size[%i])' % (i, i))]))]
                    # Recursively check the dyn-size tensor itself.
                    dyn_size_ext_sanity_checks_op = dyn_size_ext.get_runtime_sanity_check_op()
                    assert (dyn_size_ext_sanity_checks_op is not None), f'{dyn_size_ext} {dyn_size_ext.raw_tensor}?'
                    checks += [dyn_size_ext_sanity_checks_op]
        return tf.group(*checks)

    @staticmethod
    def is_valid_in_current_graph(tensor: _TT) -> bool:
        """
        :return: whether the tensor is valid in the current graph
            (always true in eager mode or when there is no raw tensor)
        """
        if (tensor.raw_tensor is None):
            return True
        if tf_compat.executing_eagerly():
            return True
        g = tf_util.get_root_graph()
        return (tf_util.get_root_graph(tensor.raw_tensor.graph) is g)

    @staticmethod
    def format_graph_output(raw_tensor: tf.Tensor, *, max_depth: Optional[int]=None) -> str:
        """
        :param raw_tensor:
        :param max_depth:
        :return: string representation of the op graph producing raw_tensor
        """
        return tf_util.format_graph_output(raw_tensor, max_depth=max_depth)

    @staticmethod
    def convert_to_tensor(value: Union[(_TT, tf.Tensor, RawTensorTypes)], *, dims: Sequence[Dim], dtype: str, sparse_dim: Optional[Dim]=None, device: Optional[str]=None, name: Optional[str]=None) -> _TT:
        """
        :param value:
        :param dims:
        :param dtype:
        :param sparse_dim:
        :param device: ignored here — NOTE(review): presumably placement is handled elsewhere in TF; confirm
        :param name:
        :return: tensor
        """
        if isinstance(value, Tensor):
            return value
        # control_dependencies(None) clears any surrounding control-dep context for the const op.
        with tf.control_dependencies(None):
            value = tf.convert_to_tensor(value, dtype=dtype)
        assert isinstance(value, tf.Tensor)
        return Tensor((name or 'const'), raw_tensor=value, dims=dims, dtype=dtype, sparse_dim=sparse_dim)

    @staticmethod
    def range_over_dim(dim: Dim, *, dtype: Optional[str]=None, device: Optional[str]=None) -> _TT:
        """
        :param dim:
        :param dtype: default: dyn-size dtype of the dim, else the default array index dtype
        :param device:
        :return: range over dim, i.e. [0, 1, ..., dim-1]
        """
        if ((not dtype) and dim.dyn_size_ext):
            dtype = dim.dyn_size_ext.dtype
        if (not dtype):
            dtype = rf.get_default_array_index_dtype()
        # Integer ranges over a dim are sparse w.r.t. that dim.
        out = Tensor(name=(dim.description or 'range_over_dim'), dims=[dim], sparse_dim=(dim if (dtype.startswith('int') or dtype.startswith('uint')) else None), dtype=dtype)
        dim_value = dim.get_dim_value()
        with tf_util.same_control_flow_ctx(dim_value):
            out.raw_tensor = tf.range(0, dim_value, dtype=out.dtype)
        return out

    @staticmethod
    def reduce(source: _TT, *, mode: str, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> _TT:
        """
        Reduce the source over the given axis/axes
        (max/min/sum/logsumexp/any/all/mean/argmax/argmin),
        optionally masking out padded positions of dynamic axes.
        """
        assert (mode in Backend._AllowedReduceModes)
        with tf_util.same_control_flow_ctx(source):
            x = source
            axes = x.get_axes_from_description(axis)
            if (use_mask in (None, NotSpecified)):
                # Mask by default only if any reduced axis is dynamic.
                use_mask = any((x.has_dynamic_size(a) for a in axes))
            out_data = x.copy_template()
            dim_tags = [dim_tag for (i, dim_tag) in enumerate(x.dim_tags) if (i not in axes)]
            out_data = out_data.copy_template_new_dim_tags(dim_tags)
            sparse_out = mode.lower().startswith('arg')
            if sparse_out:
                # argmax/argmin: output is indices into the (single) reduced axis.
                assert (len(axes) == 1)
                out_data.sparse_dim = x.dim_tags[axes[0]]
                out_data.dtype = 'int32'
            assert isinstance(use_mask, bool)
            mode = mode.lower()
            reduce_abs_funcs = {name: getattr(tf, ('reduce_%s' % name)) for name in ['max', 'min', 'sum', 'logsumexp', 'any', 'all']}
            reduce_rel_func = {'mean': tf.reduce_mean}
            arg_funcs = {name: getattr(tf, name) for name in ['argmax', 'argmin']}
            funcs = dict(((list(reduce_abs_funcs.items()) + list(reduce_rel_func.items())) + list(arg_funcs.items())))
            assert (mode in funcs), ('invalid mode %r. choose from: %r' % (mode, funcs))
            f = funcs[mode]
            x_ = x.placeholder
            # For masked mean: multiplicative correction (full size / actual seq len).
            correction_factor = None
            if (use_mask and any((x.has_dynamic_size(a) for a in axes))):
                if ((x.batch_dim_axis in axes) and (x.time_dim_axis in axes) and (len(axes) == 2)):
                    # Reducing exactly (batch, time): flatten time into batch instead of masking.
                    assert (mode not in arg_funcs), 'unexpected arg reduce for multiple axes'
                    # Shift axis indices after removing the time axis.
                    axes = [(a if (a < x.time_dim_axis) else (a - 1)) for a in axes if (a != x.time_dim_axis)]
                    x = x.copy_time_flattened()
                    x_ = x.placeholder
                else:
                    for axis in axes:
                        if (axis == x.batch_dim_axis):
                            continue
                        if (not x.has_dynamic_size(axis)):
                            continue
                        mask = x.get_sequence_mask_broadcast(axis=axis)
                        zeros = tf.zeros((), dtype=x.placeholder.dtype)
                        # Replacement value for masked-out (padding) entries:
                        # neutral element of the reduce function.
                        if (x.placeholder.dtype.is_floating or x.placeholder.dtype.is_integer):
                            if (f in (tf.reduce_mean, tf.reduce_sum)):
                                replacement_value = zeros
                            elif (f in (tf.reduce_max, tf.reduce_logsumexp, tf.argmax)):
                                replacement_value = (zeros + x.placeholder.dtype.min)
                            elif (f in (tf.reduce_min, tf.argmin)):
                                replacement_value = (zeros + x.placeholder.dtype.max)
                            else:
                                raise ValueError(('unexpected reduce function %r' % f))
                        elif x.placeholder.dtype.is_bool:
                            if (f in (tf.reduce_any,)):
                                replacement_value = zeros
                            elif (f in (tf.reduce_all,)):
                                replacement_value = tf.ones((), dtype=x.placeholder.dtype)
                            else:
                                raise ValueError(('unexpected reduce function %r' % f))
                        else:
                            raise TypeError(('reduce: unexpected input type %r from input %s' % (x.placeholder.dtype, x)))
                        x_ = tf_util.where_bc(mask, x_, replacement_value, name=('x_masked_axis_%i' % axis))
                        if (f == tf.reduce_mean):
                            # Mean over padded data needs rescaling by (padded size / actual size).
                            tag = x.dim_tags[axis]
                            assert (tag.dyn_size_ext is not None)
                            size_all = tf.shape(x.placeholder)[axis]
                            size_actual = tag.dyn_size_ext
                            while any(((d not in out_data.dim_tags) for d in size_actual.dim_tags)):
                                # Some dim of dyn_size_ext is also reduced away; sum it out.
                                (i, d) = [(i, d) for (i, d) in enumerate(size_actual.dim_tags) if (d not in out_data.dim_tags)][0]
                                assert (not d.need_masking())
                                size_all *= d.get_dim_value()
                                s = tf.reduce_sum(size_actual.placeholder, axis=i)
                                size_actual = size_actual.copy_template_excluding_axis(i)
                                size_actual.placeholder = s
                            seq_len_bc = size_actual.copy_compatible_to(out_data, check_sparse=False, check_dtype=False).placeholder
                            # Avoid division by zero for empty sequences.
                            seq_len_bc = tf.maximum(seq_len_bc, 1)
                            correction_factor_ = (tf.cast(size_all, tf.float32) / tf.cast(seq_len_bc, tf.float32))
                            correction_factor = tf_util.optional_mul(correction_factor, correction_factor_)
            if (mode in arg_funcs):
                assert (len(axes) == 1), 'For argmax/argmin, only one reduction axis is supported'
                y = f(x_, axis=axes[0], output_type=tf.int32)
            else:
                y = f(x_, axis=axes)
                y = tf_util.optional_mul(y, correction_factor)
            out_data.raw_tensor = y
            return out_data
class HorovodContext:
    """
    This setups some helper functions.
    Initializes Horovod and caches rank/size information and the config.
    """

    def __init__(self, config):
        """
        :param Config config:
        """
        import horovod

        print('Horovod:', horovod.__version__, horovod.__file__)
        import horovod.tensorflow as hvd

        hvd.init()
        print(('Horovod initialized. Hostname %s, pid %i, rank %i / size %i, local rank %i / local size %i.' % (socket.gethostname(), os.getpid(), hvd.rank(), hvd.size(), hvd.local_rank(), hvd.local_size())))
        self._config = config
        self._hvd_mod = hvd
        # Cache the rank/size values once; they do not change after init.
        self._rank = hvd.rank()
        self._size = hvd.size()
        self._local_rank = hvd.local_rank()
        self._local_size = hvd.local_size()

    def should_sync_every_step(self):
        """
        :return: whether we should sync every step.
          This is both for the signal for more data, and also loss/error/score reduction.
        :rtype: bool
        """
        # Only the random-seed-offset dataset distribution combined with
        # param-reduce can run without a per-step sync.
        can_skip = self.is_dataset_distribution_random_seed_offset() and self.is_reduce_type_param()
        return not can_skip

    def get_reduce_type(self):
        """
        :rtype: str
        """
        value = self._config.value('horovod_reduce_type', 'grad')
        assert value in {'grad', 'param'}
        return value

    def is_reduce_type_grad(self):
        """
        :rtype: bool
        """
        return self.get_reduce_type() == 'grad'

    def is_reduce_type_param(self):
        """
        :rtype: bool
        """
        return self.get_reduce_type() == 'param'

    def get_param_sync_time_diff(self):
        """
        :rtype: float|None
        """
        assert self.is_reduce_type_param()
        return self._config.float('horovod_param_sync_time_diff', None)

    def get_param_sync_step(self):
        """
        :rtype: int
        """
        assert self.is_reduce_type_param()
        return self._config.int('horovod_param_sync_step', 1)

    def get_dataset_distribution_type(self):
        """
        :rtype: str
        """
        value = self._config.value('horovod_dataset_distribution', 'shard')
        assert value in {'shard', 'random_seed_offset'}
        return value

    def is_dataset_distribution_shard(self):
        """
        :rtype: bool
        """
        return self.get_dataset_distribution_type() == 'shard'

    def get_dataset_shard_batch_slice(self):
        """
        :rtype: slice
        """
        assert self.is_dataset_distribution_shard()
        # Every worker takes every size()-th batch, offset by its rank.
        return slice(self.rank(), None, self.size())

    def is_dataset_distribution_random_seed_offset(self):
        """
        :rtype: bool
        """
        return self.get_dataset_distribution_type() == 'random_seed_offset'

    def rank(self):
        """
        :rtype: int
        """
        return self._rank

    def size(self):
        """
        :rtype: int
        """
        return self._size

    def local_rank(self):
        """
        :rtype: int
        """
        return self._local_rank

    def local_size(self):
        """
        :rtype: int
        """
        return self._local_size
def get_ctx(config=None):
    """
    :param Config|None config:
    :returns: the global context if Horovod is enabled, or None otherwise.
      If we did not setup the context yet, it will automatically create it.
    :rtype: HorovodContext|None
    """
    global _is_set_up, _ctx
    if _is_set_up:
        # Already decided (possibly _ctx is None if Horovod is disabled).
        return _ctx
    if (not config):
        from returnn.config import get_global_config
        config = get_global_config(raise_exception=False)
        if (not config):
            # No config available at all -> do not mark as set up, retry later.
            return None
    _is_set_up = True
    if (not config.is_true('use_horovod')):
        return None
    _ctx = HorovodContext(config=config)
    return _ctx
class HyperParam():
    """
    Represents one hyper parameter: its dtype/bounds/classes, whether it is in
    log-scale, an optional default, and all usages (attrib chains) found in the config.
    """

    def __init__(self, dtype=None, bounds=None, classes=None, log=False, default=None):
        """
        :param str|type|None|list dtype: e.g. "float", "int" or "bool", or if Collection, will be classes
        :param None|list[int|float] bounds: inclusive
        :param list|None classes:
        :param bool log: if in log-scale
        :param float|int|object|None default:
        """
        if isinstance(dtype, (list, tuple)):
            # Shorthand: HyperParam([...]) means a class choice.
            assert (classes is None)
            assert (bounds is None)
            classes = dtype
            dtype = None
        if (dtype is None):
            assert (classes is not None)
        elif (dtype == 'float'):
            dtype = float
        elif (dtype == 'int'):
            dtype = int
        elif (dtype == 'bool'):
            dtype = bool
        assert (dtype in (float, int, bool, None))
        if (bounds is not None):
            assert (dtype in (int, float))
            assert isinstance(bounds, (list, tuple))
            assert (len(bounds) == 2)
            assert (dtype(bounds[0]) < dtype(bounds[1]))
        if (classes is not None):
            assert isinstance(classes, (list, tuple)), 'should be with a defined order'
            assert (len(classes) > 0)
        self.dtype = dtype
        self.bounds = bounds
        self.classes = classes
        self.log_space = log
        self.default = default
        # Monotonic id, used for a stable sort order over all hyper params.
        self.unique_idx = HyperParam._get_next_unique_idx()
        # list of _AttrChain; filled externally when usages are discovered.
        self.usages = []

    _unique_idx = 0

    @classmethod
    def _get_next_unique_idx(cls):
        # Class-level counter; not thread-safe (construction is single-threaded here).
        cls._unique_idx += 1
        return cls._unique_idx

    def __repr__(self):
        if (self.classes is not None):
            return ('HyperParam(%r)' % self.classes)
        dtype_name = self.dtype.__name__
        ext = ''
        if self.log_space:
            ext += ', log=True'
        if (self.default is not None):
            ext += (', default=%r' % self.default)
        if (self.bounds is not None):
            return ('HyperParam(%s, %s%s)' % (dtype_name, self.bounds, ext))
        assert (self.bounds is None)
        return ('HyperParam(%s%s)' % (dtype_name, ext))

    def get_canonical_usage(self):
        """
        :rtype: _AttrChain
        """
        return self.get_sorted_usages()[0]

    def get_sorted_usages(self):
        """
        :rtype: list[_AttrChain]
        """
        # Prefer shorter attrib chains (capped at 2) as the canonical one.
        return sorted(self.usages, key=(lambda chain: min(2, len(chain.chain))))

    def description(self):
        """
        :rtype: str
        """
        if (len(self.usages) == 0):
            usage_str = '<no usage>'
        elif (len(self.usages) == 1):
            usage_str = str(self.usages[0])
        else:
            usage_str = (str(self.get_canonical_usage()) + '|...')
        return (usage_str + (': %s' % self))

    def get_num_instances(self, upper_limit=100):
        """
        :param int upper_limit:
        :return: number of distinct values this param can take, capped at upper_limit
        :rtype: int
        """
        assert (upper_limit >= 2)
        if (self.classes is not None):
            return min(len(self.classes), upper_limit)
        if (self.dtype is bool):
            return 2
        if (self.dtype is float):
            return upper_limit
        if (self.dtype is int):
            (x1, x2) = self.bounds
            x1 = numpy.ceil(x1)
            x2 = numpy.floor(x2)
            assert (x1 < x2)
            return min(((x2 - x1) + 1), upper_limit)
        raise Exception(('invalid dtype %r' % self.dtype))

    def merge_values(self, value1, value2):
        """
        Merge two values, which are valid values for this `HyperParam`.
        For log-space params this is the geometric mean (with sign/Eps handling),
        otherwise the (integer) arithmetic mean.

        :param T value1:
        :param T value2:
        :rtype: T
        """
        if (self.dtype is bool):
            return value1
        if self.log_space:
            (x0, x1) = (value1, value2)
            if (x0 > x1):
                (x0, x1) = (x1, x0)
            if ((x0 < 0) or (x1 < 0)):
                # Both non-positive: mirror to positive range, remember sign.
                assert (x0 <= x1 <= 0)
                sign = (- 1)
                (x0, x1) = ((- x1), (- x0))
            else:
                sign = 1
            assert (x1 >= x0 >= 0)
            x0o = x0
            # Clamp away from 0 before taking logs.
            if (x0 < (Eps * 0.5)):
                x0 = (Eps * 0.5)
            if (x1 < Eps):
                x1 = Eps
            x0 = numpy.log(float(x0))
            x1 = numpy.log(float(x1))
            y = numpy.exp((x0 + ((x1 - x0) * 0.5)))
            if (y <= Eps):
                # Fall back to the original (unclamped) lower value.
                y = x0o
            return (self.dtype(y) * sign)
        if (self.dtype is int):
            return ((value1 + value2) // 2)
        return self.dtype(((value1 + value2) * 0.5))

    def get_value(self, selected, eps=Eps):
        """
        Map a selection in [0,1] to a concrete param value.

        :param float selected: must be between 0 and 1
        :param float eps: if in log-space and you have e.g. bounds=[0,1], will be the lowest value, before 0. see code.
        :rtype: float|int|bool|object
        """
        assert (0 < eps)
        assert (0 <= selected <= 1)
        if self.classes:
            return self.classes[int((len(self.classes) * selected))]
        if (self.dtype is bool):
            return (selected > 0.5)
        if self.bounds:
            if ((self.dtype is int) and (not self.log_space)):
                return (self.bounds[0] + int(((self.bounds[1] - self.bounds[0]) * selected)))
            if self.log_space:
                (x0, x1) = self.bounds
                if ((x0 < 0) or (x1 < 0)):
                    # Both non-positive: mirror to positive range, remember sign.
                    assert (x0 < x1 <= 0)
                    sign = (- 1)
                    (x0, x1) = ((- x1), (- x0))
                else:
                    sign = 1
                assert (x1 > x0 >= 0)
                (x0b, x1b) = (x0, x1)
                # Clamp away from 0 before taking logs.
                if (x0b < (eps * 0.5)):
                    x0b = (eps * 0.5)
                if (x1b < eps):
                    x1b = eps
                x0l = numpy.log(float(x0b))
                x1l = numpy.log(float(x1b))
                y = numpy.exp((x0l + ((x1l - x0l) * selected)))
                if (y <= eps):
                    # Snap back to the true lower bound (possibly 0).
                    y = x0
                return (self.dtype(y) * sign)
            return self.dtype((self.bounds[0] + ((self.bounds[1] - self.bounds[0]) * selected)))
        # Unbounded: map [0,1] through the inverse normal CDF.
        x = selected
        if (x < eps):
            x = eps
        if (x > (1.0 - eps)):
            x = (1.0 - eps)
        import scipy.special
        return self.dtype(scipy.special.ndtri(x))

    def get_initial_value(self):
        """
        :return: value at the middle of the selection range
        :rtype: float|int|bool|object
        """
        return self.get_value(selected=0.5)

    def get_default_value(self):
        """
        :return: explicit default if given, else the initial (middle) value
        :rtype: float|int|bool|object
        """
        if (self.default is not None):
            return self.dtype(self.default)
        return self.get_initial_value()

    def get_random_value(self, seed, eps=Eps):
        """
        :param int seed:
        :param float eps: see get_value()
        :rtype: float|int|bool|object
        """
        rnd = numpy.random.RandomState(seed=seed)
        x = rnd.uniform(0.0, 1.0)
        # Snap near-boundary draws exactly to the bounds.
        if (x < eps):
            x = 0.0
        if (x > (1.0 - eps)):
            x = 1.0
        return self.get_value(x, eps=eps)

    def get_random_value_by_idx(self, iteration_idx, individual_idx):
        """
        :param int iteration_idx:
        :param int individual_idx:
        :rtype: float|int|bool|object
        """
        # Deterministic seed derived from the canonical usage and the indices.
        seed = hash_obj((self.get_canonical_usage(), iteration_idx, individual_idx))
        return self.get_random_value(seed=seed)
class TrainException(Exception):
    """
    Exception from training.
    """
class Individual():
    """
    One instance of hyper params, i.e. a concrete value assignment
    for every :class:`HyperParam`, together with its (eventual) training cost.
    """

    def __init__(self, hyper_param_mapping, name):
        """
        :param dict[HyperParam] hyper_param_mapping: maps each hyper param to a concrete value
        :param str name: human-readable identifier, e.g. "<iteration>-<idx>"
        """
        self.hyper_param_mapping = hyper_param_mapping
        # Set later, after this individual has been trained/evaluated.
        self.cost = None
        self.name = name

    def cross_over(self, hyper_params, population, random_seed):
        """
        :param list[HyperParam] hyper_params:
        :param list[Individual] population:
        :param int random_seed:
        :return: copy of self, cross-overd with others
        :rtype: Individual
        """
        # Derive a short, traceable name for the offspring.
        name = self.name
        if (len(name) > 10):
            name = (name[:8] + '..')
        name += ('x%x' % random_seed)
        res = Individual(hyper_param_mapping=self.hyper_param_mapping.copy(), name=name)
        rnd = numpy.random.RandomState(random_seed)
        while True:
            # Pick a random mate. randint's high bound is exclusive; this is
            # equivalent (same values, same RNG stream) to the deprecated
            # rnd.random_integers(0, len(population) - 1), which numpy removed.
            other = population[rnd.randint(0, len(population))]
            for p in hyper_params:
                x = rnd.uniform(0.0, 1.0)
                if (x > 0.75):
                    # 25%: take the mate's value as-is.
                    res.hyper_param_mapping[p] = other.hyper_param_mapping[p]
                elif (x > 0.5):
                    # 25%: blend both values.
                    res.hyper_param_mapping[p] = p.merge_values(res.hyper_param_mapping[p], other.hyper_param_mapping[p])
                # else (50%): keep our own value.
            # 50% chance to stop; otherwise cross over with another mate.
            if (rnd.uniform(0.0, 1.0) > 0.5):
                break
        return res
class Optimization():
    """
    Hyper parameter optimization handler class.
    Evolutionary search: random populations per iteration, cross-over of survivors,
    training each individual on a static dataset copy in a thread pool.
    """

    def __init__(self, config, train_data):
        """
        :param returnn.config.Config config:
        :param Dataset train_data:
        """
        self.config = config
        self.opts = CollectionReadCheckCovered(config.get_of_type('hyper_param_tuning', dict, {}))
        self.log = log.v1
        train_data.init_seq_order(epoch=1)
        # Freeze a fixed subset of the training data so every individual trains on the same seqs.
        self.train_data = StaticDataset.copy_from_dataset(train_data, max_seqs=self.opts.get('num_train_steps', 100))
        self.hyper_params = []
        self._find_hyper_params()
        if (not self.hyper_params):
            raise Exception('No hyper params found.')
        # Stable order by creation index.
        self.hyper_params.sort(key=(lambda p_: p_.unique_idx))
        print('We have found these hyper params:')
        for p in self.hyper_params:
            print((' %s' % p.description()))
        self.dry_run_first_individual = self.opts.get('dry_run_first_individual', True)
        self.num_iterations = self.opts['num_tune_iterations']
        self.num_individuals = self.opts['num_individuals']
        self.num_kill_individuals = self.opts.get('num_kill_individuals', (self.num_individuals // 2))
        self.num_best = self.opts.get('num_best', 10)
        self.num_threads = self.opts.get('num_threads', guess_requested_max_num_threads())
        self.opts.assert_all_read()

    def _find_hyper_params(self, base=None, visited=None):
        """
        Recursively walk the config (dicts and Config objects) and collect all
        :class:`HyperParam` instances, recording each usage (attrib chain).

        :param _AttrChain base:
        :param set[int] visited: set of ids
        """
        from inspect import ismodule
        if (base is None):
            base = _AttrChain(base=self.config)
        if isinstance(base.value, HyperParam):
            base.value.usages.append(base)
            if (base.value not in self.hyper_params):
                self.hyper_params.append(base.value)
            return
        if (visited is None):
            visited = set()
        # Guard against cycles / shared sub-structures (by object id).
        if (id(base.value) in visited):
            return
        visited.add(id(base.value))
        if ismodule(base.value):
            return
        if isinstance(base.value, dict):
            col_type = _AttribOrKey.ColTypeDict
            keys = base.value.keys()
        elif isinstance(base.value, Config):
            col_type = _AttribOrKey.ColTypeConfig
            keys = base.value.typed_dict.keys()
        else:
            # Any other value type: do not descend.
            return
        for key in sorted(keys):
            child = base.get_extended_chain(_AttribOrKey(key=key, col_type=col_type))
            self._find_hyper_params(base=child, visited=visited)

    def get_population(self, iteration_idx, num_individuals):
        """
        :param int iteration_idx:
        :param int num_individuals:
        :rtype: list[Individual]
        """
        assert (num_individuals > 0)
        return [self.get_individual(iteration_idx=iteration_idx, individual_idx=i) for i in range(num_individuals)]

    def get_individual(self, iteration_idx, individual_idx):
        """
        :param int iteration_idx:
        :param int individual_idx:
        :rtype: Individual
        """
        return Individual({p: p.get_random_value_by_idx(iteration_idx=iteration_idx, individual_idx=individual_idx) for p in self.hyper_params}, name=('%i-%i' % (iteration_idx, individual_idx)))

    def cross_over(self, population, iteration_idx):
        """
        :param list[Individual] population: modified in-place
        :param int iteration_idx:
        """
        # The last individual is left untouched.
        for i in range((len(population) - 1)):
            population[i] = population[i].cross_over(hyper_params=self.hyper_params, population=(population[:i] + population[(i + 1):]), random_seed=((iteration_idx * 1013) + (i * 17)))

    def create_config_instance(self, hyper_param_mapping, gpu_ids):
        """
        :param dict[HyperParam] hyper_param_mapping: maps each hyper param to some value
        :param set[int] gpu_ids:
        :rtype: Config
        """
        assert (set(self.hyper_params) == set(hyper_param_mapping.keys()))
        from returnn.util.basic import deepcopy
        config = deepcopy(self.config)
        assert isinstance(config, Config)
        # Write each concrete value into all places where the hyper param is used.
        for (p, value) in hyper_param_mapping.items():
            assert isinstance(p, HyperParam)
            for attr_chain in p.usages:
                attr_chain.write_attrib(base=config, new_value=value)
        # Pin this training run to the given GPUs via tf session opts.
        tf_session_opts = config.typed_dict.setdefault('tf_session_opts', {})
        gpu_opts = tf_session_opts.setdefault('gpu_options', tf_compat.v1.GPUOptions())
        if isinstance(gpu_opts, dict):
            gpu_opts = tf_compat.v1.GPUOptions(**gpu_opts)
        gpu_opts.visible_device_list = ','.join(map(str, sorted(gpu_ids)))
        return config

    def work(self):
        """
        Start the optimization.
        """
        print(('Starting hyper param search. Using %i threads.' % self.num_threads), file=log.v1)
        from returnn.tf.util.basic import get_available_gpu_devices
        from returnn.log import wrap_log_streams, StreamDummy
        from threading import Thread, Condition
        from returnn.util.basic import progress_bar, hms, is_tty

        class Outstanding():
            """
            Queue of outstanding work.
            All mutable class attrs are guarded by `cond`.
            """
            cond = Condition()
            threads = []
            population = []
            exit = False
            exception = None

        class WorkerThread(Thread):
            """
            Worker threader.
            Pops individuals from Outstanding.population and trains them.
            """

            def __init__(self, gpu_ids):
                """
                :param set[int] gpu_ids:
                """
                super(WorkerThread, self).__init__(name='Hyper param tune train thread')
                self.gpu_ids = gpu_ids
                self.trainer = None
                self.finished = False
                self.start()

            def cancel(self, join=False):
                """
                :param bool join:
                """
                with Outstanding.cond:
                    if self.trainer:
                        self.trainer.cancel_flag = True
                        if self.trainer.runner:
                            self.trainer.runner.cancel_flag = True
                if join:
                    self.join()

            def get_complete_frac(self):
                """
                :rtype: float
                """
                with Outstanding.cond:
                    if (self.trainer and self.trainer.runner):
                        return self.trainer.runner.data_provider.get_complete_frac()
                return 0.0

            # noinspection PyMethodParameters
            def run(self_thread):
                """
                Run thread. (`self_thread` to not shadow the outer `self`.)
                """
                try:
                    while True:
                        with Outstanding.cond:
                            if (Outstanding.exit or Outstanding.exception):
                                return
                            if (not Outstanding.population):
                                self_thread.finished = True
                                Outstanding.cond.notify_all()
                                return
                            individual = Outstanding.population.pop(0)
                        self_thread.trainer = _IndividualTrainer(optim=self, individual=individual, gpu_ids=self_thread.gpu_ids)
                        self_thread.name = ('Hyper param tune train thread on %r' % individual.name)
                        self_thread.trainer.run()
                except Exception as exc:
                    with Outstanding.cond:
                        if (not Outstanding.exception):
                            # First exception wins; `or True` guards against falsy exceptions.
                            Outstanding.exception = (exc or True)
                        Outstanding.cond.notify_all()
                    # Cancel all sibling workers.
                    for thread in Outstanding.threads:
                        if (thread is not self_thread):
                            thread.cancel()
                    if (not isinstance(exc, CancelTrainingException)):
                        # Hold the lock so that concurrent prints do not interleave.
                        with Outstanding.cond:
                            sys.excepthook(*sys.exc_info())

        best_individuals = []
        population = []
        num_gpus = len(get_available_gpu_devices())
        print('Num available GPUs:', num_gpus)
        # Fall back to CPU-only: still use gpu_ids={0} placeholders.
        num_gpus = (num_gpus or 1)
        interactive = is_tty()
        try:
            print(('Population of %i individuals (hyper param setting instances), running for %i evaluation iterations.' % (self.num_individuals, self.num_iterations)), file=log.v2)
            for cur_iteration_idx in range(1, (self.num_iterations + 1)):
                print(('Starting iteration %i.' % cur_iteration_idx), file=log.v2)
                if (cur_iteration_idx == 1):
                    # Seed the first population with the default and the canonical (middle) setting.
                    population.append(Individual({p: p.get_default_value() for p in self.hyper_params}, name='default'))
                    population.append(Individual({p: p.get_initial_value() for p in self.hyper_params}, name='canonical'))
                population.extend(self.get_population(iteration_idx=cur_iteration_idx, num_individuals=(self.num_individuals - len(population))))
                if (cur_iteration_idx > 1):
                    self.cross_over(population=population, iteration_idx=cur_iteration_idx)
                if ((cur_iteration_idx == 1) and self.dry_run_first_individual):
                    # One synchronous run with full log output, to catch obvious errors early.
                    print('Very first try with log output:', file=log.v2)
                    _IndividualTrainer(optim=self, individual=population[0], gpu_ids={0}).run()
                print(('Starting training with thread pool of %i threads.' % self.num_threads))
                iteration_start_time = time.time()
                # Silence normal logging while the pool runs; only progress goes to stdout.
                with wrap_log_streams(StreamDummy(), also_sys_stdout=True, tf_log_verbosity='WARN'):
                    Outstanding.exit = False
                    Outstanding.population = list(population)
                    # Round-robin GPU assignment over the worker threads.
                    Outstanding.threads = [WorkerThread(gpu_ids={(i % num_gpus)}) for i in range(self.num_threads)]
                    try:
                        while True:
                            with Outstanding.cond:
                                if (all([thread.finished for thread in Outstanding.threads]) or Outstanding.exception):
                                    break
                                # Fully finished individuals (not queued, not in-flight)...
                                complete_frac = max(((len(population) - len(Outstanding.population)) - len(Outstanding.threads)), 0)
                                # ...plus the partial progress of the in-flight ones.
                                complete_frac += sum([thread.get_complete_frac() for thread in Outstanding.threads])
                                complete_frac /= float(len(population))
                                remaining_str = ''
                                if (complete_frac > 0):
                                    start_elapsed = (time.time() - iteration_start_time)
                                    total_time_estimated = (start_elapsed / complete_frac)
                                    remaining_estimated = (total_time_estimated - start_elapsed)
                                    remaining_str = hms(remaining_estimated)
                                if interactive:
                                    progress_bar(complete_frac, prefix=remaining_str, file=sys.__stdout__)
                                else:
                                    print(('Progress: %.02f%%' % (complete_frac * 100)), 'remaining:', (remaining_str or 'unknown'), file=sys.__stdout__)
                                    sys.__stdout__.flush()
                                Outstanding.cond.wait((1 if interactive else 10))
                        for thread in Outstanding.threads:
                            thread.join()
                    finally:
                        Outstanding.exit = True
                        for thread in Outstanding.threads:
                            thread.cancel(join=True)
                        Outstanding.threads = []
                print('Training iteration elapsed time:', hms((time.time() - iteration_start_time)))
                if Outstanding.exception:
                    raise Outstanding.exception
                assert (not Outstanding.population)
                print('Training iteration finished.')
                # Selection: drop the worst, keep a global best list, reseed next population.
                population.sort(key=(lambda p: p.cost))
                del population[(- self.num_kill_individuals):]
                best_individuals.extend(population)
                best_individuals.sort(key=(lambda p: p.cost))
                del best_individuals[self.num_best:]
                population = (best_individuals[:(self.num_kill_individuals // 4)] + population)
                print(('Current best setting, individual %s' % best_individuals[0].name), 'cost:', best_individuals[0].cost)
                for p in self.hyper_params:
                    print((' %s -> %s' % (p.description(), best_individuals[0].hyper_param_mapping[p])))
        except KeyboardInterrupt:
            print('KeyboardInterrupt, canceled search.')
        print(('Best %i settings:' % len(best_individuals)))
        for individual in best_individuals:
            print(('Individual %s' % individual.name), 'cost:', individual.cost)
            for p in self.hyper_params:
                print((' %s -> %s' % (p.description(), individual.hyper_param_mapping[p])))
class _IndividualTrainer():
    """
    Trains a single :class:`Individual` (one concrete hyper-parameter setting)
    for one epoch and stores the resulting training cost on the individual.
    """

    def __init__(self, optim, individual, gpu_ids):
        """
        :param Optimization optim:
        :param Individual individual:
        :param set[int] gpu_ids: GPU ids this trainer is allowed to use
        """
        self.optim = optim
        self.individual = individual
        # Set in run(); exposed so another thread can access/cancel the Runner.
        self.runner = None
        self.gpu_ids = gpu_ids
        # Set externally to request cancellation; checked before training starts.
        self.cancel_flag = False

    def run(self):
        """
        Run the trainer.
        """
        # Cost is cached on the individual; skip retraining if already evaluated.
        if (self.individual.cost is not None):
            return self.individual.cost
        start_time = time.time()
        hyper_param_mapping = self.individual.hyper_param_mapping
        print(('Training %r using hyper params:' % self.individual.name), file=log.v2)
        for p in self.optim.hyper_params:
            print((' %s -> %s' % (p.description(), hyper_param_mapping[p])), file=log.v2)
        # Build a config instance with this hyper-param mapping applied,
        # restricted to the assigned GPUs, and force training mode.
        config = self.optim.create_config_instance(hyper_param_mapping, gpu_ids=self.gpu_ids)
        config.set('task', 'train')
        engine = Engine(config=config)
        # Train on an in-memory copy of the dataset for a single epoch.
        train_data = StaticDataset.copy_from_dataset(self.optim.train_data)
        engine.init_train_from_config(config=config, train_data=train_data)
        engine.epoch = 1
        train_data.init_seq_order(epoch=engine.epoch)
        batches = train_data.generate_batches(
            recurrent_net=engine.network.recurrent,
            batch_size=engine.batch_size,
            max_seqs=engine.max_seqs,
            max_seq_length=int(engine.max_seq_length),
            seq_drop=engine.seq_drop,
            shuffle_batches=engine.shuffle_batches,
            used_data_keys=engine.network.used_data_keys)
        engine.updater.set_learning_rate(engine.learning_rate, session=engine.tf_session)
        trainer = Runner(engine=engine, dataset=train_data, batches=batches, train=True)
        # Publish the runner before starting, so a cancel request can reach it.
        self.runner = trainer
        if self.cancel_flag:
            raise CancelTrainingException('Trainer cancel flag is set')
        trainer.run(report_prefix=('hyper param tune train %r' % self.individual.name))
        if (not trainer.finalized):
            print('Trainer exception:', trainer.run_exception, file=log.v1)
            raise trainer.run_exception
        # NOTE(review): assumes the network reports a loss under the key
        # "cost:output", i.e. an output layer named "output" with a loss -- confirm.
        cost = trainer.score['cost:output']
        print(
            ('Individual %s:' % self.individual.name), 'Train cost:', cost,
            'elapsed time:', hms_fraction((time.time() - start_time)),
            file=self.optim.log)
        self.individual.cost = cost
class _AttribOrKey:
    """
    One step of an attribute/key chain: depending on ``col_type``, the ``key``
    is looked up in a :class:`Config`'s ``typed_dict``, used as a dict key,
    or treated as an object attribute name.
    """

    # Marker values that select how ``key`` is applied to a parent object.
    ColTypeConfig = Config
    ColTypeDict = dict
    ColTypeObj = object

    def __init__(self, key, col_type):
        """
        :param str|object key:
        :param type[object]|type[dict] col_type:
        """
        self.key = key
        self.col_type = col_type

    def __str__(self):
        col_type = self.col_type
        if col_type is self.ColTypeConfig:
            return '%s' % self.key
        elif col_type is self.ColTypeDict:
            return '[%r]' % self.key
        elif col_type is self.ColTypeObj:
            return '.%s' % self.key
        raise Exception('invalid col_type %r' % col_type)

    def get(self, parent):
        """
        Read the value this step selects from ``parent``.

        :param object|dict|Config parent:
        :rtype: dict|object|HyperParam
        """
        col_type = self.col_type
        if col_type is self.ColTypeConfig:
            return parent.typed_dict[self.key]
        elif col_type is self.ColTypeDict:
            return parent[self.key]
        elif col_type is self.ColTypeObj:
            return getattr(parent, self.key)
        raise Exception('invalid col_type %r' % col_type)

    def set(self, parent, new_value):
        """
        Write ``new_value`` into the slot this step selects on ``parent``.

        :param object|dict|Config parent:
        :param new_value:
        """
        col_type = self.col_type
        if col_type is self.ColTypeConfig:
            parent.typed_dict[self.key] = new_value
        elif col_type is self.ColTypeDict:
            parent[self.key] = new_value
        elif col_type is self.ColTypeObj:
            setattr(parent, self.key, new_value)
        else:
            raise Exception('invalid col_type %r' % col_type)
class _AttrChain(): def __init__(self, base): '\n :param object|dict base:\n ' self.base = base self.chain = [] self.value = base def __str__(self): return ''.join(map(str, self.chain)) def __repr__(self): return ('<%s %r %r>' % (self.__class__.__name__, self.chain, self.value)) def get_extended_chain(self, attr): '\n :param _AttribOrKey attr:\n :rtype: _AttrChain\n ' sub_chain = _AttrChain(base=self.base) sub_chain.chain = list(self.chain) sub_chain.chain.append(attr) sub_chain.value = attr.get(self.value) return sub_chain def write_attrib(self, base, new_value): '\n :param object|dict|Config base:\n :param new_value:\n ' obj = base assert (len(self.chain) >= 1) for attr in self.chain[:(- 1)]: obj = attr.get(obj) self.chain[(- 1)].set(obj, new_value)
def hash_str_djb2(s):
    """
    Deterministic 32-bit string hash (Bernstein's djb2 scheme:
    ``h = h * 33 + ord(c)``, truncated to 32 bits after each step).

    :param str s:
    :rtype: int
    """
    acc = 5381
    for ch in s:
        acc = (acc * 33 + ord(ch)) & 4294967295
    return acc
def hash_seq(ls):
    """
    Deterministic 32-bit hash of a sequence, folding in each element's
    :func:`hash_obj` with multiplier 1000003 (as in CPython tuple hashing),
    truncated to 32 bits after each step.

    :param list|tuple ls:
    :rtype: int
    """
    acc = 5381
    for item in ls:
        acc = (acc * 1000003 + hash_obj(item)) & 4294967295
    return acc
def hash_int(x):
    """
    Deterministic 32-bit hash of an int: multiply by 2049
    (i.e. ``(x << 11) + x``) and truncate to 32 bits.

    :param int x:
    :rtype: int
    """
    return (x * 2049) & 4294967295
def hash_obj(x):
    """
    Deterministic hash of a supported value, dispatching by type to the
    matching hash helper.

    :param tuple|list|str|_AttribOrKey|_AttrChain x:
    :rtype: int
    :raises TypeError: for unsupported types
    """
    if isinstance(x, (list, tuple)):
        return hash_seq(x)
    elif isinstance(x, str):
        return hash_str_djb2(x)
    elif isinstance(x, _AttribOrKey):
        return hash_str_djb2(x.key)
    elif isinstance(x, _AttrChain):
        return hash_seq(x.chain)
    elif isinstance(x, int):
        return hash_int(x)
    raise TypeError('invalid type %s' % type(x))
class LayerBase(object): '\n This is the base class for all layers.\n Every layer by default has a list of source layers `sources`\n and defines `self.output` which is of type :class:`Data`.\n It shares some common functionality across all layers, such as explicitly defining the output format,\n some parameter regularization, and more.\n\n If you want to implement your own layer::\n\n class YourOwnLayer(_ConcatInputLayer): # e.g. either _ConcatInputLayer or LayerBase as a base\n " some docstring "\n layer_class = "your_layer_name"\n\n def __init__(self, your_kwarg1, your_opt_kwarg2=None, **kwargs):\n " docstring, document the args! "\n super(YourOwnLayer, self).__init__(**kwargs)\n # Now we need to set self.output, which must be of type :class:`Data`.\n # It is set at this point to whatever we got from `self.get_out_data_from_opts()`,\n # so it is enough if we set self.output.placeholder and self.output.size_placeholder,\n # but we could also reset self.output.\n self.output.placeholder = self.input_data.placeholder + 42 # whatever you want to do\n # If you don\'t modify the sizes (e.g. sequence-length), just copy the input sizes.\n self.output.size_placeholder = self.input_data.size_placeholder.copy()\n\n @classmethod\n def get_out_data_from_opts(cls, **kwargs):\n " This is supposed to return a :class:`Data` instance as a template, given the arguments. 
"\n # example, just the same as the input:\n return get_concat_sources_data_template(kwargs["sources"], name="%s_output" % kwargs["name"])\n\n ' layer_class = None recurrent = False allow_inf_in_output = False def __init__(self, name, network, output, n_out=NotSpecified, out_dim=None, out_type=None, out_shape=None, sources=(), in_dim=None, target=None, _target_layers=None, loss=None, size_target=None, reuse_params=None, name_scope=None, param_device=None, is_output_layer=None, only_on_eval=False, only_on_search=False, copy_output_loss_from_source_idx=None, batch_norm=False, L2=None, darc1=None, spatial_smoothing=0.0, param_variational_noise=None, param_dropout=None, param_dropout_min_ndim=None, updater_opts=None, initial_output=None, state=None, need_last=False, rec_previous_layer=None, encapsulate=False, collocate_with=None, trainable=True, custom_param_importer=None, register_as_extern_data=None, control_dependencies_on_output=None, debug_print_layer_output=None, _network=None, _name=None, _src_common_search_choices=None): '\n Usually the arguments, when specified in the network dict,\n are going through :func:`transform_config_dict`, before they are passed to here.\n See :func:`TFNetwork.construct_from_dict`.\n\n :param str name:\n :param returnn.tf.network.TFNetwork network:\n :param Data output: Set a specific output instead of using :func:`get_out_data_from_opts`\n :param NotSpecified|None|int n_out: output dim\n :param returnn.tensor.Dim|None out_dim: output feature dim tag\n :param dict[str] out_type: kwargs for Data class. more explicit than n_out.\n :param set[returnn.tensor.Dim|returnn.tf.util.data._MarkedDim]|tuple|list|None out_shape:\n verifies the output shape (dim tags). See :func:`Data.verify_out_shape`.\n :param list[LayerBase] sources: via self.transform_config_dict()\n :param returnn.tensor.Dim|None in_dim: input feature dim tag\n :param str|list[str]|None target: if some loss is set, this is the target data-key,\n i.e. 
network.extern_data.get_data(target). alternatively, this also can be a layer name.\n :param dict[str,LayerBase]|None _target_layers: if target.startswith("layer:"), then this is target -> layer\n :param str|None size_target: like target but this is only used to set our output size in case of training\n :param Loss|None loss: via :func:`transform_config_dict`.\n Every layer can have one loss (of type :class:`Loss`), or none loss.\n In the net dict, it is specified as a string.\n In :class:`TFNetwork`, all losses from all layers will be collected.\n That is what :class:`TFUpdater.Updater` will use for training.\n :param ReuseParams|None reuse_params: if given, will opt reuse the params. see :func:`self.var_creation_scope`.\n See also the ``name_scope`` option as an alternative.\n :param str|None name_scope: If set, uses this custom (relative) name scope.\n If it starts with a "/", it will be the absolute name scope.\n It should not end with a "/".\n It can be empty, in which case it will not consume a new name scope.\n This can also be used for parameter sharing.\n The default is the layer name in most cases,\n but this logic is in :func:`get_absolute_name_scope_prefix` and :func:`TFNetwork.layer_creation_scope`.\n :param str|None param_device: e.g. "CPU", etc. any valid name for tf.device.\n see https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/util/device_name_utils.h\n :param float|None L2: for constraints\n :param float|None darc1: for constraints. see Generalization in Deep Learning, https://arxiv.org/abs/1710.05468\n :param float|None spatial_smoothing: see :func:`returnn.tf.util.basic.spatial_smoothing_energy`\n :param float|None param_variational_noise: adds variational noise to the params during training\n :param float|None param_dropout: dropout on params (weight dropout) during training\n :param int|None param_dropout_min_ndim: if param dropout is enabled, only use if for params whose ndim >= this.\n E.g. 
it might make sense to disable it for bias params or scalars, so set param_dropout_min_ndim=2.\n :param dict[str]|None updater_opts: accepts similar opts as TFUpdater, e.g. "optimizer", "learning_rate", ...\n :param bool|None is_output_layer: triggers the construction of this layer in the root net.\n Inside a :class:`RecLayer`, it triggers the explicit accumulation of all frames.\n Also see the ``need_last`` option.\n :param bool only_on_eval: if True, this layer will only be calculated in eval\n :param bool only_on_search: if True, this layer will only be calculated when search is done\n :param int|None copy_output_loss_from_source_idx: if set, will copy output_loss from this source\n :param bool|dict batch_norm: see self.batch_norm()\n :param str|float initial_output: used for recurrent layer, see self.get_rec_initial_output()\n :param state: explicitly defines the rec state.\n initial_state would define the initial state (in the first frame)\n :param bool need_last: Inside :class:`RecLayer`, make sure that we can access the last frame.\n Similar to ``is_output_layer, but this is specifically about the last frame,\n i.e. 
it does not trigger accumulation.\n :param LayerBase|None rec_previous_layer: via the recurrent layer,\n layer (template) which represents the past of us.\n You would not explicitly set this in a config.\n This is automatically, internally, via :class:`RecLayer`.\n :param bool encapsulate: mostly relevant for SubnetworkLayer and similar:\n If True, all sub layers will be created,\n and covered in functions like :func:`get_rec_initial_extra_outputs`,\n and the logic in :func:`cls_get_sub_network` will not be used.\n If False, the logic in :func:`cls_get_sub_network` will be used.\n :param list[str]|None collocate_with: in the rec layer, collocate with the specified other layers\n :param bool trainable: whether the parameters of this layer will be trained.\n Default is True.\n However, if this is inside a subnetwork, all the parent layers must be set to trainable,\n otherwise the parameters will not be trainable.\n :param str|callable|None custom_param_importer: used by :func:`set_param_values_by_dict`\n :param str|None register_as_extern_data: registers output in network.extern_data\n :param None|((LayerBase)->list[tf.Operation]) control_dependencies_on_output:\n This is mostly to perform some checks *after* the layer output has been computed,\n *before* the layer output is used anywhere else.\n There is also the :class:`IdentityLayer` with the option ``control_dependencies``.\n :param None|bool|dict[str] debug_print_layer_output: same as global config option but per layer\n :param str _name: just for internal construction, should be the same as ``name``\n :param returnn.tf.network.TFNetwork _network: just for internal construction, should be the same as ``network``\n :param None|SearchChoices _src_common_search_choices:\n set via :func:`SearchChoices.translate_to_common_search_beam`\n ' debug_print_layer_output self.name = name self.network = network self._register_layer() self.kwargs = None self.target = None self.targets = None if target: if isinstance(target, 
list): self.targets = target self.target = target[0] else: assert isinstance(target, str) self.targets = [target] self.target = target self._target_layers = _target_layers self.size_target = size_target self.loss = loss if (self.loss and self.loss.recurrent): self.recurrent = True self.output = output if (n_out is not NotSpecified): assert (self.output.dim == n_out) if isinstance(out_type, dict): if ('shape' in out_type): assert (self.output.shape == out_type['shape']) if ('dim' in out_type): assert (self.output.dim == out_type['dim']) if out_dim: assert (out_dim in self.output.dim_tags_set_implicit), ('%s: out_dim handling not implemented correctly for this layer' % self) out_shape self.output_before_activation = None self.output_loss = None if (copy_output_loss_from_source_idx is not None): self.output_loss = sources[copy_output_loss_from_source_idx].output_loss self.rec_vars_outputs = {} self.search_choices = None self._src_common_search_choices = _src_common_search_choices self._initial_output = initial_output self.need_last = need_last self._rec_previous_layer = rec_previous_layer self._encapsulate = encapsulate self.collocate_with = (collocate_with or []) self.post_init_hooks = [] self.sources = list(sources) if (in_dim and (len(sources) == 1)): assert sources[0].output.have_dim_tag(in_dim, unique=True), ('%s: in_dim %s not found or unique in input %s' % (self, in_dim, sources[0])) self.have_params = False self.params = {} self.saveable_param_replace = {} self.reuse_params = reuse_params self.name_scope = name_scope self.param_device = param_device self.L2 = L2 self.darc1 = darc1 self.spatial_smoothing = spatial_smoothing self.param_variational_noise = param_variational_noise self.param_dropout = param_dropout self.param_dropout_min_ndim = param_dropout_min_ndim self.updater_opts = CollectionReadCheckCovered((updater_opts or {})) self._is_output_layer = is_output_layer self.only_on_eval = only_on_eval self.only_on_search = only_on_search self.use_batch_norm = 
batch_norm self.trainable = trainable self.custom_param_importer = custom_param_importer self.control_dependencies_on_output = control_dependencies_on_output self.register_as_extern_data = register_as_extern_data self.stats = {} self._set_prev_state(state) def _set_prev_state(self, state): if (state is None): return from tensorflow.python.util import nest if (not self._rec_previous_layer): def _map_to_state_tensor_simple(state_layer): assert isinstance(state_layer, LayerBase) assert state_layer.output.have_batch_axis() assert (state_layer.output.batch_ndim <= 2), ('%s with state %s expects to operate a single step' % (self, state)) return state_layer.output.copy_as_batch_major().placeholder self._rec_previous_layer = InternalLayer(name=('prev-dummy:%s' % self.name), network=self.network, output=self.output) self._rec_previous_layer.rec_vars_outputs['state'] = nest.map_structure(_map_to_state_tensor_simple, state) return def _map_to_state_tensor(orig_state, state_layer): assert isinstance(orig_state, tf.Tensor) if (state_layer is None): return orig_state assert isinstance(state_layer, LayerBase) assert (orig_state.shape.as_list() == list(state_layer.output.batch_shape)) return state_layer.output.placeholder if (set(self._rec_previous_layer.rec_vars_outputs.keys()) == {'state'}): rec_prev_layer_state = self._rec_previous_layer.rec_vars_outputs['state'] nest.assert_same_structure(rec_prev_layer_state, state) self._rec_previous_layer.rec_vars_outputs['state'] = nest.map_structure(_map_to_state_tensor, rec_prev_layer_state, state) return raise NotImplementedError(("%s: explicit 'state' %r, internal states %r" % (self, state, self._rec_previous_layer.rec_vars_outputs))) def post_init(self, layer_desc): '\n This gets called right after self.__init__().\n\n :param dict[str] layer_desc: kwargs as they are passed to self.__init__\n ' self.kwargs = layer_desc.copy() assert ('output' in self.kwargs) self.kwargs.setdefault('name', self.name) if (self.output.placeholder is not 
None): if self.use_batch_norm: opts = {} if isinstance(self.use_batch_norm, dict): opts = self.use_batch_norm self.output.placeholder = self.batch_norm(self.output, **opts) if self.control_dependencies_on_output: control_deps = self.control_dependencies_on_output(self) if (not isinstance(control_deps, (list, tuple))): assert isinstance(control_deps, (tf.Operation, tf.Tensor)) control_deps = [control_deps] assert all([isinstance(dep, (tf.Operation, tf.Tensor)) for dep in control_deps]) if control_deps: with tf.control_dependencies(control_deps): self.output.placeholder = tf.identity(self.output.placeholder) if self.register_as_extern_data: self.network.extern_data.extra_added_keys.add(self.register_as_extern_data) self.network.extern_data.data[self.register_as_extern_data] = self.output for func in self.post_init_hooks: func() def __repr__(self): return ('<%s %s%r out_type=%s>' % (self.__class__.__name__, self.network.get_absolute_name_prefix(), self.name, (self.output.get_description(with_name=False) if self.output else None))) @classmethod def get_out_data_from_opts(cls, **kwargs): '\n Gets a Data template (i.e. 
shape etc is set but not the placeholder) for our __init__ args.\n The purpose of having this as a separate classmethod is to be able to infer the shape information\n without having to construct the layer.\n This function should not create any nodes in the computation graph.\n\n :param kwargs: all the same kwargs as for self.__init__()\n :return: Data template (placeholder not set)\n :rtype: Data\n ' return cls._base_get_out_data_from_opts(**kwargs) @classmethod def _base_get_out_data_from_opts(cls, network, name, out_type=None, out_dim=None, n_out=NotSpecified, out_shape=None, target=None, _target_layers=None, size_target=None, sources=(), in_dim=None, loss=None, **kwargs): '\n Called via BaseLayer.get_out_data_from_opts().\n\n :param returnn.tf.network.TFNetwork network:\n :param str name:\n :param dict[str]|None|(()->Data) out_type:\n :param returnn.tensor.Dim|None out_dim:\n :param int|None|NotSpecified n_out:\n :param set[Dim|_MarkedDim]|tuple|list|None out_shape: verifies the output shape (dim tags).\n :param str|list[str]|None target:\n :param dict[str,LayerBase]|None _target_layers: if target.startswith("layer:"), then this is target -> layer\n :param str|None size_target:\n :param list[LayerBase] sources:\n :param Dim|None in_dim:\n :param Loss|None loss:\n :param kwargs: remaining kwargs of self.__init__(), ignored here\n :return: Data template (placeholder not set)\n :rtype: Data\n ' if callable(out_type): return out_type(network=network, name=name, n_out=n_out, target=target, size_target=size_target, sources=sources, loss=loss, **kwargs) if (out_type is None): out_type = {} else: out_type = out_type.copy() out_type.setdefault('name', ('%s_output' % name)) if (('dim' not in out_type) and (n_out is not NotSpecified)): out_type['dim'] = n_out if (('dim' not in out_type) and target and (not out_dim)): out_type['dim'] = cls._static_get_target_value(target=(target[0] if isinstance(target, list) else target), _target_layers=_target_layers, network=network, 
mark_data_key_as_used=False).dim if (n_out is not NotSpecified): assert (out_type['dim'] == n_out) sources_data_list = [src.output for src in sources if src] if in_dim: assert (len(sources_data_list) == 1), ('%r: with specific in_dim %s, there must be a single source' % (name, in_dim)) if (sources_data_list[0].feature_dim_or_sparse_dim != in_dim): assert (in_dim in sources_data_list[0].dim_tags) axis = sources_data_list[0].get_axis_from_description(in_dim) sources_data_list = [sources_data_list[0].copy()] sources_data_list[0].feature_dim_axis = axis allow_broadcast_all_sources = NotSpecified if (('shape' in out_type) or ('dim_tags' in out_type) or ('dims' in out_type) or (out_shape is not None)): allow_broadcast_all_sources = True sources_data = (Data.get_common_data(sources_data_list, ignore_feature_dim=True, allow_broadcast_all_sources=allow_broadcast_all_sources, name=('%s_sources' % name)) if sources_data_list else None) if (sources_data and (not sources_data.sparse) and (not out_type.get('sparse', False))): out_type.setdefault('dtype', sources_data.dtype) if all([(k not in out_type) for k in (Data.SpecialAxesNames + ('dim_tags', 'dims', 'shape'))]): if sources_data: out_type.setdefault('batch_dim_axis', sources_data.batch_dim_axis) out_type.setdefault('time_dim_axis', sources_data.time_dim_axis) if ((not out_type.get('sparse', False)) and (not out_type.get('sparse_dim', None)) and (sources_data.feature_dim_axis_or_unspecified is not NotSpecified)): if (sources_data.feature_dim_axis_or_unspecified is not None): out_type.setdefault('feature_dim_axis', sources_data.feature_dim_axis_or_unspecified) elif (out_type.get('dim', None) is None): out_type.setdefault('feature_dim_axis', None) elif (network.is_inside_rec_layer() and (None not in out_type.get('shape', ()))): out_type.setdefault('time_dim_axis', None) if (('shape' not in out_type) and ('dim_tags' not in out_type) and ('dims' not in out_type)): if sources_data: out_type.setdefault('version', 
sources_data.version) if (out_type.get('sparse', False) or out_type.get('sparse_dim', None)): out_type['dims'] = sources_data.dim_tags_sparse else: feature_dim_axis = out_type.get('feature_dim_axis', NotSpecified) dim_tags = list(sources_data.dim_tags_sparse) if out_dim: feature_dim_tag = out_dim else: dim = out_type.get('dim', None) feature_dim_tag = FeatureDim(('%s:feature-dense' % name), dim, auto_generated=True) if (feature_dim_axis in (NotSpecified, None)): if (sources_data.feature_dim_axis is None): feature_dim_axis = len(dim_tags) else: feature_dim_axis = sources_data.feature_dim_axis dim_tags.insert(feature_dim_axis, feature_dim_tag) out_type['dims'] = dim_tags elif network.is_inside_rec_layer(): if out_type.get('sparse', False): out_type.setdefault('shape', ()) else: out_type.setdefault('shape', (out_type.get('dim', None),)) if (sources_data and sources_data.batch): out_type.setdefault('batch', sources_data.batch) if (sources_data and sources_data.beam): out_type.setdefault('beam', sources_data.beam) if out_dim: out_type.setdefault('dim', out_dim.dimension) output = Data(**out_type) if ((not out_dim) and sources_data and sources_data.feature_dim_or_sparse_dim and (sources_data.dim == output.dim)): if (output.feature_dim_or_sparse_dim and output.feature_dim_or_sparse_dim.auto_generated): out_dim = sources_data.feature_dim_or_sparse_dim if out_dim: assert (out_dim.dimension == output.dim), f'Layer {name!r} out_dim {out_dim} does not match Data {output} via out_type {out_type}' if output.sparse: output.sparse_dim = out_dim else: output = output.copy_template_replace_dim_tag(axis=output.feature_dim_axis, new_dim_tag=out_dim) output = cls._post_init_output(output=output, network=network, target=target, size_target=size_target, _target_layers=_target_layers, sources=sources, **kwargs) return output @classmethod def _post_init_output(cls, output, network, target=None, size_target=None, _target_layers=None, sources=(), _src_common_search_choices=None, **kwargs): 
'\n :param Data output:\n :param returnn.tf.network.TFNetwork network:\n :param str|list[str]|None target:\n :param str|None size_target:\n :param dict[str,LayerBase]|None _target_layers: if target.startswith("layer:"), then this is target -> layer\n :param None|SearchChoices _src_common_search_choices:\n set via :func:`SearchChoices.translate_to_common_search_beam`\n :param list[LayerBase] sources:\n :return: output, maybe changed\n :rtype: Data\n ' sources_data = [output] if (network.eval_flag and size_target): sources_data.append(cls._static_get_target_value(target=size_target, _target_layers=_target_layers, search_choices=_src_common_search_choices, network=network, mark_data_key_as_used=network.eval_flag)) elif ((network.train_flag is not False) and target): sources_data.append(cls._static_get_target_value(target=(target[0] if (target and isinstance(target, list)) else target), _target_layers=_target_layers, search_choices=_src_common_search_choices, network=network, mark_data_key_as_used=(network.train_flag is not False))) sources_data += [s.output for s in sources if s] is_equal_opts = dict(treat_feature_as_spatial=True, allow_same_spatial_dim=True, undefined_matches=True, derived_matches=True) (_, tags_dict) = Dim.get_all_dimension_tags(sources_data, is_equal_opts=is_equal_opts) new_dim_tags = tags_dict[output] if (tuple(new_dim_tags) != output.dim_tags): output_ = output.copy_template_new_dim_tags(new_dim_tags, keep_special_axes=True) if (output.placeholder is not None): output_.placeholder = output.placeholder output = output_ if any([(src and (not src.output.available_for_inference)) for src in sources if src]): output.available_for_inference = False return output @classmethod def fixup_out_data(cls, output, network, out_shape=None, **kwargs): '\n This is called after get_out_data_from_opts, to fixup incomplete information.\n E.g. 
we can patch batch or beam information here\n but maybe also other things.\n\n Other layer classes might overwrite this but then should call this super method.\n Usually this should not be needed though.\n\n :param Data output:\n :param returnn.tf.network.TFNetwork network:\n :param set[Dim|_MarkedDim]|tuple|list|None out_shape: verifies the output shape (dim tags).\n See :func:`Data.verify_out_shape`.\n :rtype: Data\n ' from tensorflow.python.util import nest from ..util.data import BatchInfo from ..network import ExternData if (not output.batch): global_batch = network.get_root_network().extern_data.get_batch_info(allow_none=True) if global_batch: assert global_batch.is_global_batch() def _set_global_batch_by_data(data): '\n :param Data data:\n :rtype: returnn.tf.util.data.BatchInfo\n ' assert ((data.placeholder is not None) and (not data.beam)) extern_data = ExternData() extern_data.data[('_fixup_out_data_dummy_input_' + data.name)] = data assert data.available_for_inference extern_data.init_batch_info() assert data.batch network.get_root_network().extern_data.set_batch_info(data.batch) return data.batch dep_layers = [v for v in nest.flatten(kwargs) if isinstance(v, LayerBase)] dep_batches = [dep.output.batch for dep in dep_layers if dep.output.batch] dyn_dim_tags_with_batch = [dim_tag for dim_tag in output.dim_tags if (dim_tag.dyn_size_ext and dim_tag.dyn_size_ext.have_batch_axis() and (dim_tag.dyn_size_ext.placeholder is not None))] for dim_tag in output.dim_tags: dim_tag._validate_in_current_graph() dim_tags_with_batch_info = [dim_tag for dim_tag in output.dim_tags if dim_tag.batch] if dep_batches: output.batch = BatchInfo.get_common_batch_info(dep_batches).copy_set_beam(output.beam) elif network.extern_data.get_batch_info(allow_none=True): output.batch = network.extern_data.get_batch_info().copy_set_beam(output.beam) elif (network.parent_net and network.get_root_network().extern_data.get_batch_info(allow_none=True)): output.batch = 
    @classmethod
    def get_global_layer_list(cls):
        """
        All layers created so far in this TF graph, via the RETURNN_LAYERS graph collection.

        :rtype: list[LayerBase]
        """
        from returnn.tf.util.basic import CollectionKeys

        coll = tf_compat.v1.get_collection_ref(CollectionKeys.RETURNN_LAYERS)
        assert isinstance(coll, list)
        return coll

    @classmethod
    def get_recent_layer(cls):
        """
        The most recently created layer (last entry of :func:`get_global_layer_list`).

        :rtype: LayerBase
        """
        coll = cls.get_global_layer_list()
        assert coll
        return coll[(- 1)]

    def _register_layer(self):
        # Append self to the global graph-collection-backed layer list.
        self.get_global_layer_list().append(self)

    @classmethod
    def transform_config_dict(cls, d, network, get_layer):
        """
        Transform the net-dict layer description `d` inplace into kwargs for ``self.__init__()``.

        Mostly leaves `d` as-is. This is used by :func:`TFNetwork.construct_from_dict`.
        It resolves certain arguments, e.g. it resolves the ``"from"`` argument (a list of strings)
        into the ``"sources"`` kwarg (a list of :class:`LayerBase` instances).
        Subclasses extend/overwrite this, usually only when some argument might be a reference
        to another layer which needs to be resolved.

        :param dict[str] d: will modify inplace
        :param returnn.tf.network.TFNetwork network:
        :param returnn.tf.network.GetLayer|((str)->LayerBase) get_layer: function to get or construct another layer.
            The name `get_layer` might be misleading, as this should return an existing layer,
            or construct it if it does not exist yet.
            `network.get_layer` would just return an existing layer.
        """
        from .basic import get_loss_class
        from ..network import LayerNotFound

        # Behavior version 1 makes an explicit "from" mandatory.
        BehaviorVersion.require(
            condition=('from' in d),
            message=('Missing "from" in layer definition: %s/%s' % (network.name, d.get('_name', '<UNKNOWN>'))),
            version=1)
        src_names = d.pop('from', ['data'])
        if (not isinstance(src_names, (list, tuple))):
            src_names = [src_names]
        # "none" entries are explicit no-source markers and are dropped.
        d['sources'] = [get_layer(src_name) for src_name in src_names if (not (src_name == 'none'))]
        if ('collocate_with' in d):
            collocate_with = d['collocate_with']
            if (not isinstance(collocate_with, (list, tuple))):
                collocate_with = [collocate_with]
            d['collocate_with'] = collocate_with
        if ('reuse_params' in d):
            d['reuse_params'] = ReuseParams.from_config_dict(d['reuse_params'], network=network, get_layer=get_layer)
        if (d.get('loss', None) and ('target' not in d)):
            # A loss without an explicit target may define a default target key.
            target = get_loss_class(d['loss']).get_default_target(network.extern_data)
            if target:
                d['target'] = target
        targets = None
        target_layers = {}
        assert ('_target_layers' not in d)
        if d.get('target', None):
            targets = d['target']
            if isinstance(targets, str):
                targets = [targets]
            d['_target_layers'] = target_layers
            for target in targets:
                assert isinstance(target, str)
                # Targets can refer to other layers ("layer:<name>") or extern data keys.
                if target.startswith('layer:'):
                    target_layers[target] = get_layer(target[len('layer:'):])
                else:
                    try:
                        target_layers[target] = get_layer(target)
                    except LayerNotFound:
                        if (network.is_inside_rec_layer() and (not network.search_flag)):
                            # Mark the data key as used (dependent on eval flag) even if we
                            # do not resolve it to a layer here.
                            network.get_extern_data(target, mark_data_key_as_used=network.eval_flag)
                        if (not network.search_flag):
                            target_layers[target] = get_layer(('data:%s' % target))
        if d.get('initial_output', None):
            initial_output = d['initial_output']
            if isinstance(initial_output, str):
                # Any other string than the builtin init schemes is a layer reference.
                if (initial_output not in ['zeros', 'ones', 'var', 'keep_over_epoch', 'keep_over_epoch_no_init', 'apply(0)']):
                    d['initial_output'] = get_layer(initial_output)
        if (('n_out' not in d) and ('out_dim' not in d) and targets):
            # Guess the output dimension from the (first) target, optionally adapted by the loss.
            target = targets[0]
            guessed_out_dim = cls._guess_out_dim_from_target_and_opt_loss(
                network=network, target=target, target_layers=target_layers,
                loss_class_name=d.get('loss', None), get_layer=get_layer)
            if guessed_out_dim:
                if (cls.layer_class in {'linear', 'softmax'}):
                    d['out_dim'] = guessed_out_dim
                else:
                    d['n_out'] = guessed_out_dim.dimension
        if ('out_shape' in d):
            inside_rec_time_dim = network.get_inside_rec_time_dim(inside_loop=True)
            over_rec_time_dim = network.get_inside_rec_time_dim(inside_loop=False)
            if (over_rec_time_dim and (not inside_rec_time_dim)):
                # The layer was moved out of a rec loop: the rec time dim (and dims of its
                # dyn sizes) may appear in the output, so add them as optional to out_shape.
                from returnn.tf.util.data import OptionalDim, _MarkedDim

                out_shape = d['out_shape']
                if (not isinstance(out_shape, set)):
                    assert (not out_shape), ('out_shape %r must be empty if not a set' % (out_shape,))
                    out_shape = set(out_shape)
                out_shape.add(OptionalDim(over_rec_time_dim))
                if over_rec_time_dim.dyn_size_ext:
                    for tag in over_rec_time_dim.dyn_size_ext.dim_tags:
                        if (tag not in [(d.tag if isinstance(d, _MarkedDim) else d) for d in out_shape]):
                            out_shape.add(OptionalDim(tag))
                d['out_shape'] = out_shape
        if (d.pop('loss_only_on_non_search', None) and network.search_flag):
            # Loss explicitly disabled during search.
            d.pop('loss', None)
            d.pop('loss_scale', None)
            d.pop('loss_opts', None)
        if d.get('loss', None):
            loss_opts = d.pop('loss_opts', None)
            loss_opts = (loss_opts.copy() if loss_opts else {})
            # loss_scale is deprecated in favor of the loss "scale" option; fold it in.
            loss_scale = d.pop('loss_scale', 1.0)
            if (loss_scale != 1.0):
                if ('scale' in loss_opts):
                    assert (loss_opts['scale'] == loss_scale), "do not use loss_scale and loss with 'scale' option together"
                loss_opts['scale'] = loss_scale
            d['loss'] = cls._make_loss(class_name=d.pop('loss', None), opts=loss_opts, network=network, get_layer=get_layer)
        else:
            # Allow explicit None, but no other values without a loss.
            if (('loss_scale' in d) and (d['loss_scale'] is None)):
                d.pop('loss_scale')
            if (('loss_opts' in d) and (d['loss_opts'] is None)):
                d.pop('loss_opts')
            assert ('loss_scale' not in d), 'loss not defined, do not set loss_scale'
            assert ('loss_opts' not in d), 'loss not defined, do not set loss_opts'
        # Inside a rec loop, resolve the previous-iteration counterpart of this layer.
        (root_ctx_net, prefix) = d['_network'].get_root_ctx_network()
        rec_previous_layer = root_ctx_net.layers.get(('prev:%s%s' % (prefix, d['_name'])))
        if rec_previous_layer:
            d['rec_previous_layer'] = rec_previous_layer
        if (d.get('state', None) is not None):
            from tensorflow.python.util import nest

            d['state'] = nest.map_structure(get_layer, d['state'])
"ce" or None\n :param ((str) -> LayerBase) get_layer: function to get or construct another layer\n :return: out_dim value\n :rtype: returnn.tensor.Dim|None\n ' from .basic import get_loss_class if (target in target_layers): target_data = target_layers[target].output else: target_data = cls._static_get_target_value(target=target, network=network, mark_data_key_as_used=False, get_layer=get_layer, _target_layers=target_layers) if (not target_data): return FeatureDim('dummy-unk-target-out', 1) out_dim = target_data.feature_dim_or_sparse_dim if (not out_dim): return None if loss_class_name: out_dim = get_loss_class(loss_class_name).get_auto_output_layer_dim(out_dim) return out_dim @classmethod def _make_loss(cls, class_name, opts, network, get_layer, always_make=False): '\n :param str|None class_name:\n :param dict[str]|None opts:\n :param returnn.tf.network.TFNetwork network:\n :param ((str) -> LayerBase) get_layer: function to get or construct another layer\n :param bool always_make:\n :rtype: Loss|None\n ' from .basic import get_loss_class if ((not network.eval_flag) and (not always_make)): return None if (not class_name): assert (not always_make) return None if (not opts): opts = {} opts = opts.copy() loss_class = get_loss_class(class_name) assert issubclass(loss_class, Loss) loss_class.transform_config_dict(opts, network=network, get_layer=get_layer) loss = loss_class(base_network=network, **opts) assert isinstance(loss, Loss) return loss def get_full_ctx_name(self): '\n :return: name w.r.t. root ctx network\n ' (_, prefix) = self.network.get_root_ctx_network() return (prefix + self.name) @classmethod def cls_get_tf_scope_name(cls, name): '\n :param str name: layer name\n :return: valid scope name, might be just name. 
    @classmethod
    def cls_get_tf_scope_name(cls, name):
        """
        :param str name: layer name
        :return: valid scope name, might be just name.
            see tf._VALID_SCOPE_NAME_REGEX and tf._VALID_OP_NAME_REGEX
        :rtype: str
        """
        from returnn.tf.util.basic import get_valid_scope_name_from_str

        return get_valid_scope_name_from_str(name)

    @classmethod
    @contextlib.contextmanager
    def cls_setup_scope(cls, name, name_scope=None, **_kwargs):
        """
        Context manager which sets up the TF name/variable scope for constructing this layer.

        :param str name: layer name
        :param str|None name_scope: explicit scope override; "" reuses the current variable
            scope, a leading "/" means an absolute scope, otherwise a relative scope name
        :param _kwargs: other layer kwargs after being transformed (ignored here)
        """
        scope = cls.cls_get_tf_scope_name(name)
        name_scope_abs = None
        if (name_scope is not None):
            if (name_scope == ''):
                scope = tf_compat.v1.get_variable_scope()
            elif name_scope.startswith('/'):
                name_scope_abs = True
                scope = name_scope[1:]
            else:
                scope = name_scope
            if isinstance(scope, str):
                assert (not scope.endswith('/')), ('invalid name_scope %r' % name_scope)
        with reuse_name_scope(scope, absolute=name_scope_abs):
            (yield)

    @property
    def tf_scope_name(self):
        """
        :rtype: str
        :return: normally just self.name, but make it a valid TF scope name.
            this is meant mostly to extend TF names. see :func:`get_base_absolute_name_scope_prefix` otherwise.
        """
        if (self.name_scope and (not self.name_scope.startswith('/'))):
            # A relative custom name_scope replaces the layer name in the TF scope.
            assert (not self.name_scope.endswith('/'))
            return self.name_scope
        return self.cls_get_tf_scope_name(name=self.name)

    def get_base_absolute_name_scope_prefix(self):
        """
        :return: e.g. "output/", always with "/" at end, or "". this is for the TF name scope or variable scope
        :rtype: str
        """
        if (self.name_scope is not None):
            if (self.name_scope == ''):
                # Empty scope: share the parent network scope directly.
                return self.network.get_absolute_name_scope_prefix()
            elif (self.name_scope == '/'):
                # Root scope.
                return ''
            elif self.name_scope.startswith('/'):
                # Absolute scope, without the leading "/".
                assert (not self.name_scope[1:].endswith('/'))
                return (self.name_scope[1:] + '/')
            else:
                # Relative scope under the parent network scope.
                assert (not self.name_scope.endswith('/'))
                return ((self.network.get_absolute_name_scope_prefix() + self.name_scope) + '/')
        return ((self.network.get_absolute_name_scope_prefix() + self.tf_scope_name) + '/')
    def get_absolute_name_scope_prefix(self):
        """
        :return: e.g. "output/", always with "/" at end, or "". this is for the TF name scope or variable scope.
            This is the same as :func:`get_base_absolute_name_scope_prefix` in most cases,
            but some layers like :class:`RecLayer` extend this by an additional postfix.
        :rtype: str
        """
        return self.get_base_absolute_name_scope_prefix()

    def get_absolute_name(self):
        """
        :return: e.g. "output" or "subnet/output". This is mostly for representation.
            See also :func:`get_absolute_name_scope_prefix`.
        :rtype: str
        """
        return (self.network.get_absolute_name_prefix() + self.name)

    def is_output_layer(self):
        """
        Some code differs between an output layer and other layers.
        It is a bit arbitrary what we define as output layer.
        This should be consistent with :func:`TFNetwork.construct_from_dict`.

        :rtype: bool
        """
        if (self._is_output_layer is not None):
            # Explicit user override takes precedence.
            return self._is_output_layer
        if self.loss:
            return True
        if (self.get_full_ctx_name() == 'output'):
            return True
        return False

    def get_dep_layers(self):
        """
        :return: list of layers this layer depends on.
            normally this is just self.sources but e.g. the attention layer in addition has a base, etc.
        :rtype: list[LayerBase]
        """
        layers = list(self.sources)
        if self._target_layers:
            # Sorted by target key for deterministic order.
            layers += [layer for (_, layer) in sorted(self._target_layers.items())]
        return layers

    @classmethod
    def cls_get_sub_network(cls, name, network, layer_desc):
        """
        A layer class can override this to return a custom :class:`Subnetwork`,
        which just sets another namespace (and possibly variable sharing)
        for contained layers but otherwise shares the same construction logic
        via root network :func:`TFNetwork.construct_layer`.

        When not overriding this, a layer still can have sub layers
        via :func:`LayerBase.get_sub_layer`, but they belong to the root layer
        (collocated) and can not be decoupled.

        :param str name:
        :param returnn.tf.network.TFNetwork network:
        :param dict[str] layer_desc:
        :rtype: returnn.tf.network.Subnetwork|None
        """
        return None

    def get_sub_layer(self, layer_name):
        """
        The default behavior for any layer is to return None.
        Returned layers belong to the root layer (self).

        Also see :func:`LayerBase.cls_get_sub_network`.

        Also see :func:`get_available_sub_layer_names`.

        :param str layer_name: name of the sub_layer (right part of '/' separated path)
        :return: the sub_layer addressed in layer_name or None if no sub_layer exists
        :rtype: LayerBase|None
        """
        return None

    @classmethod
    def get_available_sub_layer_names(cls, parent_layer_kwargs):
        """
        :param dict[str] parent_layer_kwargs: kwargs for the parent layer (as kwargs in cls.get_out_data_from_opts())
        :return: list of layer names which can be accessed via :func:`get_sub_layer`
        :rtype: list[str]
        """
        return []

    @classmethod
    def get_sub_layer_out_data_from_opts(cls, layer_name, parent_layer_kwargs):
        """
        Called by _TemplateLayer.get_sub_layer(). Gets a Data template for the sub-layer with name 'layer_name'.
        Also returns the network the sub-layer is in and the class type of the sub-layer.
        There is no good default behaviour here, as this heavily depends on how the current layer uses sub-layers.

        :param str layer_name: name of the sub_layer (right part of '/' separated path)
        :param dict[str] parent_layer_kwargs: kwargs for the parent layer (as kwargs in cls.get_out_data_from_opts())
        :return: Data template, class type of sub-layer, layer opts (transformed)
        :rtype: (Data, type, dict[str])|None
        """
        return None

    def get_sub_networks(self):
        """
        :return: All subnetworks, including those which might be in a different ctx.
            If this returns a non-empty list, we expect that all layers via get_sub_layers
            can be reached via the subnetworks.
        :rtype: list[returnn.tf.network.TFNetwork]
        """
        return []

    def get_sub_layers(self):
        """
        :return: All (direct) (non-temporary) sub layers, including those which might be in a different ctx.
            This is mostly intended to collect params.
        :rtype: list[LayerBase]
        """
        return []

    def _get_abs_layer_path(self) -> List[LayerBase]:
        """
        :return: chain of parent layers from the root network down to self (self is last)
        """
        ls = [self]
        net = self.network
        while net:
            if net.parent_layer:
                ls.append(net.parent_layer)
            net = net.parent_net
        # Collected bottom-up; reverse to get root-first order.
        ls.reverse()
        return ls

    def get_search_choices(self):
        """
        :rtype: SearchChoices|None
        """
        if self.search_choices:
            return self.search_choices
        if self._src_common_search_choices:
            return self._src_common_search_choices
        if (not self.output.beam):
            # No beam -> cannot have search choices.
            return None
        layer = self.network.get_search_choices(src=self)
        if layer:
            assert layer.search_choices
            return layer.search_choices
        return None

    def get_search_beam_size(self):
        """
        :return: beam size if there was a choice layer and we do search
        :rtype: int|None
        """
        if self.output.beam:
            return self.output.beam.beam_size
        return None
if prev layer in :class:`RecLayer`, return current layer\n :rtype: LayerBase\n ' return self def get_batch_dim(self): '\n The batch dim by this layer, not taken from our output placeholder but calculated.\n Normally it is self.network.get_batch_dim()\n but if we do search and there was a choice layer, it it multiplied by the beam size.\n\n :return: batch dim * beam size\n :rtype: tf.Tensor|int\n ' return self.get_batch_info().dim def get_batch_info(self): '\n :rtype: returnn.tf.util.data.BatchInfo\n ' if (self.output.batch and (self.output.batch.beam == self.output.beam)): return self.output.batch batch = self.network.get_global_batch_info() if self.output.beam: batch = batch.copy_extend_with_beam(self.output.beam) return batch @contextlib.contextmanager def var_creation_scope(self, **kwargs): '\n This takes care of setting up a scope where variables can be created.\n This handles multiple things:\n\n * the param sharing logic, to reuse existing variables from elsewhere\n * variational noise and param weight dropout\n * Note: :func:`default_control_flow_ctx` is not needed for tf.get_variable.\n But it might be needed for other code which uses custom inits and tf.Variable,\n e.g. 
tf.random.Generator.\n However, always using this could be a problem if we use other input tensors inside this scope,\n so we do not enable this here.\n\n :param kwargs: passed to variable_scope\n :return: yields the variable_scope\n ' from returnn.tf.util.basic import get_current_var_scope_name, reuse_name_scope from returnn.tf.util.basic import default_control_flow_ctx, reuse_name_scope_of_tensor from returnn.tf.util.gradient_checkpoint import gradient_checkpoint_scope self_base_scope = self.get_base_absolute_name_scope_prefix() assert (self_base_scope.endswith('/') or (self_base_scope == '')) cur_scope = get_current_var_scope_name() assert (cur_scope + '/').startswith(self_base_scope) kwargs = kwargs.copy() kwargs.setdefault('reuse', getattr(tf_compat.v1, 'AUTO_REUSE', None)) param_variational_noise = self.param_variational_noise if (param_variational_noise is None): param_variational_noise = self.network.get_config().float('param_variational_noise', 0) param_dropout = self.param_dropout if (param_dropout is None): param_dropout = self.network.get_config().float('param_dropout', 0) param_dropout_min_ndim = self.param_dropout_min_ndim if (param_dropout_min_ndim is None): param_dropout_min_ndim = self.network.get_config().int('param_dropout_min_ndim', 0) if (self.network.train_flag is False): param_variational_noise = None param_dropout = None need_custom_getter = (bool(param_variational_noise) or bool(param_dropout)) kwargs_custom_getter = kwargs.get('custom_getter', None) def layer_custom_getter(getter, **getter_kwargs): '\n See TF docs :func:`_VariableStore.get_variable`.\n\n :param (...)->tf.Variable getter:\n :rtype: tf.Variable|tf.Tensor\n ' if kwargs_custom_getter: def getter(*, _getter=getter, **kwargs_): 'getter' return kwargs_custom_getter(getter=_getter, **kwargs_) getter.__qualname__ += f'(kwargs_custom_getter={kwargs_custom_getter})' if base_var_scope.custom_getter: def getter(*, _getter=getter, **kwargs_): 'getter' return 
base_var_scope.custom_getter(getter=_getter, **kwargs_) getter.__qualname__ += f'(base_var_scope.custom_getter={base_var_scope.custom_getter})' param = getter(**getter_kwargs) if (param_variational_noise and param.dtype.is_floating and isinstance(param, tf.Variable)): with default_control_flow_ctx(): with reuse_name_scope_of_tensor(param, postfix='_variational_noise', add_tensor_name=True): def _apply_var_noise(): rnd_state = tf_util.StatelessRandomSeed.create(shape=tf_util.get_shape(param)) with gradient_checkpoint_scope(): noise = rnd_state.normal(stddev=param_variational_noise, dtype=param.dtype.base_dtype) return (param + noise) param = self.network.cond_on_train(fn_train=_apply_var_noise, fn_eval=(lambda : param)) if (param_dropout and param.dtype.is_floating and isinstance(param, tf.Variable) and (param.shape.ndims >= param_dropout_min_ndim)): with default_control_flow_ctx(): with reuse_name_scope_of_tensor(param, postfix='_weight_dropout', add_tensor_name=True): param = self.network.cond_on_train(fn_train=(lambda : tf_util.dropout(param, keep_prob=(1.0 - param_dropout), grad_checkpointing=True, seed=self.network.random.randint((2 ** 31)))), fn_eval=(lambda : param)) return param @contextlib.contextmanager def _optional_param_device(): if self.param_device: device_name = self.param_device if (':' not in device_name): device_name = ('%s:*' % device_name) if ('/' not in device_name): device_name = ('/device:%s' % device_name) with tf.device(device_name): (yield) else: (yield) with _optional_param_device(): if self.reuse_params: base_var_scope = self.reuse_params.get_variable_scope(base_layer=self) else: base_var_scope = tf_compat.v1.get_variable_scope() if need_custom_getter: kwargs['custom_getter'] = layer_custom_getter with reuse_name_scope(base_var_scope, **kwargs) as scope_: (yield scope_) def add_param(self, param, custom_update=None, trainable=None, saveable=None, axes_split_info=None, non_critical_for_restore=False): '\n :param tf.Variable|tf.Tensor 
param:\n :param None|CustomUpdate custom_update: will be applied in training, instead of taking the gradient\n :param bool|None trainable:\n :param bool|None saveable:\n :param list[list[int]]|None axes_split_info: e.g. [[n],[n]*4] for LSTM matrices\n :param bool non_critical_for_restore: if True, and it cannot be found in a checkpoint, it will not be an error\n :return: param\n :rtype tf.Variable\n ' self.have_params = True _param = param if isinstance(param, tf.Tensor): from returnn.extern import graph_editor import re possible_params = tf_compat.v1.get_collection(tf_compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=re.escape(self.get_absolute_name_scope_prefix())) if (not possible_params): return param all_ops = graph_editor.get_backward_walk_ops([param.op], inclusive=False, control_inputs=False) all_1st_tensors = [op.outputs[0] for op in all_ops if (len(op.outputs) == 1)] possible_params = [p for p in possible_params if (tf_util.var_handle_or_ref(p) in all_1st_tensors)] if (not possible_params): return param assert (len(possible_params) == 1) param = possible_params[0] assert isinstance(param, tf.Variable) if (getattr(param, 'RETURNN_layer', None) is None): if any(((not layer.trainable) for layer in self._get_abs_layer_path())): trainable_collection_ref = tf_compat.v1.get_collection_ref(tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES) if (param in trainable_collection_ref): trainable_collection_ref.remove(param) if (trainable is None): trainable = (param in tf_compat.v1.get_collection_ref(tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES)) if (saveable is None): saveable = True if custom_update: assert trainable custom_update.set_on_var(param) if axes_split_info: tf_util.set_param_axes_split_info(param, axes_split_info) name_scope_prefix = self.get_absolute_name_scope_prefix() if self.reuse_params: if (not param.name.startswith(name_scope_prefix)): return _param else: name_scope_prefix = self.get_absolute_name_scope_prefix() assert param.name assert 
(param.name[:len(name_scope_prefix)] == name_scope_prefix) assert (param.name[(- 2):] == ':0') param_name = param.name[len(name_scope_prefix):(- 2)] if (param_name not in self.params): self.params[param_name] = param else: assert (self.params[param_name] is param) if (not saveable): self.saveable_param_replace[param] = None if (getattr(param, 'RETURNN_layer', None) is None): param.RETURNN_layer = self if ((getattr(param, 'RETURNN_updater_opts', None) is None) and self.updater_opts.truth_value): param.RETURNN_updater_opts = self.updater_opts if non_critical_for_restore: param.RETURNN_non_critical_for_restore = True return _param def set_param_values_by_dict(self, values_dict, session, ignore_wrong_shape=False, copy_param_mode=None): '\n :param dict[str,numpy.ndarray] values_dict:\n :param bool ignore_wrong_shape:\n :param str|None copy_param_mode:\n :param tf.compat.v1.Session session:\n ' if callable(self.custom_param_importer): self.custom_param_importer(layer=self, values_dict=values_dict, session=session) return if self.custom_param_importer: copy_param_mode = self.custom_param_importer if (copy_param_mode == 'reset'): return assert (copy_param_mode in [None, 'ifpossible', 'subset']) if copy_param_mode: ignore_wrong_shape = True for (param_name, values) in values_dict.items(): assert (param_name in self.params), ('%s: param %r unknown' % (self, param_name)) param = self.params[param_name] assert isinstance(param, tf.Variable) shape = param.get_shape() assert isinstance(shape, tf.TensorShape) assert shape.is_fully_defined(), ('%s: shape of param %r %r not fully defined?' 
% (self, param_name, param)) param_shape = tuple(shape.as_list()) if (not ignore_wrong_shape): assert (param_shape == values.shape), ('var %r: shape %s != %s' % (param, shape.as_list(), values.shape)) if (param_shape != values.shape): if (copy_param_mode == 'subset'): assert (len(param_shape) == len(values.shape)), ('param %r ndim must match' % param) new_values = session.run(param) param_axes_split_info = tf_util.get_param_axes_split_info(param) if param_axes_split_info: tf_util.check_param_axes_split_info(param.get_shape().as_list(), param_axes_split_info) old_axes_splits = tf_util.transform_param_axes_split_info_to_new_shape(param_axes_split_info, values.shape, debug_name=('param %r' % param.name)) print(('Param %r: transform old values of shape parts %r into new shape parts %r.' % (param, old_axes_splits, param_axes_split_info)), file=log.v3) values = tf_util.copy_with_new_split_axes(old_axis_splits=old_axes_splits, new_axis_splits=param_axes_split_info, old_values=values, new_values=new_values) else: print(('Param %r: transform old values of shape %r into new shape %r.' % (param, values.shape, param_shape)), file=log.v3) values = tf_util.copy_with_new_split_axes(old_axis_splits=[[d] for d in values.shape], new_axis_splits=[[d] for d in param_shape], old_values=values, new_values=new_values) else: print(('Will not set param %r because its shape %s != %s.' 
% (param, shape.as_list(), values.shape)), file=log.v3) continue self.network.get_var_assigner(param).assign(values, session=session) def get_param_values_dict(self, session) -> Dict[(str, numpy.ndarray)]: '\n :param tf.compat.v1.Session session:\n :return: dict name -> values\n ' d = {} for (param_name, param) in self.get_saveable_params_dict().items(): d[param_name] = param.eval(session) return d def get_saveable_params_dict(self): '\n :return: params and saveable_param_replace resolved\n :rtype: dict[str,tf.Variable|tensorflow.python.training.saver.BaseSaverBuilder.SaveableObject]\n ' if (not self.saveable_param_replace): return self.params.copy() d = {} for (param_name, param) in self.params.items(): if (param in self.saveable_param_replace): param = self.saveable_param_replace[param] if (param is None): continue d[param_name] = param return d @staticmethod def _static_get_target_value(target, network, mark_data_key_as_used=True, _target_layers=None, get_layer=None, search_choices=None): '\n :param str target:\n :param dict[str,LayerBase]|None _target_layers: if target.startswith("layer:"), then this is target -> layer\n :param returnn.tf.network.TFNetwork network:\n :param bool mark_data_key_as_used: forwarded self.network.get_extern_data()\n :param None|((str) -> LayerBase) get_layer: function to get or construct another layer\n :param SearchChoices|None search_choices:\n :rtype: Data | None\n ' if ((not target) or (target == 'none')): return None from .basic import SelectSearchSourcesLayer if (_target_layers and (target in _target_layers)): return SelectSearchSourcesLayer.select_if_needed(_target_layers[target], search_choices=search_choices).output if target.startswith('layer:'): if (not get_layer): get_layer = network.get_layer layer = get_layer(target[len('layer:'):]) if (not layer): return None return SelectSearchSourcesLayer.select_if_needed(layer, search_choices=search_choices).output assert network.extern_data.has_data(target), ('target %r unknown' % 
    @staticmethod
    def _static_get_target_value(target, network, mark_data_key_as_used=True, _target_layers=None,
                                 get_layer=None, search_choices=None):
        """
        Resolve a target specification to its :class:`Data`.

        :param str target:
        :param dict[str,LayerBase]|None _target_layers: if target.startswith("layer:"), then this is target -> layer
        :param returnn.tf.network.TFNetwork network:
        :param bool mark_data_key_as_used: forwarded self.network.get_extern_data()
        :param None|((str) -> LayerBase) get_layer: function to get or construct another layer
        :param SearchChoices|None search_choices:
        :rtype: Data | None
        """
        if ((not target) or (target == 'none')):
            return None
        from .basic import SelectSearchSourcesLayer

        if (_target_layers and (target in _target_layers)):
            # Already resolved; only translate to the given search choices.
            return SelectSearchSourcesLayer.select_if_needed(_target_layers[target], search_choices=search_choices).output
        if target.startswith('layer:'):
            if (not get_layer):
                get_layer = network.get_layer
            layer = get_layer(target[len('layer:'):])
            if (not layer):
                return None
            return SelectSearchSourcesLayer.select_if_needed(layer, search_choices=search_choices).output
        assert network.extern_data.has_data(target), ('target %r unknown' % target)
        data = network.get_extern_data(target, mark_data_key_as_used=mark_data_key_as_used)
        if search_choices:
            data = data.copy_extend_with_beam(search_choices.get_beam_info())
        return data

    def _get_target_value(self, target=None, mark_data_key_as_used=True, search_choices=NotSpecified):
        """
        :param str|None target: defaults to self.target
        :param bool mark_data_key_as_used: forwarded self.network.get_extern_data()
        :param SearchChoices|NotSpecified|None search_choices:
        :rtype: Data | None
        """
        if (target is None):
            target = self.target
        if (search_choices is NotSpecified):
            search_choices = self.get_search_choices()
        return self._static_get_target_value(
            target=target, _target_layers=self._target_layers, search_choices=search_choices,
            network=self.network, mark_data_key_as_used=mark_data_key_as_used)

    def _cond_only_on_eval_opt(self, on_eval_func, default_value):
        """
        Wrap `on_eval_func` in a train/eval cond: in training the default value is used.

        :param ()->(tf.Tensor|None) on_eval_func:
        :param float|tf.Tensor default_value:
        :return: tensor (coming from tf.cond if needed) if on_eval_func returned a tensor, otherwise None
        :rtype: tf.Tensor|None
        """
        if (not isinstance(default_value, tf.Tensor)):
            default_value = tf.constant(default_value, name='only_on_eval_dummy_zero')

        class OnEval():
            """
            Closure which remembers whether `on_eval_func` produced an output.
            """

            have_output = True

            @classmethod
            def get_value(cls):
                """
                :rtype: tf.Tensor
                """
                res_ = on_eval_func()
                if (res_ is None):
                    # No output on eval; remember that and return a dummy for the cond branch.
                    cls.have_output = False
                    return default_value
                return res_

        res = self.network.cond_on_train((lambda : default_value), OnEval.get_value)
        if (not OnEval.have_output):
            return None
        return res

    @classmethod
    def get_losses(cls, name, network, output, loss=None, reduce_func=None, layer=None, **kwargs):
        """
        Losses will get constructed here.
        This gets called inside a loss name scope of the layer.
        When overriding this, make sure that it works both with `layer` set and unset.

        :param str name: layer name
        :param returnn.tf.network.TFNetwork network:
        :param Loss|None loss: argument just as for __init__
        :param Data output: the output (template) for the layer
        :param LayerBase|None layer:
            The real layer instance, if it exists at the current point.
            If not given, init() must be called at a later point.
        :param ((tf.Tensor)->tf.Tensor)|None reduce_func: if given, will overwrite the reduce func for the loss.
            By default, every loss_value and error_value is a scalar
            (sum or average over the batches, and over the frames for frame-wise losses).
            However, if you provide reduce_func = returnn.tf.util.basic.identity, you can get the unreduced tensor.
        :param kwargs: all the remaining __init__ args
        :return: the losses defined by this layer
        :rtype: list[returnn.tf.network.LossHolder]
        """
        if (not loss):
            return []
        from returnn.tf.network import LossHolder

        return [LossHolder(name=name, network=network, loss=loss, layer_output=output, layer=layer, reduce_func=reduce_func)]

    def get_losses_initialized(self, reduce_func=None):
        """
        As self.get_losses, but here we return them all initialized (i.e. the layer is set).
        You should not override this method but rather :func:`get_losses`.

        :param ((tf.Tensor)->tf.Tensor)|None reduce_func: as in get_losses
        :return: the losses defined by this layer
        :rtype: list[returnn.tf.network.LossHolder]
        """
        return self.__class__.get_losses(reduce_func=reduce_func, layer=self, **self.kwargs)

    def get_params_l2_norm(self):
        """
        :return: scalar, sum of squared values over all params (2 * sum of tf.nn.l2_loss)
        :rtype: tf.Tensor
        """
        # tf.nn.l2_loss computes sum(t**2)/2, hence the factor 2.
        return (2 * sum([tf.nn.l2_loss(param) for (name, param) in sorted(self.params.items())]))
see :func:`returnn.tf.util.basic.spatial_smoothing_energy`\n :rtype: tf.Tensor\n ' from returnn.tf.util.basic import spatial_smoothing_energy, flatten_with_seq_len_mask energy = spatial_smoothing_energy(self.output.placeholder, dim=self.output.dim) assert self.output.have_time_axis() energy = flatten_with_seq_len_mask(energy, seq_lens=self.output.size_placeholder[self.output.time_dim_axis_excluding_batch], time_major=self.output.is_time_major) energy = tf.reduce_sum(energy) return energy def get_darc1(self): '\n DARC1, simplified Directly Approximately Regularizing Complexity (DARC), via\n Generalization in Deep Learning, https://arxiv.org/abs/1710.05468\n\n :return: scalar\n :rtype: tf.Tensor\n ' with tf.name_scope('darc1'): if self.output_before_activation: x = self.output_before_activation.x else: x = self.output.placeholder mask = self.output.get_sequence_mask() size = tf.size(mask) mask = tf.reshape(mask, (size,)) x = tf.reshape(x, ((size,) + self.output.shape[1:])) x = tf.abs(x) x = tf.where(mask, x, tf.zeros_like(x)) x = tf.reduce_sum(x, axis=0) assert isinstance(x, tf.Tensor) assert (x.get_shape().ndims == 1) x = tf.reduce_max(x) return x def get_constraints_value(self): '\n :return: None or scalar\n :rtype: tf.Tensor|None\n ' decouple_constraints = self.network.get_config().bool('decouple_constraints', False) c = 0 if self.L2: if decouple_constraints: for (_, param) in self.params.items(): assert isinstance(param, tf.Variable) setattr(param, 'RETURNN_constraint_L2', self.L2) else: c += (self.L2 * self.get_params_l2_norm()) if self.spatial_smoothing: c += (self.spatial_smoothing * self.get_output_spatial_smoothing_energy()) if self.darc1: c += (self.darc1 * self.get_darc1()) if (c is 0): return None return c def batch_norm(self, data, use_shift=True, use_std=True, use_sample=0.0, force_sample=False, momentum=NotSpecified, epsilon=0.001, update_sample_only_in_training=NotSpecified, delay_sample_update=NotSpecified, param_version=NotSpecified, gamma_init=1.0, 
beta_init=0.0, masked_time=NotSpecified):
        """
        Applies batch normalization to ``data`` and returns the normalized tensor.

        :param Data data:
        :param bool use_shift: whether to apply a learned shift (beta)
        :param bool use_std: whether to apply a learned scale (gamma)
        :param float use_sample: defaults to 0.0 which is used in training
        :param bool force_sample: even in eval, use the use_sample factor
        :param float momentum: for the running average of sample_mean and sample_std
        :param bool update_sample_only_in_training:
        :param bool delay_sample_update:
        :param int param_version: 0 or 1 or 2
        :param float epsilon:
        :param str|float gamma_init: see :func:`returnn.tf.util.basic.get_initializer`, for the scale
        :param str|float beta_init: see :func:`returnn.tf.util.basic.get_initializer`, for the mean
        :param bool masked_time: flatten and mask input tensor
        :rtype: tf.Tensor

        https://arxiv.org/abs/1502.03167

        With our default settings:

        - In training: use_sample=0, i.e. not using running average, using current batch mean/var.
        - Not in training (e.g. eval): use_sample=1, i.e. using running average, not using current batch mean/var.
        - The running average includes the statistics of the current batch.
        - The running average is also updated when not training.

        Also see:
          tf.nn.batch_normalization()
          https://github.com/deepmind/sonnet/blob/master/sonnet/python/modules/batch_norm.py
        """
        from returnn.util import BehaviorVersion
        # Behavior version >= 12 changed the defaults: smaller momentum, sample stats only
        # updated in training, delayed sample update, and param_version 2.
        if (momentum is NotSpecified):
            momentum = (0.1 if (BehaviorVersion.get() >= 12) else 0.99)
        if (update_sample_only_in_training is NotSpecified):
            update_sample_only_in_training = (True if (BehaviorVersion.get() >= 12) else False)
        if (delay_sample_update is NotSpecified):
            delay_sample_update = (True if (BehaviorVersion.get() >= 12) else False)
        if (param_version is NotSpecified):
            param_version = (2 if (BehaviorVersion.get() >= 12) else 0)
        BehaviorVersion.require(
            (masked_time is not NotSpecified),
            message='batch_norm masked_time should be specified explicitly', version=12)
        if (masked_time is NotSpecified):
            # Only reachable with old behavior versions; keep the old implicit default.
            assert (BehaviorVersion.get() <= 11)
            masked_time = True
        # All batch-norm params live under an absolute "<layer scope>batch_norm" name scope.
        with reuse_name_scope((self.get_absolute_name_scope_prefix() + 'batch_norm'), absolute=True):
            # param_version controls the variable naming scheme and the stats shape:
            # v0: prefixed with layer/data name, broadcastable spatial-batch shape
            # v1: no prefix, broadcastable spatial-batch shape
            # v2: "v2_" prefix, flat [dim] shape (reshaped to broadcast shape on use below)
            if (param_version == 0):
                param_name_prefix = ('%s_%s_' % (self.name, data.name))
            elif (param_version == 1):
                param_name_prefix = ''
            elif (param_version == 2):
                param_name_prefix = 'v2_'
            else:
                raise NotImplementedError(('%s: batch_norm param_version %r' % (self, param_version)))
            stats_shape = (data.get_bc_spatial_batch_shape() if (param_version <= 1) else [data.dim])
            # Running mean/variance are non-trainable state, updated via assign ops below.
            with self.var_creation_scope():
                sample_mean = self.add_param(tf_compat.v1.get_variable(
                    shape=stats_shape, initializer=tf_compat.v1.zeros_initializer(),
                    name=('%smean' % param_name_prefix), trainable=False))
            with self.var_creation_scope():
                sample_variance = self.add_param(tf_compat.v1.get_variable(
                    shape=stats_shape, initializer=tf_compat.v1.ones_initializer(),
                    name=('%svariance' % param_name_prefix), trainable=False))
            if use_std:
                # Trainable scale (gamma).
                with self.var_creation_scope():
                    from returnn.tf.util.basic import get_initializer
                    gamma_initializer = get_initializer(
                        gamma_init,
                        seed=(self.network.random.randint((2 ** 31)) if gamma_init else 0),
                        eval_local_ns={'layer': self})
                    gamma = self.add_param(tf_compat.v1.get_variable(
                        shape=stats_shape, initializer=gamma_initializer,
                        name=('%sgamma' % param_name_prefix), trainable=True))
            else:
                gamma = None
            if use_shift:
                # Trainable shift (beta).
                with self.var_creation_scope():
                    from returnn.tf.util.basic import get_initializer
                    beta_initializer = get_initializer(
                        beta_init,
                        seed=(self.network.random.randint((2 ** 31)) if beta_init else 0),
                        eval_local_ns={'layer': self})
                    beta = self.add_param(tf_compat.v1.get_variable(
                        shape=stats_shape, initializer=beta_initializer,
                        name=('%sbeta' % param_name_prefix), trainable=True))
            else:
                beta = None
            # The fused kernel only covers the standard configuration
            # (v2 params, shift+scale, no masking, batch stats in training).
            # NOTE(review): exponential_avg_factor (used below) was only added to
            # fused_batch_norm in later TF 2.x releases — the >= (2, 0, 0) guard may be
            # too permissive; confirm against the minimum supported TF version.
            use_fused = ((tf_util.tf_version_tuple() >= (2, 0, 0)) and (param_version >= 2)
                         and (not masked_time) and use_shift and use_std
                         and (use_sample == 0) and (not force_sample))

            def _calc_batch_norm_fused(train_flag):
                """
                Batch norm via the fused TF kernel.

                :param bool train_flag:
                :return: like data, optional grouped update op or no_op
                :rtype: (tf.Tensor, tf.Operation)
                """
                from returnn.util import basic as util
                x = data.placeholder
                x_shape = tf_util.get_shape(x)
                # fused_batch_norm expects a 4D NHWC or NCHW tensor; collapse all
                # non-feature axes accordingly and restore the shape afterwards.
                if (data.feature_dim_axis == (data.batch_ndim - 1)):
                    data_format = 'NHWC'
                    x = tf.reshape(x, [(- 1), 1, 1, x_shape[(- 1)]])
                else:
                    data_format = 'NCHW'
                    x = tf.reshape(x, [
                        util.prod(x_shape[:data.feature_dim_axis]),
                        x_shape[data.feature_dim_axis],
                        util.prod(x_shape[(data.feature_dim_axis + 1):]), 1])
                # In training, the kernel also returns updated running stats
                # (exponential moving average with factor `momentum`).
                (bn_, sample_mean_, sample_variance_) = tf_compat.v1.nn.fused_batch_norm(
                    x, scale=gamma, offset=beta, mean=sample_mean, variance=sample_variance,
                    epsilon=epsilon, exponential_avg_factor=momentum,
                    data_format=data_format, is_training=train_flag)
                bn_ = tf.reshape(bn_, x_shape)
                update_ops = []
                if train_flag:
                    # Write the kernel-computed running stats back into our variables.
                    updated_sample_mean = tf_compat.v1.assign(sample_mean, sample_mean_)
                    updated_sample_variance = tf_compat.v1.assign(sample_variance, sample_variance_)
                    update_ops += [updated_sample_mean.op, updated_sample_variance.op]
                op_ = tf.group(*update_ops)
                return (bn_, op_)

            def _calc_batch_norm(train_flag):
                """
                Generic (non-fused) batch norm, supporting masked time, sample mixing etc.
                Note: gamma/beta are applied by the caller in this path.

                :param bool train_flag:
                :return: like data, optional grouped update op or no_op
                :rtype: (tf.Tensor, tf.Operation)
                """
                update_sample = ((not update_sample_only_in_training) or train_flag)
                # Current-batch statistics are only needed if we update the running
                # average or actually use them for normalization.
                need_mean_var_cur_batch = (
                    update_sample or ((use_sample != 1) and (force_sample or train_flag)))
                if need_mean_var_cur_batch:
                    data_ = data
                    if masked_time:
                        # Flatten time+batch and drop padded frames so they do not
                        # contribute to the statistics.
                        data_ = data.copy_time_flattened()
                    (mean_cur_batch, variance_cur_batch) = tf_compat.v1.nn.moments(
                        data_.placeholder, axes=data_.get_axes(exclude_feature=True))
                    mean_cur_batch = tf.reshape(mean_cur_batch, stats_shape)
                    variance_cur_batch = tf.reshape(variance_cur_batch, stats_shape)
                else:
                    (mean_cur_batch, variance_cur_batch) = (None, None)
                update_ops = []
                (sample_mean_, sample_variance_) = (sample_mean, sample_variance)
                if update_sample:
                    # Exponential moving average: new = old + (cur - old) * momentum.
                    updated_sample_mean = tf_compat.v1.assign_add(
                        sample_mean, ((mean_cur_batch - sample_mean) * momentum))
                    updated_sample_variance = tf_compat.v1.assign_add(
                        sample_variance, ((variance_cur_batch - sample_variance) * momentum))
                    update_ops += [updated_sample_mean.op, updated_sample_variance.op]
                    if (not delay_sample_update):
                        # Use the already-updated values below (forces the update
                        # to happen before normalization).
                        sample_mean_ = updated_sample_mean
                        sample_variance_ = updated_sample_variance
                if (force_sample or train_flag):
                    # Mix current-batch and running statistics by use_sample.
                    if (use_sample == 1):
                        (mean, variance) = (sample_mean_, sample_variance_)
                    elif (use_sample == 0):
                        (mean, variance) = (mean_cur_batch, variance_cur_batch)
                    else:
                        mean = (((1.0 - use_sample) * mean_cur_batch) + (use_sample * sample_mean_))
                        variance = (((1.0 - use_sample) * variance_cur_batch)
                                    + (use_sample * sample_variance_))
                else:
                    # Eval without force_sample: always the running statistics.
                    (mean, variance) = (sample_mean_, sample_variance_)
                if (param_version >= 2):
                    # v2 stats are flat [dim]; reshape to broadcast over data.
                    mean = tf.reshape(mean, data.get_bc_spatial_batch_shape())
                    variance = tf.reshape(variance, data.get_bc_spatial_batch_shape())
                bn_ = ((data.placeholder - mean)
                       * tf_compat.v1.rsqrt(tf_util.optional_add(variance, epsilon)))
                op_ = tf.group(*update_ops)
                return (bn_, op_)

            if use_fused:
                (bn, op) = self.network.cond_on_train(
                    (lambda : _calc_batch_norm_fused(True)),
                    (lambda : _calc_batch_norm_fused(False)))
            else:
                (bn, op) = self.network.cond_on_train(
                    (lambda : _calc_batch_norm(True)),
                    (lambda : _calc_batch_norm(False)))
            if tf_compat.executing_eagerly():
                assert (op is None)
            else:
                if isinstance(op, tf.Tensor):
                    op = op.op
                assert isinstance(op, tf.Operation)
                # Ensure the stats update only runs after bn is computed.
                tf_util.add_control_input(op, control_input=bn.op)
                if op._control_flow_context:
                    # Inside a control-flow context (e.g. rec loop) we cannot register
                    # a deferred dependency; force it via control_dependencies here.
                    with tf.control_dependencies([op]):
                        bn = tf.identity(bn)
                else:
                    self.network.register_post_control_dependencies([op])
            if (not use_fused):
                # The fused kernel already applied gamma/beta; apply them here otherwise.
                if use_std:
                    if (param_version >= 2):
                        gamma = tf.reshape(gamma, data.get_bc_spatial_batch_shape())
                    bn *= gamma
                if use_shift:
                    if (param_version >= 2):
                        beta = tf.reshape(beta, data.get_bc_spatial_batch_shape())
                    bn += beta
            return bn

    def get_hidden_state(self):
        """
        If this is a recurrent layer, this would return the hidden state.
        This is used e.g. for the RnnCellLayer class.

        :rtype: tf.Tensor | list[tf.Tensor] | None
        :return: optional tensor(s) with shape (time, batch, dim)
        """
        # Base class default: no hidden state.
        return None

    def get_last_hidden_state(self, key):
        """
        If this is a recurrent layer, this would return the last hidden state.
        Otherwise, we return None.

        :param int|str|None key: also the special key "*"
        :rtype: tf.Tensor | None
        :return: optional tensor with shape (batch, dim)
        """
        if (key in self.rec_vars_outputs):
            return self.rec_vars_outputs[key]
        if ((key is None) and (len(self.rec_vars_outputs) == 1)):
            # No explicit key and exactly one state: unambiguous.
            return list(self.rec_vars_outputs.values())[0]
        # A non-matching key is only ok if there is no state at all.
        assert (not self.rec_vars_outputs)
        return None

    def post_process_final_rec_vars_outputs(self, rec_vars_outputs, seq_len):
        """
        Hook to transform the final recurrent state after the rec loop.
        Base implementation is the identity.

        :param dict[str,tf.Tensor] rec_vars_outputs:
        :param tf.Tensor seq_len: shape (batch,)
        :rtype: dict[str,tf.Tensor]
        """
        return rec_vars_outputs

    @classmethod
    def get_rec_initial_output(cls, batch_dim, name, output, rec_layer, initial_output=None, **kwargs):
        """
        If this layer is used inside a recurrent layer, this function specifies the
        output of frame t=-1, if it is needed.
        As arguments, we get the usual layer arguments.
        batch_dim is added because it might be special because of beam search.

        Note: This could maybe share code with :func:`RnnCellLayer.get_rec_initial_state`.

        :param tf.Tensor batch_dim: including beam size in beam search
        :param str name: layer name
        :param Data output: template
        :param returnn.tf.layers.rec.RecLayer rec_layer:
        :param str|float|int|tf.Tensor|None initial_output:
        :rtype: tf.Tensor
        """
        import numpy
        v = initial_output
        data = output
        if isinstance(v, tf.Tensor):
            # Already a concrete tensor: use as-is.
            return v
        if isinstance(v, LayerBase):
            # Another layer provides the initial output; adapt its format (and beam).
            v = v.output.copy_compatible_to(output)
            if output.beam:
                v = v.copy_extend_with_beam(output.beam)
            return v.placeholder
        if ((v is None) and data.sparse):
            # No sensible implicit default exists for sparse (index) data.
            raise Exception((
                ('You must explicitly provide an initial output value for sparse data %r.' % data)
                + (" E.g. '%s': {'initial_output': 'zeros'}." % name)))
        if (v is None):
            v = 'zeros'
        # Concrete shape of one frame, with the (possibly beam-extended) batch dim.
        shape = []
        for dim in data.dims:
            if dim.is_batch_dim():
                shape.append(batch_dim)
            else:
                shape.append(dim.get_dim_value())
        if isinstance(v, (float, int)):
            with tf.name_scope(('init_%s_const' % name)):
                from returnn.tf.util.basic import constant_with_shape
                return tf.cast(constant_with_shape(v, shape=shape), dtype=data.dtype)
        assert isinstance(v, str)
        if (v == 'zeros'):
            return tf.zeros(shape, dtype=data.dtype, name=('init_%s_zeros' % name))
        elif (v == 'ones'):
            return tf.ones(shape, dtype=data.dtype, name=('init_%s_ones' % name))
        elif (v == 'var'):
            # A trainable initial output. Only supported for dense data whose static
            # shape is fully known and collapses to a single [dim] vector.
            assert (not data.sparse)
            assert (all(data.shape) and (numpy.prod(data.shape) == data.dim))
            # NOTE(review): block nesting reconstructed from collapsed source; it looks
            # like reshape/tile live inside var_creation_scope — confirm against upstream.
            with rec_layer.var_creation_scope():
                x = tf_compat.v1.get_variable(
                    ('init_%s_var' % name), shape=(data.dim,), dtype=data.dtype,
                    initializer=tf.zeros_initializer())
                x = tf.reshape(x, [(d or 1) for d in data.batch_shape],
                               name=('init_%s_var_bc' % name))
                x = tf.tile(x, [(batch_dim if (i == data.batch_dim_axis) else 1)
                                for i in range(data.batch_ndim)],
                            name=('init_%s_var_batch_bc' % name))
            return x
        elif (v == 'apply(0)'):
            # Apply the layer itself to zeroed inputs and use that as initial output.
            kwargs = kwargs.copy()
            sources = kwargs.pop('sources')
            zeroed_sources = []
            for src in sources:
                assert isinstance(src, LayerBase)
                src_output = src.output.copy()
                # Determine a concrete shape for the zero tensor.
                if (src_output.placeholder is not None):
                    zeroed_src_shape = tf_util.get_shape(src_output.placeholder)
                    zeroed_src_shape = [zeroed_src_shape[i] for i in range(src_output.batch_ndim)]
                else:
                    zeroed_src_shape = []
                    for (i, d) in enumerate(src_output.batch_shape):
                        if (d is None):
                            if src_output.has_dynamic_size(i):
                                d = tf.reduce_max(src_output.get_dynamic_size(i))
                        if (d is None):
                            # Unknown dynamic dim without a size tensor: fall back to 1.
                            d = 1
                        zeroed_src_shape.append(d)
                # Use the given batch_dim (may include beam).
                # NOTE(review): indentation reconstructed from collapsed source; this
                # looks like it applies on both shape paths — confirm against upstream.
                if (src_output.batch_dim_axis is not None):
                    zeroed_src_shape[src_output.batch_dim_axis] = batch_dim
                if ((not src_output.beam) and output.beam):
                    src_output = src_output.copy_extend_with_beam(output.beam)
                src_output.placeholder = tf.zeros(
                    zeroed_src_shape, dtype=src_output.dtype,
                    name=('init_%s_zeros' % tf_util.get_valid_scope_name_from_str(src.name)))
                src_output.name += '_zeroed'
                src_output.sanity_check()
                if rec_layer.network.get_config().bool('debug_runtime_sanity_checks', False):
                    with tf.name_scope(tf_util.get_valid_scope_name_from_str((src.name + '_zeroed'))):
                        src_output.placeholder = src_output.get_placeholder_with_runtime_sanity_checks()
                zeroed_src = InternalLayer(
                    name=('%s_zeroed' % src.name), output=src_output, network=src.network)
                zeroed_sources.append(zeroed_src)
            # Construct the layer on the zeroed sources; its output is the t=-1 frame.
            layer = cls(name=name, output=output.copy(), sources=list(zeroed_sources), **kwargs)
            out = layer.output.placeholder
            out.set_shape(data.batch_shape)
            return out
        else:
            raise Exception(('invalid initial output type %r for sub-layer %r' % (v, name)))

    @classmethod
    def get_rec_initial_extra_outputs(cls, batch_dim, rec_layer, **kwargs):
        """
        Initial values for extra recurrent state (rec_vars_outputs). Base: none.

        :param tf.Tensor batch_dim: for this layer, might be with beam
        :param returnn.tf.layers.rec.RecLayer|LayerBase|None rec_layer: for the scope
        :rtype: dict[str,tf.Tensor]
        """
        return {}

    @classmethod
    def get_rec_initial_extra_outputs_shape_invariants(cls, rec_layer, **kwargs):
        """
        :param returnn.tf.layers.rec.RecLayer|LayerBase|None rec_layer: for the scope
        :return: optional shapes for the tensors by get_rec_initial_extra_outputs
        :rtype: dict[str,tf.TensorShape]
        """
        return {}
class InternalLayer(LayerBase):
    """
    This is not supposed to be used by the user.
    It is used by some code to construct a wrapper layer or so.
    """

    def __init__(self, output: Data, debug_type_name: Optional[str] = None, **kwargs):
        """
        :param output:
        :param debug_type_name: just for repr
        """
        # Normalize the given output template before the base-class init.
        output = self.fixup_out_data(output=output, **kwargs)
        super(InternalLayer, self).__init__(output=output, **kwargs)
        self.debug_type_name = debug_type_name

    def __repr__(self):
        # e.g. "<InternalLayer(foo) /prefix/'name' out_type=...>"
        type_suffix = f'({self.debug_type_name})' if self.debug_type_name else ''
        out_desc = self.output.get_description(with_name=False) if self.output else None
        return '<%s%s %s%r out_type=%s>' % (
            self.__class__.__name__,
            type_suffix,
            self.network.get_absolute_name_prefix(),
            self.name,
            out_desc)

    @classmethod
    def transform_config_dict(cls, d, network, get_layer):
        """
        :param dict[str] d:
        :param returnn.tf.network.TFNetwork network:
        :param get_layer:
        """
        # Internal layers usually have no sources; default to none.
        d.setdefault('from', [])
        super(InternalLayer, cls).transform_config_dict(d, network=network, get_layer=get_layer)