# NOTE: removed stray markdown-table residue ("code stringlengths ... |---|") left over
# from dataset extraction; it is not part of the module source.
def mod(a: Tensor, b: Tensor) -> Tensor:
    """Elementwise modulo ``a % b``, dispatched via :func:`combine` with the ``'mod'`` op."""
    result = combine(a, 'mod', b)
    return result
|
def pow(a: Tensor, b: Tensor) -> Tensor:
    """Elementwise power ``a ** b`` (shadows builtin ``pow`` on purpose, rf-namespace convention)."""
    result = combine(a, 'pow', b)
    return result
|
def squared_difference(a: Tensor, b: Tensor) -> Tensor:
    """Elementwise ``(a - b) ** 2``, dispatched via :func:`combine`."""
    result = combine(a, 'squared_difference', b)
    return result
|
def logical_and(a: Tensor, b: Tensor) -> Tensor:
    """Elementwise logical AND of two (boolean) tensors."""
    result = combine(a, 'logical_and', b)
    return result
|
def logical_or(a: Tensor, b: Tensor) -> Tensor:
    """Elementwise logical OR of two (boolean) tensors."""
    result = combine(a, 'logical_or', b)
    return result
|
def logical_not(a: Tensor) -> Tensor:
    """Elementwise logical NOT, dispatched to the backend's ``'logical_not'`` activation."""
    backend = a._raw_backend
    return backend.activation(a, 'logical_not')
|
@overload
def opt_logical_or(a: bool, b: bool) -> bool:
    # Typing-only overload stub for the plain-bool case; the actual implementation
    # (and presumably further Tensor overloads) is defined elsewhere in the file — TODO confirm.
    'logical or'
|
def maximum(a: Tensor, b: Union[Tensor, _RawTensorTypes], *other_tensors) -> Tensor:
    """
    Elementwise maximum over two or more tensors.

    :param a: first operand
    :param b: second operand
    :param other_tensors: optional further operands, folded in left-to-right
    :return: elementwise maximum of all operands
    """
    # Folding starts from combine(a, b); with no extra operands the loop is a no-op,
    # which matches the plain two-argument behavior.
    res = combine(a, 'maximum', b)
    for extra in other_tensors:
        res = combine(res, 'maximum', extra)
    return res
|
def minimum(a: Tensor, b: Union[Tensor, _RawTensorTypes], *other_tensors) -> Tensor:
    """
    Elementwise minimum over two or more tensors.

    :param a: first operand
    :param b: second operand
    :param other_tensors: optional further operands, folded in left-to-right
    :return: elementwise minimum of all operands
    """
    # Folding starts from combine(a, b); with no extra operands the loop is a no-op,
    # which matches the plain two-argument behavior.
    res = combine(a, 'minimum', b)
    for extra in other_tensors:
        res = combine(res, 'minimum', extra)
    return res
|
def clip_by_value(x: Tensor, clip_value_min: Union[Tensor, _RawTensorTypes], clip_value_max: Union[Tensor, _RawTensorTypes], *, allow_broadcast_all_sources: bool = False) -> Tensor:
    """
    Clip ``x`` elementwise into the range [clip_value_min, clip_value_max].

    :param x: input tensor
    :param clip_value_min: lower bound (tensor or scalar)
    :param clip_value_max: upper bound (tensor or scalar)
    :param allow_broadcast_all_sources: passed through to the backend
    :return: clipped tensor
    """
    backend = x._raw_backend
    return backend.clip_by_value(x, clip_value_min, clip_value_max, allow_broadcast_all_sources=allow_broadcast_all_sources)
|
def identity(x: Tensor) -> Tensor:
'\n Identity function. Just to have one canonical. Does nothing, returns the input.\n '
return x
|
def exp(a: Tensor) -> Tensor:
    """Elementwise exponential, via the backend's ``'exp'`` activation."""
    backend = a._raw_backend
    return backend.activation(a, 'exp')
|
def expm1(a: Tensor) -> Tensor:
    """Elementwise ``exp(a) - 1`` (numerically stable variant), via the backend."""
    backend = a._raw_backend
    return backend.activation(a, 'expm1')
|
def log(a: Tensor) -> Tensor:
    """Elementwise natural logarithm, via the backend's ``'log'`` activation."""
    backend = a._raw_backend
    return backend.activation(a, 'log')
|
def safe_log(a: Tensor, *, eps: Optional[float] = None) -> Tensor:
    """
    Log with a small epsilon added for numerical safety near zero.

    :param a: input tensor
    :param eps: epsilon; defaults to the smallest positive (subnormal) value of ``a.dtype``
    :return: log(a + eps) semantics, as implemented by the backend
    """
    if eps is None:
        # Smallest positive subnormal per float dtype.
        tiny_by_dtype = {'float16': 6e-08, 'bfloat16': 9.1835e-41, 'float32': 1.4013e-45, 'float64': 5e-324}
        eps = tiny_by_dtype[a.dtype]
    backend = a._raw_backend
    return backend.safe_log(a, eps=eps)
|
def log1p(a: Tensor) -> Tensor:
    """Elementwise ``log(1 + a)`` (numerically stable variant), via the backend."""
    backend = a._raw_backend
    return backend.activation(a, 'log1p')
|
def sqrt(a: Tensor) -> Tensor:
    """Elementwise square root, via the backend's ``'sqrt'`` activation."""
    backend = a._raw_backend
    return backend.activation(a, 'sqrt')
|
def rsqrt(a: Tensor) -> Tensor:
    """Elementwise reciprocal square root ``1 / sqrt(a)``, via the backend."""
    backend = a._raw_backend
    return backend.activation(a, 'rsqrt')
|
def square(a: Tensor) -> Tensor:
    """Elementwise square ``a * a``, via the backend's ``'square'`` activation."""
    backend = a._raw_backend
    return backend.activation(a, 'square')
|
def abs(a: Tensor) -> Tensor:
    """Elementwise absolute value (shadows builtin ``abs`` on purpose, rf-namespace convention)."""
    backend = a._raw_backend
    return backend.activation(a, 'abs')
|
def tanh(a: Tensor) -> Tensor:
    """Elementwise hyperbolic tangent, via the backend's ``'tanh'`` activation."""
    backend = a._raw_backend
    return backend.activation(a, 'tanh')
|
def sigmoid(a: Tensor) -> Tensor:
    """Elementwise sigmoid ``1 / (1 + exp(-a))``, via the backend."""
    backend = a._raw_backend
    return backend.activation(a, 'sigmoid')
|
def log_sigmoid(a: Tensor) -> Tensor:
    """Elementwise ``log(sigmoid(a))`` (numerically stable variant), via the backend."""
    backend = a._raw_backend
    return backend.activation(a, 'log_sigmoid')
|
def sin(a: Tensor) -> Tensor:
    """Elementwise sine, via the backend's ``'sin'`` activation."""
    backend = a._raw_backend
    return backend.activation(a, 'sin')
|
def cos(a: Tensor) -> Tensor:
    """Elementwise cosine, via the backend's ``'cos'`` activation."""
    backend = a._raw_backend
    return backend.activation(a, 'cos')
|
def ceil(a: Tensor) -> Tensor:
    """Elementwise ceiling (round up), via the backend's ``'ceil'`` activation."""
    backend = a._raw_backend
    return backend.activation(a, 'ceil')
|
def floor(a: Tensor) -> Tensor:
    """Elementwise floor (round down), via the backend's ``'floor'`` activation."""
    backend = a._raw_backend
    return backend.activation(a, 'floor')
|
def round(a: Tensor) -> Tensor:
    """Elementwise rounding (shadows builtin ``round`` on purpose, rf-namespace convention)."""
    backend = a._raw_backend
    return backend.activation(a, 'round')
|
def relu(a: Tensor) -> Tensor:
    """Elementwise rectified linear unit ``max(a, 0)``, via the backend."""
    backend = a._raw_backend
    return backend.activation(a, 'relu')
|
def elu(a: Tensor) -> Tensor:
    """Elementwise exponential linear unit (ELU), via the backend."""
    backend = a._raw_backend
    return backend.activation(a, 'elu')
|
def selu(a: Tensor) -> Tensor:
    """Elementwise scaled exponential linear unit (SELU), via the backend."""
    backend = a._raw_backend
    return backend.activation(a, 'selu')
|
def silu(a: Tensor) -> Tensor:
    """
    SiLU / swish activation, ``a * sigmoid(a)``, via the backend.

    Introduced in "Gaussian Error Linear Units (GELUs)"
    [Hendrycks et al. 2016](https://arxiv.org/abs/1606.08415) and
    "Sigmoid-Weighted Linear Units for Neural Network Function Approximation in
    Reinforcement Learning" [Elfwing et al. 2017](https://arxiv.org/abs/1702.03118),
    independently discovered (and called swish) in "Searching for Activation Functions"
    [Ramachandran et al. 2017](https://arxiv.org/abs/1710.05941).
    """
    backend = a._raw_backend
    return backend.activation(a, 'silu')
|
def softmax(a: Tensor, *, axis: Dim, use_mask: bool = True) -> Tensor:
    """
    Softmax over the given axis.

    :param a: input logits
    :param axis: axis to normalize over
    :param use_mask: whether to apply sequence masking for dynamic axes
    """
    backend = a._raw_backend
    return backend.softmax(a, axis=axis, use_mask=use_mask)
|
def log_softmax(a: Tensor, *, axis: Dim, use_mask: bool = True) -> Tensor:
    """
    Log-softmax (numerically stable ``log(softmax(a))``) over the given axis.

    :param a: input logits
    :param axis: axis to normalize over
    :param use_mask: whether to apply sequence masking for dynamic axes
    """
    backend = a._raw_backend
    return backend.log_softmax(a, axis=axis, use_mask=use_mask)
|
def gating(x: Tensor, *, axis: Optional[Dim] = None, gate_func=sigmoid, act_func=identity, out_dim: Optional[Dim] = None) -> Tuple[Tensor, Dim]:
    """
    Gating as in the gated linear unit (GLU): https://arxiv.org/abs/1612.08083
    (GLU also includes the linear transformation before the gating, which is why this
    function is not called GLU. GLU uses gate_func=sigmoid, act_func=identity, the defaults here.)

    Other gating variants exist, see e.g. https://arxiv.org/abs/2002.05202 (gate_func=gelu).

    :param x: input; split into two halves along ``axis``
    :param axis: axis to split; defaults to ``x.feature_dim``; must be static and even
    :param gate_func: applied to the second half
    :param act_func: applied to the first half
    :param out_dim: dim of the result; defaults to half of ``axis``
    :return: (act_func(first_half) * gate_func(second_half), out_dim)
    """
    if axis is None:
        assert x.feature_dim is not None, f'gating {x}: need tensor with feature dim set, or explicit `axis`'
        axis = x.feature_dim
    assert axis.is_static() and axis.dimension % 2 == 0, f'gating {x}: need static dim, and even, got {axis}'
    if not out_dim:
        out_dim = axis.div_left(2)
    act_part, gate_part = rf.split(x, axis=axis, out_dims=[out_dim, out_dim])
    return act_func(act_part) * gate_func(gate_part), out_dim
|
def matmul(a: Tensor[T], b: Tensor[T], *, reduce: Union[Dim, Sequence[Dim]], use_mask: bool = True) -> Tensor[T]:
    """
    Batched matmul of ``a`` and ``b`` (non-batched matmul and dot product are special cases).

    The underlying op is (shared..., I, J) * (shared..., J, K) -> (shared..., I, K):
    the reduced axis J is given by ``reduce`` (multiple reduce dims are merged via reshape first);
    dims common to ``a`` and ``b`` are treated as batch ("shared") dims;
    dims unique to ``a`` resp. ``b`` form I resp. K (multiple or no unique axes are supported).
    Depending on which dims exist, this covers scaling, scalar product, outer product,
    matrix-vector and matrix-matrix multiplication, etc., all possibly batched.

    :param a:
    :param b:
    :param reduce: dims to reduce over; must be present in both ``a`` and ``b``
    :param use_mask: when reducing over dynamic axes, masking is needed for a correct sum;
        applied automatically unless disabled here
    :return: result; dim order: common axes as sorted in ``a``, unique axes of ``a`` (in order),
        unique axes of ``b`` (in order)
    """
    backend = a._raw_backend
    return backend.matmul(a=a, b=b, reduce=reduce, use_mask=use_mask)
|
class Module:
    """
    Base class for modules; can represent a subnetwork in RETURNN.

    You can write PyTorch-like code here, like::

        class MyModule(rf.Module):

            def __init__(self, dim: Dim, activation=tanh):
                super().__init__()
                self.layer_norm = rf.LayerNorm(dim)
                self.linear = rf.Linear(dim, dim)
                self.activation = activation

            def __call__(self, x: Tensor) -> Tensor:
                x_ = x
                x = self.layer_norm(x)
                x = self.linear(x)
                x = self.activation(x)
                return x_ + x

    A module (as in PyTorch or Keras) has params, but getting output for some input
    requires an additional ``__call__``, which can be called multiple times;
    every such call shares the same module parameters.

    By convention, ``__init__`` gets module-level arguments describing the parameters,
    while input-specific arguments (e.g. spatial dims, or other per-call options)
    are arguments of ``__call__``, although there are no strict rules.
    """

    def __init__(self):
        """
        By convention, any options to the module are passed to __init__,
        and potential changing inputs (other tensors) are passed to :func:`__call__`.
        """

    def __repr__(self):
        return f'<{self.__class__.__name__}>'

    def default_initial_state(self, *, batch_dims: Sequence[Dim]) -> Optional[rf.State]:
        """
        :return: default initial state, to be used if the module has recurrent (hidden) state.
            When a module has recurrent state, the convention is to return a tuple with an
            instance of :class:`State` as the last item, and to accept the ``state`` argument
            with a :class:`State` of the same nested structure.
            Returns None if neither self nor any child has state.
        """
        state = rf.State()
        for key, mod in self.named_children():
            sub_state = mod.default_initial_state(batch_dims=batch_dims)
            if sub_state:
                state[key] = sub_state
        if state:
            return state
        return None

    def get_default_name(self) -> str:
        """
        Get a default layer name (used when we do not have a Module attribute pointing to this).
        This is used by :class:`NameCtx` for the RETURNN layer naming
        (but only when the RETURNN layer name is not implied by the module attribute hierarchy).
        """
        name = self.__class__.__name__
        if name.startswith('_'):
            name = name[1:]
        if name[:1].isupper():
            from returnn.util.basic import camel_case_to_snake_case

            name = camel_case_to_snake_case(name)
        return name

    def __call__(self, *args, **kwargs) -> Union[Tensor, Tuple[Tensor, rf.State], Any]:
        """
        Main module call.

        Note that there is nothing really specific about this method.
        Your module can have other methods as well, and you don't necessarily need to define this.
        Only certain other functions or modules like Sequential make use of it.
        """
        raise OptionalNotImplementedError

    def get_deep(self, target: str) -> Any:
        """
        Returns the deep attrib given by ``target`` if it exists, otherwise throws an error.
        """
        if target == '':
            return self
        atoms: List[str] = target.split('.')
        mod: Module = self
        # All atoms except the last must resolve to (sub-)modules.
        for item in atoms[:-1]:
            if not hasattr(mod, item):
                raise AttributeError(f'{mod} has no attribute `{item}`')
            mod = getattr(mod, item)
            if not isinstance(mod, Module):
                raise AttributeError(f'`{item}` is not an rf.Module')
        return getattr(mod, atoms[-1])

    def set_deep(self, target: str, value: Any) -> None:
        """
        Sets the deep attrib given by ``target`` to ``value``.
        """
        if target == '':
            raise AttributeError('Cannot set root module')
        if '.' in target:
            # Bugfix: maxsplit must be 1 (was 2). With maxsplit=2, any target containing
            # two or more dots (e.g. "a.b.c") splits into three parts and the unpacking
            # below raises ValueError instead of setting the attribute.
            prefix, target = target.rsplit('.', 1)
            mod = self.get_deep(prefix)
            if not isinstance(mod, Module):
                raise AttributeError(f'{self}: `{prefix}` is not an rf.Module')
        else:
            mod = self
        setattr(mod, target, value)

    def children(self) -> Iterator[rf.Module]:
        """
        Get all immediate children modules, excluding self.
        """
        return self.modules(recurse=False, include_self=False)

    def named_children(self) -> Iterator[Tuple[str, rf.Module]]:
        """
        Get all immediate children modules with their names, excluding self.
        """
        return self.named_modules(recurse=False, include_self=False)

    def modules(self, *, recurse: bool = True, include_self: bool = True) -> Iterator[rf.Module]:
        """
        Get all children modules, optionally recursively, maybe including self.
        """
        for _, child in self.named_modules(recurse=recurse, include_self=include_self):
            yield child

    def named_modules(self, *, recurse: bool = True, include_self: bool = True, memo: Optional[Set[RefIdEq[rf.Module]]] = None, prefix: str = '') -> Iterator[Tuple[str, rf.Module]]:
        """
        Get all children modules (including self iff include_self=True (default)), optionally recursively.
        Each module is visited at most once (tracked via ``memo`` by object identity).
        """
        if memo is None:
            memo = set()
        # Bugfix: memo holds RefIdEq wrappers (see memo.add below), so membership must be
        # checked with a RefIdEq wrapper as well; the previous `self in memo` never matched.
        if RefIdEq(self) in memo:
            return
        memo.add(RefIdEq(self))
        if include_self:
            yield prefix, self
        # Breadth-first traversal over module attributes.
        queue = [(prefix, self)]
        while queue:
            prefix, mod = queue.pop(0)
            for name, module in vars(mod).items():
                if not isinstance(module, Module):
                    continue
                if RefIdEq(module) in memo:
                    continue
                sub_prefix = prefix + ('.' if prefix and not prefix.endswith('.') else '') + name
                memo.add(RefIdEq(module))
                yield sub_prefix, module
                if recurse:
                    queue.append((sub_prefix, module))

    def named_parameters(self, *, recurse: bool = True) -> Iterator[Tuple[str, rf.Parameter]]:
        """
        Get all children parameters, together with their names.

        With recurse=True (default), this iterates over all children modules
        and iterates through their parameters as well.
        Each parameter is yielded at most once (tracked by object identity).
        """
        memo: Set[RefIdEq[Tensor]] = set()
        for prefix, module in self.named_modules() if recurse else [('', self)]:
            for key, value in vars(module).items():
                if isinstance(value, rf.Parameter) and RefIdEq(value) not in memo:
                    sub_prefix = prefix + ('.' if prefix else '') + key
                    memo.add(RefIdEq(value))
                    yield sub_prefix, value

    def parameters(self, *, recurse: bool = True) -> Iterator[rf.Parameter]:
        """
        Get all children parameters. Also see :func:`named_parameters` for some more documentation.
        """
        for _, param in self.named_parameters(recurse=recurse):
            yield param

    @property
    def has_parameters(self):
        """
        Whether this module (incl. children) has any parameters.
        """
        for _ in self.named_parameters(recurse=True):
            return True
        return False

    def apply(self: T, fn: Callable[[rf.Module], None]) -> T:
        """
        Applies the function ``fn`` to all children modules and self (children first).

        :return: self
        """
        for child in self.children():
            child.apply(fn)
        fn(self)
        return self
|
class Functional(Module):
    """
    Wraps a plain function (pure functional, i.e. not a method of another module) as a module,
    e.g. via :class:`ModuleList` to wrap up any functions or lambdas as modules.

    (This is often not necessary, but sometimes useful.)
    """

    def __init__(self, func):
        super().__init__()
        assert callable(func)
        self.func = func

    def __repr__(self):
        return f'{self.__class__.__name__}({self.func.__qualname__})'

    def get_default_name(self) -> str:
        """default name, derived from the wrapped function's qualified name"""
        import re

        qualname = self.func.__qualname__
        assert isinstance(qualname, str)
        # Tensor dunder methods map to their plain op name, e.g. Tensor.__add__ -> "add".
        if qualname.startswith('Tensor.__'):
            match = re.match('^Tensor\\.__(.*)__$', qualname)
            if match:
                return match.group(1)
        if '.<locals>.' in qualname:
            qualname = qualname.replace('.<locals>.', '.')
        return qualname

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)
|
def moments(x: Tensor, axis: Union[Dim, Sequence[Dim]]) -> Tuple[Tensor, Tensor]:
    """
    :param x: input
    :param axis: the axis (or axes) to be reduced, to calculate statistics over
    :return: (mean, variance); same shape as the input with the axis removed
    """
    mean = rf.reduce_mean(x, axis=axis)
    # stop_gradient on the mean: gradient flows through the variance only via x itself.
    centered_sq = rf.squared_difference(x, rf.stop_gradient(mean))
    variance = rf.reduce_mean(centered_sq, axis=axis)
    return mean, variance
|
class LayerNorm(rf.Module):
    """
    `Layer normalization <https://arxiv.org/abs/1607.06450>`__.

    Note that we *just* normalize over the feature-dim axis here.
    This is consistent with the default behavior of :class:`tf.keras.layers.LayerNormalization`
    and how it is commonly used in many models, including Transformer.

    However, there are cases where it would be common to normalize over all axes except batch-dim,
    or all axes except batch and time. For a more generic variant, see :func:`norm`.
    """

    def __init__(self, in_dim: Union[rf.Dim, Sequence[rf.Dim]], *, eps: float = 1e-06):
        super().__init__()
        self.in_dim = in_dim
        self.eps = eps
        scale_dims = [self.in_dim] if isinstance(self.in_dim, rf.Dim) else self.in_dim
        self.scale = rf.Parameter(scale_dims)
        self.scale.initial = 1.0
        self.bias = rf.Parameter(self.scale.dims)
        self.bias.initial = 0.0

    def __call__(self, x: Tensor) -> Tensor:
        mean, variance = rf.moments(x, axis=self.in_dim)
        normed = (x - mean) * rf.rsqrt(variance + self.eps)
        return normed * self.scale + self.bias
|
class BatchNorm(rf.Module):
    """
    Batch normalization. https://arxiv.org/abs/1502.03167

    Note that the default arguments differ from corresponding batch norm in RETURNN.
    See here for discussion on defaults: https://github.com/rwth-i6/returnn/issues/522

    We calculate statistics over all axes except the given in_dim.
    I.e. all other axes are reduced for the statistics.

    To compensate the normalization, there are learnable parameters gamma and beta
    (optional, used when option `affine` is True).

    The usual behavior depends on whether this is used in training or evaluation,
    although this often configurable in other frameworks.
    The usual behavior, in training::

        # Using statistics from current batch.
        mean_cur_batch, variance_cur_batch = moments(source, reduce_dims)
        y = (x - mean_cur_batch) / sqrt(variance_cur_batch + epsilon)
        y = gamma * y + beta

        # Updating running statistics for later use.
        mean = (1 - momentum) * mean + momentum * mean_cur_batch
        variance = (1 - momentum) * variance + momentum * variance_cur_batch

    The usual behavior, not in training (i.e. in evaluation)::

        # Using collected statistics. Not using statistics from current batch.
        y = (x - mean) / sqrt(variance + epsilon)
        y = gamma * y + beta

    """

    def __init__(self, in_dim: Dim, *, affine: bool = True, momentum: float = 0.1, eps: float = 0.001, track_running_stats: bool = True, use_mask: Optional[bool] = None):
        """
        :param in_dim: the feature dimension of the input
        :param affine: whether to use learnable parameters gamma and beta
        :param momentum: momentum for the running mean and variance
        :param eps: epsilon for the variance
        :param track_running_stats:
            If True, uses statistics of the current batch for normalization during training,
            and the tracked statistics (running mean and variance) during evaluation.
            If False, uses statistics of the current batch for normalization during both training and evaluation.
        :param use_mask: whether to use a mask for dynamic spatial dims.
            This must be specified if the input has dynamic spatial dims.
            True would use the correct masking then. However, that is inconsistent to all other frameworks
            which ignore the masking, and also slower, and the fused op would not be used.
            False would be consistent to all other frameworks,
            and potentially allows for the use of an efficient fused op internally.
        """
        super().__init__()
        assert isinstance(in_dim, Dim)
        self.in_dim = in_dim
        self.use_mask = use_mask
        self.momentum = momentum
        self.eps = eps
        if track_running_stats:
            # auxiliary=True: running stats are not trainable; updated manually in __call__.
            self.running_mean = rf.Parameter([in_dim], auxiliary=True)
            self.running_mean.initial = 0.0
            self.running_variance = rf.Parameter([in_dim], auxiliary=True)
            self.running_variance.initial = 1.0
        else:
            self.running_mean = None
            self.running_variance = None
        self.affine = affine
        self.gamma = None
        self.beta = None
        if self.affine:
            self.gamma = rf.Parameter([in_dim])
            self.gamma.initial = 1.0
            self.beta = rf.Parameter([in_dim])
            self.beta.initial = 0.0

    def __call__(self, source: Tensor) -> Tensor:
        assert (self.in_dim in source.dims)
        # use_mask only matters if any reduced axis is dynamic (needs masking).
        if any((d.need_masking() for d in source.dims if (d != self.in_dim))):
            if (self.use_mask is None):
                raise ValueError(f'{self}: use_mask must be specified if the input {source} has any dynamic spatial dims')
            use_mask = self.use_mask
        else:
            use_mask = False
        if use_mask:
            # Manual (masked) path; the fused backend op below does not support masking.
            # Current-batch stats are needed either for normalization (training / no tracking)
            # or to update the running stats.
            use_current_batch_stats = ((self.running_mean is None) or rf.get_run_ctx().train_flag)
            update_running_stats = ((self.running_mean is not None) and rf.get_run_ctx().train_flag)
            # opt_logical_or: train_flag may be a scalar tensor in graph mode — presumably this
            # handles the bool-or-Tensor case; rf.cond then evaluates branches lazily.
            need_current_batch_stats = rf.opt_logical_or(use_current_batch_stats, update_running_stats)
            (mean_cur_batch, variance_cur_batch) = rf.cond(need_current_batch_stats, (lambda : rf.moments(source, axis=[d for d in source.dims if (d != self.in_dim)])), (lambda : (self.running_mean, self.running_variance)))
            def _update_running_stats():
                # Equivalent to: running = (1 - momentum) * running + momentum * current.
                self.running_mean.assign_add(((mean_cur_batch - self.running_mean) * self.momentum))
                self.running_variance.assign_add(((variance_cur_batch - self.running_variance) * self.momentum))
            rf.cond(update_running_stats, _update_running_stats, (lambda : None))
            (mean, variance) = rf.cond(use_current_batch_stats, (lambda : (mean_cur_batch, variance_cur_batch)), (lambda : (self.running_mean, self.running_variance)))
            bn = ((source - mean) * rf.rsqrt((variance + self.eps)))
            if (self.gamma is not None):
                bn *= self.gamma
            if (self.beta is not None):
                bn += self.beta
            return bn
        # Unmasked path: delegate to the backend's (potentially fused) batch-norm op.
        return source._raw_backend.batch_norm(source=source, in_dim=self.in_dim, use_mask=use_mask, affine=self.affine, momentum=self.momentum, epsilon=self.eps, running_mean=self.running_mean, running_variance=self.running_variance, gamma=self.gamma, beta=self.beta)
|
def normalize(a: Tensor, *, axis: Union[Dim, Sequence[Dim]], epsilon: float = 1e-06) -> Tensor:
    """
    Mean- and variance-normalize the input over the given axis/axes,
    such that the result has mean 0 and variance 1.

    If you want this to be shifted and scaled again, you need additional parameters,
    cf. :class:`Normalize`.

    :param a: input
    :param axis: axis/axes over which mean and variance are computed
    :param epsilon: epsilon for numerical stability
    :return: (a - mean) / sqrt(variance + epsilon)
    """
    mean, variance = rf.moments(a, axis=axis)
    centered = a - mean
    return centered * rf.rsqrt(variance + epsilon)
|
class Normalize(rf.Module):
    """
    :func:`normalize` with additional learnable scale and bias.
    """

    def __init__(self, *, param_dims: Union[Dim, Sequence[Dim]], epsilon: float = 1e-06, scale: bool = True, bias: bool = True):
        """
        :param param_dims: shape of the scale and bias parameters
        :param epsilon: epsilon for numerical stability
        :param scale: whether to include a trainable scale
        :param bias: whether to include a trainable bias
        """
        super().__init__()
        self.epsilon = epsilon
        if isinstance(param_dims, Dim):
            param_dims = [param_dims]
        self.scale = None
        if scale:
            self.scale = rf.Parameter(dims=param_dims)
            self.scale.initial = 1.0
        self.bias = rf.Parameter(dims=param_dims) if bias else None

    def __call__(self, a: Tensor, *, axis: Union[Dim, Sequence[Dim]]):
        out = normalize(a, axis=axis, epsilon=self.epsilon)
        if self.scale is not None:
            out = self.scale * out
        if self.bias is not None:
            out = out + self.bias
        return out
|
class Parameter(Tensor[T]):
    """
    This represents a (potentially trainable) parameter,
    aka ``tf.Variable`` in TensorFlow,
    wrapping to ``VariableLayer`` in RETURNN.
    """

    def __init__(self, dims: Sequence[Dim], dtype: Optional[str] = None, *, sparse_dim: Optional[Dim] = None, trainable: Optional[bool] = None, auxiliary: bool = False, non_critical_for_restore: bool = False, weight_decay: Optional[float] = 0.0, initial: Optional[rf.init.ParamInitType] = None, raw_tensor: Optional[T] = None, device: Optional[str] = None):
        """
        :param dims: shape of the parameter; must be static dims
        :param dtype: defaults to the float default dtype, or the array-index dtype if sparse_dim is set
        :param sparse_dim:
        :param trainable: if True, the optimizer would do updates to this parameter in training mode.
            None (default) means: auto-infer (False for auxiliary or int params, True otherwise).
        :param auxiliary: if True, this indicates that this parameter should not be transformed by transformations
            such as weight normalization. One example are running statistics, as used for batch normalization.
            This usually implies that the parameter is not trainable, i.e. not to be updated by the optimizer,
            but usually has some custom update.
            This flag is not passed on to RETURNN but just used here for returnn-common logic.
        :param non_critical_for_restore: if True, this parameter is not critical for restoring a model.
        :param weight_decay:
        :param initial: initial value, or a :class:`ParamInit` helper
        :param raw_tensor: use this backend-native tensor instead of creating a new one
        :param device:
        """
        if (not all((isinstance(dim, Dim) for dim in dims))):
            raise TypeError(f'shape {dims} must be a sequence of Dim')
        if (not all((isinstance(dim.dimension, int) for dim in dims))):
            raise ValueError(f'shape {dims} must be static')
        # Dims must be unique; match_priority distinguishes e.g. a square matrix's two equal dims.
        if (len(dims) != len(set(((d, d.match_priority) for d in dims)))):
            raise ValueError(f'shape {dims} dims must be unique')
        super(Parameter, self).__init__('parameter', dims=dims, dtype=(dtype or (rf.get_default_float_dtype() if (not sparse_dim) else rf.get_default_array_index_dtype())), sparse_dim=sparse_dim)
        if (raw_tensor is not None):
            self.raw_tensor = raw_tensor
        else:
            self.raw_tensor = _global_backend.create_parameter_raw(self, device=device)
        # Private fields are set first; the property setters below (trainable, initial)
        # also push state to the backend and must run after the raw tensor exists.
        self._trainable = None  # type: Optional[bool]
        self._auxiliary = auxiliary
        self._non_critical_for_restore = non_critical_for_restore
        self._weight_decay = weight_decay
        self._initial = None  # type: Optional[rf.init.ParamInitType]
        self.trainable = trainable
        self.initial = initial

    def __copy__(self):
        # Creates a new parameter (new raw tensor) with the same config and initial value;
        # the current runtime value is NOT copied.
        res = type(self)(dims=self.dims, dtype=self.dtype, trainable=self.trainable, auxiliary=self.auxiliary, non_critical_for_restore=self.non_critical_for_restore, weight_decay=self.weight_decay)
        res.initial = self.initial
        return res

    def __deepcopy__(self, memo=None):
        from copy import deepcopy

        res = self.__copy__()
        # ParamInit objects are stateful (e.g. random init), so deep-copy them;
        # plain values can be shared.
        if isinstance(self.initial, rf.init.ParamInit):
            res.initial = deepcopy(self.initial, memo=memo)
        else:
            res.initial = self.initial
        return res

    @property
    def initial(self) -> Optional[rf.init.ParamInitType]:
        """initial value of the parameter"""
        return self._initial

    @initial.setter
    def initial(self, value: Optional[rf.init.ParamInitType]):
        # Keep the original (possibly ParamInit) object, but materialize a concrete
        # value for the backend.
        self._initial = value
        if isinstance(value, rf.init.ParamInit):
            value = value(dims=self.dims, dtype=self.dtype, device=self.device)
        self._raw_backend.set_parameter_initial_value(self, value)

    def assign(self, value: Union[Tensor, rf.RawTensorTypes]):
        """
        Assign new value to this parameter.
        This will also update the allocated raw tensor inplace.

        For graph-based backends, handling the control flow is up to the backend,
        e.g.~making sure it is being executed in the right order,
        in the right control flow context, and at all.
        There is no op or anything like that returned here which the user needs to take care of.
        So the user can think of it just as imperative eager-style code.
        """
        self._raw_backend.parameter_assign(self, rf.convert_to_tensor(value, _backend=self._raw_backend, device=self.device), op='assign')

    def assign_add(self, value: Union[Tensor, rf.RawTensorTypes]):
        """
        Add value to this parameter.
        This will also update the raw tensor.
        See :func:`assign`.
        """
        self._raw_backend.parameter_assign(self, rf.convert_to_tensor(value, _backend=self._raw_backend, device=self.device), op='add')

    def assign_key(self, axis: Union[Dim, Sequence[Dim]], key: rf.ItemKeyType, key_dim: Optional[Union[Dim, Sequence[Optional[Dim]]]], value: Union[Tensor, rf.RawTensorTypes]):
        """
        Basically var[key] = value, if axis is the first axis, or otherwise accordingly.
        Note that the __setitem__ API is not supported because it depends on the order of axes,
        but this here is the equivalent function.
        See :func:`assign`.
        """
        self._raw_backend.parameter_assign_key(self, axis=axis, key=key, key_dim=key_dim, value=rf.convert_to_tensor(value, _backend=self._raw_backend, device=self.device), op='assign')

    @property
    def weight_decay(self) -> float:
        """
        Weight decay, which is equivalent to L2 loss on the parameters for SGD.
        On RETURNN side, whether this is handled separately or is part of the main loss,
        can be controlled via the ``decouple_constraints`` config option.
        https://github.com/rwth-i6/returnn_common/issues/59#issuecomment-1073913421
        """
        return (self._weight_decay or 0.0)

    @weight_decay.setter
    def weight_decay(self, value: Optional[float]):
        self._weight_decay = value

    @property
    def trainable(self) -> Optional[bool]:
        """trainable (None means: auto-inferred; see setter)"""
        return self._trainable

    @trainable.setter
    def trainable(self, trainable: Optional[bool]):
        self._trainable = trainable
        # None -> infer: auxiliary params and int params default to non-trainable.
        if (trainable is None):
            if self.auxiliary:
                trainable = False
            elif self.dtype.startswith('int'):
                trainable = False
            else:
                trainable = True
        self._raw_backend.set_parameter_trainable(self, trainable)

    @property
    def auxiliary(self) -> bool:
        """auxiliary (e.g. running statistics; see __init__)"""
        return self._auxiliary

    @auxiliary.setter
    def auxiliary(self, value: bool):
        self._auxiliary = value

    @property
    def non_critical_for_restore(self) -> bool:
        """non_critical_for_restore (see __init__)"""
        return self._non_critical_for_restore

    @non_critical_for_restore.setter
    def non_critical_for_restore(self, value: bool):
        self._non_critical_for_restore = value
|
def set_random_seed(seed: int):
    """
    Call this at the beginning of the program (after the RF backend was selected),
    or when the model and computation graph is supposed to be reinitialized.

    This initializes the random state of the backend and also the step-based random state.
    It is *not* expected to be called after each epoch or step.

    :param seed: should depend on epoch or step
    """
    _global_backend.set_random_seed(seed)
    global _step_rnd_seed
    # Derive a distinct 31-bit seed for the step-based RNG from the given seed.
    _step_rnd_seed = (seed * 5393 + 1187) % (2 ** 31)
    reset_step_random_state()
|
def get_random_state() -> Dict[str, bytes]:
    """
    :return: current backend random state, for serialization, to be able to restore it later
    """
    state = _global_backend.get_random_state()
    return state
|
def set_random_state(state: Dict[str, bytes]):
    """
    Recovers the random state.

    There are many potential cases where we cannot recover the state
    (e.g. different backend version, different hardware, ...).
    In this case, a run without interruption is not the same as a run with interruption.

    We still assume that :func:`set_random_seed` was called before in any case.

    :param state: as returned by :func:`get_random_state`
    """
    _global_backend.set_random_state(state)
|
def reset_step_random_state():
    """
    Reset the static step-based random state to the beginning of the step
    (used when ``static=True`` is passed to :func:`random`).
    This should be called at the beginning of each step. Also see the module docstring.
    """
    _step_rnd.seed(_step_rnd_seed)
|
def get_static_step_based_seed(*, size=None) -> Union[int, numpy.ndarray]:
    """
    :param size: optional shape; if given, an array of seeds is returned
    :return: a fresh 31-bit seed drawn from the static step-based random state
    """
    seed = _step_rnd.randint(2 ** 31, size=size)
    return seed
|
def random(dims: Sequence[Dim], *, dtype: Optional[str]=None, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None, distribution: str, mean: Optional[Union[int, float, Tensor]]=None, stddev: Optional[Union[int, float, Tensor]]=None, bound: Optional[Union[int, float, Tensor]]=None, minval: Optional[Union[int, float, Tensor]]=None, maxval: Optional[Union[int, float, Tensor]]=None, seed: Optional[Union[int, Sequence[int], numpy.ndarray]]=None, algorithm: Optional[str]=None, explicit_state: Optional[Tensor]=None, auto_update_state: Optional[bool]=None, static: Optional[bool]=None, out: Optional[Tensor]=None) -> Tensor:
    """
    Generates random numbers from a uniform, normal or truncated-normal distribution.

    There will be no gradients to mean, stddev, bound, minval, maxval!

    For TensorFlow, this uses the stateless random ops internally, i.e. all state handling
    is explicit; the state var can be provided via ``explicit_state`` (e.g. from a
    :class:`RandomStateInitLayer`), or it will be created automatically.

    Two distinct use cases:

    - Randomness in the model (e.g. dropout): every step produces a new random number
      and advances the random state.
    - Parameter init via the config (``VariableLayer`` with ``init_by_layer``): called once;
      no random state var is wanted — pass ``static=True``, or pass a
      :class:`RandomStateInitLayer` output as state.

    :param dims:
    :param dtype: defaults to the default float dtype
    :param device:
    :param sparse_dim:
    :param feature_dim:
    :param distribution: "uniform", "normal" or "truncated_normal"
    :param mean:
    :param stddev:
    :param bound: for uniform, defining the range [-bound, bound)
    :param minval: for uniform, inclusive lower bound
    :param maxval: for uniform, exclusive upper bound
    :param seed: if not given and ``static``, drawn from the step-based random state;
        if you specify it explicitly, make sure every usage has a different seed
    :param algorithm: see :class:`RandomStateInitLayer`
    :param explicit_state: explicit state; if ``auto_update_state`` it must be a variable,
        updated on every draw; otherwise it is not updated automatically
    :param auto_update_state: only used when you pass an explicit state
    :param static: if no state at all should be used; relies on the seed only
    :param out: if given, write directly into it, if possible by backend
    :return: random values
    """
    if explicit_state is None:
        # NOTE(review): the static/seed handling is assumed to apply only when no
        # explicit state is given — confirm nesting against the original source.
        if static is None:
            static = False
        assert isinstance(static, bool)
        if static:
            if seed is None:
                seed = get_static_step_based_seed()
    if dtype is None:
        dtype = rf.get_default_float_dtype()
    return _global_backend.random(dims=dims, dtype=dtype, device=device, sparse_dim=sparse_dim, feature_dim=feature_dim, distribution=distribution, mean=mean, stddev=stddev, bound=bound, minval=minval, maxval=maxval, seed=seed, algorithm=algorithm, explicit_state=explicit_state, auto_update_state=auto_update_state, static=static, out=out)
|
def random_uniform(dims: Sequence[Dim], *, dtype: Optional[str]=None, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None, minval: Union[(int, float, Tensor)]=0, maxval: Union[(int, float, Tensor)]=1, seed: Optional[Union[(int, Sequence[int], numpy.ndarray)]]=None, algorithm: Optional[str]=None, explicit_state: Optional[Tensor]=None, auto_update_state: Optional[bool]=None, static: Optional[bool]=None, out: Optional[Tensor]=None):
    """
    Random numbers from a uniform distribution over ``[minval, maxval)``.

    Convenience wrapper for :func:`random` with ``distribution="uniform"``.
    See :func:`random` for the detailed parameter documentation.
    """
    return random(
        dims=dims,
        dtype=dtype,
        device=device,
        sparse_dim=sparse_dim,
        feature_dim=feature_dim,
        distribution="uniform",
        minval=minval,
        maxval=maxval,
        seed=seed,
        algorithm=algorithm,
        explicit_state=explicit_state,
        auto_update_state=auto_update_state,
        static=static,
        out=out,
    )
|
def random_normal(dims: Sequence[Dim], *, dtype: Optional[str]=None, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None, mean: Optional[Union[(int, float, Tensor)]]=0.0, stddev: Optional[Union[(int, float, Tensor)]]=1.0, seed: Optional[Union[(int, Sequence[int], numpy.ndarray)]]=None, algorithm: Optional[str]=None, explicit_state: Optional[Tensor]=None, auto_update_state: Optional[bool]=None, static: Optional[bool]=None, out: Optional[Tensor]=None):
    """
    Random numbers from a normal (Gaussian) distribution with the given ``mean`` and ``stddev``.

    Convenience wrapper for :func:`random` with ``distribution="normal"``.
    See :func:`random` for the detailed parameter documentation.
    """
    return random(
        dims=dims,
        dtype=dtype,
        device=device,
        sparse_dim=sparse_dim,
        feature_dim=feature_dim,
        distribution="normal",
        mean=mean,
        stddev=stddev,
        seed=seed,
        algorithm=algorithm,
        explicit_state=explicit_state,
        auto_update_state=auto_update_state,
        static=static,
        out=out,
    )
|
def random_truncated_normal(dims: Sequence[Dim], *, dtype: Optional[str]=None, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None, mean: Optional[Union[(int, float, Tensor)]]=0.0, stddev: Optional[Union[(int, float, Tensor)]]=1.0, minval: Optional[Union[(int, float, Tensor)]]=None, maxval: Optional[Union[(int, float, Tensor)]]=None, seed: Optional[Union[(int, Sequence[int], numpy.ndarray)]]=None, algorithm: Optional[str]=None, explicit_state: Optional[Tensor]=None, auto_update_state: Optional[bool]=None, static: Optional[bool]=None, out: Optional[Tensor]=None):
    """
    Random numbers from a truncated normal distribution.

    Convenience wrapper for :func:`random` with ``distribution="truncated_normal"``.
    See :func:`random` for the detailed parameter documentation.
    """
    return random(dims=dims, dtype=dtype, device=device, sparse_dim=sparse_dim, feature_dim=feature_dim, distribution='truncated_normal', mean=mean, stddev=stddev, minval=minval, maxval=maxval, seed=seed, algorithm=algorithm, explicit_state=explicit_state, auto_update_state=auto_update_state, static=static, out=out)
|
class LSTM(rf.Module):
    """
    Single-layer LSTM.

    Weights are stored with the 4*out_dim gate axis first:
    ``ff_weight`` maps in_dim -> 4*out_dim, ``rec_weight`` maps out_dim -> 4*out_dim.
    The actual recurrence is delegated to the backend.
    """

    def __init__(self, in_dim: Dim, out_dim: Dim, *, with_bias: bool = True):
        """
        :param in_dim: input feature dim
        :param out_dim: output (hidden/cell) feature dim
        :param with_bias: whether to add a bias to the gate projections
        """
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.ff_weight = rf.Parameter((4 * self.out_dim, self.in_dim))
        self.ff_weight.initial = rf.init.Glorot()
        self.rec_weight = rf.Parameter((4 * self.out_dim, self.out_dim))
        self.rec_weight.initial = rf.init.Glorot()
        self.bias = None
        if with_bias:
            self.bias = rf.Parameter((4 * self.out_dim,))
            self.bias.initial = 0.0

    def __call__(self, source: Tensor, *, state: LstmState, spatial_dim: Dim) -> Tuple[Tensor, LstmState]:
        """
        Run the LSTM.

        :param source: {...,in_dim} if spatial_dim is single_step_dim else {...,spatial_dim,in_dim}
        :param state: LSTM state; both h and c have shape {...,out_dim}
        :param spatial_dim: time dim, or single_step_dim for one step
        :return: (output {...,out_dim} or {...,spatial_dim,out_dim}, new state)
        :raises ValueError: if the state is incomplete or in_dim is missing from source
        """
        if not state.h or not state.c:
            raise ValueError(f'{self}: state {state} needs attributes ``h`` (hidden) and ``c`` (cell).')
        if self.in_dim not in source.dims:
            raise ValueError(f'{self}: input {source} does not have in_dim {self.in_dim}')
        # The backend implements the full (potentially multi-step) recurrence.
        out, (h_, c_) = source._raw_backend.lstm(
            source=source,
            state_c=state.c,
            state_h=state.h,
            ff_weight=self.ff_weight,
            rec_weight=self.rec_weight,
            bias=self.bias,
            spatial_dim=spatial_dim,
            in_dim=self.in_dim,
            out_dim=self.out_dim,
        )
        return out, LstmState(h=h_, c=c_)

    def default_initial_state(self, *, batch_dims: Sequence[Dim]) -> LstmState:
        """Zero-initialized state (both h and c) with the given batch dims."""
        return LstmState(
            h=rf.zeros(list(batch_dims) + [self.out_dim], feature_dim=self.out_dim),
            c=rf.zeros(list(batch_dims) + [self.out_dim], feature_dim=self.out_dim),
        )
|
class LstmState(rf.State):
    """State of an :class:`LSTM`: hidden state ``h`` and cell state ``c``."""

    def __init__(self, *_args, h: Tensor = None, c: Tensor = None):
        """
        :param h: hidden state
        :param c: cell state
        """
        super().__init__(*_args)
        if _args:
            # Positional args are handled entirely by the rf.State base class.
            return
        self.h = h
        self.c = c
|
class ZoneoutLSTM(LSTM):
    """
    LSTM with zoneout regularization applied to cell and/or output state
    (stochastic mixing of previous and new state, see :func:`_zoneout`).
    Unlike :class:`LSTM`, the recurrence is implemented explicitly here
    (via :func:`rf.scan`), not by the backend.
    """
    def __init__(self, in_dim: Dim, out_dim: Dim, *, with_bias: bool=True, zoneout_factor_cell: float=0.0, zoneout_factor_output: float=0.0, use_zoneout_output: bool=True, forget_bias: float=0.0, parts_order: str='ifco'):
        """
        :param in_dim:
        :param out_dim:
        :param with_bias:
        :param zoneout_factor_cell: 0.0 is disabled. reasonable is 0.15.
        :param zoneout_factor_output: 0.0 is disabled. reasonable is 0.05.
        :param use_zoneout_output: True is like the original paper. False is like older RETURNN versions.
        :param forget_bias: 1.0 is default in RETURNN/TF ZoneoutLSTM.
            0.0 is default in :class:`LSTM`, or RETURNN NativeLSTM, PyTorch LSTM, etc.
        :param parts_order:
            i: input gate.
            f: forget gate.
            o: output gate.
            c|g|j: input.
            icfo: like RETURNN/TF ZoneoutLSTM.
            ifco: PyTorch (cuDNN) weights, standard for :class:`LSTM`.
            cifo: RETURNN NativeLstm2 weights.
        """
        super().__init__(in_dim, out_dim, with_bias=with_bias)
        self.zoneout_factor_cell = zoneout_factor_cell
        self.zoneout_factor_output = zoneout_factor_output
        self.use_zoneout_output = use_zoneout_output
        self.forget_bias = forget_bias
        # Normalize the cell-input aliases 'c' and 'g' to the canonical 'j'.
        self.parts_order = parts_order.replace('c', 'j').replace('g', 'j')
        self.dropout_broadcast = rf.dropout_broadcast_default()
        assert ((len(self.parts_order) == 4) and (set(self.parts_order) == set('ijfo')))
    def _inner_step(self, x: Tensor, *, state: LstmState) -> Tuple[(Tensor, LstmState)]:
        # One LSTM step. `x` is the input projection (already includes ff_weight and bias),
        # shape {...,4*out_dim}; the recurrent projection is added here.
        prev_c = state.c
        prev_h = state.h
        rec = rf.dot(prev_h, self.rec_weight, reduce=self.out_dim)
        x = (x + rec)
        # Split the 4*out_dim axis into the four gate parts according to parts_order.
        parts = rf.split(x, axis=(4 * self.out_dim), out_dims=([self.out_dim] * 4))
        parts = {k: v for (k, v) in zip(self.parts_order, parts)}
        (i, j, f, o) = (parts['i'], parts['j'], parts['f'], parts['o'])
        # Standard LSTM cell update; forget_bias is added before the sigmoid.
        new_c = ((rf.sigmoid((f + self.forget_bias)) * prev_c) + (rf.sigmoid(i) * rf.tanh(j)))
        new_h = (rf.sigmoid(o) * rf.tanh(new_c))
        output = new_h
        # Zoneout: stochastically keep parts of the previous state (no-op if factor is 0).
        c = _zoneout(prev=prev_c, cur=new_c, factor=self.zoneout_factor_cell, out_dim=self.out_dim, dropout_broadcast=self.dropout_broadcast)
        h = _zoneout(prev=prev_h, cur=new_h, factor=self.zoneout_factor_output, out_dim=self.out_dim, dropout_broadcast=self.dropout_broadcast)
        new_state = LstmState(c=c, h=h)
        if self.use_zoneout_output:
            # Original-paper behavior: emit the zoneout-mixed state as output.
            output = h
        output.feature_dim = self.out_dim
        new_state.h.feature_dim = self.out_dim
        new_state.c.feature_dim = self.out_dim
        return (output, new_state)
    def __call__(self, source: Tensor, *, state: LstmState, spatial_dim: Dim) -> Tuple[(Tensor, LstmState)]:
        """
        Run the zoneout LSTM.

        :param source: {...,in_dim} if spatial_dim is single_step_dim else {...,spatial_dim,in_dim}
        :param state: LSTM state; both h and c have shape {...,out_dim}
        :param spatial_dim: time dim, or single_step_dim for one step
        :return: (output {...,out_dim} or {...,spatial_dim,out_dim}, new state)
        :raises ValueError: if the state is incomplete or in_dim is missing from source
        """
        if ((not state.h) or (not state.c)):
            raise ValueError(f'{self}: state {state} needs attributes ``h`` (hidden) and ``c`` (cell).')
        if (self.in_dim not in source.dims):
            raise ValueError(f'{self}: input {source} does not have in_dim {self.in_dim}')
        # Input projection for all gates at once, shape {...,4*out_dim}.
        x = rf.dot(source, self.ff_weight, reduce=self.in_dim)
        if (self.bias is not None):
            x = (x + self.bias)
        if (spatial_dim == single_step_dim):
            return self._inner_step(x, state=state)
        batch_dims = source.remaining_dims((spatial_dim, self.in_dim))
        # Explicit loop over the spatial dim; the recurrence lives in _inner_step.
        (output, new_state, _) = rf.scan(spatial_dim=spatial_dim, initial=state, xs=x, ys=Tensor('lstm-out', dims=(batch_dims + [self.out_dim]), dtype=source.dtype, feature_dim=self.out_dim), body=(lambda x_, s: self._inner_step(x_, state=s)))
        return (output, new_state)
|
def _zoneout(*, prev: Tensor, cur: Tensor, factor: float, out_dim: Dim, dropout_broadcast: bool) -> Tensor:
    """
    Zoneout mixing of previous and current state.

    :param prev: previous state value
    :param cur: new state value
    :param factor: zoneout probability; 0.0 disables zoneout entirely
    :param out_dim: feature dim, used as dropout axis when broadcasting is enabled
    :param dropout_broadcast: whether dropout should broadcast over out_dim
    :return: mixed state
    """
    if factor == 0.0:
        return cur

    def _train_branch():
        # Dropout on the delta keeps `prev` where units are "zoned out".
        delta = rf.dropout(cur - prev, factor, axis=dropout_broadcast and out_dim)
        return (1 - factor) * delta + prev

    def _eval_branch():
        # Deterministic interpolation at evaluation time.
        return (1 - factor) * cur + factor * prev

    return rf.cond(rf.get_run_ctx().train_flag, _train_branch, _eval_branch)
|
def reduce(source: Tensor[T], *, mode: str, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[T]:
    """
    Reduce ``source`` over ``axis`` using the given reduction ``mode``.

    :param source:
    :param mode: "sum", "max", "min", "mean", "logsumexp", "any", "all", "argmin", "argmax"
    :param axis: dim (or dims) to reduce; removed from the result
    :param use_mask: if True (default), use the time mask (part of dim tag) to ignore padding frames
    :return: tensor with ``axis`` removed
    """
    backend = source._raw_backend
    return backend.reduce(source=source, mode=mode, axis=axis, use_mask=use_mask)
|
def reduce_sum(source: Tensor[T], *, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[T]:
    """
    Sum-reduce ``source`` over ``axis``. See :func:`reduce`.

    :param source:
    :param axis: dim (or dims) to remove
    :param use_mask: if True (default), padding frames are ignored via the time mask
    :return: tensor with ``axis`` removed
    """
    return reduce(mode='sum', source=source, axis=axis, use_mask=use_mask)
|
def reduce_max(source: Tensor[T], *, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[T]:
    """
    Max-reduce ``source`` over ``axis``. See :func:`reduce`.

    :param source:
    :param axis: dim (or dims) to remove
    :param use_mask: if True (default), padding frames are ignored via the time mask
    :return: tensor with ``axis`` removed
    """
    return reduce(mode='max', source=source, axis=axis, use_mask=use_mask)
|
def reduce_min(source: Tensor[T], *, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[T]:
    """
    Min-reduce ``source`` over ``axis``. See :func:`reduce`.

    :param source:
    :param axis: dim (or dims) to remove
    :param use_mask: if True (default), padding frames are ignored via the time mask
    :return: tensor with ``axis`` removed
    """
    return reduce(mode='min', source=source, axis=axis, use_mask=use_mask)
|
def reduce_mean(source: Tensor[T], *, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[T]:
    """
    Mean-reduce ``source`` over ``axis``. See :func:`reduce`.

    :param source:
    :param axis: dim (or dims) to remove
    :param use_mask: if True (default), padding frames are ignored via the time mask
    :return: tensor with ``axis`` removed
    """
    return reduce(mode='mean', source=source, axis=axis, use_mask=use_mask)
|
def reduce_logsumexp(source: Tensor[T], *, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[T]:
    """
    Log-sum-exp reduce ``source`` over ``axis``. See :func:`reduce`.

    :param source:
    :param axis: dim (or dims) to remove
    :param use_mask: if True (default), padding frames are ignored via the time mask
    :return: tensor with ``axis`` removed
    """
    return reduce(mode='logsumexp', source=source, axis=axis, use_mask=use_mask)
|
def reduce_any(source: Tensor[T], *, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[T]:
    """
    Logical-any reduce ``source`` over ``axis``. See :func:`reduce`.

    :param source:
    :param axis: dim (or dims) to remove
    :param use_mask: if True (default), padding frames are ignored via the time mask
    :return: tensor with ``axis`` removed
    """
    return reduce(mode='any', source=source, axis=axis, use_mask=use_mask)
|
def reduce_all(source: Tensor[T], *, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[T]:
    """
    Logical-all reduce ``source`` over ``axis``. See :func:`reduce`.

    :param source:
    :param axis: dim (or dims) to remove
    :param use_mask: if True (default), padding frames are ignored via the time mask
    :return: tensor with ``axis`` removed
    """
    return reduce(mode='all', source=source, axis=axis, use_mask=use_mask)
|
def reduce_argmin(source: Tensor[T], *, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[T]:
    """
    Argmin-reduce ``source`` over ``axis``. See :func:`reduce`.

    :param source:
    :param axis: dim (or dims) to remove
    :param use_mask: if True (default), padding frames are ignored via the time mask
    :return: tensor with ``axis`` removed, containing indices into ``axis``
    """
    return reduce(mode='argmin', source=source, axis=axis, use_mask=use_mask)
|
def reduce_argmax(source: Tensor[T], *, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[T]:
    """
    Argmax-reduce ``source`` over ``axis``. See :func:`reduce`.

    :param source:
    :param axis: dim (or dims) to remove
    :param use_mask: if True (default), padding frames are ignored via the time mask
    :return: tensor with ``axis`` removed, containing indices into ``axis``
    """
    return reduce(mode='argmax', source=source, axis=axis, use_mask=use_mask)
|
def reduce_out(source: Tensor, *, mode: str, num_pieces: int, out_dim: Optional[Dim]=None) -> Tensor:
    """
    Split the feature dim into ``num_pieces`` groups and reduce over each group
    (split-dims followed by reduce). E.g. with mode="max" this is maxout.

    :param source: must have a feature_dim
    :param mode: "sum" or "max" or "mean"
    :param num_pieces: how many elements to reduce; the output feature dim is input.dim // num_pieces
    :param out_dim: resulting feature dim; derived automatically if not given
    :return: reduced tensor, with feature_dim set to the new dim
    """
    assert source.feature_dim
    pieces_dim = Dim(num_pieces, name='readout-parts')
    if not out_dim:
        out_dim = source.feature_dim // pieces_dim
    split = rf.split_dims(source, axis=source.feature_dim, dims=(out_dim, pieces_dim))
    reduced = reduce(split, mode=mode, axis=pieces_dim)
    reduced.feature_dim = out_dim
    return reduced
|
def top_k(source: Tensor, *, axis: Union[(Dim, Sequence[Dim])], k: Optional[Union[(int, Tensor)]]=None, k_dim: Optional[Dim]=None, sorted: bool=True) -> Tuple[(Tensor, Union[(Tensor, Sequence[Tensor])], Dim)]:
    """
    Top-K values and their indices (wraps the backend's top_k, like tf.nn.top_k).

    For an input [B,D] with axis=D, the values and indices have shape [B,K]:
    the ``axis`` dim is removed and a new K dim is added.
    ``axis`` may also cover multiple dims (e.g. [beam,classes]); then a sequence of
    index tensors is returned, one per axis, in the same order.
    All other dims are treated as batch dims.

    :param source:
    :param axis: the dim(s) to do the top_k on, which get reduced
    :param k: the "K" in "TopK"; can be derived from ``k_dim`` if not given
    :param k_dim: the new dim for K; created automatically if not provided
    :param sorted: whether the K results are sorted
    :return: (values, indices (sequence if axis is a sequence), k_dim)
    """
    if k is None:
        # Derive k from the k_dim (static dimension, or dynamic sizes).
        assert k_dim, 'top_k: either provide `k` or `k_dim`'
        k = k_dim.dimension or k_dim.dyn_size_ext
        assert k is not None, f'top_k: k_dim {k_dim} undefined and no k provided'
    backend = source._raw_backend
    return backend.top_k(source, axis=axis, k=k, k_dim=k_dim, sorted=sorted)
|
def reset_run_ctx():
    """
    Reset the global run context to undefined.

    Call this after a train_step or forward_step function has been called,
    when you write your own training or forward loop.
    After this, :func:`get_run_ctx` falls back to the lazily created "init" context.
    """
    global _run_ctx
    _run_ctx = None
|
def init_train_step_run_ctx(*, train_flag: Union[(bool, Tensor)]=True, step: Union[(int, Tensor)]=0, epoch: Union[(int, Tensor)]=1):
    """
    Set up the global run context for a train step.

    Call this before the train_step function is called,
    when you write your own training loop.
    Also see :func:`init_forward_step_run_ctx`.

    :param train_flag: whether we intend to do actual training. you might want to use dropout only in this case.
        (In case of PyTorch, we would also call module.train() first, which will also store this flag internally.)
        If False, we would call the same train_step function, but we intend to do evaluation with the same loss.
    :param step: you might want to schedule dropout or other things depending on the step
    :param epoch: you might want to schedule dropout or other things depending on the epoch
    """
    global _run_ctx
    _run_ctx = RunCtx(stage='train_step', train_flag=train_flag, step=step, epoch=epoch)
|
def init_forward_step_run_ctx(*, expected_outputs: Optional[TensorDict]=None, step: Union[(int, Tensor)]=0, epoch: Union[(int, Tensor)]=1):
    """
    Set up the global run context for a forward step.

    Call this before the forward_step function is called,
    when you write your own forward loop.
    Also see :func:`init_train_step_run_ctx`.

    :param expected_outputs: if given, :func:`RunCtx.mark_as_output` validates outputs against these templates
    :param step: current step
    :param epoch: current epoch
    """
    global _run_ctx
    _run_ctx = RunCtx(stage='forward_step', expected_outputs=expected_outputs, step=step, epoch=epoch)
|
def get_run_ctx() -> RunCtx:
    """
    :return: the current run context, see :class:`RunCtx`.
        If no train/forward context is active, a lazily created "init"-stage context is returned.
    """
    global _run_ctx, _init_run_ctx
    if _run_ctx is not None:
        return _run_ctx
    # No active context: fall back to (and lazily create) the init-stage context.
    if _init_run_ctx is None:
        _init_run_ctx = RunCtx(stage='init')
    return _init_run_ctx
|
class RunCtx():
    """
    We can either be in param-init stage,
    or in the main training (or eval) loop,
    or forwarding loop (doing recog, beam search, dumping whatever, ...).

    In training/eval, we expect that some loss is being defined via mark_as_loss().
    In forwarding, we expect that some output is being defined via mark_as_output().
    """
    def __init__(self, *, stage: str, train_flag: Union[(bool, Tensor)]=False, step: Union[(int, Tensor)]=0, epoch: Union[(int, Tensor)]=1, expected_outputs: Optional[TensorDict]=None):
        """
        :param stage:
            - "init"
            - "train_step", also for eval, for mark_as_loss and get_total_loss
            - "forward_step", for mark_as_output
        :param train_flag: whether we are in training mode (dropout etc. active)
        :param step: global train step
        :param epoch: current epoch
        :param expected_outputs: in "forward_step" stage, templates to validate outputs against
        """
        self._stage = stage
        self._train_flag = train_flag
        self._step = step
        self._epoch = epoch
        self.losses = {}  # name -> Loss, filled via mark_as_loss
        self.outputs = TensorDict()  # name -> Tensor, filled via mark_as_output
        self.expected_outputs = expected_outputs
    def __repr__(self):
        return f'<RunCtx stage={self.stage!r} train_flag={self.train_flag!r} step={self.step!r}>'
    @property
    def stage(self) -> str:
        """
        :return: "init", "train_step", "forward_step"
        """
        return self._stage
    @property
    def train_flag(self) -> Union[(bool, Tensor)]:
        """
        :return: whether we are in training mode, i.e. the model is updated,
            and we are supposed to use dropout and similar mechanisms.
            In a graph-based backend, this can be dynamic.
        """
        return self._train_flag
    @property
    def step(self) -> Union[(int, Tensor)]:
        """
        :return: global train step, starting with 0, not reset after an epoch, i.e. ignoring the epochs.
            In a graph-based backend, this can be dynamic.
        """
        return self._step
    @property
    def epoch(self) -> Union[(int, Tensor)]:
        """
        :return: epoch
        """
        return self._epoch
    def mark_as_loss(self, loss: Union[(Tensor, Any)], name: str, *, dims: Optional[Sequence[Dim]]=None, scale: float=1.0, as_error: bool=False, use_normalized_loss: bool=False, use_flatten_frames: bool=True, custom_inv_norm_factor: Optional[Tensor]=None) -> None:
        """
        Mark the given loss tensor as a loss.
        This has the effect that it is specially handled by RETURNN:
        the optimizer can use it in training, it is used for reporting per batch or per epoch,
        and for learning rate scheduling.
        (This currently uses :class:`AsIsLoss` in RETURNN,
        but this is an implementation detail and might change.)

        :param loss: e.g. shape [B,T] or [B]. A :class:`Tensor` is usually expected,
            but a raw tensor is also possible.
            You should not reduce the axes where RETURNN should collect epoch-wise statistics,
            such that RETURNN can properly accumulate it over batches.
            You should reduce_sum over axes where you do not want to have normalization.
            E.g. if you calculate framewise CE getting shape [B,T], and you want sequence-level CE,
            calculate reduce_sum(loss, axis=T) to get [B] and pass only those here.
        :param name: name of the loss. used for reporting by RETURNN, and also for LR scheduling.
        :param dims: in case `loss` is not a :class:`Tensor`, but a raw tensor
        :param scale: scale the loss by this factor for the training optimizer
            (but not for any reporting). setting to 0.0 means this loss is not used by the optimizer.
        :param as_error: if True, this loss is reported as an error instead of a loss,
            and not used by the training optimizer.
            This is by convention sth like the frame-error or edit-distance,
            and usually not differentiable anyway.
        :param use_normalized_loss: the loss used in optimization will be normalized via reduce_mean
            instead of reduce_sum.
            E.g. if the overall normalization is sum(loss)/sum(num_frames),
            this is also what the optimizer will use, otherwise just sum(loss).
        :param use_flatten_frames: If True, will use flatten_with_seq_len_mask,
            i.e. a "packed" sequence with the padded frames removed, and accumulates over that.
            This can be more efficient and can also avoid issues with inf/nan in some cases.
            If False, it will mask the loss to 0 in the padded frames and accumulate over that.
            Typically, setting this to True (default) is both more efficient and better.
        :param custom_inv_norm_factor:
            The standard inv norm factor is sum(target_seq_len) if the target has a time-axis,
            or sum(output_seq_len) if there is no target and the output has a time-axis,
            or 1 otherwise. (See :func:`Loss.init` for details.)
            This is used for proper normalization of accumulated loss/error per epoch
            and also proper normalization per batch for reporting,
            no matter if use_normalized_loss is True or False.
            Basically, for all reporting, it uses sum(loss) / sum(custom_inv_norm_factor).
        """
        assert (self.stage == 'train_step')
        if (not isinstance(loss, Tensor)):
            # Raw backend tensor: wrap it as a Tensor first.
            assert isinstance(loss, _backend.global_backend.RawTensorType)
            loss = _output_tensor_from_raw(loss, name=name, dims=dims)
        if (not rf.is_float_dtype(loss.dtype)):
            # E.g. an int error count: cast so accumulation/normalization works in float.
            loss = rf.cast(loss, rf.get_default_float_dtype())
        assert (name not in self.losses)
        self.losses[name] = Loss(loss=loss, name=name, scale=scale, as_error=as_error, use_normalized_loss=use_normalized_loss, use_flatten_frames=use_flatten_frames, custom_inv_norm_factor=custom_inv_norm_factor)
    def mark_as_output(self, tensor: Union[(Tensor, Any)], name: str, *, dims: Optional[Sequence[Dim]]=None) -> None:
        """
        Mark this as an output.
        This has the effect that RETURNN will in any case construct the corresponding layer.
        Also see :func:`mark_as_default_output`.

        This is intended mostly for forwarding, or exporting the model (TF graph, TFLite, ONNX, etc).
        You must specify a dim order to have the output shape well-defined
        (if not specified, we check if some defaults are possible, like BTF, or BF).

        :param tensor:
        :param name:
        :param dims: this specifies the order of the dims of the output, such that it is well-defined
            for some external application.
            If not specified, we try to infer BTF or BF as default, if that works,
            otherwise it will be an error.
        """
        assert (self.stage == 'forward_step')
        if (self.expected_outputs is not None):
            assert (name in self.expected_outputs.data), f'mark_as_output: unexpected output {name!r}, we expect outputs: {self.expected_outputs}'
        expected_output = (self.expected_outputs.data[name] if self.expected_outputs else None)
        if ((dims is None) and expected_output):
            # No explicit dim order given: take it from the expected template.
            dims = expected_output.dims
        if ((dims is not None) and expected_output):
            assert (expected_output.dims == dims), f'mark_as_output: {name!r} dims mismatch from expected output, given {dims}, expected {expected_output}'
        if (not isinstance(tensor, Tensor)):
            # Raw backend tensor: wrap it as a Tensor, inventing dims if necessary.
            assert isinstance(tensor, _backend.global_backend.RawTensorType)
            ndim = _backend.global_backend.get_ndim_raw(tensor)
            if (dims is None):
                if (ndim > 0):
                    # Assume the first axis is batch; create fresh dims for the rest.
                    dims = ([batch_dim] + [_backend.global_backend.get_new_dim_raw(tensor, d, name=((name or 'const') + f'_dim{d}')) for d in range(1, ndim)])
                else:
                    dims = ()
            tensor = _output_tensor_from_raw(tensor, dims=dims, name=name)
            dims = tensor.dims
        if (dims is None):
            # Try to infer a canonical order (like BTF or BF).
            dims = _default_dim_order(tensor)
        assert (set(dims) == set(tensor.dims)), f'mark_as_output: tensor {tensor} does not have the dims {dims}'
        tensor = tensor._raw_backend.make_output_tensor(tensor, dims, name=name)
        assert (name not in self.outputs.data)
        self.outputs.data[name] = tensor
        if expected_output:
            # Validate the actual output against the expected template dim by dim.
            assert (len(expected_output.dims) == len(tensor.dims)), f"mark_as_output: lengths of expected output {expected_output.dims} and actual output {tensor.dims} don't match."
            for (expected_dim, actual_dim) in zip(expected_output.dims, tensor.dims):
                expected_dim: Dim
                actual_dim: Dim
                if (not expected_dim.is_dim_known()):
                    assert actual_dim.is_dynamic(), f"mark_as_output: expected dim {expected_dim} doesn't have a known value. Matching actual dim assumed to be dynamic, but got non-dynamic dim {actual_dim}."
                elif expected_dim.is_dynamic():
                    assert actual_dim.is_dynamic(), f'mark_as_output: expected dim {expected_dim} is dynamic. Matching actual dim assumed to be dynamic, but got non-dynamic dim {actual_dim}.'
                elif expected_dim.is_static():
                    assert (expected_dim.is_static() and (actual_dim.dimension == expected_dim.dimension)), f'mark_as_output: expected dim {expected_dim} is static. Matching actual dim assumed to be the same static dim value, but got {actual_dim}.'
                else:
                    assert False, f'mark_as_output: unexpected expected dim {expected_dim}.'
            assert (expected_output.dtype == tensor.dtype), f'mark_as_output: {name!r} dtype mismatch from expected output, given {tensor.dtype}, expected {expected_output.dtype}'
            assert (expected_output.sparse_dim == tensor.sparse_dim), f'mark_as_output: {name!r} sparse_dim mismatch from expected output, given {tensor.sparse_dim}, expected {expected_output.sparse_dim}'
    def mark_as_default_output(self, tensor: Union[(Tensor, Any)], *, shape: Optional[Sequence[Dim]]=None) -> None:
        """
        Calls mark_as_output(tensor, "output", dims=shape).

        Mark this as the default output.
        See :func:`Frontend.mark_as_default_output` for more details.

        :param tensor:
        :param shape: dim order, see ``dims`` in :func:`mark_as_output`
        """
        self.mark_as_output(tensor, 'output', dims=shape)
    def check_outputs_complete(self):
        """
        If expected outputs are given, check that all expected outputs are present.
        """
        if self.expected_outputs:
            assert (set(self.expected_outputs.data.keys()) == set(self.outputs.data.keys())), f'check_outputs_complete: expected outputs {self.expected_outputs} do not match actual outputs {self.outputs}'
    def total_loss(self) -> Union[(Tensor, float)]:
        """
        :return: total loss (sum of all scaled reduced losses), as it is used for backpropagation
        """
        assert (self.stage == 'train_step')
        assert self.losses, 'call rf.get_run_ctx().mark_as_loss(...)'
        loss = 0.0
        for (name, loss_obj) in self.losses.items():
            # Errors and zero-scaled losses are for reporting only, not for the optimizer.
            if ((loss_obj.scale == 0.0) or loss_obj.as_error):
                continue
            loss += loss_obj.get_scaled_reduced_loss()
        return loss
|
@dataclass
class Loss():
    """
    Loss via :func:`RunCtx.mark_as_loss`.

    We collect all relevant information here.
    See :func:`RunCtx.mark_as_loss` for the meaning of the fields.
    """
    loss: Tensor  # the (unreduced) loss values, e.g. shape [B,T] or [B] or scalar
    name: str  # used for reporting and LR scheduling
    scale: float = 1.0  # factor for the training optimizer (not for reporting)
    as_error: bool = False  # if True, reported as error, not used by the optimizer
    use_normalized_loss: bool = False  # optimizer uses mean instead of sum
    use_flatten_frames: bool = True  # see RunCtx.mark_as_loss
    custom_inv_norm_factor: Optional[Tensor] = None  # custom normalization denominator
    _summed_loss_cached: Optional[Tensor] = None  # lazy cache for get_summed_loss
    _mean_loss_cached: Optional[Tensor] = None  # lazy cache for get_mean_loss
    def get_summed_loss(self) -> Tensor:
        """
        :return: sum of loss over all its dims (scalar). Cached after the first call.
        """
        if (not self.loss.dims):
            # Already a scalar, nothing to reduce.
            return self.loss
        if (self._summed_loss_cached is not None):
            return self._summed_loss_cached
        self._summed_loss_cached = rf.reduce_sum(self.loss, axis=self.loss.dims)
        return self._summed_loss_cached
    def get_mean_loss(self) -> Tensor:
        """
        :return: mean of loss (scalar): sum(loss) / sum(custom_inv_norm_factor) if that
            is set, otherwise the mean over all dims of the loss.
        """
        if (self._mean_loss_cached is not None):
            return self._mean_loss_cached
        # NOTE(review): Tensor truthiness is used here on custom_inv_norm_factor — verify
        # this is well-defined for non-scalar tensors.
        if self.custom_inv_norm_factor:
            loss = self.get_summed_loss()
            inv_norm = rf.reduce_sum(self.custom_inv_norm_factor, axis=self.custom_inv_norm_factor.dims)
            inv_norm = rf.cast(inv_norm, loss.dtype)
            inv_norm = rf.reciprocal(inv_norm)
            inv_norm = rf.copy_to_device(inv_norm, loss.device)
            loss *= inv_norm
            # NOTE(review): unlike the other path, this result is not stored in
            # _mean_loss_cached, so it is recomputed on every call — confirm intended.
            return loss
        if (not self.loss.dims):
            # Already a scalar, nothing to reduce.
            return self.loss
        self._mean_loss_cached = rf.reduce_mean(self.loss, axis=self.loss.dims)
        return self._mean_loss_cached
    def get_inv_norm_factor(self) -> Union[(int, Tensor)]:
        """
        :return: inverse norm factor (scalar): sum of custom_inv_norm_factor if set,
            otherwise the number of elements of the loss
        """
        if self.custom_inv_norm_factor:
            if self.custom_inv_norm_factor.dims:
                return rf.reduce_sum(self.custom_inv_norm_factor, axis=self.custom_inv_norm_factor.dims)
            return self.custom_inv_norm_factor
        return self.loss.num_elements()
    def get_scaled_reduced_loss(self) -> Tensor:
        """
        :return: scaled reduced loss (scalar), as it is supposed to be used for calculating the train gradient
        """
        if self.use_normalized_loss:
            loss = self.get_mean_loss()
        else:
            loss = self.get_summed_loss()
        return (loss * self.scale)
|
def _default_dim_order(tensor: Tensor) -> Sequence[Dim]:
    """
    See if some reasonable default dim order like BTF or BF is possible:
    batch first, then time, then a single remaining dynamic dim, then a single static dim.

    :param tensor:
    :return: all dims of ``tensor`` in the inferred order
    :raises Exception: if the order is ambiguous (multiple remaining dynamic or static dims)
    """
    remaining = list(tensor.dims)
    ordered = []
    if tensor.have_batch_axis():
        batch = tensor.get_batch_dim_tag()
        remaining.remove(batch)
        ordered.append(batch)
    if tensor.have_time_axis():
        time = tensor.get_time_dim_tag()
        remaining.remove(time)
        ordered.append(time)
    dynamic = [d for d in remaining if d.is_dynamic_seq_length()]
    if len(dynamic) > 1:
        raise Exception(f'Cannot infer order of dims automatically for output {tensor}. Please specify `dims` explicitly.')
    if dynamic:
        remaining.remove(dynamic[0])
        ordered.append(dynamic[0])
    if len(remaining) > 1:
        raise Exception(f'Cannot infer order of dims automatically for output {tensor}. Please specify `dims` explicitly.')
    if remaining:
        ordered.append(remaining[0])
    return ordered
|
def _output_tensor_from_raw(raw_tensor, *, dims: Optional[Sequence[Dim]], name: str) -> Tensor:
    """
    Wrap a raw backend tensor as a :class:`Tensor`, checking that all dynamic
    dim sizes are actually defined.

    :param raw_tensor: must be of the backend's RawTensorType
    :param dims: dims of the resulting tensor
    :param name: for error messages
    :return: wrapped tensor
    :raises Exception: if a dynamic dim has no defined size
    """
    assert isinstance(raw_tensor, _backend.global_backend.RawTensorType)
    tensor = rf.convert_to_tensor(raw_tensor, dims=dims)
    for dim in tensor.dims:
        if not (dim.dyn_size_ext and dim.dyn_size_ext.raw_tensor is None):
            continue
        # Dynamic dim with no size set: only an error if the size is itself non-scalar.
        if dim.dyn_size_ext.dims:
            raise Exception(f'Output {name!r} {tensor}: Cannot infer dynamic size for dim {dim}. You must explicitly specify the dyn size by assigning `{dim}.dyn_size_ext.raw_tensor = ...`.')
    return tensor
|
def stft(x: Tensor, *, in_spatial_dim: Dim, frame_step: int, frame_length: int, fft_length: Optional[int] = None, window_use_frame_length: bool = True, align_window_left: bool = True, window_enforce_even: bool = True, out_spatial_dim: Optional[Dim] = None, out_dim: Optional[Dim] = None) -> Tuple[Tensor, Dim, Dim]:
    """
    Calculate the short-time Fourier transform (STFT) of a signal.
    We currently always use the Hann window.

    Note that there are inconsistencies between frameworks (e.g. PyTorch vs TensorFlow);
    the options below let you explicitly pick the behavior to match either.
    See: https://github.com/pytorch/pytorch/issues/100177 ,
    https://github.com/librosa/librosa/issues/1701 ,
    https://github.com/albertz/playground/blob/master/tf_pt_stft.py

    :param x: (..., in_spatial_dim, ...). any other dims are treated as batch dims.
    :param in_spatial_dim:
    :param frame_step: hop_length in PT/librosa, frame_shift in StftLayer, frame_step in TF
    :param frame_length: win_length in PT/librosa, frame_size in StftLayer, frame_length in TF
    :param fft_length: n_fft in PT/librosa, fft_size in StftLayer, fft_length in TF.
        If not given, will use frame_length.
    :param window_use_frame_length: whether window size = frame_length (True) or = fft_length (False).
        Only relevant if frame_length != fft_length; affects the output seq length.
        True matches TF/SciPy, False matches PT/librosa.
    :param align_window_left: align the window left (True, zero-pad right; matches TF/SciPy)
        or center it (False, zero-pad both sides; matches PT/librosa) inside the fft_length window.
        Only relevant if frame_length != fft_length.
    :param window_enforce_even: enforce an even window size, potentially lowering it by 1.
        True matches TF; most other frameworks match False.
    :param out_spatial_dim:
    :param out_dim:
    :return: (stft, out_spatial_dim, out_dim)
    """
    if not fft_length:
        fft_length = frame_length
    if out_dim is None:
        # rfft yields fft_length // 2 + 1 frequency bins.
        out_dim = Dim(fft_length // 2 + 1, name='stft-freq')
    if out_spatial_dim is None:
        from .conv import make_conv_out_spatial_dims

        # Framing behaves like a "valid" convolution with the given filter size and stride.
        (out_spatial_dim,) = make_conv_out_spatial_dims(
            [in_spatial_dim], filter_size=frame_length, strides=frame_step, padding='valid')
    result = x._raw_backend.stft(
        x,
        in_spatial_dim=in_spatial_dim,
        frame_step=frame_step,
        frame_length=frame_length,
        fft_length=fft_length,
        window_use_frame_length=window_use_frame_length,
        align_window_left=align_window_left,
        window_enforce_even=window_enforce_even,
        out_spatial_dim=out_spatial_dim,
        out_dim=out_dim,
    )
    return result, out_spatial_dim, out_dim
|
class State(dict):
    """
    Covers all the state of a recurrent module,
    i.e. exactly what needs to be stored and passed into the module
    next time you call it as initial state.

    This behaves somewhat like a namedtuple (attribute access), although we derive from dict.

    When you derive further from this class, make sure that it works correctly with ``tree``,
    which creates new instances of the same class
    by calling ``type(instance)(keys_and_values)``
    with ``keys_and_values = ((key, result[key]) for key in instance)``.
    See :class:`LstmState` for an example::

        class LstmState(rf.State):
            def __init__(self, *_args, h: Tensor = None, c: Tensor = None):
                super().__init__(*_args)
                if not _args:
                    self.h = h
                    self.c = c

    Also see: https://github.com/rwth-i6/returnn/issues/1329
    """

    def __init__(self, *args, **kwargs):
        # Accept either keyword args, or a single positional arg which is a
        # dict, an iterable of (key, value) pairs, or any other object
        # (stored under the key "state").
        if kwargs:
            assert not args
            super().__init__(**kwargs)
        elif args:
            assert len(args) == 1
            (single,) = args
            if isinstance(single, dict):
                super().__init__(**single)
            elif isinstance(single, Iterable):
                super().__init__(single)
            else:
                super().__init__(state=single)
        else:
            super().__init__()

    def __repr__(self):
        parts = [f'{key}={value!r}' for (key, value) in self.items()]
        return f"{self.__class__.__name__}({', '.join(parts)})"

    def __getattr__(self, item):
        # Attribute access falls back to dict lookup.
        try:
            return self[item]
        except KeyError:
            pass
        raise AttributeError(f'{self}.{item}')

    def __setattr__(self, key, value):
        # Attribute assignment stores into the dict.
        self[key] = value

    def flatten_tensors(self) -> List[Tensor]:
        """See :func:`cls_deep_tensors`."""
        return self.cls_flatten_tensors(self)

    @classmethod
    def cls_flatten_tensors(cls, obj: Union[State, dict, Any]) -> List[Tensor]:
        """
        Iterates through obj and all its sub-objects, yielding all tensors.
        """
        from returnn.util.basic import RefIdEq

        seen = set()  # RefIdEq wrappers: dedupe by object identity, not equality
        tensors = []
        stack = [obj]
        while stack:
            item = stack.pop()
            if isinstance(item, Tensor):
                ref = RefIdEq(item)
                if ref not in seen:
                    seen.add(ref)
                    tensors.append(item)
            elif isinstance(item, dict):
                stack.extend(item.values())
            elif isinstance(item, (list, tuple)):
                stack.extend(item)
            else:
                raise TypeError(f'unexpected type {type(item)}')
        return tensors
|
class TensorArray:
    """
    TensorArray: think of this like a list of tensors.
    E.g. if each tensor has shape (B, D), and we have N tensors,
    stacking them together would give us a tensor of shape (N, B, D).
    Reversely, unstacking a tensor of shape (N, B, D) on the N axis
    would give us a list of N tensors of shape (B, D).

    We use a functional API: each modifying operation (push_back) returns a
    new :class:`TensorArray` object, so it works well with both eager-based
    and graph-based frameworks.

    Internally, the backend functions give us some opaque tensor array object
    (e.g. TF TensorArray, or maybe just a pure Python list of tensors in case
    of eager-based frameworks).
    """

    def __init__(self, tensor_template: Tensor, *, _backend_tensor_array: Optional[Any] = None, _backend: Optional[Type[Backend]] = None, _enable_delayed_check: bool = False):
        assert isinstance(tensor_template, Tensor)
        self.tensor_template = tensor_template
        backend = global_backend if _backend is None else _backend
        self._backend = backend
        # Create a fresh backend array if none was handed in.
        self._backend_tensor_array = (
            backend.tensor_array_create() if _backend_tensor_array is None else _backend_tensor_array
        )
        self._enable_delayed_check = _enable_delayed_check

    @classmethod
    def unstack(cls, tensor: Tensor, *, axis: Dim) -> TensorArray:
        """Split `tensor` along `axis` into a new TensorArray."""
        backend = tensor._raw_backend
        axis_idx = tensor.get_axis_from_description(axis)
        template = tensor.copy_template().copy_template_excluding_axis(axis_idx)
        return TensorArray(
            tensor_template=template,
            _backend_tensor_array=backend.tensor_array_unstack(tensor, axis=axis),
            _backend=backend,
        )

    def __getitem__(self, index: Union[int, Tensor]) -> Tensor:
        return self._backend.tensor_array_get_item(self._backend_tensor_array, index)

    def push_back(self, tensor: Tensor) -> TensorArray:
        """Append `tensor`; returns a new TensorArray (functional API)."""
        if not self._enable_delayed_check:
            self._check_matching_tensor(tensor)
        new_backend_array = self._backend.tensor_array_push_back(self._backend_tensor_array, tensor)
        return TensorArray(
            tensor_template=self.tensor_template,
            _backend_tensor_array=new_backend_array,
            _backend=self._backend,
            _enable_delayed_check=self._enable_delayed_check,
        )

    def _set_enable_delayed_check(self, enable: bool = True):
        self._enable_delayed_check = enable

    def _push_back_delayed_check(self):
        """Delayed because maybe dims are updated later."""
        if not self._enable_delayed_check:
            return
        assert self._backend.executing_eagerly()
        assert isinstance(self._backend_tensor_array, list)
        if not self._backend_tensor_array:
            return
        # Validate the most recently pushed tensor.
        self._check_matching_tensor(self._backend_tensor_array[-1])

    def _check_matching_tensor(self, tensor: Tensor):
        template = self.tensor_template
        assert isinstance(tensor, Tensor)
        assert tensor.dims_set == template.dims_set, (
            f'TensorArray push_back: template {template} does not match tensor {tensor}, '
            f'dims different, {template.dims} vs {tensor.dims}'
        )
        assert tensor.sparse_dim == template.sparse_dim, (
            f'TensorArray push_back: template {template} does not match tensor {tensor}, '
            f'sparse_dim different, {template.sparse_dim} vs {tensor.sparse_dim}'
        )

    def stack(self, *, axis: Dim) -> Tensor:
        """Stack all entries together along the new `axis`."""
        return self._backend.tensor_array_stack(self._backend_tensor_array, axis=axis, tensor_template=self.tensor_template)
|
class GetModelFunc(Protocol):
    """Protocol for a callable which builds/returns the model for a given epoch and step."""

    def __call__(self, *, epoch: int, step: int) -> rf.Module:
        ...
|
class StepFunc(Protocol):
    """Protocol for a callable which performs one step on the model with the given extern data."""

    def __call__(self, *, model: rf.Module, extern_data: TensorDict) -> None:
        ...
|
def get_raw_tensor_type() -> Type:
    """
    :return: the raw tensor type of the current selected backend, e.g. ``torch.Tensor`` or ``tf.Tensor``
    """
    if TYPE_CHECKING:
        # Static type checkers see torch.Tensor here; this branch never runs at runtime.
        import torch

        return torch.Tensor
    from ._backend import global_backend

    return global_backend.RawTensorType
|
class InvalidVersion(Exception):
    """Raised when a version string is invalid."""
|
class MissingExplicitImport(ImportError):
    """Raised when the corresponding `import_` call is missing."""
|
def package_path():
    """
    :return: directory where packages are stored (default: ~/returnn/pkg)
    :rtype: str
    """
    global _pkg_path
    if _pkg_path:
        # Already resolved before; reuse the cached value.
        return _pkg_path
    path = os.environ.get(_EnvPkgPath)
    if path is not None:
        # An explicitly configured path must already exist.
        assert os.path.isdir(path), ('import pkg path via env %s: is not a dir: %r' % (_EnvPkgPath, path))
    else:
        path = _DefaultPkgPath
        os.makedirs(path, exist_ok=True)
    _pkg_path = path
    return path
|
def _package_import_path():
    """
    :return: directory for package import
    :rtype: str
    """
    global _pkg_import_path
    if not _pkg_import_path:
        # Resolve once (env override or default), then set up the import dir.
        _pkg_import_path = os.environ.get(_EnvPkgImportPath, _DefaultPkgImportPath)
        _setup_pkg_import()
    return _pkg_import_path
|
def _package_import_pkg_path():
    """
    :return: directory for package import
    :rtype: str
    """
    assert ModuleNamePrefix.endswith('.')
    # Strip the trailing dot of the module-name prefix to form the dir name.
    return '%s/%s' % (_package_import_path(), ModuleNamePrefix[:-1])
|
def _setup_pkg_import():
    # Make sure the import root exists, contains the package dirs
    # (with generated __init__.py files), and is on sys.path.
    base = _package_import_path()
    os.makedirs(base, exist_ok=True)
    _mk_py_pkg_dirs(_package_import_pkg_path())
    if base not in sys.path:
        sys.path.insert(0, base)
|
def _mk_py_pkg_dirs(start_path, end_path=None):
'\n :param str start_path:\n :param str|None end_path:\n '
if end_path:
if (len(end_path) > len(start_path)):
assert end_path.startswith((start_path + '/'))
else:
assert (start_path == end_path)
else:
end_path = start_path
path = start_path
while True:
if os.path.exists(path):
assert (os.path.isdir(path) and (not os.path.islink(path)) and os.path.exists((path + '/__init__.py')))
else:
os.mkdir(path)
with open((path + '/__init__.py'), 'x') as f:
f.write('# automatically generated by RETURNN\n')
f.write('from returnn.import_.common import setup_py_pkg\n')
f.write('setup_py_pkg(globals())\n')
f.close()
if (len(path) == len(end_path)):
break
p = end_path.find('/', (len(path) + 1))
if (p < 0):
path = end_path
else:
path = end_path[:p]
|
def setup_py_pkg(mod_globals):
    """
    This will get called to prepare any custom setup for the package.

    :param dict[str] mod_globals: globals() in the package
    """
    mod_name = mod_globals['__name__']
    logger.debug('import_ pkg mod %s %s', mod_name, mod_globals.get('__file__'))
    if mod_name in _registered_modules:
        return
    # Accessing a module without a preceding import_() call is an error.
    from pprint import pformat

    raise MissingExplicitImport(
        'You must call `import_(...)` before you can access the module %s.\nRegistered modules:\n%s'
        % (mod_name, pformat(_registered_modules)))
|
def _normalize_pkg_name(name):
'\n :param str name:\n :rtype: str\n '
name = name.replace('.', '_')
name = name.replace('-', '_')
return name
|
def _register_module(mod_name, info):
    """
    :param str mod_name:
    :param object info: just used for reporting
    """
    assert mod_name.startswith(ModuleNamePrefix)
    _registered_modules[mod_name] = info
    # Also register every ancestor package prefix of mod_name.
    pos = mod_name.find('.')
    while pos >= 0:
        _registered_modules[mod_name[:pos]] = info
        pos = mod_name.find('.', pos + 1)
|
def module_name(repo, repo_path, path, version, make_ready=True):
    """
    :param str repo: e.g. "github.com/rwth-i6/returnn-experiments"
    :param str repo_path: what get_repo_path returns, e.g. "/home/az/returnn/pkg/...@v..."
    :param str path: path to file in repo. can be arbitrary (empty) with make_ready=False
    :param str|None version: e.g. "20211231-0123abcd0123". None for development working copy.
        ignored with make_ready=False. just used for reporting.
    :param bool make_ready: if True, we make sure that importing this module works
    :return: module name
    :rtype: str

    Note on the internals:

    We could have dynamically loaded the module directly from the package path,
    in some way.
    The reason we choose this different approach to create a real directory
    with symlinks is such that we can potentially make use
    of auto-completion features in editors.
    It might also make debugging easier.
    So, in this function, we make sure that all symlinks are correctly setup.
    """
    # Normalize `path`: strip leading "./" components; a bare "." means the repo root.
    while path.startswith('.'):
        if (path == '.'):
            path = ''
        elif path.startswith('./'):
            path = path[2:]
        else:
            raise ValueError(('invalid path %r' % path))
    full_path = ('%s/%s' % (repo_path, path))
    if os.path.exists(('%s/__init__.py' % repo_path)):
        # The repo root itself is already a Python package.
        rel_pkg_path0 = ''
        rel_pkg_dir = ''
    else:
        # Find the topmost dir (under repo_path) that is a Python package root.
        py_pkg_dirname = _find_root_python_package(full_path)
        assert (len(py_pkg_dirname) >= len(repo_path))
        rel_pkg_path = full_path[(len(py_pkg_dirname) + 1):]
        p = rel_pkg_path.find('/')
        if (p > 0):
            # First path component below the package root.
            rel_pkg_path0 = rel_pkg_path[:p]
        else:
            rel_pkg_path0 = rel_pkg_path
        # Package root relative to repo_path ("" or "/...").
        rel_pkg_dir = py_pkg_dirname[len(repo_path):]
    # Build the "<host-dir>/<repo>/v<version>" (or ".../dev") identifier
    # that the generated module name is based on.
    repo_dir_name = os.path.dirname(repo)
    repo_path_basename = os.path.basename(repo_path)
    repo_v = ('%s/%s' % (repo_dir_name, repo_path_basename))
    if ('@v' in repo_path_basename):
        repo_v = repo_v.replace('@v', '/v')
    else:
        # No version suffix: this is a development working copy.
        repo_v = (repo_v + '/dev')
    if make_ready:
        # Create the package dirs in the import area and symlink the repo content there.
        py_pkg_dir = ('%s/%s%s' % (_package_import_pkg_path(), _normalize_pkg_name(repo_v), _normalize_pkg_name(rel_pkg_dir)))
        _mk_py_pkg_dirs(_package_import_pkg_path(), (py_pkg_dir if rel_pkg_path0 else os.path.dirname(py_pkg_dir)))
        symlink_file = ('%s/%s' % (py_pkg_dir, rel_pkg_path0))
        symlink_target = ('%s%s/%s' % (repo_path, rel_pkg_dir, rel_pkg_path0))
        symlink_file = symlink_file.rstrip('/')
        symlink_target = symlink_target.rstrip('/')
        if os.path.exists(symlink_file):
            # Symlink was set up on an earlier call; it must point at the same target.
            assert (os.readlink(symlink_file) == symlink_target)
        else:
            logger.debug('Symlink %s -> %s', symlink_file, symlink_target)
            os.symlink(symlink_target, symlink_file, target_is_directory=os.path.isdir(symlink_target))
        _register_module(mod_name=(ModuleNamePrefix + _normalize_pkg_name((repo_v + rel_pkg_dir)).replace('/', '.')), info=dict(repo=repo, pkg_dir=rel_pkg_dir[1:], version=version))
    # Final module name: prefix + normalized "<repo_v>/<path without trailing .py>".
    repo_and_path = ('%s/%s' % (repo_v, (path[:(- 3)] if path.endswith('.py') else path)))
    repo_and_path = repo_and_path.rstrip('/')
    name = _normalize_pkg_name(repo_and_path).replace('/', '.')
    return (ModuleNamePrefix + name)
|
def _find_root_python_package(full_path):
'\n :param str full_path: some Python file\n :return: going up from path, and first dir which does not include __init__.py\n :rtype: str\n '
p = len(full_path)
while True:
p = full_path.rfind('/', 0, p)
assert (p > 0)
d = full_path[:p]
assert os.path.isdir(d)
if (not os.path.exists((d + '/__init__.py'))):
return d
|
def stat_repo(repo, version):
    """
    :param str repo: e.g. "github.com/rwth-i6/returnn-experiments"
    :param str|None version: e.g. "20211231-0123abcd0123"
    """
    # Resolving the work dir is enough to check/prepare the repo state.
    _get_repo(repo).get_work_dir(version)
|
def get_repo_path(repo, version, _report_dev_version_usage_stack_frame_depth=1):
    """
    :param str repo: e.g. "github.com/rwth-i6/returnn-experiments"
    :param str|None version: e.g. "20211231-0123abcd0123"
    :param int _report_dev_version_usage_stack_frame_depth:
    :return: path to repo
    :rtype: str
    """
    work_dir = _get_repo(repo).get_work_dir(version)
    repo_path = work_dir.get_path()
    if version:
        return repo_path
    # No explicit version: report usage of the development working copy.
    _report_usage_dev_version(
        repo_path=repo_path, stack_frame_depth=_report_dev_version_usage_stack_frame_depth + 1)
    return repo_path
|
def _simple_validate_repo_name(repo):
'\n :param str repo:\n '
assert (('..' not in repo) and (':' not in repo))
|
def _main_repo_path(repo):
    """
    :param str repo:
    :return: main repo dir (which includes the Git objects)
    :rtype: str
    """
    return f'{common.package_path()}/{repo}'
|
def _dev_repo_path(repo):
    """
    :param str repo:
    :return: dev working tree of a repo; currently this is simply the main repo path
    :rtype: str
    """
    return _main_repo_path(repo)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.