code stringlengths 17 6.64M |
|---|
def check_graphs(*args):
    """Verify that every element in args shares a single graph.

    Args:
      *args: objects exposing an ``obj.graph`` property.
    Raises:
      ValueError: if two elements belong to different graphs.
    """
    found = None
    for idx, item in enumerate(args):
        current = item.graph
        if current is None:
            continue
        if found is None:
            found = current
        elif current is not found:
            raise ValueError('Argument[{}]: Wrong graph!'.format(idx))
|
def get_unique_graph(tops, check_types=None, none_if_empty=False):
    """Return the single graph used by all the elements in tops.

    Args:
      tops: list of elements to check (usually a list of tf.Operation and/or
        tf.Tensor), or a tf.Graph.
      check_types: check that the elements in tops are of the given type(s).
        If None, the types (tf.Operation, tf.Tensor) are used.
      none_if_empty: don't raise an error if tops is an empty list, just
        return None.
    Returns:
      The unique graph used by all the tops.
    Raises:
      TypeError: if tops is not an iterable of tf.Operation.
      ValueError: if the graph is not unique.
    """
    if isinstance(tops, tf_ops.Graph):
        return tops
    if not is_iterable(tops):
        raise TypeError('{} is not iterable'.format(type(tops)))
    if check_types is None:
        check_types = (tf_ops.Operation, tf_ops.Tensor)
    elif not is_iterable(check_types):
        check_types = (check_types,)
    graph = None
    for elem in tops:
        if not isinstance(elem, check_types):
            type_names = ', '.join([str(t) for t in check_types])
            raise TypeError('Expected a type in ({}), got: {}'.format(type_names, type(elem)))
        if graph is None:
            graph = elem.graph
        elif graph is not elem.graph:
            raise ValueError('Operation {} does not belong to given graph'.format(elem))
    if graph is None and not none_if_empty:
        raise ValueError("Can't find the unique graph of an empty list")
    return graph
|
def make_list_of_op(ops, check_graph=True, allow_graph=True, ignore_ts=False):
    """Convert ops to a list of `tf.Operation`.

    Args:
      ops: can be an iterable of `tf.Operation`, a `tf.Graph` or a single
        operation.
      check_graph: if `True` check if all the operations belong to the same
        graph.
      allow_graph: if `False` a `tf.Graph` cannot be converted.
      ignore_ts: if True, silently ignore `tf.Tensor`.
    Returns:
      A newly created list of `tf.Operation`.
    Raises:
      TypeError: if ops cannot be converted to a list of `tf.Operation` or,
        if `check_graph` is `True`, if all the ops do not belong to the
        same graph.
    """
    if isinstance(ops, tf_ops.Graph):
        # A graph stands for all of its operations, unless forbidden.
        if not allow_graph:
            raise TypeError('allow_graph is False: cannot convert a tf.Graph.')
        return ops.get_operations()
    if not is_iterable(ops):
        ops = [ops]
    if not ops:
        return []
    if check_graph:
        expected = None if ignore_ts else tf_ops.Operation
        get_unique_graph(ops, check_types=expected)
    return [op for op in ops if isinstance(op, tf_ops.Operation)]
|
def get_tensors(graph):
    """Get all the tensors which are input or output of an op in the graph.

    Args:
      graph: a `tf.Graph`.
    Returns:
      A list of `tf.Tensor`.
    Raises:
      TypeError: if graph is not a `tf.Graph`.
    """
    if not isinstance(graph, tf_ops.Graph):
        raise TypeError('Expected a graph, got: {}'.format(type(graph)))
    # Every tensor is the output of exactly one op, so collecting all op
    # outputs covers all tensors of the graph.
    return [tensor for op in graph.get_operations() for tensor in op.outputs]
|
def make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False):
    """Convert ts to a list of `tf.Tensor`.

    Args:
      ts: can be an iterable of `tf.Tensor`, a `tf.Graph` or a single tensor.
      check_graph: if `True` check if all the tensors belong to the same graph.
      allow_graph: if `False` a `tf.Graph` cannot be converted.
      ignore_ops: if `True`, silently ignore `tf.Operation`.
    Returns:
      A newly created list of `tf.Tensor`.
    Raises:
      TypeError: if `ts` cannot be converted to a list of `tf.Tensor` or,
        if `check_graph` is `True`, if all the ops do not belong to the
        same graph.
    """
    if isinstance(ts, tf_ops.Graph):
        # A graph stands for all of its tensors, unless forbidden.
        if not allow_graph:
            raise TypeError('allow_graph is False: cannot convert a tf.Graph.')
        return get_tensors(ts)
    if not is_iterable(ts):
        ts = [ts]
    if not ts:
        return []
    if check_graph:
        expected = None if ignore_ops else tf_ops.Tensor
        get_unique_graph(ts, check_types=expected)
    return [t for t in ts if isinstance(t, tf_ops.Tensor)]
|
def get_generating_ops(ts):
    """Return all the generating ops of the tensors in `ts`.

    Args:
      ts: a list of `tf.Tensor`
    Returns:
      A list of all the generating `tf.Operation` of the tensors in `ts`.
    Raises:
      TypeError: if `ts` cannot be converted to a list of `tf.Tensor`.
    """
    return [tensor.op for tensor in make_list_of_t(ts, allow_graph=False)]
|
def get_consuming_ops(ts):
    """Return all the consuming ops of the tensors in ts.

    Args:
      ts: a list of `tf.Tensor`
    Returns:
      A list of all the consuming `tf.Operation` of the tensors in `ts`,
      without duplicates, in first-seen order.
    Raises:
      TypeError: if ts cannot be converted to a list of `tf.Tensor`.
    """
    unique_ops = []
    for tensor in make_list_of_t(ts, allow_graph=False):
        for consumer in tensor.consumers():
            # Preserve first-seen order while deduplicating.
            if consumer not in unique_ops:
                unique_ops.append(consumer)
    return unique_ops
|
class ControlOutputs(object):
    """The control outputs topology."""

    def __init__(self, graph):
        """Create a dictionary of control-output dependencies.

        Args:
          graph: a `tf.Graph`.
        Returns:
          A dictionary where a key is a `tf.Operation` instance and the
          corresponding value is a list of all the ops which have the key
          as one of their control-input dependencies.
        Raises:
          TypeError: graph is not a `tf.Graph`.
        """
        if not isinstance(graph, tf_ops.Graph):
            raise TypeError('Expected a tf.Graph, got: {}'.format(type(graph)))
        self._control_outputs = {}
        self._graph = graph
        self._version = None
        self._build()

    def update(self):
        """Update the control outputs if the graph has changed."""
        # The graph version increases on every mutation; rebuild only then.
        if self._graph.version != self._version:
            self._build()
        return self

    def _build(self):
        """Build the control outputs dictionary."""
        self._control_outputs.clear()
        for op in self._graph.get_operations():
            for dep in op.control_inputs:
                # Invert the control-input relation: dep -> [ops that wait on dep].
                outputs = self._control_outputs.setdefault(dep, [])
                if op not in outputs:
                    outputs.append(op)
        self._version = self._graph.version

    def get_all(self):
        """Return the whole control-outputs dictionary."""
        return self._control_outputs

    def get(self, op):
        """Return the control outputs of op."""
        return self._control_outputs.get(op, ())

    @property
    def graph(self):
        """The graph this topology was built from."""
        return self._graph
|
def scope_finalize(scope):
    """Ensure a non-empty scope string ends with a trailing '/'."""
    if scope and not scope.endswith('/'):
        scope += '/'
    return scope
|
def scope_dirname(scope):
    """Return the scope up to and including the last '/', or '' if none."""
    idx = scope.rfind('/')
    return '' if idx == -1 else scope[:idx + 1]
|
def scope_basename(scope):
    """Return the part of the scope after the last '/', or the whole scope."""
    # rsplit with maxsplit=1 yields the full string when no '/' is present.
    return scope.rsplit('/', 1)[-1]
|
def placeholder_name(t=None, scope=None, prefix=_DEFAULT_PLACEHOLDER_PREFIX):
    """Create placeholder name for the graph editor.

    Args:
      t: optional tensor on which the placeholder operation's name will be
        based on.
      scope: absolute scope with which to prefix the placeholder's name. None
        means that the scope of t is preserved. "" means the root scope.
      prefix: placeholder name prefix.
    Returns:
      A new placeholder name prefixed by "geph". Note that "geph" stands for
      Graph Editor PlaceHolder. This convention allows to quickly identify the
      placeholder generated by the Graph Editor.
    Raises:
      TypeError: if t is not None or a tf.Tensor.
    """
    if scope is not None:
        scope = scope_finalize(scope)
    if t is not None:
        if not isinstance(t, tf_ops.Tensor):
            # Fixed typo in the error message: "tf.Tenfor" -> "tf.Tensor".
            raise TypeError('Expected a tf.Tensor, got: {}'.format(type(t)))
        op_dirname = scope_dirname(t.op.name)
        op_basename = scope_basename(t.op.name)
        if scope is None:
            scope = op_dirname
        if op_basename.startswith('{}__'.format(prefix)):
            # Already a graph-editor placeholder name; do not prefix twice.
            ph_name = op_basename
        else:
            ph_name = '{}__{}_{}'.format(prefix, op_basename, t.value_index)
        return scope + ph_name
    else:
        if scope is None:
            scope = ''
        return '{}{}'.format(scope, prefix)
|
def make_placeholder_from_tensor(t, scope=None, prefix=_DEFAULT_PLACEHOLDER_PREFIX):
    """Create a `tf.compat.v1.placeholder` for the Graph Editor.

    Note that the correct graph scope must be set by the calling function.

    Args:
      t: a `tf.Tensor` whose name will be used to create the placeholder (see
        function placeholder_name).
      scope: absolute scope within which to create the placeholder. None means
        that the scope of `t` is preserved. `""` means the root scope.
      prefix: placeholder name prefix.

    Returns:
      A newly created `tf.compat.v1.placeholder`.
    Raises:
      TypeError: if `t` is not `None` or a `tf.Tensor`.
    """
    ph_name = placeholder_name(t, scope=scope, prefix=prefix)
    return tf_array_ops.placeholder(dtype=t.dtype, shape=t.get_shape(), name=ph_name)
|
def make_placeholder_from_dtype_and_shape(dtype, shape=None, scope=None, prefix=_DEFAULT_PLACEHOLDER_PREFIX):
    """Create a tf.compat.v1.placeholder for the Graph Editor.

    Note that the correct graph scope must be set by the calling function.
    The placeholder is named using the function placeholder_name (with no
    tensor argument).

    Args:
      dtype: the tensor type.
      shape: the tensor shape (optional).
      scope: absolute scope within which to create the placeholder. None means
        that the scope of t is preserved. "" means the root scope.
      prefix: placeholder name prefix.

    Returns:
      A newly created tf.placeholder.
    """
    ph_name = placeholder_name(scope=scope, prefix=prefix)
    return tf_array_ops.placeholder(dtype=dtype, shape=shape, name=ph_name)
|
def get_predefined_collection_names():
    """Return all the predefined collection names."""
    keys = dir(tf_ops.GraphKeys)
    # Internal variable-related keys are filtered out via the module regex.
    return [getattr(tf_ops.GraphKeys, key) for key in keys
            if not _INTERNAL_VARIABLE_RE.match(key)]
|
def find_corresponding_elem(target, dst_graph, dst_scope='', src_scope=''):
    """Find corresponding op/tensor in a different graph.

    Args:
      target: A `tf.Tensor` or a `tf.Operation` belonging to the original
        graph.
      dst_graph: The graph in which the corresponding graph element must be
        found.
      dst_scope: A scope which is prepended to the name to look for.
      src_scope: A scope which is removed from the original of `target` name.

    Returns:
      The corresponding `tf.Tensor` or a `tf.Operation`.

    Raises:
      ValueError: if `src_name` does not start with `src_scope`.
      TypeError: if `target` is not a `tf.Tensor` or a `tf.Operation`
      KeyError: If the corresponding graph element cannot be found.
    """
    src_name = target.name
    if src_scope:
        src_scope = scope_finalize(src_scope)
        # Fixed: `str.startswidth` does not exist; the previous code raised
        # AttributeError whenever a non-empty src_scope was given. The intended
        # method is `str.startswith`.
        if not src_name.startswith(src_scope):
            raise ValueError('{} does not start with {}'.format(src_name, src_scope))
        src_name = src_name[len(src_scope):]
    dst_name = src_name
    if dst_scope:
        dst_scope = scope_finalize(dst_scope)
        dst_name = dst_scope + dst_name
    if isinstance(target, tf_ops.Tensor):
        return dst_graph.get_tensor_by_name(dst_name)
    if isinstance(target, tf_ops.Operation):
        return dst_graph.get_operation_by_name(dst_name)
    # Fixed: the type was passed as a second TypeError argument instead of
    # being interpolated into the '{}' placeholder via str.format.
    raise TypeError('Expected tf.Tensor or tf.Operation, got: {}'.format(type(target)))
|
def find_corresponding(targets, dst_graph, dst_scope='', src_scope=''):
    """Find corresponding ops/tensors in a different graph.

    `targets` is a Python tree, that is, a nested structure of iterable
    (list, tuple, dictionary) whose leaves are instances of
    `tf.Tensor` or `tf.Operation`.

    Args:
      targets: A Python tree containing `tf.Tensor` or `tf.Operation`
        belonging to the original graph.
      dst_graph: The graph in which the corresponding graph element must be
        found.
      dst_scope: A scope which is prepended to the name to look for.
      src_scope: A scope which is removed from the original of `top` name.

    Returns:
      A Python tree containing the corresponding `tf.Tensor` or a
      `tf.Operation`.

    Raises:
      ValueError: if `src_name` does not start with `src_scope`.
      TypeError: if `top` is not a `tf.Tensor` or a `tf.Operation`
      KeyError: If the corresponding graph element cannot be found.
    """
    # Map the per-element lookup over the whole tree structure.
    return transform_tree(
        targets,
        lambda top: find_corresponding_elem(top, dst_graph, dst_scope, src_scope))
|
class ForwardCallbackIface:
    """
    Callback interface for the forward task.

    Set `forward_callback` in your config to an instance or class of this.

    https://github.com/rwth-i6/returnn/issues/1336
    """

    def init(self, *, model):
        """
        Run at the beginning.
        """

    def process_seq(self, *, seq_tag: str, outputs: TensorDict):
        """
        Called for each sequence, or entry in the dataset.
        This does not have the batch dim anymore.
        The values in `outputs` are Numpy arrays.

        :param seq_tag:
        :param outputs:
        """

    def finish(self):
        """
        Run at the end.
        """
|
class Backend(Generic[T]):
'\n Abstract base class for the backend, operating on tensor type T, i.e. :class:`Tensor[T]`.\n\n This class and instances do not have any state,\n and all functions are staticmethod (or classmethod).\n '
RawTensorType: Type[T]
is_tensorflow: bool = False
is_backend_raw_tensor_dim_tag_independent: bool = True
    def __init__(self):
        # Backend classes are stateless collections of static/class methods;
        # instantiating one is always a programming error.
        raise Exception('do not instantiate this class')
    # --- Abstract backend hooks: execution mode, graph introspection,
    # --- control flow, randomness, and raw-tensor shape/dtype access.
    # --- Concrete backends (e.g. TF, torch) override these.
    @staticmethod
    def executing_eagerly() -> bool:
        '\n :return: whether we are in eager execution mode\n '
        raise NotImplementedError
    @staticmethod
    def get_tensor_dependencies(x: Tensor) -> Sequence[Tensor]:
        '\n :param x: tensor\n :return: list of all tensors which are inputs to `x`, ancestor tensors, dependencies.\n E.g. :func:`tf.Tensor.op.inputs`.\n This mostly makes sense for graph-based frameworks\n but eager-based frameworks might have this too with enabled gradient tape,\n as they should know the inputs.\n '
        raise NotImplementedError
    @staticmethod
    def get_tensor_consumers(x: Tensor) -> Sequence[Tensor]:
        '\n :param x: tensor\n :return: list of all tensors depending on `x`, descendant tensors, used by.\n E.g. :func:`tf.Tensor.consumers`.\n This mostly makes sense for graph-based frameworks\n but eager-based frameworks might have this too with enabled gradient tape,\n as they should know the consumers.\n '
        raise NotImplementedError
    @staticmethod
    def cond(pred: Tensor, true_fn: Callable, false_fn: Callable):
        '\n cond: conditional execution.\n\n Note that this does not need an implementation for eager-based frameworks\n (:func:`executing_eagerly` returns True),\n as the :func:`returnn.frontend.cond` function already covers that case.\n '
        # Eager backends should never reach this default implementation.
        assert (not pred._raw_backend.executing_eagerly()), 'should not get here'
        raise NotImplementedError
    @staticmethod
    def while_loop(cond: Callable[([S], Union[(bool, Tensor)])], body: Callable[([S], S)], initial: S) -> S:
        'while loop'
        raise NotImplementedError
    @staticmethod
    def set_random_seed(seed: int):
        '\n :param seed:\n '
        raise NotImplementedError
    @staticmethod
    def get_random_state() -> Dict[(str, bytes)]:
        '\n :return: random state\n '
        raise NotImplementedError
    @staticmethod
    def set_random_state(state: Dict[(str, bytes)]):
        '\n :param state: as returned by :func:`get_random_state`.\n This might not always be successful (e.g. different hardware, different backend version),\n so the calling code should always have called set_random_seed before to have the random generators\n in a reasonable fallback state.\n '
        raise NotImplementedError
    @staticmethod
    def get_dtype_name_raw(raw_tensor: T) -> str:
        '\n :return: dtype of raw tensor, as string\n '
        raise NotImplementedError
    @staticmethod
    def as_dtype_raw(dtype_name: str) -> Any:
        '\n :param dtype_name: e.g. "float32"\n :return: dtype object\n '
        raise NotImplementedError
    @staticmethod
    def get_ndim_raw(raw_tensor: T) -> int:
        '\n :return: ndim of raw tensor. assumes it is known\n '
        raise NotImplementedError
    @staticmethod
    def get_shape_raw(raw_tensor: T) -> Union[(T, Tuple[Union[(int, T)]])]:
        '\n :return: shape of raw tensor\n '
        raise NotImplementedError
    @staticmethod
    def get_shape_tuple_raw(raw_tensor: T) -> Tuple[Union[(int, T)]]:
        '\n :return: shape of raw tensor. assumes that ndim is known.\n In eager frameworks, all dims are int.\n '
        raise NotImplementedError
    @staticmethod
    def get_known_shape_raw(raw_tensor: T) -> Tuple[Optional[int]]:
        '\n :return: shape of raw tensor, int for static known, None otherwise. assumes that ndim is known.\n This will not create any ops.\n In eager frameworks, all dims are known.\n '
        raise NotImplementedError
    # Note: intentionally a no-op by default (not NotImplementedError) --
    # eager frameworks have nothing to set.
    @staticmethod
    def set_known_shape_raw(raw_tensor: T, shape: Tuple[Optional[int]]) -> None:
        '\n Sets the known shape of the raw tensor.\n This is only supported in graph-based frameworks,\n and just performs a check in eager frameworks.\n '
    @staticmethod
    def get_new_dim_raw(raw_tensor: T, axis: int, *, name: str) -> Dim:
        '\n :param raw_tensor:\n :param axis:\n :param name:\n :return: dim tag of axis\n '
        raise NotImplementedError
    # Default: device handling is optional; "unknown" and "no-op copy" are
    # valid fallbacks for backends without device support.
    @staticmethod
    def get_device(x: Tensor) -> Optional[str]:
        '\n :param x:\n :return: device, or none if unknown or logic not supported\n '
        return None
    @staticmethod
    def copy_to_device(x: Tensor, device: Optional[str]) -> Tensor:
        '\n :param x: tensor\n :param device: e.g. "cpu" or "gpu"\n :return: tensor on device\n '
        return x
    @staticmethod
    def fill_raw(shape: Union[(Sequence[Union[(int, T)]], T)], value: Union[(Any, T)]) -> T:
        '\n :param shape: shape\n :param value: scalar value to fill\n :return: raw tensor filled with value everywhere\n '
        raise NotImplementedError
    @staticmethod
    def compare_raw(a: T, kind: str, b: T) -> T:
        '\n :param a:\n :param kind: "equal", "less", "less_equal", "greater", "greater_equal", "not_equal"\n :param b:\n :return: a `kind` b\n '
        raise NotImplementedError
    @staticmethod
    def combine_raw(a: T, kind: str, b: T) -> T:
        '\n :param a:\n :param kind: "add", "sub", "mul", "truediv", "floordiv", "mod", "pow",\n "maximum", "minimum", "logical_and", "logical_or", "squared_difference"\n :param b:\n :return: a `kind` b\n '
        raise NotImplementedError
    @staticmethod
    def reshape_raw(raw_tensor: T, shape: Union[(Sequence[Union[(int, T)]], T)]) -> T:
        '\n :param raw_tensor: raw tensor\n :param shape: new shape\n :return: reshaped raw tensor\n '
        raise NotImplementedError
@classmethod
def squeeze_raw(cls, raw_tensor: T, axes: Sequence[int]) -> T:
'\n :param raw_tensor: raw tensor\n :param axes: axes to squeeze\n :return: squeezed raw tensor\n '
known_shape = cls.get_known_shape_raw(raw_tensor)
assert all([(known_shape[axis] == 1) for axis in axes])
new_shape = [dim for (a, dim) in enumerate(cls.get_shape_tuple_raw(raw_tensor)) if (a not in axes)]
return cls.reshape_raw(raw_tensor, new_shape)
    @staticmethod
    def transpose_raw(raw_tensor: T, perm: Sequence[int]) -> T:
        '\n :param raw_tensor: raw tensor\n :param perm: permutation\n :return: transposed raw tensor\n '
        # Abstract hook; concrete backends implement the axis permutation.
        raise NotImplementedError
@staticmethod
def make_output_tensor(tensor: Tensor, dims: Sequence[Dim], *, name: str) -> Tensor:
'\n :param tensor:\n :param dims:\n :param name:\n :return: tensor with dims order like in dims\n '
assert (len(dims) == len(tensor.dims))
tensor = tensor.copy_compatible_to_dims(dims)
tensor = tensor.copy(name=name)
return tensor
    # --- More abstract raw-tensor hooks: axis insertion/expansion, copy, cast.
    @staticmethod
    def expand_dims_raw(raw_tensor: T, axis: int) -> T:
        '\n :param raw_tensor:\n :param axis:\n :return: raw tensor with new axis\n '
        raise NotImplementedError
    @staticmethod
    def expand_raw(raw_tensor: T, axis: int, dim: Union[(int, T)]) -> T:
        '\n :param raw_tensor:\n :param axis: shape[axis] must be 1\n :param dim: the new dim for shape[axis]\n :return: shape[axis] expands to dim.\n in PyTorch or other frameworks which support custom strides,\n this is an efficient view and not a copy.\n '
        raise NotImplementedError
    @staticmethod
    def copy(tensor: Tensor) -> Tensor:
        'copy'
        raise NotImplementedError
    @staticmethod
    def cast_raw(raw_tensor: T, dtype: str) -> T:
        '\n :param raw_tensor:\n :param dtype: e.g. "float32"\n :return: raw tensor with dtype casted\n '
        raise NotImplementedError
@staticmethod
def cast(tensor: Tensor, dtype: str) -> Tensor:
'\n :param tensor:\n :param dtype: e.g. "float32"\n :return: tensor with dtype casted\n '
res = tensor.copy_template()
res.dtype = dtype
if res.sparse_dim:
if (dtype.startswith('int') or dtype.startswith('uint')):
pass
elif ((dtype == 'bool') and (res.sparse_dim.dimension == 2)):
pass
else:
res.sparse_dim = None
res.raw_tensor = tensor._raw_backend.cast_raw(tensor.raw_tensor, dtype)
return res
    # --- Abstract hooks: gradient control and dim-based tensor reshaping ops.
    @staticmethod
    def set_requires_gradient(tensor: Tensor):
        '\n :param tensor:\n '
        raise NotImplementedError
    @staticmethod
    def gradient(y: Tensor, x: Tensor) -> Tensor:
        '\n :param y:\n :param x:\n :return: gradient of y w.r.t. x\n '
        raise NotImplementedError
    @staticmethod
    def stop_gradient(tensor: Tensor) -> Tensor:
        '\n :param tensor:\n :return: tensor with stopped gradient\n '
        raise NotImplementedError
    @staticmethod
    def scaled_gradient(tensor: Tensor, scale: Union[(float, Tensor)]) -> Tensor:
        '\n :param tensor:\n :param scale:\n :return: tensor with scaled gradient\n '
        raise NotImplementedError
    @staticmethod
    def scaled_gradient_ext(x: Tensor, *, scale: Union[(float, Tensor)]=1.0, shift: Optional[Union[(float, Tensor)]]=None, scale_shift_by_sum_over_axis: Optional[Dim]=None):
        '\n :param x:\n :param scale: will scale gradient by this value\n :param shift: will shift gradient by this value\n :param scale_shift_by_sum_over_axis: if given, will scale and shift by the sum over the given axis\n :return: just x, but gradient in backward pass will be transformed accordingly\n '
        raise NotImplementedError
    @staticmethod
    def merge_dims(source: Tensor, *, dims: Sequence[Dim], out_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]:
        '\n Merges a list of axes into a single one. (Flatten the dims.)\n E.g. input is (batch, width, height, dim) and dims=(width,height), then we get (batch, width*height, dim).\n Or input is (batch, time, height, dim) and axes=(height,dim), then we get (batch, time, height*dim).\n\n :param source:\n :param dims:\n :param out_dim:\n :return: tensor, out_dim\n '
        raise NotImplementedError
    @staticmethod
    def split_dims(source: Tensor, *, axis: Dim, dims: Sequence[Dim], pad_to_multiples: Optional[bool]=None, pad_value: Union[(None, int, float)]=None) -> Tensor:
        '\n :param source:\n :param axis:\n :param dims:\n :param pad_to_multiples:\n :param pad_value:\n :return: source with axis replaced by dims\n '
        raise NotImplementedError
    @staticmethod
    def reshape(source: Tensor, in_dims: Sequence[Dim], out_dims: Sequence[Dim]) -> Tensor:
        '\n :param source: e.g. (..., old_dims, ...)\n :param in_dims: the old dims which should be reshaped into new_dims.\n This should only cover those dims which should be reshaped,\n not all the dims of the source.\n :param out_dims: the new dims which should be reshaped from old_dims.\n This is excluding any of the other dims in the source.\n :return: e.g. (..., new_dims, ...)\n '
        raise NotImplementedError
    @staticmethod
    def split(source: Tensor, *, axis: Dim, out_dims: Sequence[Dim]) -> Tuple[(Tensor, ...)]:
        '\n Split the input on the specified axis (by default feature).\n Basically a wrapper around tf.split.\n\n :param source: {..., axis}\n :param axis: some static axis\n :param out_dims: list of dims where sum(out_dims) == axis\n :return: tuple of tensors, same amount as out_dims,\n with the same shape as source, but with the specified axis replaced by the out_dims\n '
        raise NotImplementedError
    @staticmethod
    def expand_dim(source: Tensor, dim: Dim) -> Tensor:
        '\n :param source:\n :param dim:\n :return: source with dim added\n '
        raise NotImplementedError
    @staticmethod
    def squeeze(source: Tensor, axis: Dim) -> Tensor:
        '\n :param source:\n :param axis:\n :return: source with axis removed\n '
        raise NotImplementedError
    @staticmethod
    def concat(*sources: Tuple[(Tensor, Dim)], allow_broadcast: bool=False, out_dim: Dim) -> Tensor:
        'concat'
        raise NotImplementedError
    @staticmethod
    def pad(source: Tensor, *, axes: Sequence[Dim], padding: Sequence[Tuple[(Union[(Dim, int)], Union[(Dim, int)])]], out_dims: Sequence[Dim], mode: str='constant', value: Optional[Union[(rf.RawTensorTypes, Tensor)]]=None) -> Tensor:
        '\n :param source:\n :param axes:\n :param padding:\n :param out_dims:\n :param mode:\n :param value:\n :return: padded tensor\n '
        raise NotImplementedError
    @staticmethod
    def cum_concat_step(source: Tensor, *, prev_accum: Tensor, axis: Dim, out_spatial_dim: Dim) -> Tensor:
        '\n Concatenates all previous frames over a time-axis.\n See RETURNN :class:`CumConcatLayer` for details.\n\n :param source: same dims as prev_accum except for the accum axis\n :param prev_accum: previous accumulated tensor, shape {..., axis}\n :param axis: the axis to accumulate over\n :param out_spatial_dim: the spatial dim of the output will be this dim. like axis+1.\n :return: accumulated. accumulated shape {..., out_spatial_dim},\n same shape as prev_accum with axis replaced by out_spatial_dim.\n '
        raise NotImplementedError
    # Set of function names accepted by :func:`activation` / :func:`activation_raw`.
    _AllowedActivationFuncs = {'exp', 'expm1', 'log', 'log1p', 'sqrt', 'rsqrt', 'square', 'abs', 'tanh', 'sigmoid', 'log_sigmoid', 'sin', 'cos', 'ceil', 'floor', 'round', 'relu', 'elu', 'selu', 'silu', 'logical_not', 'neg', 'reciprocal'}
@staticmethod
def activation(tensor: Tensor, func: str) -> Tensor:
'\n :param tensor:\n :param func: "tanh", "sigmoid", "relu", ...\n :return: tensor with elementwise activation applied\n '
out = tensor.copy_template(name=func)
out_raw = tensor._raw_backend.activation_raw(tensor.raw_tensor, func)
out.dtype = tensor._raw_backend.get_dtype_name_raw(out_raw)
out.raw_tensor = out_raw
return out
    @staticmethod
    def activation_raw(raw_tensor: T, func: str) -> T:
        '\n :param raw_tensor:\n :param func: "tanh", "sigmoid", "relu", ...\n :return: raw tensor with elementwise activation applied\n '
        # Abstract hook; `func` is expected to be one of _AllowedActivationFuncs.
        raise NotImplementedError
@staticmethod
def safe_log(tensor: Tensor, *, eps: float) -> Tensor:
'\n :param tensor:\n :param eps:\n :return: log(tensor + eps) in the default case. but some backends might do more things,\n like if tensor = softmax(logits), then this would be log_softmax(logits) instead.\n '
return rf.log(rf.maximum(tensor, eps))
    # --- Abstract hooks: softmax variants, losses, and sequence masks.
    @staticmethod
    def softmax(tensor: Tensor, *, axis: Dim, use_mask: bool=True) -> Tensor:
        '\n :param tensor:\n :param axis:\n :param use_mask:\n :return: softmax over axis\n '
        raise NotImplementedError
    @staticmethod
    def log_softmax(tensor: Tensor, *, axis: Dim, use_mask: bool=True) -> Tensor:
        '\n :param tensor:\n :param axis:\n :param use_mask:\n :return: log_softmax over axis\n '
        raise NotImplementedError
    @staticmethod
    def softmax_cross_entropy_with_logits(*, logits: Tensor, targets: Tensor, axis: Dim):
        "\n Efficient cross entropy.\n\n :param logits: target estimates given as inputs to softmax (i.e. unnormalized)\n :param targets: probabilities, i.e. normalized, can also be sparse\n :param axis: class labels dim over which softmax is computed\n :return: cross entropy (same Dims as 'logits' but without 'axis')\n "
        raise NotImplementedError
    @staticmethod
    def ctc_loss(*, logits: Tensor, targets: Tensor, input_spatial_dim: Dim, targets_spatial_dim: Dim, blank_index: int, max_approx: bool=False) -> Tensor:
        '\n Calculates the CTC loss.\n '
        raise NotImplementedError
    # Default: no native sequence_mask_raw; callers check this flag before
    # calling :func:`sequence_mask_raw`.
    @staticmethod
    def have_sequence_mask_raw() -> bool:
        '\n :return: whether we have a sequence_mask_raw implementation\n '
        return False
    @staticmethod
    def sequence_mask_raw(lengths: T, *, batch_major: bool=True) -> T:
        '\n Like tf.sequence_mask().\n\n :param lengths: shape (batch,)\n :param batch_major:\n :return: tensor mask of shape (batch,maxlen) if batch_major else (maxlen,batch) of type bool\n '
        raise NotImplementedError
    # --- Eager-framework defaults: names, control deps, and identity are
    # --- no-ops because execution order is already the program order.
    @staticmethod
    @contextlib.contextmanager
    def name_scope_raw(name: str) -> Any:
        '\n Default implementation for eager-based frameworks:\n Do nothing, tensors do not have a name.\n\n :param name:\n :return: context manager\n '
        (yield)
    @staticmethod
    @contextlib.contextmanager
    def control_dependencies_raw(dependencies: Sequence[Any]) -> Any:
        '\n Default implementation for eager-based frameworks:\n Do nothing, we expect that the dependencies are already executed.\n\n :param dependencies: raw tensors or ops\n :return: context manager\n '
        (yield)
    @staticmethod
    def identity_with_control_dependencies_raw(raw_tensor: T, dependencies: Sequence[Any]) -> T:
        '\n Default implementation for eager-based frameworks:\n Do nothing, we expect that the dependencies are already executed.\n\n :param raw_tensor: raw tensor\n :param dependencies: raw tensors or ops\n :return: raw tensor\n '
        return raw_tensor
    # --- Abstract hooks: placeholders, parameters, sanity checks, creation.
    @staticmethod
    def create_placeholder_raw(tensor: Tensor) -> T:
        '\n :return: tf.placeholder in TF\n\n This is really only for TensorFlow for the deprecated option auto_create_placeholders\n and should not be used in other backends,\n even in graph-based backends.\n Rather, the logic to create placeholders should be done elsewhere.\n '
        # Deliberately raises for every backend except the TF implementation.
        raise Exception('create_placeholder not supported by backend')
    @staticmethod
    def create_parameter_raw(tensor: rf.Parameter, *, device: Optional[str]=None) -> T:
        '\n :return: parameter (by default trainable)\n '
        raise NotImplementedError
    @staticmethod
    def set_parameter_initial_value(param: rf.Parameter, value: Union[(None, Tensor, rf.RawTensorTypes)]) -> None:
        '\n :param param: parameter\n :param value: initial value\n '
        raise NotImplementedError
    @staticmethod
    def set_parameter_trainable(param: rf.Parameter, trainable: bool) -> None:
        '\n :param param: parameter\n :param trainable: whether the parameter should be trainable\n '
        raise NotImplementedError
    @staticmethod
    def parameter_assign(param: rf.Parameter, value: Tensor, *, op: str='assign') -> None:
        '\n :param param: parameter\n :param value: new value\n :param op: "assign" or "add"\n '
        raise NotImplementedError
    @staticmethod
    def parameter_assign_key(param: rf.Parameter, key: ItemKeyType, value: Tensor, *, op: str='assign', axis: Optional[Union[(Dim, Sequence[Dim])]]=None, key_dim: Union[(None, Dim, Sequence[Union[(None, Dim)]])]=None) -> None:
        '\n :param param: parameter\n :param key: optional key for slice assign, like var[key] = value or var[key] += value.\n :param value: new value\n :param op: "assign" or "add"\n :param axis: if key is given, this axis is used.\n if key are indices (without specified sparse_dim), axis must be specified.\n :param key_dim: resulting dim after slicing with key\n '
        raise NotImplementedError
    # Default: no-op; eager backends would perform checks directly instead.
    @staticmethod
    def runtime_sanity_checks(tensor: Tensor) -> Any:
        '\n Checks whether the tensor.raw_tensor is consistent with the tensor metadata.\n\n In graph-based frameworks (TF graph), we return some operation here.\n In eager frameworks, we would not return anything but instead directly perform the checks.\n '
        pass
    @staticmethod
    def is_valid_in_current_graph(tensor: Tensor) -> bool:
        '\n :return: whether the raw tensor is valid in the current graph.\n In eager-mode frameworks, this is always true -- there is no graph.\n '
        return True
    @staticmethod
    def format_graph_output(raw_tensor: T, *, max_depth: Optional[int]=None) -> str:
        '\n :return: the computation graph leading to this tensor formatted.\n In eager-mode frameworks, this is not supported and returns None.\n '
        return '<no-graph>'
    @staticmethod
    def convert_to_tensor(value: Union[(Tensor, T, RawTensorTypes)], *, dims: Sequence[Dim], dtype: str, sparse_dim: Optional[Dim]=None, device: Optional[str]=None, name: Optional[str]=None) -> Tensor[T]:
        '\n :param value: tensor, or scalar raw tensor or some other scalar value\n :param dims:\n :param dtype:\n :param sparse_dim:\n :param device:\n :param name:\n :return: tensor\n '
        raise NotImplementedError
    @staticmethod
    def full(dims: Sequence[Dim], fill_value: Union[(RawTensorTypes, Tensor)], *, dtype: str, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None) -> Tensor:
        '\n https://data-apis.org/array-api/latest/API_specification/generated/array_api.full.html\n\n :param dims:\n :param fill_value:\n :param dtype:\n :param device:\n :param sparse_dim:\n :param feature_dim:\n :return: tensor\n '
        raise NotImplementedError
@classmethod
def compare(cls, a: Union[Tensor, RawTensorTypes], kind: str, b: Union[Tensor, RawTensorTypes], *, allow_broadcast_all_sources: Optional[bool] = None, dim_order: Optional[Sequence[Dim]] = None) -> Tensor:
    """
    Element-wise comparison ``a <kind> b``.
    Default implementation: build the broadcasted output template,
    then delegate to :func:`compare_raw`.
    """
    from . import _utils

    out_template, raw_a, raw_b = _utils.bin_op_out_template(
        cls,
        a,
        b,
        name=kind,
        copy_sparse_dim=False,
        allow_broadcast_all_sources=allow_broadcast_all_sources,
        dim_order=dim_order,
    )
    raw_result = cls.compare_raw(raw_a, kind, raw_b)
    # Set dtype first; the raw_tensor assignment below may validate against it.
    out_template.dtype = cls.get_dtype_name_raw(raw_result)
    out_template.raw_tensor = raw_result
    return out_template
@classmethod
def combine(cls, a: Union[Tensor, RawTensorTypes], kind: str, b: Union[Tensor, RawTensorTypes], *, allow_broadcast_all_sources: Optional[bool] = None, dim_order: Optional[Sequence[Dim]] = None) -> Tensor:
    """
    Element-wise combination ``a <kind> b`` (add, mul, ...).
    Default implementation: build the broadcasted output template,
    then delegate to :func:`combine_raw`.
    """
    from . import _utils

    out_template, raw_a, raw_b = _utils.bin_op_out_template(
        cls,
        a,
        b,
        name=kind,
        allow_broadcast_all_sources=allow_broadcast_all_sources,
        dim_order=dim_order,
    )
    raw_result = cls.combine_raw(raw_a, kind, raw_b)
    # Set dtype first; the raw_tensor assignment below may validate against it.
    out_template.dtype = cls.get_dtype_name_raw(raw_result)
    out_template.raw_tensor = raw_result
    return out_template
@staticmethod
def gather(source: Tensor, *, indices: Union[Tensor, int], axis: Dim, clip_to_valid: bool = False) -> Tensor:
    """
    Gathers slices on a specified axis from the source using indices.
    If the source is of the shape ``[B,D,F1]``, and indices of shape ``[B,F2]``,
    this will yield output of the shape ``[B,F2,F1]`` where

    ``output[b,f2,f1] = source[b,indices[b,f2],f1]``

    (if ``D`` is the axis to gather from).
    In general, all shared axes of the input and the positions will be considered as batch-axes.

    The ``indices`` argument can also be an ``int``.
    In this case, this simply gives ``source[indices]`` on the specified ``axis``.

    :param source:
    :param indices: indices used to select the slices of the source from.
        If another tensor, must be of type ``int32`` or ``int64``.
        Can also specify a constant ``int``.
    :param axis: the axis into which we gather the indices
    :param clip_to_valid: if True, the indices will be clipped to the valid range of the input,
        also taking seq lengths into account.
    :return: gathered values
    """
    raise NotImplementedError
@staticmethod
def scatter(source: Tensor, *, indices: Tensor, indices_dim: Union[Dim, Sequence[Dim]], out_dim: Union[Dim, Sequence[Dim]]) -> Tensor:
    """
    Scatters into a new zero-tensor.
    If entries in indices are duplicated, the corresponding values in source will be added together
    (scatter_add in PyTorch).
    (TF segment_sum can be implemented via this.)

    :param source: [batch_dims..., indices_dim(s)..., feature_dims...]
    :param indices: [batch_dims..., indices_dim(s)...] -> out_dim
    :param indices_dim:
    :param out_dim:
    :return: [batch_dims..., out_dim, feature_dims...]
    """
    raise NotImplementedError
@staticmethod
def slice(source: Tensor, *, axis: Dim, start: Optional[Union[int, Tensor]] = None, end: Optional[Union[int, Tensor]] = None, step: Optional[Union[int, Tensor]] = None, size: Optional[Union[int, Tensor, Dim]] = None, out_dim: Dim) -> Tensor:
    """
    Slice ``source`` along ``axis``, like ``source[start:end:step]`` on that axis
    (or taking ``size`` elements), the sliced axis becoming ``out_dim``.

    (Note: the name intentionally mirrors the builtin ``slice``; it is only ever
    accessed as an attribute of the backend, so no shadowing issue for callers.)
    """
    raise NotImplementedError
@staticmethod
def where(cond: Tensor, true_: Union[Tensor, rf.RawTensorTypes], false_: Union[Tensor, rf.RawTensorTypes], *, allow_broadcast_all_sources: bool = False) -> Tensor:
    """
    Element-wise selection: ``true_`` where ``cond`` holds, else ``false_``.
    """
    raise NotImplementedError
@staticmethod
def clip_by_value(x: Tensor, clip_value_min: Union[Tensor, rf.RawTensorTypes], clip_value_max: Union[Tensor, rf.RawTensorTypes], *, allow_broadcast_all_sources: bool = False) -> Tensor:
    """
    Clip ``x`` element-wise into the range [clip_value_min, clip_value_max].
    """
    raise NotImplementedError
@staticmethod
def matmul(a: Tensor[T], b: Tensor[T], *, reduce: Union[Dim, Sequence[Dim]], use_mask: bool = True) -> Tensor[T]:
    """
    This performs a batched matmul of two sources a and b
    (non-batched matmul and dot product are special cases).
    The underlying operation is a batched matmul (shared..., I, J) * (shared..., J, K) -> (shared..., I, K).
    The inputs a and b are transformed internally into the required shapes in the following way:
    The axis J is specified via the Dim given as 'reduce'. If multiple reduce Dims are given the corresponding axes
    are merged into one before the matmul via a reshape. All other matching Dims in a and b will be treated as
    batch dimensions ('shared...'). Dims unique to a and b define the axes I and K, respectively. (Multiple or no
    unique axes in a and b are supported too.)

    Depending on which Dims exist in a, b and reduce this dot operation can be used to compute scaling, scalar
    product, outer product, matrix-vector multiplication, matrix-matrix multiplication etc. (all possibly batched).

    :param a:
    :param b:
    :param reduce: Dims over which to perform the product, have to be present in both a and b
    :param use_mask: If the reduction is over dynamic axes, to get the correct sum reduction,
        we need to apply masking to one of the inputs. This is done automatically.
        By disabling this flag, this would be disabled.
    :return: result of dot product, Dim order: common axes as sorted in a, unique axes of a (in order),
        unique axes of b (in order)
    """
    raise NotImplementedError
@staticmethod
def range_over_dim(dim: Dim, *, dtype: Optional[str] = None, device: Optional[str] = None) -> Tensor[T]:
    """
    Like ``arange``: [0, 1, ..., dim - 1] over the given dim.

    :param dim:
    :param dtype:
    :param device:
    :return: tensor with shape [dim]
    """
    raise NotImplementedError
@staticmethod
def replace_dim(source: Tensor, *, in_dim: Dim, out_dim: Dim) -> Tensor:
    """
    :param source:
    :param in_dim: dim in ``source`` to be replaced
    :param out_dim: replacement dim; if its size is not yet known, it is copied over from ``in_dim``
    :return: ``source`` with ``in_dim`` replaced by ``out_dim`` (same raw tensor, new metadata)
    """
    if not out_dim.is_dim_known():
        out_dim.copy_from(in_dim)
    axis_idx = source.get_axis_from_description(in_dim)
    result = source.copy_template_replace_dim_tag(axis=axis_idx, new_dim_tag=out_dim, name="replace_dim")
    result.raw_tensor = source.raw_tensor
    return result
# All valid values for the ``mode`` argument of :func:`reduce`.
_AllowedReduceModes = {'sum', 'max', 'min', 'mean', 'logsumexp', 'any', 'all', 'argmin', 'argmax'}
@staticmethod
def reduce(source: Tensor[T], *, mode: str, axis: Union[Dim, Sequence[Dim]], use_mask: bool = True) -> Tensor[T]:
    """
    Reduce the tensor along the given axis

    :param source:
    :param mode: "sum", "max", "min", "mean", "logsumexp", "any", "all", "argmin", "argmax"
        (see ``_AllowedReduceModes``)
    :param axis:
    :param use_mask: if True (default), use the time mask (part of dim tag) to ignore padding frames
    :return: tensor with axis removed
    """
    raise NotImplementedError
@staticmethod
def top_k(source: Tensor, *, axis: Union[Dim, Sequence[Dim]], k: Union[int, Tensor], k_dim: Optional[Dim] = None, sorted: bool = True) -> Tuple[Tensor, Union[Tensor, Sequence[Tensor]], Dim]:
    """
    top_k. see :func:`top_k`.

    :return: (values, indices (one per axis if multiple axes given), k_dim)
    """
    raise NotImplementedError
@staticmethod
def random(*, dims: Sequence[Dim], dtype: str, device: Optional[str] = None, sparse_dim: Optional[Dim] = None, feature_dim: Optional[Dim] = None, distribution: str, mean: Optional[Union[int, float, Tensor]] = None, stddev: Optional[Union[int, float, Tensor]] = None, bound: Optional[Union[int, float, Tensor]] = None, minval: Optional[Union[int, float, Tensor]] = None, maxval: Optional[Union[int, float, Tensor]] = None, seed: Optional[Union[int, Sequence[int], numpy.ndarray]] = None, algorithm: Optional[str] = None, explicit_state: Optional[Tensor] = None, auto_update_state: Optional[bool] = None, static: Optional[bool] = None, out: Optional[Tensor] = None) -> Tensor:
    """
    Create a random tensor. See `rf.random` for the details of all parameters.
    """
    raise NotImplementedError
@staticmethod
def masked_select(tensor: Tensor, *, mask: Tensor, dims: Sequence[Dim], out_dim: Optional[Dim] = None) -> Tuple[Tensor, Dim]:
    """
    :param tensor:
    :param mask:
    :param dims: the order of the dims defines the format. those dims should be exactly the dims of the mask.
    :param out_dim:
    :return: tensor where all dims in mask/dims are removed and replaced by a new dim;
        the new dim is also returned.
        If mask==True for all elements, the returned tensor would be simply the flattened input tensor.
    """
    raise NotImplementedError
@staticmethod
def masked_scatter(source: Tensor, *, mask: Tensor, dims: Sequence[Dim], in_dim: Dim) -> Tensor:
    """
    The inverse of :func:`masked_select`.

    :param source: [in_dim, F...]
    :param mask: [dims...] -> bool (e.g. [B,T])
    :param dims: the order of the dims defines the format. those dims should be exactly the dims of the mask.
    :param in_dim: the dim of the source which should be scattered into the mask.
    :return: [dims..., F...]
    """
    raise NotImplementedError
@staticmethod
def batch_norm(source: Tensor, *, in_dim: Union[Dim, Sequence[Dim]], running_mean: Tensor, running_variance: Tensor, gamma: Optional[Tensor], beta: Optional[Tensor], epsilon: float, momentum: float, affine: bool, use_mask: bool) -> Tensor:
    """
    Batch normalization.

    :param source:
    :param in_dim: the feature dim(s), normalized over everything else
    :param running_mean: updated in training via ``momentum``
    :param running_variance: updated in training via ``momentum``
    :param gamma: scale, used iff ``affine``
    :param beta: shift, used iff ``affine``
    :param epsilon: added to the variance for numerical stability
    :param momentum: for the running statistics update
    :param affine:
    :param use_mask: whether to take dynamic seq lengths into account for the statistics
    :return: normalized tensor
    """
    raise NotImplementedError
@staticmethod
def conv(source: Tensor, *, in_dim: Dim, out_dim: Dim, in_spatial_dims: Sequence[Dim], out_spatial_dims: Optional[Sequence[Dim]] = None, filter: Tensor, filter_size: Sequence[Dim], padding: str, strides: Optional[Union[int, Sequence[int]]] = None, dilation_rate: Optional[Union[int, Sequence[int]]] = None, groups: Optional[int] = None, bias: Optional[Tensor] = None) -> Tuple[Tensor, Sequence[Dim]]:
    """
    Convolution.

    :return: (output, out_spatial_dims)
    """
    raise NotImplementedError
@staticmethod
def transposed_conv(source: Tensor, *, in_dim: Dim, out_dim: Dim, in_spatial_dims: Sequence[Dim], out_spatial_dims: Optional[Sequence[Dim]] = None, filter: Tensor, filter_size: Sequence[Dim], padding: str, remove_padding: Union[Sequence[int], int] = 0, output_padding: Optional[Union[Sequence[Optional[int]], int]] = None, strides: Optional[Sequence[int]] = None, bias: Optional[Tensor] = None) -> Tuple[Tensor, Sequence[Dim]]:
    """
    Transposed convolution.

    :return: (output, out_spatial_dims)
    """
    raise NotImplementedError
@staticmethod
def pool(source: Tensor, *, mode: str, pool_size: Sequence[int], padding: str = 'valid', dilation_rate: Union[Sequence[int], int] = 1, strides: Sequence[int], in_spatial_dims: Sequence[Dim], out_spatial_dims: Optional[Sequence[Dim]] = None) -> Tuple[Tensor, Sequence[Dim]]:
    """
    Pooling.

    :return: (output, out_spatial_dims)
    """
    raise NotImplementedError
@staticmethod
def stft(x: Tensor, *, in_spatial_dim: Dim, frame_step: int, frame_length: int, fft_length: int, window_use_frame_length: bool = True, align_window_left: bool = True, window_enforce_even: bool = True, out_spatial_dim: Dim, out_dim: Dim) -> Tensor:
    """
    Short-time Fourier transform. See :func:`stft` for the meaning of all parameters.
    """
    raise NotImplementedError
@staticmethod
def lstm(source: Tensor, *, state_h: Tensor, state_c: Tensor, ff_weight: Tensor, rec_weight: Tensor, bias: Tensor, spatial_dim: Dim, in_dim: Dim, out_dim: Dim) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
    """
    Functional LSTM.

    :param source: Tensor of shape [*, in_dim].
    :param state_h:
    :param state_c:
    :param ff_weight: Parameters for the weights of the feed-forward part.
    :param rec_weight: Parameters for the weights of the recurrent part.
    :param bias: Parameters for the bias.
    :param spatial_dim: Dimension in which the LSTM operates.
    :param in_dim:
    :param out_dim:
    :return: output, (state_h, state_c)
    """
    raise NotImplementedError
# In eager frameworks, a TensorArray is simply represented as a Python list of tensors.
# Graph-based backends override the tensor_array_* methods with their own representation.
TensorArrayType = List[Tensor]
@classmethod
def tensor_array_create(cls) -> TensorArrayType:
    """
    :return: new empty TensorArray (a plain list in eager mode)
    """
    if not cls.executing_eagerly():
        # Graph-based backends must provide their own TensorArray implementation.
        raise NotImplementedError
    return []
@staticmethod
def tensor_array_unstack(tensor: Tensor, *, axis: Dim) -> TensorArrayType:
    """
    :param tensor:
    :param axis: axis to unstack over
    :return: list of tensors (one per entry along ``axis``)
    """
    raise NotImplementedError
@staticmethod
def tensor_array_stack(tensor_array: TensorArrayType, *, axis: Dim, tensor_template: Tensor) -> Tensor:
    """
    :param tensor_array:
    :param axis: new axis to stack over
    :param tensor_template: per element shape, excluding axis
    :return: stacked tensor
    """
    raise NotImplementedError
@classmethod
def tensor_array_push_back(cls, tensor_array: TensorArrayType, value: Tensor) -> TensorArrayType:
    """
    :param tensor_array:
    :param value: appended to the array
    :return: the tensor_array with ``value`` appended (in eager mode, modified in place)
    """
    if not cls.executing_eagerly():
        raise NotImplementedError
    tensor_array.append(value)
    return tensor_array
@classmethod
def tensor_array_get_item(cls, tensor_array: TensorArrayType, index: Union[int, Tensor]) -> Tensor:
    """
    :param tensor_array:
    :param index: scalar int, or scalar int tensor
    :return: tensor_array[index]
    """
    if not cls.executing_eagerly():
        raise NotImplementedError
    if isinstance(index, Tensor):
        # Only a scalar tensor can be converted to a plain Python int index.
        assert index.dims == (), f'index {index} must be scalar'
        index = int(index.raw_tensor)
    return tensor_array[index]
|
def select_backend_tf():
    """
    Selects the TF low-level backend, dispatching on raw ``tf.Tensor``.
    (The earlier docstring said "RETURNN layers backend", which was a copy-paste error;
    that one is :func:`select_backend_returnn_layers_tf`.)
    """
    import tensorflow as tf

    backend = get_backend_by_raw_tensor_type(tf.Tensor)
    global_backend.__class__ = backend
    BehaviorVersion.set_min_behavior_version(16)
|
def select_backend_returnn_layers_tf():
    """
    Selects the RETURNN layers backend (based on TF).
    """
    from returnn.tf.frontend_layers import Layer

    global_backend.__class__ = get_backend_by_raw_tensor_type(Layer)
|
def select_backend_torch():
    """
    Selects the PyTorch (low-level) backend.
    """
    import torch

    global_backend.__class__ = get_backend_by_raw_tensor_type(torch.Tensor)
    BehaviorVersion.set_min_behavior_version(16)
    # Native (C++) accelerations; imported only after the backend is selected.
    from returnn.frontend import _native

    _native.setup()
    _native.setup_torch()
|
def get_backend_by_tensor(tensor: Tensor, *, fallback: Optional[T2] = None) -> Union[Type[Backend[T]], T2]:
    """
    :param tensor:
    :param fallback: returned when the tensor has no raw tensor assigned
    :return: backend class for the tensor's raw tensor type, or ``fallback``
    """
    raw = tensor.raw_tensor
    if raw is None and fallback:
        return fallback
    assert raw is not None
    return get_backend_by_raw_tensor_type(type(raw))
|
def get_backend_by_raw_tensor_type(tensor_type: Type[T]) -> Union[Type[Backend[T]]]:
    """
    :param tensor_type: raw tensor type, e.g. ``torch.Tensor``, ``tf.Tensor``, ``numpy.ndarray``
    :return: backend class for that raw tensor type.
        First consults the dispatch table (fast path);
        otherwise infers the backend from the defining module of a base class
        and registers it for future lookups.
    :raises TypeError: if ``tensor_type`` is not a type, or if no backend is known for it
    """
    # Fast path: direct hit in the dispatch table.
    if (tensor_type in _backend_tensor_type_dispatch_table):
        return _backend_tensor_type_dispatch_table[tensor_type]
    if (not isinstance(tensor_type, type)):
        raise TypeError(f'Expected type, got {tensor_type!r} of type {type(tensor_type)}')
    tensor_type: Type[T]
    # Maybe a subclass of an already-registered type (e.g. torch.nn.Parameter -> torch.Tensor).
    for base_type in tensor_type.__mro__:
        if (base_type in _backend_tensor_type_dispatch_table):
            # Also register the subclass itself, so the next lookup hits the fast path.
            register_backend_by_tensor_type(tensor_type, _backend_tensor_type_dispatch_table[base_type])
            return _backend_tensor_type_dispatch_table[base_type]
    # Nothing registered yet: infer the backend from the module of some base class.
    # The backend modules are imported lazily to avoid pulling in TF/Torch unnecessarily.
    for base_type in tensor_type.__mro__:
        if (base_type.__module__.split('.')[0] == 'tensorflow'):
            from returnn.tf.frontend_low_level import TFBackend
            backend_type = TFBackend
            tensor_types = _get_tensor_types_tf()
        elif (base_type.__module__.split('.')[0] == 'torch'):
            from returnn.torch.frontend import TorchBackend
            backend_type = TorchBackend
            tensor_types = _get_tensor_types_torch()
        elif base_type.__module__.startswith('returnn.tf.frontend_layers.'):
            from returnn.tf.frontend_layers import ReturnnLayersBackend, Layer
            backend_type = ReturnnLayersBackend
            tensor_types = (Layer,)
        elif issubclass(base_type, numpy.ndarray):
            from ._numpy_backend import NumpyBackend
            backend_type = NumpyBackend
            tensor_types = (numpy.ndarray,)
        else:
            continue
        assert any((issubclass(base_type, type_) for type_ in tensor_types)), f'tensor type {tensor_type} base_type {base_type} not in {tensor_types}, expected for backend {backend_type}'
        # Register all canonical raw tensor types of this backend for future lookups.
        for base_type_ in tensor_types:
            register_backend_by_tensor_type(base_type_, backend_type)
        return backend_type
    raise TypeError(f'unknown tensor type {tensor_type} with mro {tensor_type.__mro__}')
|
def register_backend_by_tensor_type(tensor_type: Type[T], backend: Type[Backend[T]]):
    """
    Register (or overwrite) the backend to use for a given raw tensor type.

    :param tensor_type: e.g. ``torch.Tensor``
    :param backend: backend class
    """
    _backend_tensor_type_dispatch_table[tensor_type] = backend
|
def _get_tensor_types_tf():
    """
    :return: tuple of relevant tensor types in TF.
        Note that it is not so important to cover all, as we also check issubclass as a fallback.
    """
    import tensorflow as tf

    return (tf.Tensor, tf.Variable)
|
def _get_tensor_types_torch():
'\n :return: tuple of relevant tensor types in PyTorch.\n Note that it is not so important to cover all, as we also check issubclass as a fallback.\n '
import torch
ls = [torch.Tensor, torch.nn.Parameter]
return tuple(ls)
|
def get_module(*, verbose: bool = False):
    """
    :param verbose: if True, always (re)check compilation and show compiler output
    :return: native Python extension module (compiled on demand, cached in ``_module``)
    """
    global _module
    if (_module and (not verbose)):
        return _module
    # Build one combined translation unit that #includes every .cpp file.
    # The md5-hash comment lines make the generated code string (and thus the
    # PyExtModCompiler cache key) change whenever any source file changes.
    src_code = ''
    for fn in sorted(glob((_my_dir + '/*.hpp'))):
        src_code += f'''// {os.path.basename(fn)} code hash md5: {_code_hash_md5(fn)}
'''
    for fn in sorted(glob((_my_dir + '/*.cpp'))):
        src_code += f'''// {os.path.basename(fn)} code hash md5: {_code_hash_md5(fn)}
'''
        src_code += f'''#include "{os.path.basename(fn)}"
'''
    if (os.environ.get('RETURNN_TEST') == '1'):
        # In tests: enable debug code and C assertions, and be verbose.
        src_code = (textwrap.dedent(' #define DEBUG 1\n #ifdef NDEBUG\n #undef NDEBUG\n #endif\n\n ') + src_code)
        verbose = True
    compiler = PyExtModCompiler(base_name='_returnn_frontend_native', code_version=1, code=src_code, include_paths=(_my_dir,), is_cpp=True, verbose=verbose)
    module = compiler.load_py_module()
    # Keep the first successfully loaded module; a verbose re-run must not replace it.
    if (not _module):
        _module = module
    return _module
|
def _code_hash_md5(filename: str) -> str:
f_code = open(filename).read()
h = hashlib.md5()
h.update(f_code.encode('utf8'))
return h.hexdigest()
|
def setup():
    """
    Setup the native code:
    monkey-patch :class:`Tensor`/:class:`Dim` with faster implementations,
    using the native (C++) extension module when it can be built.
    Idempotent -- only the first call has an effect.
    """
    global _is_set_up
    if _is_set_up:
        return
    _is_set_up = True
    from returnn.tensor import Tensor, Dim
    from returnn.tensor.tensor import _TensorOpOverloadsMixin, _TensorMixin
    from returnn.tensor.dim import _DimMixin
    # Pure-Python speedups first (direct property access instead of Python-level getters).
    # These stay in place even if the native module below fails to build.
    Tensor.raw_tensor = property(Tensor._raw_tensor.__get__, Tensor.raw_tensor.__set__)
    _TensorMixin.placeholder = Tensor.raw_tensor  # old alias
    Tensor.dims = property(Tensor._dims.__get__)
    _TensorMixin.dim_tags = Tensor.dims  # old alias
    _DimMixin.dimension = property(Dim.size.__get__)
    try:
        mod = get_module()
    except Exception as exc:
        if (os.environ.get('RETURNN_TEST') == '1'):
            raise
        # The native module is optional: report and continue with pure Python.
        print('RETURNN frontend _native backend: Error while getting module:')
        print(exc)
        print('This is optional (although very recommended), so we continue without it.')
        return
    # Native raw_tensor setter and backend lookup.
    Tensor.raw_tensor = property(Tensor._raw_tensor.__get__, mod.tensor_raw_tensor_setter)
    _TensorMixin.placeholder = Tensor.raw_tensor
    _TensorMixin._raw_backend = property(mod.get_backend_for_tensor)
    # Replace all operator overloads (__add__, __eq__, ...) by their native counterparts.
    # Naming convention of the native module: __add__ -> _tensor_add_instancemethod.
    for (name, cur_func) in _TensorOpOverloadsMixin.__dict__.items():
        if (not callable(cur_func)):
            continue
        assert (name.startswith('__') and name.endswith('__'))
        native_func = getattr(mod, (('_tensor_' + name[2:(- 2)]) + '_instancemethod'))
        assert callable(native_func)
        setattr(_TensorOpOverloadsMixin, name, native_func)
    # Replace selected Tensor methods by native instance methods.
    for (rf_name, native_name) in {'copy': 'tensor_copy', 'copy_template': 'tensor_copy_template', 'get_out_permutation_to_dims': 'tensor_get_out_permutation_to_dims', 'copy_compatible_to_dims': 'tensor_copy_compatible_to_dims', 'copy_compatible_to_dims_raw': 'tensor_copy_compatible_to_dims_raw'}.items():
        assert hasattr(_TensorMixin, rf_name)
        native_func = getattr(mod, (('_' + native_name) + '_instancemethod'))
        setattr(_TensorMixin, rf_name, native_func)
    # Replace the module-level functions in rf and rf.math_ by native ones.
    import returnn.frontend as rf
    from returnn.frontend import math_ as rf_math
    for (rf_name, native_name) in {'compare': 'tensor_compare', 'combine': 'tensor_combine', 'equal': 'tensor_eq', 'not_equal': 'tensor_ne', 'less': 'tensor_lt', 'less_equal': 'tensor_le', 'greater': 'tensor_gt', 'greater_equal': 'tensor_ge', 'add': 'tensor_add', 'sub': 'tensor_sub', 'mul': 'tensor_mul', 'true_divide': 'tensor_truediv', 'floor_divide': 'tensor_floordiv', 'neg': 'tensor_neg', 'mod': 'tensor_mod', 'pow': 'tensor_pow', 'logical_and': 'tensor_and', 'logical_or': 'tensor_or', 'logical_not': 'tensor_invert', 'abs': 'tensor_abs', 'ceil': 'tensor_ceil', 'floor': 'tensor_floor'}.items():
        assert hasattr(rf, rf_name)
        assert hasattr(rf_math, rf_name)
        native_func = getattr(mod, native_name)
        setattr(rf, rf_name, native_func)
        setattr(rf_math, rf_name, native_func)
|
def setup_torch():
    """
    Like :func:`setup`, but specifically for the PyTorch backend.
    This assumes that we can `import torch`, unlike :func:`setup`.
    Idempotent -- only the first call has an effect.
    """
    global _is_set_up_torch
    if _is_set_up_torch:
        return
    _is_set_up_torch = True
    import torch
    try:
        mod = get_module()
    except Exception:
        if (os.environ.get('RETURNN_TEST') == '1'):
            raise
        # Native module is optional; setup() already reported the error in detail.
        return
    from returnn.torch.frontend import TorchBackend
    # PyTorch always executes eagerly. `True.__bool__` is a cheap C-level bound method
    # which always returns True -- faster than a Python-level function.
    TorchBackend.executing_eagerly = True .__bool__
    TorchBackend.get_dtype_name_raw = mod.raw_torch_tensor_get_dtype
    # Bind the raw helpers directly to the torch C functions (skips a Python-level wrapper).
    TorchBackend.get_ndim_raw = staticmethod(torch.Tensor.dim)
    TorchBackend.expand_dims_raw = staticmethod(torch.unsqueeze)
    TorchBackend.reshape_raw = staticmethod(torch.reshape)
|
class NumpyBackend(Backend[numpy.ndarray]):
    """Numpy backend"""

    RawTensorType = numpy.ndarray

    # Maps RETURNN `combine` kinds to the corresponding numpy ufunc names where they differ.
    # Bugfix: previously `getattr(numpy, kind)` was used directly, which raised AttributeError
    # for the documented kinds "sub", "mul", "truediv", "floordiv" and "pow"
    # (numpy names these subtract/multiply/true_divide/floor_divide/power).
    _combine_kind_to_numpy_name = {
        'sub': 'subtract',
        'mul': 'multiply',
        'truediv': 'true_divide',
        'floordiv': 'floor_divide',
        'pow': 'power',
    }

    @staticmethod
    def executing_eagerly() -> bool:
        """:return: True -- numpy always executes eagerly"""
        return True

    @staticmethod
    def get_dtype_name_raw(raw_tensor: numpy.ndarray) -> str:
        """
        :return: dtype of raw tensor, as string, e.g. "int64".
            Numpy unicode dtypes ("str32", "str64", ...) are mapped to "string".
        """
        dtype_name = raw_tensor.dtype.name
        if dtype_name.startswith('str'):
            return 'string'
        return dtype_name

    @staticmethod
    def as_dtype_raw(dtype_name: str) -> numpy.dtype:
        """
        :param dtype_name: e.g. "float32"
        :return: numpy dtype object
        """
        return numpy.dtype(dtype_name)

    @staticmethod
    def get_ndim_raw(raw_tensor: numpy.ndarray) -> int:
        """
        :return: ndim of raw tensor
        """
        return raw_tensor.ndim

    @staticmethod
    def get_shape_raw(raw_tensor: numpy.ndarray) -> Tuple[int, ...]:
        """
        :return: shape of raw tensor
        """
        return raw_tensor.shape

    @staticmethod
    def get_shape_tuple_raw(raw_tensor: numpy.ndarray) -> Tuple[int, ...]:
        """
        :return: shape of raw tensor. In eager frameworks, all dims are int.
        """
        return raw_tensor.shape

    @staticmethod
    def get_known_shape_raw(raw_tensor: numpy.ndarray) -> Tuple[int, ...]:
        """
        :return: shape of raw tensor; int for static known, None otherwise.
            In numpy, all dims are always known, so this is the plain shape.
        """
        return raw_tensor.shape

    @staticmethod
    def expand_dims_raw(raw_tensor: numpy.ndarray, axis: int) -> numpy.ndarray:
        """
        :param raw_tensor:
        :param axis: e.g. 1
        :return: raw tensor with new axis of size 1 inserted at ``axis``
        """
        return numpy.expand_dims(raw_tensor, axis)

    @staticmethod
    def transpose_raw(raw_tensor: numpy.ndarray, perm: Sequence[int]) -> numpy.ndarray:
        """
        :param raw_tensor:
        :param perm: e.g. [0, 2, 1]
        :return: permuted (transposed) raw tensor. Identity permutation returns the input unchanged.
        """
        if all(p == i for i, p in enumerate(perm)):
            return raw_tensor
        return raw_tensor.transpose(tuple(perm))

    @staticmethod
    def reshape_raw(raw_tensor: numpy.ndarray, shape: Union[Sequence[Union[int, numpy.ndarray]], numpy.ndarray]) -> numpy.ndarray:
        """
        :return: raw tensor reshaped to ``shape``
        """
        return numpy.reshape(raw_tensor, shape)

    @staticmethod
    def compare_raw(a: numpy.ndarray, kind: str, b: numpy.ndarray) -> numpy.ndarray:
        """
        :param a:
        :param kind: "equal", "less", "less_equal", "greater", "greater_equal", "not_equal"
        :param b:
        :return: a `kind` b
        """
        assert a.ndim == b.ndim or a.ndim == 0 or b.ndim == 0
        # All compare kinds exist as numpy ufuncs under the same name.
        op = getattr(numpy, kind)
        return op(a, b)

    @staticmethod
    def combine_raw(a: numpy.ndarray, kind: str, b: numpy.ndarray) -> numpy.ndarray:
        """
        :param a:
        :param kind: "add", "sub", "mul", "truediv", "floordiv", "mod", "pow",
            "maximum", "minimum", "logical_and", "logical_or", "squared_difference"
        :param b:
        :return: a `kind` b
        """
        assert a.ndim == b.ndim or a.ndim == 0 or b.ndim == 0
        if kind == 'squared_difference':
            # Not a numpy ufunc; compute (a - b) ** 2 directly.
            return numpy.square(numpy.subtract(a, b))
        kind = NumpyBackend._combine_kind_to_numpy_name.get(kind, kind)
        op = getattr(numpy, kind)
        return op(a, b)

    @staticmethod
    def range_over_dim(dim: Dim, *, dtype: Optional[str] = None, device: Optional[str] = None) -> Tensor[numpy.ndarray]:
        """
        :param dim:
        :param dtype: default: dtype of the dim's dyn sizes, else the default array index dtype
        :param device: ignored in the numpy backend
        :return: tensor [0, 1, ..., dim - 1] with shape [dim]
        """
        if (not dtype) and dim.dyn_size_ext:
            dtype = dim.dyn_size_ext.dtype
        if not dtype:
            dtype = rf.get_default_array_index_dtype()
        out = Tensor(
            'range',
            dims=[dim],
            # An int range over a dim is naturally sparse over that same dim.
            sparse_dim=(dim if (dtype.startswith('int') or dtype.startswith('uint')) else None),
            dtype=dtype,
        )
        out.raw_tensor = numpy.arange(dim.get_dim_value(), dtype=NumpyBackend.as_dtype_raw(out.dtype))
        return out

    @staticmethod
    def reduce(source: Tensor[numpy.ndarray], *, mode: str, axis: Union[Dim, Sequence[Dim]], use_mask: bool = True) -> Tensor[numpy.ndarray]:
        """
        Reduce the tensor along the given axis (or axes).

        :param source:
        :param mode: see ``Backend._AllowedReduceModes``.
            NOTE(review): numpy has no `logsumexp`, and `argmin`/`argmax` only accept a single axis,
            so those cases would fail here -- TODO extend if needed.
        :param axis:
        :param use_mask: masking is not implemented in the numpy backend;
            only dims which need no masking are allowed when this is True.
        :return: tensor with the reduced axes removed
        """
        assert mode in Backend._AllowedReduceModes
        if use_mask:
            if isinstance(axis, Dim):
                assert not axis.need_masking()
            else:
                assert all(not dim.need_masking() for dim in axis)
        func = getattr(numpy, mode)
        raw_dims = (
            [source.get_axis_from_description(axis)]
            if isinstance(axis, Dim)
            else [source.get_axis_from_description(dim) for dim in axis]
        )
        res_dims = [dim for i, dim in enumerate(source.dims) if i not in raw_dims]
        if not res_dims:
            # Reduce over all axes: numpy returns a scalar; wrap it as a 0-dim array.
            raw_result = numpy.array(func(source.raw_tensor))
        else:
            raw_result = func(source.raw_tensor, axis=raw_dims)
        res = Tensor(
            name=f'reduce_{mode}',
            raw_tensor=raw_result,
            dims=res_dims,
            dtype=source.dtype,
            sparse_dim=source.sparse_dim,
        )
        return res
|
class RandomJournal():
    """random journal. see module docstring"""

    def __init__(self):
        self._entries: List[RandomJournalEntry] = []  # recorded random ops, in creation order
        self._cur_entry_idx = 0  # read position for get_next()
        # Graph-mode read markers: (tensor read from the journal, run ctx at read time).
        self._graph_reader_nodes: List[Tuple[Tensor, rf.RunCtx]] = []

    def append(self, *, distribution: str, mean: Optional[Union[int, float, Tensor]] = None, stddev: Optional[Union[int, float, Tensor]] = None, bound: Optional[Union[int, float, Tensor]] = None, minval: Optional[Union[int, float, Tensor]] = None, maxval: Optional[Union[int, float, Tensor]] = None, seed: Optional[Union[int, Sequence[int], numpy.ndarray]] = None, static: Optional[bool] = None, out: Optional[Tensor[numpy.ndarray]]):
        """append a new entry, also capturing the current control flow ctx and run ctx"""
        self._entries.append(RandomJournalEntry(out=out, control_flow_ctx=rf.get_current_control_flow_ctx(), run_ctx=rf.get_run_ctx(), distribution=distribution, mean=mean, stddev=stddev, bound=bound, minval=minval, maxval=maxval, seed=seed, static=static))

    def get_next(self, *, new_out_template: Optional[Tensor] = None) -> RandomJournalEntry:
        """
        read next entry and advance the read position

        :param new_out_template: if given, check that its dtype and dims
            are consistent with the recorded entry
        """
        assert (self._cur_entry_idx < len(self._entries))
        entry = self._entries[self._cur_entry_idx]
        if new_out_template:
            assert (new_out_template.dtype == entry.out.dtype), f'random journal entry dtype mismatch, expected {new_out_template}, got {entry.out} at index {self._cur_entry_idx}'
            assert (len(new_out_template.dims) == len(entry.out.dims)), f'random journal entry dims mismatch, expected {new_out_template}, got {entry.out} at index {self._cur_entry_idx}'
            for (new_dim, old_dim) in zip(new_out_template.dims, entry.out.dims):
                new_dim: Dim
                old_dim: Dim
                # Only the static dim sizes are compared; the dim identities may differ.
                assert (new_dim.dimension == old_dim.dimension), f'random journal entry dim mismatch, expected {new_out_template}, got {entry.out} at index {self._cur_entry_idx}'
        self._cur_entry_idx += 1
        return entry

    def reached_end(self) -> bool:
        """:return: whether all entries have been consumed by :func:`get_next`"""
        return (self._cur_entry_idx >= len(self._entries))

    def add_graph_reader_node(self, out):
        """
        In graph mode, if reading (get_next), at graph construction time,
        register that we are reading from the journal
        now in the current context.
        This is used in :func:`get_recent_graph_reader_node_in_accessible_ctx`.
        """
        self._graph_reader_nodes.append((out, rf.get_run_ctx()))

    def get_graph_reader_idx(self) -> int:
        """:return: current index (number of registered graph reader nodes)"""
        return len(self._graph_reader_nodes)

    def get_recent_graph_reader_node_in_accessible_ctx(self) -> Optional[Tensor]:
        """
        From the graph reader nodes, return the most recent one which is in an accessible context.
        Accessible context means either the same, or a parent context.
        """
        cur_control_flow_ctx = rf.get_current_control_flow_ctx()
        cur_run_ctx = rf.get_run_ctx()
        # Walk backwards: most recent reader first.
        for (prev_out, prev_run_ctx) in reversed(self._graph_reader_nodes):
            if (prev_run_ctx != cur_run_ctx):
                # Entries from a different (older) run ctx are not relevant anymore.
                return None
            if ControlFlowContext.is_parent_or_same(prev_out.control_flow_ctx, cur_control_flow_ctx):
                return prev_out
            # prev_out lives in an inaccessible ctx (e.g. a sibling loop body);
            # search its consumers for a tensor which is in an accessible ctx.
            consumers = rf.walk_tensor_consumers(prev_out, filter_outputs=(lambda x: ControlFlowContext.is_parent_or_same(x.control_flow_ctx, cur_control_flow_ctx)), ending_condition=(lambda x: ControlFlowContext.is_parent_or_same(x.control_flow_ctx, cur_control_flow_ctx)))
            if (not consumers):
                raise Exception(f'cannot handle {prev_out} in current {cur_control_flow_ctx}')
            return consumers[0]
        return None
|
@dataclass
class RandomJournalEntry():
    """One recorded random op (see :class:`RandomJournal`)."""

    # The recorded output tensor (numpy), or None.
    out: Optional[Tensor[numpy.ndarray]]
    # Control flow ctx which was active when the entry was recorded.
    control_flow_ctx: Optional[ControlFlowContext]
    # Run ctx which was active when the entry was recorded.
    run_ctx: rf.RunCtx
    # Distribution name; the remaining fields are its (distribution-dependent) parameters.
    distribution: str
    mean: Optional[Union[int, float, Tensor]] = None
    stddev: Optional[Union[int, float, Tensor]] = None
    bound: Optional[Union[int, float, Tensor]] = None
    minval: Optional[Union[int, float, Tensor]] = None
    maxval: Optional[Union[int, float, Tensor]] = None
    seed: Optional[Union[int, Sequence[int], numpy.ndarray]] = None
    static: Optional[bool] = None
|
def get_backend_from_tensors(*args):
    """
    :param args: arbitrary objects; the first :class:`Tensor` among them determines the backend
    :return: backend of the first tensor found, falling back to the global backend
    """
    return next((x._raw_backend for x in args if isinstance(x, Tensor)), _global_rf)
|
def get_dtype_name(x: Union[T, Tensor[T], int, float]) -> str:
    """
    :param x: tensor, raw tensor, or Python scalar
    :return: dtype as string, e.g. "float32".
        Python ints/floats map to the configured default int/float dtype.
    """
    if isinstance(x, Tensor):
        return x.dtype
    if isinstance(x, int):
        return rf.get_default_int_dtype()
    if isinstance(x, float):
        return rf.get_default_float_dtype()
    return get_backend_by_raw_tensor_type(type(x)).get_dtype_name_raw(x)
|
def is_int(x: Union[T, Tensor[T], int, float]) -> bool:
    """
    :param x:
    :return: whether the dtype of ``x`` is a signed or unsigned integer type
    """
    return get_dtype_name(x).startswith(('int', 'uint'))
|
def bin_op_out_template(backend: Type[Backend], a: Union[Tensor[T], int, float, numpy.number], b: Union[Tensor[T], int, float, numpy.number], *, name: str, copy_sparse_dim: bool = True, allow_broadcast_all_sources: Optional[bool] = None, dim_order: Optional[Sequence[Dim]] = None, allow_scalar: bool = True) -> Tuple[Tensor[T], T, T]:
    """
    make template for output tensor of binary op

    :param backend:
    :param a:
    :param b:
    :param name: for returned Tensor. no other functionality
    :param copy_sparse_dim:
    :param allow_broadcast_all_sources: if True, it is allowed that neither a nor b has all dims of the result.
        Not needed when out_dims is specified explicitly.
    :param dim_order: defines the order of the resulting dims. if None, it is automatically inferred from a and b.
        Not all the dims of a and b need to be specified here, and there could also be other dims in the dim_order.
    :param allow_scalar: if True, it is allowed that a or b is a scalar, and then no broadcast dims are added.
        This can be relevant to allow things like x * 2, where x is on GPU, and then PyTorch allows 2 to stay on CPU.
    :return: out, a_raw, b_raw
    """
    # Take dtype/device from the first arg which already is a Tensor (if any).
    src_dtype = None
    src_device = None
    if isinstance(a, Tensor):
        src_dtype = a.dtype
        src_device = a.device
    elif isinstance(b, Tensor):
        src_dtype = b.dtype
        src_device = b.device
    a = rf.convert_to_tensor(a, dtype=src_dtype, device=src_device, keep_scalar_on_cpu=allow_scalar, _backend=backend)
    src_dtype = (src_dtype or a.dtype)
    b = rf.convert_to_tensor(b, dtype=src_dtype, device=src_device, keep_scalar_on_cpu=allow_scalar, _backend=backend)
    assert (a._raw_backend == b._raw_backend), 'Cannot combine tensors from two different frontends, e.g. TF and PT'
    # Collect the union of dims of a and b, keeping the order of first occurrence.
    all_dims = []
    for dim in (a.dims + b.dims):
        if (dim in all_dims):
            continue
        # Fast path: dim occurs at most once in each tensor.
        if ((a.dims.count(dim) <= 1) and (b.dims.count(dim) <= 1)):
            all_dims.append(dim)
            continue
        # A dim occurring multiple times within one tensor: take all occurrences
        # from whichever tensor has more of them.
        if (a.dims.count(dim) >= b.dims.count(dim)):
            all_dims.extend([dim_ for dim_ in a.dims if (dim_ == dim)])
        else:
            all_dims.extend([dim_ for dim_ in b.dims if (dim_ == dim)])
    # If neither source covers all dims of the result, this must be explicitly allowed.
    if all(((set(x.dims) != set(all_dims)) for x in (a, b))):
        if (allow_broadcast_all_sources is False):
            raise ValueError(f'compare: sources {a!r} {b!r} not allowed with allow_broadcast_all_sources=False')
        elif (allow_broadcast_all_sources is None):
            raise ValueError(f'compare: sources {a!r} {b!r} require explicit allow_broadcast_all_sources=True')
        elif (allow_broadcast_all_sources is True):
            pass
        else:
            raise TypeError(f'invalid type for allow_broadcast_all_sources: {type(allow_broadcast_all_sources)}')
    if dim_order:
        # Stable sort: dims not listed in dim_order keep their relative order, after all listed ones.
        all_dims.sort(key=(lambda d: (dim_order.index(d) if (d in dim_order) else len(dim_order))))
    out = Tensor(name, dims=all_dims, dtype=src_dtype)
    out.feature_dim = res_feature_dim(a, b)
    if copy_sparse_dim:
        out.sparse_dim = res_sparse_dim(a, b)
    # Raw tensors, made broadcast-compatible to all_dims (scalars may stay as-is if allow_scalar).
    if ((not allow_scalar) or a.dims):
        a_raw = a.copy_compatible_to_dims_raw(all_dims)
    else:
        a_raw = a.raw_tensor
    if ((not allow_scalar) or b.dims):
        b_raw = b.copy_compatible_to_dims_raw(all_dims)
    else:
        b_raw = b.raw_tensor
    return (out, a_raw, b_raw)
|
def res_feature_dim(a: Tensor, b: Tensor) -> Optional[Dim]:
    """
    Determine the feature dim for the result of combining ``a`` and ``b``.

    :param a:
    :param b:
    :return: the feature dim if it is consistent between both inputs, otherwise None
    """
    feat_a, feat_b = a.feature_dim, b.feature_dim
    if feat_a and feat_b:
        # both set: only keep it if they agree
        return feat_a if feat_a == feat_b else None
    # at most one is set: take that one (or None if neither)
    return feat_a or feat_b
|
def res_sparse_dim(a: Tensor, b: Tensor) -> Optional[Dim]:
    """
    Determine the sparse dim for the result of combining ``a`` and ``b``.

    :param a:
    :param b:
    :return: the sparse dim if it is consistent between both inputs, otherwise None
    """
    sparse_a, sparse_b = a.sparse_dim, b.sparse_dim
    if sparse_a and sparse_b:
        # both set: only keep it if they agree
        return sparse_a if sparse_a == sparse_b else None
    # at most one is set: take that one (or None if neither)
    return sparse_a or sparse_b
|
def strided_slice_raw_key(tensor: Tensor, axis: Optional[Union[(Dim, Sequence[Dim])]]=None, key: Optional[rf.ItemKeyType]=None, key_dim: Optional[Union[(Dim, Sequence[Dim])]]=None) -> Tuple[(Union[(slice, int, T, Sequence[Union[(None, slice, int, T)]])], Tuple[(Dim, ...)])]:
    """
    Given an axis (or axes) and a key (or keys), build a raw key that can be used to index
    into a raw (backend-native) tensor, as in ``raw_tensor[key]``.
    The tensor is needed to infer the raw axis index for each :class:`Dim`.

    :param tensor: tensor to be indexed; only its dims are inspected here
    :param axis: dim to index into, or a sequence of dims matching a key sequence.
        If not given, it is inferred from the sparse_dim of the key(s).
    :param key: int / slice / numpy array / Tensor, or a sequence thereof (one per axis)
    :param key_dim: resulting dim(s) for non-scalar keys (required for those)
    :return: (raw item key, resulting dims of the reduced tensor)
    """
    # empty key: identity slice, dims unchanged
    if ((key is None) or (isinstance(key, (list, tuple)) and (len(key) == 0))):
        return (slice(None, None), tensor.dims)
    if (isinstance(key, Tensor) and (key.dtype == 'bool')):
        raise NotImplementedError('strided_slice: boolean mask')
    # case 1: a single key (not a sequence) into a single axis
    if isinstance(key, (slice, int, numpy.number, numpy.ndarray, Tensor)):
        if (axis is None):
            # infer axis from the sparse_dim of the key (or of the slice bounds)
            axis = _slice_find_sparse_dim(key)
        if (not isinstance(axis, Dim)):
            raise TypeError(f'strided_slice: must specify axis for key, got {type(axis).__name__}')
        axis_int = tensor.get_axis_from_description(axis)
        key_raw = _map_slice_value_raw(key)
        if _slice_value_is_reduce(key):
            # scalar key: the indexed axis disappears from the result
            res_dims = tuple((d for (i, d) in enumerate(tensor.dims) if (i != axis_int)))
        else:
            # slice/vector key: the indexed axis is replaced by key_dim
            if (not isinstance(key_dim, Dim)):
                raise TypeError(f'strided_slice: expected key_dim of type Dim, got {key_dim}')
            res_dims = tuple(((d if (i != axis_int) else key_dim) for (i, d) in enumerate(tensor.dims)))
        if (axis_int == 0):
            return (key_raw, res_dims)
        # NOTE(review): leading None entries stand for "no key on this axis" here;
        # confirm the backend maps them to full slices (in plain numpy indexing,
        # None would insert a new axis instead).
        return ((((None,) * axis_int) + (key_raw,)), res_dims)
    # case 2: a sequence of keys, one per axis
    if (not isinstance(key, (list, tuple))):
        raise TypeError(f'strided_slice: unexpected key type: {type(key).__name__}')
    if (axis is not None):
        if (not isinstance(axis, (list, tuple))):
            raise TypeError(f'strided_slice: key is sequence, thus expect axis to be sequence as well, got {type(axis).__name__}')
        if (len(axis) != len(key)):
            raise ValueError(f'strided_slice: mismatching axis seq len {len(axis)} and key seq length {len(key)}')
    if (key_dim is not None):
        if (not isinstance(key_dim, (list, tuple))):
            raise TypeError(f'strided_slice: key is sequence, thus expect key_dim to be sequence as well, got {type(key_dim).__name__}')
        if (len(key_dim) != len(key)):
            raise ValueError(f'strided_slice: mismatching key_dim seq len {len(key_dim)} and key seq length {len(key)}')
    raw_out_keys = {}  # raw axis index -> raw key
    # resulting dim per raw axis; None marks axes reduced away by scalar keys
    raw_out_dims: List[Optional[Dim]] = list(tensor.dims)
    for (i, key_) in enumerate(key):
        # resolve the axis for this key: explicit axis[i], else via its sparse_dim
        axis_ = None
        if (axis is not None):
            axis_ = axis[i]
        if (axis_ is None):
            axis_ = _slice_find_sparse_dim(key_)
        if (not isinstance(axis_, Dim)):
            raise TypeError(f'strided_slice: must specify axis for key sequence, got {type(axis_).__name__}')
        axis_int = tensor.get_axis_from_description(axis_)
        key_raw = _map_slice_value_raw(key_)
        if (axis_int in raw_out_keys):
            raise ValueError(f'strided_slice: duplicate axis {axis_} in sequence')
        raw_out_keys[axis_int] = key_raw
        if _slice_value_is_reduce(key_):
            raw_out_dims[axis_int] = None
        else:
            key_dim_ = None
            if (key_dim is not None):
                key_dim_ = key_dim[i]
            if (not isinstance(key_dim_, Dim)):
                raise TypeError(f'strided_slice: expected key_dim for key sequence, got {type(key_dim_).__name__}')
            raw_out_dims[axis_int] = key_dim_
    # assemble the raw key tuple up to the last indexed axis;
    # axes without a key get None (see NOTE above on None semantics)
    out = []
    for i in range(0, (max(raw_out_keys) + 1)):
        out.append(raw_out_keys.get(i))
    return (tuple(out), tuple((d for d in raw_out_dims if (d is not None))))
|
def _slice_find_sparse_dim(v: Union[(Tensor, slice, Any)]) -> Optional[Dim]:
    """
    Infer the axis (via sparse_dim) implied by a slice key, if any.

    :param v: key value, e.g. a Tensor or a slice whose bounds may be Tensors
    :return: the unique sparse dim found, or None
    :raises ValueError: if the slice bounds carry different sparse dims
    """
    if isinstance(v, Tensor):
        return v.sparse_dim
    if not isinstance(v, slice):
        return None
    # collect sparse dims from any Tensor-valued slice bounds
    sparse_dims = {}
    for field in ('start', 'stop', 'step'):
        bound = getattr(v, field)
        if isinstance(bound, Tensor) and bound.sparse_dim:
            sparse_dims[field] = bound.sparse_dim
    unique = set(sparse_dims.values())
    if not unique:
        return None
    if len(unique) == 1:
        return unique.pop()
    raise ValueError(f'strided_slice: multiple different sparse dims in slice {v}: {sparse_dims}')
|
def _map_slice_value_raw(v: Union[(None, slice, int, numpy.number, numpy.ndarray, Tensor[T])]) -> Union[(None, slice, int, numpy.number, T)]:
    """
    Convert one slice component to its raw (backend-native) equivalent.

    :param v: None, slice (bounds converted recursively), scalar, array or Tensor
    :return: raw value usable in raw-tensor indexing
    :raises TypeError: on unsupported value types
    """
    if v is None:
        return None
    if isinstance(v, slice):
        # convert each bound recursively
        bounds = (_map_slice_value_raw(x) for x in (v.start, v.stop, v.step))
        return slice(*bounds)
    if isinstance(v, (int, numpy.number)):
        return v
    if isinstance(v, numpy.ndarray):
        assert v.ndim <= 1, f'strided_slice: expect scalar or vector, got array with shape {v.shape}'
        return v
    if isinstance(v, Tensor):
        assert len(v.dims) <= 1, f'strided_slice: expect scalar or vector, got Tensor with dims {v.dims}'
        return v.raw_tensor
    raise TypeError(f'strided_slice: got unexpected value of type {type(v).__name__}')
|
def _slice_value_is_reduce(v: Union[(None, slice, int, numpy.number, numpy.ndarray, Tensor[T])]) -> bool:
if (v is None):
return False
if isinstance(v, slice):
return False
if isinstance(v, (int, numpy.number)):
return True
if isinstance(v, numpy.ndarray):
assert (v.ndim <= 1), f'strided_slice: expect scalar or vector, got array with shape {v.shape}'
return (v.ndim == 0)
if isinstance(v, Tensor):
assert (len(v.dims) <= 1), f'strided_slice: expect scalar or vector, got Tensor with dims {v.dims}'
return (v.dims == 0)
raise TypeError(f'strided_slice: got unexpected value of type {type(v).__name__}')
|
def convert_to_tensor(value: Union[(Tensor, T, RawTensorTypes)], *, dims: Optional[Sequence[Dim]]=None, dtype: Optional[str]=None, sparse_dim: Optional[Dim]=None, shape: Optional[Sequence[Dim]]=None, device: Optional[str]=None, keep_scalar_on_cpu: bool=False, name: Optional[str]=None, _backend: Optional[Type[Backend]]=None) -> Tensor[T]:
    """
    Convert a value (Tensor, raw backend tensor, numpy array, or scalar) to a :class:`Tensor`.

    :param value: tensor, or scalar raw tensor, or some other scalar value.
        If it already is a Tensor, it is returned as-is (all other arguments are ignored then).
    :param dims: dims of the result; inferred from the value's shape if not given
    :param dtype: dtype of the result; inferred from the value if not given
    :param sparse_dim:
    :param shape: alias for `dims`, for some older code
    :param device:
    :param keep_scalar_on_cpu: if the value is a scalar, keep it on CPU, even if `device` is sth else
    :param name: name for the resulting tensor (and auto-created dims)
    :param _backend: backend to use; inferred from the value (or the global backend) if not given
    :return: tensor
    """
    if isinstance(value, Tensor):
        # already a Tensor: pass through unchanged
        return value
    if ((dims is None) and (shape is not None)):
        # `shape` is the legacy alias for `dims`
        dims = shape
    if isinstance(value, (int, float, complex, bool, str, numpy.number)):
        # Python/numpy scalar: scalar tensor via the global (or given) backend
        if (_backend is None):
            _backend = global_backend
        if (dims is None):
            dims = ()
        if (dtype is None):
            # note: bool must be checked before int (bool is a subclass of int)
            if isinstance(value, bool):
                dtype = 'bool'
            elif isinstance(value, int):
                dtype = rf.get_default_int_dtype()
            elif isinstance(value, float):
                dtype = rf.get_default_float_dtype()
            elif isinstance(value, str):
                dtype = 'string'
            elif isinstance(value, numpy.number):
                dtype = value.dtype.name
            else:
                raise ValueError(f'number {value} type {type(value)} needs explicit `dtype` specification')
        if keep_scalar_on_cpu:
            # e.g. PyTorch allows `x * 2` with x on GPU and 2 staying on CPU
            device = 'cpu'
    elif isinstance(value, numpy.ndarray):
        # numpy array: dims/dtype can be derived from its shape/dtype
        if (_backend is None):
            _backend = global_backend
        if (dims is None):
            dims = [Dim(d) for d in value.shape]
        if (dtype is None):
            dtype = value.dtype.name
    else:
        # raw backend tensor (e.g. torch.Tensor / tf.Tensor):
        # resolve the backend from the raw type and derive dims/dtype via it
        value_backend = get_backend_by_raw_tensor_type(type(value))
        if (_backend is None):
            _backend = value_backend
        if (dims is None):
            dims = [value_backend.get_new_dim_raw(value, d, name=((name or 'const') + f'_dim{d}')) for d in range(value_backend.get_ndim_raw(value))]
        if (dtype is None):
            dtype = value_backend.get_dtype_name_raw(value)
    return _backend.convert_to_tensor(value=value, dims=dims, dtype=dtype, sparse_dim=sparse_dim, device=device, name=name)
|
def copy(tensor: Tensor) -> Tensor:
    """
    Create a copy of the tensor.

    In eager-based frameworks, this is really a copy.
    In graph-based frameworks, it might just be a copied reference if it would be immutable.
    This is really only relevant when operating on tensors which can conceptually be mutated,
    such as variables (:class:`Parameter`).

    :param tensor:
    :return: copy of tensor
    """
    backend = tensor._raw_backend
    return backend.copy(tensor)
|
def cast(tensor: Tensor, dtype: str) -> Tensor:
    """
    Cast the tensor to another dtype.

    :param tensor:
    :param dtype: target dtype name
    :return: tensor with the same data, but with a different dtype
    """
    backend = tensor._raw_backend
    return backend.cast(tensor, dtype=dtype)
|
def merge_dims(source: Tensor, *, dims: Sequence[Dim], out_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]:
    """
    Merge a list of axes into a single one (flatten the dims).
    E.g. input is (batch, width, height, dim) and dims=(width, height), then we get (batch, width*height, dim).
    Or input is (batch, time, height, dim) and dims=(height, dim), then we get (batch, time, height*dim).

    :func:`rf.split_dims` is the reverse operation.

    :param source:
    :param dims: the axes to merge, in order
    :param out_dim: the merged dim; auto-created if not given
    :return: (tensor, out_dim)
    """
    backend = source._raw_backend
    return backend.merge_dims(source, dims=dims, out_dim=out_dim)
|
def split_dims(source: Tensor, *, axis: Dim, dims: Sequence[Dim], pad_to_multiples: Optional[bool]=None, pad_value: Union[(None, int, float)]=None) -> Tensor:
    """
    Split one axis into multiple axes.
    E.g. if you know that your feature-dim is composed by a window,
    i.e. the input is (batch, time, window * feature),
    you can set axis="F", dims=(window, -1),
    and you will get the output (batch, time, window, feature).

    If the split axis has a dynamic length,
    exactly one of the axes that we split into needs to also have a dynamic length.
    You can e.g. use this to split the input dimension into smaller "chunks" of a fixed window size.
    E.g. you could have input (batch, time, feature) and set axis="T", dims=(-1, window),
    to get output (batch, split_time, window, feature).
    In this case, the exact sequence lengths are lost and everything is padded to multiples
    of the window size using the given padding value.
    Use :class:`ReinterpretDataLayer` to receive back the original sequence lengths after merging.

    Also see :func:`rf.merge_dims` which can undo this operation.

    :param source:
    :param axis: e.g. "F"
    :param dims: what the axis should be split into, e.g. (window, -1)
    :param pad_to_multiples: if True, input is padded to the next multiple of the product of the
        static dims such that splitting is actually possible.
        By default this is done iff the axis has a dynamic size.
    :param pad_value: what pad value to use for pad_to_multiples
    :return: source with axis replaced by dims
    """
    backend = source._raw_backend
    return backend.split_dims(source, axis=axis, dims=dims, pad_to_multiples=pad_to_multiples, pad_value=pad_value)
|
def reshape(source: Tensor, in_dims: Sequence[Dim], out_dims: Sequence[Dim]) -> Tensor:
    """
    Wraps tf.reshape.

    You should use :func:`split_dims` or :func:`merge_dims`
    when you want to split or merge dimensions.
    This here is for doing any other kind of reshape.
    This can be used for clever indexing, slicing, padding tricks.

    :param source: e.g. (..., old_dims, ...)
    :param in_dims: the old dims which should be reshaped into new_dims.
        This should only cover those dims which should be reshaped, not all the dims of the source.
    :param out_dims: the new dims which should be reshaped from old_dims.
        This is excluding any of the other dims in the source.
    :return: e.g. (..., new_dims, ...)
    """
    backend = source._raw_backend
    return backend.reshape(source, in_dims=in_dims, out_dims=out_dims)
|
def split(source: Tensor, *, axis: Dim, out_dims: Sequence[Dim]) -> Tuple[(Tensor, ...)]:
    """
    Split the input on the specified axis (by default feature).
    Basically a wrapper around tf.split.

    :param source: {..., axis}
    :param axis: some static axis
    :param out_dims: list of dims where sum(out_dims) == axis
    :return: tuple of tensors, same amount as out_dims,
        with the same shape as source, but with the specified axis replaced by the out_dims
    """
    backend = source._raw_backend
    return backend.split(source, axis=axis, out_dims=out_dims)
|
def expand_dim(source: Tensor, dim: Dim) -> Tensor:
    """
    Expand the source by the given dimension.

    Note that this is *never* needed for broadcasting.
    All broadcasting should always happen automatically.

    This might be needed for convolution or concatenation.

    :param source:
    :param dim: new dim to add
    :return: source with the additional dim
    """
    backend = source._raw_backend
    return backend.expand_dim(source, dim=dim)
|
def squeeze(source: Tensor, axis: Dim) -> Tensor:
    """
    Remove the given axis, which must be of static size 1, from the source.

    :param source:
    :param axis: axis of extent 1 to remove
    :return: source without the axis
    """
    # only axes with a static dimension of exactly 1 can be squeezed
    assert axis.dimension == 1, f'squeeze {source}: axis {axis} is not of extend 1'
    backend = source._raw_backend
    return backend.squeeze(source, axis=axis)
|
def window(source: Tensor, *, spatial_dim: Dim, window_dim: Dim, window_right: Optional[Union[(Dim, int)]]=None, window_left: Optional[Union[(Dim, int)]]=None, padding: str='same', pad_value: Optional[Union[(int, float)]]=None, stride: int=1) -> Tuple[(Tensor, Dim)]:
    """
    Follows the same idea as RETURNN tf_util.windowed,
    using clever padding and reshaping.

    :param source:
    :param spatial_dim: the axis along which windows are taken
    :param window_dim: window size (must be static)
    :param window_left: left context size; derived from window_dim/window_right if not given
    :param window_right: right context size; derived from window_dim/window_left if not given
    :param padding: "same" or "valid"
    :param pad_value:
    :param stride:
    :return: (out, out_spatial_dim)
    """
    # window size must be static for the reshape trick below
    assert (window_dim.dimension is not None)
    if (padding == 'same'):
        out_spatial_dim = spatial_dim
        if (window_right is not None):
            if isinstance(window_right, int):
                window_right = Dim(window_right, name='window_right')
            assert isinstance(window_right, Dim)
        if (window_left is not None):
            if isinstance(window_left, int):
                window_left = Dim(window_left, name='window_left')
            assert isinstance(window_left, Dim)
        # derive missing side(s) such that window_left + 1 + window_right == window_dim,
        # centering the window when neither side is given
        if (window_right is None):
            if (window_left is None):
                window_right = (window_dim // 2)
                window_left = (window_dim.ceildiv_right(2) - 1)
            else:
                window_right = ((window_dim - window_left) - 1)
        if (window_left is None):
            window_left = ((window_dim - window_right) - 1)
        (source, (in_spatial_dim,)) = rf.pad(source, axes=[spatial_dim], padding=[(window_left, window_right)], value=pad_value)
    elif (padding == 'valid'):
        in_spatial_dim = spatial_dim
        out_spatial_dim = ((spatial_dim - window_dim) + 1)
    else:
        raise ValueError(f'invalid padding {padding!r}')
    if (stride > 1):
        # strided case: gather windows explicitly at the strided start positions
        (start_times, out_spatial_dim) = rf.range_over_dim_strided(out_spatial_dim, stride=stride)
        win_range = rf.range_over_dim(window_dim)
        # indices[t, w] = start_times[t] + w
        indices = rf.combine_bc(start_times, '+', win_range)
        final = rf.gather(source, indices=indices, axis=in_spatial_dim)
        return (final, out_spatial_dim)
    # stride-1 case: tile + flatten + pad + reshape trick (as in RETURNN tf_util.windowed);
    # reshaping the right-padded flat tensor shifts each window row by one position
    tiled_dimshuffle = rf.expand_dim(source, dim=window_dim)
    (tiled_flat, flat_dim) = rf.merge_dims(tiled_dimshuffle, dims=(window_dim, in_spatial_dim))
    rem = window_dim
    (tiled_flat_pad_right, (flat_dim_ext,)) = rf.pad(tiled_flat, axes=[flat_dim], padding=[(0, rem)], value=pad_value)
    out_time_ext = (out_spatial_dim + window_dim)
    tiled_reshape_shift = rf.reshape(tiled_flat_pad_right, in_dims=[flat_dim_ext], out_dims=[window_dim, out_time_ext])
    # cut the shifted rows back to the output length
    (final, _) = rf.slice(tiled_reshape_shift, axis=out_time_ext, size=out_spatial_dim)
    if (stride > 1):
        # NOTE(review): this branch looks unreachable -- the (stride > 1) case
        # already returned above. Consider removing.
        (final, out_spatial_dim) = rf.slice(final, axis=out_spatial_dim, step=stride)
    return (final, out_spatial_dim)
|
def concat(*sources: Tuple[(Tensor, Dim)], allow_broadcast: bool=False, out_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]:
    """
    Concatenate multiple sources in the specified (per-source) dimension.

    :param sources: (tensor, dim) pairs; the dim is the concatenation axis of that tensor
    :param allow_broadcast: allow that the remaining dims of the sources differ
    :param out_dim: resulting concat dim; by default the sum of the given dims
    :return: (concatenated tensor, out_dim)
    """
    assert sources
    if not allow_broadcast:
        # all sources must share the same dims apart from the concat axis
        expected_dims = sources[0][0].dims_set - {sources[0][1]}
        for src, dim in sources:
            assert (src.dims_set - {dim}) == expected_dims, f'concat {sources}, need allow_broadcast=True'
    if not out_dim:
        out_dim = sum(d for (_, d) in sources)
    backend = sources[0][0]._raw_backend
    return backend.concat(*sources, allow_broadcast=allow_broadcast, out_dim=out_dim), out_dim
|
def concat_features(*sources: Tensor, allow_broadcast=False) -> Tensor:
    """
    Concatenate multiple sources along their feature dims,
    so make sure that the feature_dim is correctly set on each of them.

    :param sources: tensors, each with feature_dim set
    :param allow_broadcast: passed through to :func:`concat`
    :return: concatenated tensor, with feature_dim set to the new concat dim
    """
    for src in sources:
        assert src.feature_dim is not None
    pairs = [(src, src.feature_dim) for src in sources]
    res, joined_dim = concat(*pairs, allow_broadcast=allow_broadcast)
    res.feature_dim = joined_dim
    return res
|
def pad(source: Tensor, *, axes: Sequence[Dim], padding: Sequence[Tuple[(Union[(Dim, int)], Union[(Dim, int)])]], out_dims: Optional[Sequence[Dim]]=None, mode: str='constant', value: Optional[Union[(rf.RawTensorTypes, Tensor)]]=None) -> Tuple[(Tensor, Sequence[Dim])]:
    """
    Pad values left/right in the specified axes.

    :param source:
    :param axes: which axes to add padding to
    :param padding: list of (left, right) padding for each axis
    :param out_dims: (optional) predefined out dim tags, otherwise will automatically create
    :param mode: 'constant', 'reflect', 'replicate' or 'circular'
    :param value: (optional) value to pad with in "constant" mode
    :return: (padded tensor, out dims)
    """
    assert len(axes) == len(padding)
    if not out_dims:
        # dynamic (masked) padding amounts are not supported when auto-creating out dims
        for left, right in padding:
            if isinstance(left, Dim):
                assert not left.need_masking(), f'padding {padding} does not support dynamic left padding'
            if isinstance(right, Dim):
                assert not right.need_masking(), f'padding {padding} does not support dynamic right padding'
        out_dims = []
        for middle, (left, right) in zip(axes, padding):
            out_dims.append(left + middle + right)
    backend = source._raw_backend
    return backend.pad(source, axes=axes, padding=padding, out_dims=out_dims, mode=mode, value=value), out_dims
|
def cum_concat_step(source: Tensor, *, prev_accum: Tensor, axis: Dim, out_spatial_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]:
    """
    Concatenates all previous frames over a time-axis.
    See RETURNN :class:`CumConcatLayer` for details.

    :param source: same dims as prev_accum except for the accum axis
    :param prev_accum: previous accumulated tensor, shape {..., axis}
    :param axis: the axis to accumulate over
    :param out_spatial_dim: if given, the spatial dim of the output will be this dim. axis+1.
    :return: (accumulated, out_spatial_dim). accumulated shape {..., out_spatial_dim},
        same shape as prev_accum with axis replaced by out_spatial_dim.
    """
    if not out_spatial_dim:
        # one new frame gets appended per step
        out_spatial_dim = axis + 1
    backend = source._raw_backend
    return backend.cum_concat_step(source, prev_accum=prev_accum, axis=axis, out_spatial_dim=out_spatial_dim), out_spatial_dim
|
def masked_select(tensor: Tensor, *, mask: Tensor, dims: Sequence[Dim], out_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]:
    """
    In TF, this is ``boolean_mask``.
    The inverse of this is :func:`masked_scatter`.

    :param tensor:
    :param mask:
    :param dims: the order of the dims defines the format. those dims should be exactly the dims of the mask.
    :param out_dim:
    :return: tensor where all dims in mask/dims are removed and replaced by a new dim.
        the new dim is also returned.
        if mask==True for all elements, the returned tensor would be simply the flattened input tensor.
    """
    backend = tensor._raw_backend
    return backend.masked_select(tensor, mask=mask, dims=dims, out_dim=out_dim)
|
def masked_scatter(source: Tensor, *, mask: Tensor, dims: Sequence[Dim], in_dim: Dim) -> Tensor:
    """
    The inverse of :func:`masked_select`.

    :param source: [in_dim, F...]
    :param mask: [dims...] -> bool (e.g. [B,T])
    :param dims: the order of the dims defines the format. those dims should be exactly the dims of the mask.
    :param in_dim: the dim of the source which should be scattered into the mask.
    :return: [dims..., F...]
    """
    backend = source._raw_backend
    return backend.masked_scatter(source, mask=mask, dims=dims, in_dim=in_dim)
|
def sequence_mask(dims: Union[(Dim, Sequence[Dim])], *, device: Optional[str]=None) -> Tensor:
    """
    Boolean mask over the given dims, based on the (single) dynamic dim among them.

    :param dims: dims of the mask; exactly one must need masking (be dynamic)
    :param device:
    :return: mask tensor
    """
    dims = [dims] if isinstance(dims, Dim) else dims
    assert len(dims) > 0
    dynamic = [d for d in dims if d.need_masking()]
    # the mask is defined by exactly one dynamic dim
    assert len(dynamic) == 1
    return dynamic[0].get_mask(dim_order=dims, device=device)
|
def pack_padded(source: Tensor, *, dims: Sequence[Dim], enforce_sorted: bool=False, out_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]:
    """
    Like pack_padded_sequence. Usually the sequences are padded when they have different lengths.
    Packing means to only store the non-padded frames.
    This uses :func:`masked_select` internally based on the mask of non-masked frames.

    :param source:
    :param dims: dims in source to pack. the order defines the format. first dim is major, etc.
        if there are no padded frames, e.g. dims=[B,T] would just result in the [B*T,...] reshaped tensor.
    :param enforce_sorted: seqs in the dims are reordered (stable sort) such that longest seqs come first.
        Not supported currently.
    :param out_dim:
    :return: (packed tensor, new packed dim)
    """
    assert not enforce_sorted  # reordering is not implemented
    valid_mask = rf.sequence_mask(dims, device=source.device)
    assert valid_mask.dims_set == set(dims)
    return rf.masked_select(source, mask=valid_mask, dims=dims, out_dim=out_dim)
|
def gather(source: Tensor, *, indices: Union[(Tensor, int)], axis: Optional[Dim]=None, clip_to_valid: bool=False) -> Tensor:
    """
    Gathers slices on a specified axis from the source using indices.
    If the source is of the shape ``[B,D,F1]``, and indices of shape ``[B,F2]``,
    this will yield output of the shape ``[B,F2,F1]`` where

    ``output[b,f2,f1] = source[b,indices[b,f2],f1]``

    (if ``D`` is the axis to gather from).
    In general, all shared axes of the input and the positions will be considered as batch-axes.

    The ``indices`` argument can also be an ``int``.
    In this case, this simply gives ``source[indices]`` on the specified ``axis``.

    :func:`scatter` is the inverse.

    :param source: [batch_dims..., axis, feature_dims...]
    :param indices: [batch_dims..., indices_dims...] indices used to select the slices of the source from.
        If another tensor, must be of type ``int32`` or ``int64``. Can also specify a constant ``int``.
        Batch dims are automatically determined as the common dims of source and indices.
    :param axis: the axis into which we gather the indices into.
        If not given, indices must be a tensor and its sparse_dim will be used.
    :param clip_to_valid: if True, the indices will be clipped to the valid range of the input,
        also taking seq lengths into account.
    :return: [batch_dims..., indices_dims..., feature_dims...] gathered values
    """
    if not axis:
        # fall back to the sparse_dim of the indices tensor
        assert isinstance(indices, Tensor) and indices.sparse_dim
        axis = indices.sparse_dim
    backend = source._raw_backend
    return backend.gather(source, indices=indices, axis=axis, clip_to_valid=clip_to_valid)
|
def scatter(source: Tensor, *, indices: Tensor, indices_dim: Union[(Dim, Sequence[Dim])], out_dim: Optional[Union[(Dim, Sequence[Dim])]]=None) -> Tensor:
    """
    Scatters into a new zero-tensor.
    If entries in indices are duplicated, the corresponding values in source will be added together
    (scatter_add in PyTorch). (TF segment_sum can be implemented via this.)

    :param source: [batch_dims..., indices_dim(s)..., feature_dims...]
    :param indices: [batch_dims..., indices_dim(s)...] -> out_dim
    :param indices_dim:
    :param out_dim: the indices target dim.
        If not given, will be automatically determined as the sparse_dim from indices.
        If multiple out dims, use indices into the merged out dims,
        and then we use :func:`rf.split_dims` afterwards.
    :return: [batch_dims..., out_dim(s)..., feature_dims...]
    """
    if not out_dim:
        # fall back to the sparse_dim of the indices tensor
        assert isinstance(indices, Tensor) and indices.sparse_dim
        out_dim = indices.sparse_dim
    backend = source._raw_backend
    return backend.scatter(source, indices=indices, indices_dim=indices_dim, out_dim=out_dim)
|
def slice(source: Tensor, *, axis: Dim, start: Optional[Union[(int, Tensor)]]=None, end: Optional[Union[(int, Tensor)]]=None, step: Optional[Union[(int, Tensor)]]=None, size: Optional[Union[(int, Tensor, Dim)]]=None, out_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]:
    """
    Slicing on the input, i.e. ``x[start:end:step]`` in some axis.

    If size is given, it takes out a slice-range like ``x[start:start + size]``.

    This function allows a non-scalar start point.

    Note: this function shadows the builtin ``slice`` within this module.

    :param source:
    :param axis: the axis to slice
    :param start: inclusive start index; int (may be negative) or Tensor
    :param end: exclusive end index; int (may be negative) or Tensor
    :param step: stride; only 1 (or None) is allowed together with `size`
    :param size: length of the slice; mutually determined with start/end
    :param out_dim: resulting dim; derived from start/end/step/size if not given
    :return: (tensor, out_dim)
    """
    if (not out_dim):
        # derive the resulting dim from the slicing parameters
        if (size is not None):
            # explicit size: the out dim is just the size; step must be 1 then
            if isinstance(size, Dim):
                out_dim = size
            elif isinstance(size, (int, Tensor)):
                out_dim = Dim(size, name='slice')
            else:
                raise TypeError(f'invalid type {type(size)} for size {size}')
            assert ((step is None) or (isinstance(step, int) and (step == 1)))
        else:
            if (start is None):
                start = 0
            if (isinstance(start, int) and (start >= 0)):
                if (end is None):
                    # x[start:]: drop `start` elements from the left
                    out_dim = axis.sub_left(start)
                elif isinstance(end, int):
                    if (end < 0):
                        # x[start:-k]: drop from both sides
                        out_dim = axis.sub_left(start).sub_right((- end))
                    else:
                        out_dim = Dim((end - start), name='slice')
                elif isinstance(end, Tensor):
                    out_dim = Dim((end - start), name='slice')
                else:
                    raise TypeError(f'invalid type {type(end)} for end {end}')
            elif (isinstance(start, int) and (start < 0)):
                if (end is None):
                    # x[-k:]: the last k elements
                    out_dim = Dim((- start), name='slice')
                elif isinstance(end, int):
                    assert (end < 0)
                    out_dim = Dim(((- start) + end), name='slice')
                else:
                    raise TypeError(f'invalid type {type(end)} for end {end}')
            elif isinstance(start, Tensor):
                # dynamic start: length = (end_or_axis_size - start) mod axis_size.
                # NOTE(review): the modulo presumably normalizes negative lengths --
                # verify behavior for end values beyond the axis size.
                out_dim = Dim((((axis.get_size_tensor() if (end is None) else end) - start) % axis.get_size_tensor()), name='slice')
            else:
                raise TypeError(f'invalid type {type(start)} for start {start}')
        # apply the step: ceil-divide the sliced length
        if ((step is None) or (isinstance(step, int) and (step == 1))):
            pass
        elif isinstance(step, int):
            out_dim = out_dim.ceildiv_right(step)
        elif isinstance(step, Tensor):
            step_dim = Dim(step, name='step')
            out_dim = out_dim.ceildiv_right(step_dim)
        else:
            raise TypeError(f'invalid type {type(step)} for step {step}')
    return (source._raw_backend.slice(source, axis=axis, start=start, end=end, step=step, size=size, out_dim=out_dim), out_dim)
|
def shift_right(source: Tensor, *, axis: Dim, pad_value: Union[(rf.RawTensorTypes, Tensor)], amount: int=1) -> Tensor:
    """
    Shift right by `amount` along `axis`, padding on the left with `pad_value`.

    :param source:
    :param axis: axis to shift along
    :param pad_value: value filled in on the left
    :param amount: how many positions to shift
    :return: shifted tensor, same dims as source
    """
    # pad `amount` frames on the left, then cut the result back to the original length
    padded, (padded_dim,) = rf.pad(source, axes=[axis], padding=[(amount, 0)], mode='constant', value=pad_value)
    shifted, _ = rf.slice(padded, axis=padded_dim, size=axis)
    return shifted
|
def reverse_sequence(tensor: Tensor, *, axis: Dim) -> Tensor:
    """
    Similar as tf.reverse_sequence, or Torch flip (but taking seq lengths into account).

    :param tensor:
    :param axis: axis to reverse along, per sequence
    :return: reversed tensor, same dims
    """
    # per sequence: index i maps to (seq_len - 1 - i)
    rev_indices = (rf.combine_bc(axis.get_size_tensor(), '-', rf.range_over_dim(axis)) - 1)
    return rf.gather(tensor, indices=rev_indices, axis=axis, clip_to_valid=True)
|
def where(cond: Union[(Tensor, rf.RawTensorTypes)], true_: Union[(Tensor, rf.RawTensorTypes)], false_: Union[(Tensor, rf.RawTensorTypes)], *, allow_broadcast_all_sources: bool=False) -> Tensor:
    """
    Wraps tf.where, which is SwitchLayer in RETURNN.

    :param cond: condition, elementwise bool
    :param true_: value where cond is True
    :param false_: value where cond is False
    :param allow_broadcast_all_sources:
    :return: true_ if cond else false_, elemwise.
    """
    cond = rf.convert_to_tensor(cond)
    backend = cond._raw_backend
    return backend.where(cond, true_, false_, allow_broadcast_all_sources=allow_broadcast_all_sources)
|
def sparse_to_dense(labels: Union[(Tensor, rf.RawTensorTypes)], *, label_value: Union[(Tensor, rf.RawTensorTypes)], other_value: Union[(Tensor, rf.RawTensorTypes)], axis: Optional[Dim]=None) -> Tensor:
    """
    Converts a sparse tensor to a dense one.

    This is a more generic variant of "one_hot".

    Note that usually this is not needed as most other functions should handle sparse tensors just fine
    and much more efficiently than they would be with dense tensors.

    :param labels: sparse tensor (or raw value convertible to one)
    :param label_value: value at the positions given by labels
    :param other_value: value everywhere else
    :param axis: the dense axis; by default the sparse_dim of labels
    :return: dense tensor over axis
    """
    labels = rf.convert_to_tensor(labels)
    if not axis:
        assert labels.sparse_dim, 'sparse_to_dense: either provide `axis` or `labels` with sparse_dim'
        axis = labels.sparse_dim
    # compare each label against all positions of the dense axis
    positions = rf.range_over_dim(axis)
    return where(rf.compare_bc(labels, '==', positions), label_value, other_value)
|
def one_hot(source: Tensor) -> Tensor:
    """
    One-hot encoding; special case of :func:`sparse_to_dense` with values 1.0 and 0.0.

    Note that usually this is not needed as most other functions should handle sparse tensors just fine
    and much more efficiently than they would be with dense tensors.

    :param source: sparse tensor
    :return: dense one-hot tensor
    """
    return sparse_to_dense(source, label_value=1.0, other_value=0.0)
|
def dot_attention(query: Tensor, keys: Tensor, values: Tensor, *, key_dim: Dim, axis: Dim, att_dropout: float=0.0, att_dropout_broadcast: Optional[bool]=None) -> Tensor:
    """
    Calculates attention over the given axis, for given key dim.
    Any other unrelated axes do not matter here.
    This can be used for multi-head or single head.
    The query can have other dimensions or not.

    :param query: {..., key_dim}. For self-attention, do not use the `axis` as in `keys` and `values`,
        but rather replace it by another new dim via :func:`replace_dim`.
    :param keys: {..., axis, key_dim}
    :param values: {..., axis}
    :param key_dim: dim in keys and query, to be reduced to calculate the attention energies.
    :param axis: in keys and values, to apply attention on. softmax will be over this axis,
        and then it will be reduced.
    :param att_dropout: dropout for attention weights
    :param att_dropout_broadcast: whether to broadcast over all but ``axis``.
        normally not wanted. disabled by default since behavior version 19.
    :return: like values but with axis removed, and maybe any additional axes from query
    """
    # scaled dot-product: scale by 1/sqrt(key_dim)
    query = query * (key_dim.dimension ** (-0.5))
    energy = rf.matmul(query, keys, reduce=key_dim)
    weights = rf.softmax(energy, axis=axis)
    if att_dropout_broadcast is None:
        att_dropout_broadcast = _att_dropout_broadcast_default()
    weights = rf.dropout(weights, att_dropout, axis=(att_dropout_broadcast and axis))
    att = rf.matmul(weights, values, reduce=axis, use_mask=False)
    if values.feature_dim in att.dims:
        att.feature_dim = values.feature_dim
    return att
|
class SelfAttentionBase(rf.Module):
'\n Shared base class for (non-causal) self attention (:class:`SelfAttention`)\n and causal self attention (:class:`CausalSelfAttention`).\n\n It uses :func:`dot_attention` for multi-headed dot-attention.\n '
def __init__(self, in_dim: Dim, proj_dim: Optional[Dim], *, key_dim_total: Dim, value_dim_total: Dim, num_heads: Union[(int, Dim)], with_bias: bool=True, att_dropout: float=0.1, att_dropout_broadcast: Optional[bool]=None):
'\n :param in_dim: input dim\n :param proj_dim: if given, will add a final linear projection to this dim.\n otherwise no projection after the attention\n :param key_dim_total: total key dim. should be a multiple of num_heads\n :param value_dim_total: total value dim. should be a multiple of num_heads\n :param num_heads: number of heads\n :param with_bias: whether to add bias to qkv and proj linear projections.\n Was False in original Transformer, but many recent implementations use True by default.\n Also see: https://github.com/rwth-i6/returnn_common/issues/234.\n :param att_dropout: dropout for attention weights\n :param att_dropout_broadcast: whether to broadcast over all but ``axis``.\n normally not wanted. disabled by default since behavior version 19.\n '
super().__init__()
self.in_dim = in_dim
self.out_dim = (proj_dim if proj_dim else value_dim_total)
if isinstance(num_heads, int):
num_heads = Dim(num_heads, name='num_heads')
self.key_dim_total = key_dim_total
self.key_dim_per_head = key_dim_total.div_left(num_heads)
self.value_dim_total = value_dim_total
self.value_dim_per_head = value_dim_total.div_left(num_heads)
self.num_heads = num_heads
self.qkv_dim_total = ((2 * key_dim_total) + value_dim_total)
self.qkv_dim_per_head = ((2 * self.key_dim_per_head) + self.value_dim_per_head)
self.qkv = rf.Linear(in_dim, self.qkv_dim_total, with_bias=with_bias)
if proj_dim:
self.proj = rf.Linear(value_dim_total, proj_dim, with_bias=with_bias)
else:
self.proj = None
self.att_dropout = att_dropout
if (att_dropout_broadcast is None):
att_dropout_broadcast = _att_dropout_broadcast_default()
self.att_dropout_broadcast = att_dropout_broadcast
def forward_qkv(self, source: Tensor) -> Tuple[(Tensor, Tensor, Tensor)]:
'\n :return: q,k,v\n '
qkv = self.qkv(source)
qkv = rf.split_dims(qkv, axis=self.qkv_dim_total, dims=(self.num_heads, self.qkv_dim_per_head))
(q, k, v) = rf.split(qkv, axis=self.qkv_dim_per_head, out_dims=(self.key_dim_per_head, self.key_dim_per_head, self.value_dim_per_head))
return (q, k, v)
def attention(self, q: Tensor, k: Tensor, v: Tensor, *, kv_axis: Dim) -> Tensor:
    """
    Apply multi-headed dot-attention on the already-projected q/k/v,
    merge the heads back together, and apply the optional output projection.

    :param q: queries, with per-head dims
    :param k: keys, with ``kv_axis``
    :param v: values, with ``kv_axis``
    :param kv_axis: axis of k/v to attend over
    :return: attention output, feature dim ``self.out_dim``
    """
    per_head_att = dot_attention(
        q, k, v,
        key_dim=self.key_dim_per_head, axis=kv_axis,
        att_dropout=self.att_dropout, att_dropout_broadcast=self.att_dropout_broadcast)
    merged, _ = rf.merge_dims(per_head_att, dims=(self.num_heads, self.value_dim_per_head), out_dim=self.value_dim_total)
    return self.proj(merged) if self.proj else merged
|
class SelfAttention(SelfAttentionBase):
    """
    Classic self attention on sequence level
    """

    def __call__(self, source: Tensor, *, axis: Dim) -> Tensor:
        """forward"""
        q, k, v = self.forward_qkv(source)
        # Keys/values get their own copy of the axis, so that the query axis
        # and the key/value axis stay distinguishable in the attention.
        new_kv_axis = Dim(None, name=f'{axis.name}-kv')
        k = rf.replace_dim(k, in_dim=axis, out_dim=new_kv_axis)[0]
        v = rf.replace_dim(v, in_dim=axis, out_dim=new_kv_axis)[0]
        return self.attention(q, k, v, kv_axis=new_kv_axis)
|
class CausalSelfAttention(SelfAttentionBase):
    """
    Classic causal self attention
    """

    def __call__(self, source: Tensor, axis: Dim, *, state: Optional[CausalSelfAttentionState] = None) -> Tuple[Tensor, CausalSelfAttentionState]:
        """forward"""
        q, k, v = self.forward_qkv(source)
        k, v, hist_dim, next_state = _causal_self_att_step(k, v, axis=axis, state=state, self=self)
        att_out = self.attention(q, k, v, kv_axis=hist_dim)
        return att_out, next_state

    def default_initial_state(self, *, batch_dims: Sequence[Dim]) -> CausalSelfAttentionState:
        """
        For causal attention.
        """
        # Start with an empty (size-0) history axis; it grows by one per decode step.
        accum_dim = Dim(0, name='self_att_expand_dim_init')
        k0 = rf.zeros(list(batch_dims) + [accum_dim, self.num_heads, self.key_dim_per_head])
        v0 = rf.zeros(list(batch_dims) + [accum_dim, self.num_heads, self.value_dim_per_head])
        return CausalSelfAttentionState(k_accum=k0, v_accum=v0, accum_axis=accum_dim)
|
def _causal_self_att_step(k: Tensor, v: Tensor, *, axis: Dim, state: Optional[CausalSelfAttentionState], self: rf.Module) -> Tuple[(Tensor, Tensor, Dim, CausalSelfAttentionState)]:
    """
    Shared step logic for causal self-attention:
    extend keys/values by the accumulated history (single-step case),
    or set up the per-position history dim (whole-sequence case).

    :param k: new keys for this step/sequence, with ``axis``
    :param v: new values for this step/sequence, with ``axis``
    :param axis: spatial axis of k/v, or ``single_step_dim`` for stepwise operation
    :param state: previous accumulated state; required if ``axis == single_step_dim``
    :param self: calling module, only used for error messages
    :return: (k, v, hist_dim, new_state), where hist_dim covers all attendable positions
    """
    if (axis == single_step_dim):
        assert state, f'{self}: need state for single step'
        # Append the current step's k/v to the accumulated history.
        (k, hist_dim) = rf.cum_concat_step(k, prev_accum=state.k_accum, axis=state.accum_axis)
        (v, _) = rf.cum_concat_step(v, prev_accum=state.v_accum, out_spatial_dim=hist_dim, axis=state.accum_axis)
    else:
        # Operating on a whole sequence: only an empty initial state is supported.
        if (state and (state.accum_axis.dimension != 0)):
            raise NotImplementedError(f'{self}: on sequence over {axis} with initial state {state} not implemented yet')
        # hist_dim is dynamic per query position: position i can attend to i+1 frames
        # (itself and everything before), which realizes the causal masking.
        hist_dim = Dim((rf.range_over_dim(axis, device='cpu') + 1), name=f'{axis.description}:kv')
        (k, _) = rf.replace_dim(k, in_dim=axis, out_dim=hist_dim)
        (v, _) = rf.replace_dim(v, in_dim=axis, out_dim=hist_dim)
    # The full (extended) k/v become the new accumulated state.
    new_state = CausalSelfAttentionState()
    new_state.k_accum = k
    new_state.v_accum = v
    new_state.accum_axis = hist_dim
    return (k, v, hist_dim, new_state)
|
class CausalSelfAttentionState(rf.State):
    """
    State for :class:`StepwiseCausalSelfAttention`.
    """

    def __init__(self, *_args, k_accum: Tensor = None, v_accum: Tensor = None, accum_axis: Dim = None):
        """
        :param k_accum: accumulated keys
        :param v_accum: accumulated values
        :param accum_axis:
        """
        super().__init__(*_args)
        if _args:
            # Constructed from positional args (rf.State copy/wrap path):
            # do not overwrite whatever the base class set up.
            return
        self.k_accum = k_accum
        self.v_accum = v_accum
        self.accum_axis = accum_axis
|
class RelPosSelfAttention(SelfAttentionBase):
    """
    Self-attention with relative positional encoding.
    This covers both Shawn et al. self-att rel pos 2018 (https://arxiv.org/abs/1803.02155),
    and Dai et al. Transformer-XL style 2019 (https://arxiv.org/abs/1901.02860).

    It uses :func:`relative_positional_encoding` or :class:`LearnedRelativePositionalEncoding`.

    To get Shawn et al. self-att rel pos 2018 / RETURNN SelfAttentionLayer + RelativePositionalEncodingLayer:
    - with_bias = False (at least that was the RETURNN behavior)
    - with_linear_pos = False
    - with_pos_bias = False
    - learnable_pos_emb = True
    - separate_pos_emb_per_head = False (at least that was the RETURNN default)

    To get Dai et al. Transformer-XL style 2019:
    - with_bias = False would be like the paper, however, in most implementations it is True (default)
    - with_linear_pos = True (default)
    - with_pos_bias = True (default)
    - learnable_pos_emb = True (default)
    - separate_pos_emb_per_head = True (default)

    Further details:
    https://github.com/rwth-i6/returnn_common/wiki/Relative-positional-encoding

    Code references, partly adapted from there:
    https://github.com/espnet/espnet/blob/4138010fb66ad27a43e8bee48a4932829a0847ae/espnet/nets/pytorch_backend/transformer/embedding.py#L260
    https://github.com/kimiyoung/transformer-xl/blob/44781ed21dbaec88b280f74d9ae2877f52b492a5/tf/model.py#L4
    """

    def __init__(self, in_dim: Dim, proj_dim: Optional[Dim], *, key_dim_total: Dim, value_dim_total: Dim, num_heads: Union[(int, Dim)], with_bias: bool=True, with_linear_pos: bool=True, with_pos_bias: bool=True, learnable_pos_emb: bool=False, learnable_pos_emb_clipping: int=16, separate_pos_emb_per_head: bool=True, pos_emb_dropout: float=0.0, att_dropout: float=0.1):
        """
        :param in_dim: input dim
        :param proj_dim: if given, adds a final linear projection to this dim
        :param key_dim_total: total key dim. should be a multiple of num_heads
        :param value_dim_total: total value dim. should be a multiple of num_heads
        :param num_heads: number of heads
        :param with_bias: whether to add bias to qkv and proj linear projections
        :param with_linear_pos: whether to apply a linear projection to the pos encoding
        :param with_pos_bias: whether to use the Transformer-XL biases (pos_bias_u, pos_bias_v)
        :param learnable_pos_emb: use :class:`LearnedRelativePositionalEncoding` instead of the sinusoidal one
        :param learnable_pos_emb_clipping: clipping distance for the learned rel pos embedding
        :param separate_pos_emb_per_head: whether the pos emb gets a separate per-head dim
        :param pos_emb_dropout: dropout applied to the positional encoding
        :param att_dropout: dropout for attention weights
        """
        super().__init__(in_dim=in_dim, proj_dim=proj_dim, key_dim_total=key_dim_total, value_dim_total=value_dim_total, num_heads=num_heads, with_bias=with_bias, att_dropout=att_dropout)
        self.separate_pos_emb_per_head = separate_pos_emb_per_head
        # Feature dim the (learned or sinusoidal) rel pos encoding must produce,
        # i.e. before the optional linear_pos projection.
        if with_linear_pos:
            self.pos_emb_feat_dim = self.in_dim
        elif separate_pos_emb_per_head:
            self.pos_emb_feat_dim = self.key_dim_total
        else:
            self.pos_emb_feat_dim = self.key_dim_per_head
        self.linear_pos = None
        if with_linear_pos:
            self.linear_pos = rf.Linear(self.in_dim, (self.key_dim_total if separate_pos_emb_per_head else self.key_dim_per_head), with_bias=False)
        self.learned_pos_emb = None
        if learnable_pos_emb:
            self.learned_pos_emb = LearnedRelativePositionalEncoding(self.pos_emb_feat_dim, clipping=learnable_pos_emb_clipping)
        self.pos_bias_u = None
        self.pos_bias_v = None
        if with_pos_bias:
            # Transformer-XL global biases, added to the query for the content
            # resp. position score terms below.
            self.pos_bias_u = rf.Parameter((self.num_heads, self.key_dim_per_head))
            self.pos_bias_v = rf.Parameter((self.num_heads, self.key_dim_per_head))
            self.pos_bias_u.initial = rf.init.Glorot()
            self.pos_bias_v.initial = rf.init.Glorot()
        self.pos_emb_dropout = pos_emb_dropout

    def __call__(self, source: Tensor, *, axis: Dim, **_kwargs) -> Tensor:
        """forward"""
        # Rel pos encoding over distances key_pos - query_pos, axis of size 2*T-1.
        if (self.learned_pos_emb is not None):
            (pos_emb, pos_emb_spatial_dim) = self.learned_pos_emb(query_spatial_dim=axis, key_value_spatial_dim=axis)
        else:
            (pos_emb, pos_emb_spatial_dim) = relative_positional_encoding(query_spatial_dim=axis, key_value_spatial_dim=axis, feat_dim=self.pos_emb_feat_dim)
        if self.pos_emb_dropout:
            pos_emb = rf.dropout(pos_emb, self.pos_emb_dropout)
        if (self.linear_pos is not None):
            pos_emb = self.linear_pos(pos_emb)
        if self.separate_pos_emb_per_head:
            pos_emb = rf.split_dims(pos_emb, axis=self.key_dim_total, dims=(self.num_heads, self.key_dim_per_head))
        (q, k, v) = self.forward_qkv(source)
        # Keys/values get their own axis so query axis and kv axis stay distinguishable.
        hist_dim = Dim(None, name=f'{axis.description}:kv')
        (k, _) = rf.replace_dim(k, in_dim=axis, out_dim=hist_dim)
        (v, _) = rf.replace_dim(v, in_dim=axis, out_dim=hist_dim)
        q_with_bias_u = ((q + self.pos_bias_u) if (self.pos_bias_u is not None) else q)
        q_with_bias_v = ((q + self.pos_bias_v) if (self.pos_bias_v is not None) else q)
        # Content-based term (a)+(c) and position-based term (b)+(d) in Transformer-XL notation.
        matrix_ac = rf.matmul(q_with_bias_u, k, reduce=self.key_dim_per_head)
        matrix_bd = rf.matmul(q_with_bias_v, pos_emb, reduce=self.key_dim_per_head)
        # Re-index the rel-pos axis (2*T-1) to absolute key positions (T').
        matrix_bd = _rel_pos_enc_shift(matrix_bd, axis, pos_emb_spatial_dim, hist_dim)
        scores = (matrix_ac + matrix_bd)
        scores *= (self.key_dim_per_head.dimension ** (- 0.5))
        att_weights = rf.softmax(scores, axis=hist_dim)
        # axis=False disables dropout broadcasting; otherwise broadcast over all but hist_dim.
        att_weights = rf.dropout(att_weights, self.att_dropout, axis=(self.att_dropout_broadcast and hist_dim))
        # use_mask=False: NOTE(review) presumably masking was already applied by the
        # softmax over hist_dim, so re-masking here would be redundant -- confirm.
        att = rf.matmul(att_weights, v, reduce=hist_dim, use_mask=False)
        (output, _) = rf.merge_dims(att, dims=(self.num_heads, self.value_dim_per_head), out_dim=self.value_dim_total)
        if self.proj:
            output = self.proj(output)
        return output

    @staticmethod
    def _rel_shift(x: Tensor, axis: Dim, pos_emb_spatial_dim: Dim, hist_dim: Dim) -> Tensor:
        # Kept for backwards compatibility; delegates to the module-level helper.
        return _rel_pos_enc_shift(x, axis, pos_emb_spatial_dim, hist_dim)
|
def _rel_pos_enc_shift(x: Tensor, axis: Dim, pos_emb_spatial_dim: Dim, hist_dim: Dim) -> Tensor:
    """
    Transformer-XL style "relative shift":
    converts scores indexed by relative distance into scores indexed by absolute
    key position, implemented via the standard pad + reshape + slice trick
    (no gather needed).

    :param x: [B,H,T,T*2-1]
    :param axis: T
    :param pos_emb_spatial_dim: T*2-1
    :param hist_dim: T' (equal to T but separate dim)
    :return: [B,H,T,T']
    """
    batch_dims = x.remaining_dims((axis, pos_emb_spatial_dim))
    # Pad one zero column on the left of the rel-pos axis: [B,H,T,T*2].
    (x_padded, (pos_emb_spatial_dim_,)) = rf.pad(x, axes=[pos_emb_spatial_dim], padding=[(1, 0)], value=0.0)
    # Swapping the two axes and dropping the first element effectively shifts
    # each row by its row index.
    x_padded = rf.reshape(x_padded, (axis, pos_emb_spatial_dim_), (pos_emb_spatial_dim_, axis))
    (x_padded, pos_emb_spatial_dim_) = rf.slice(x_padded, axis=pos_emb_spatial_dim_, start=1)
    x_padded = rf.reshape(x_padded, (pos_emb_spatial_dim_, axis), (axis, pos_emb_spatial_dim_))
    # Keep only the first T' entries per row -> [B,H,T,T'].
    (x_padded, _) = rf.slice(x_padded, axis=pos_emb_spatial_dim_, size=hist_dim)
    x_padded.verify_out_shape((set(batch_dims) | {axis, hist_dim}))
    return x_padded
|
class RelPosCausalSelfAttention(CausalSelfAttention):
    """
    Self-attention with relative positional encoding.
    This covers both Shawn et al. self-att rel pos 2018 (https://arxiv.org/abs/1803.02155),
    and Dai et al. Transformer-XL style 2019 (https://arxiv.org/abs/1901.02860).

    It uses :func:`relative_positional_encoding` or :class:`LearnedRelativePositionalEncoding`.

    Same defaults as :class:`RelPosSelfAttention`, which is mostly Transformer-XL style.

    Further details:
    https://github.com/rwth-i6/returnn_common/wiki/Relative-positional-encoding

    Code references, partly adapted from there:
    https://github.com/espnet/espnet/blob/4138010fb66ad27a43e8bee48a4932829a0847ae/espnet/nets/pytorch_backend/transformer/embedding.py#L260
    https://github.com/kimiyoung/transformer-xl/blob/44781ed21dbaec88b280f74d9ae2877f52b492a5/tf/model.py#L4
    """

    def __init__(self, in_dim: Dim, proj_dim: Optional[Dim], *, key_dim_total: Dim, value_dim_total: Dim, num_heads: Union[(int, Dim)], with_bias: bool=True, with_linear_pos: bool=True, with_pos_bias: bool=True, learnable_pos_emb: bool=False, learnable_pos_emb_clipping: int=16, separate_pos_emb_per_head: bool=True, pos_emb_dropout: float=0.0, att_dropout: float=0.1):
        """
        :param in_dim: input dim
        :param proj_dim: if given, adds a final linear projection to this dim
        :param key_dim_total: total key dim. should be a multiple of num_heads
        :param value_dim_total: total value dim. should be a multiple of num_heads
        :param num_heads: number of heads
        :param with_bias: whether to add bias to qkv and proj linear projections
        :param with_linear_pos: whether to apply a linear projection to the pos encoding
        :param with_pos_bias: whether to use the Transformer-XL biases (pos_bias_u, pos_bias_v)
        :param learnable_pos_emb: use :class:`LearnedRelativePositionalEncoding` instead of the sinusoidal one
        :param learnable_pos_emb_clipping: clipping distance for the learned rel pos embedding
        :param separate_pos_emb_per_head: whether the pos emb gets a separate per-head dim
        :param pos_emb_dropout: dropout applied to the positional encoding
        :param att_dropout: dropout for attention weights
        """
        super().__init__(in_dim=in_dim, proj_dim=proj_dim, key_dim_total=key_dim_total, value_dim_total=value_dim_total, num_heads=num_heads, with_bias=with_bias, att_dropout=att_dropout)
        self.separate_pos_emb_per_head = separate_pos_emb_per_head
        # Feature dim the rel pos encoding must produce (before the optional linear_pos projection).
        if with_linear_pos:
            self.pos_emb_feat_dim = self.in_dim
        elif separate_pos_emb_per_head:
            self.pos_emb_feat_dim = self.key_dim_total
        else:
            self.pos_emb_feat_dim = self.key_dim_per_head
        self.linear_pos = None
        if with_linear_pos:
            self.linear_pos = rf.Linear(self.in_dim, (self.key_dim_total if separate_pos_emb_per_head else self.key_dim_per_head), with_bias=False)
        self.learned_pos_emb = None
        if learnable_pos_emb:
            self.learned_pos_emb = LearnedRelativePositionalEncoding(self.pos_emb_feat_dim, clipping=learnable_pos_emb_clipping)
        self.pos_bias_u = None
        self.pos_bias_v = None
        if with_pos_bias:
            # Transformer-XL global biases, added to the query for the content resp. position terms.
            self.pos_bias_u = rf.Parameter((self.num_heads, self.key_dim_per_head))
            self.pos_bias_v = rf.Parameter((self.num_heads, self.key_dim_per_head))
            self.pos_bias_u.initial = rf.init.Glorot()
            self.pos_bias_v.initial = rf.init.Glorot()
        self.pos_emb_dropout = pos_emb_dropout

    def __call__(self, source: Tensor, *, axis: Dim, state: Optional[CausalSelfAttentionState]=None) -> Tuple[(Tensor, CausalSelfAttentionState)]:
        """forward"""
        (q, k, v) = self.forward_qkv(source)
        (k, v, hist_dim, new_state) = _causal_self_att_step(k, v, axis=axis, state=state, self=self)
        if (self.learned_pos_emb is not None):
            (pos_emb, pos_emb_spatial_dim) = self.learned_pos_emb(query_spatial_dim=axis, key_value_spatial_dim=hist_dim)
        else:
            (pos_emb, pos_emb_spatial_dim) = relative_positional_encoding(query_spatial_dim=axis, key_value_spatial_dim=hist_dim, feat_dim=self.pos_emb_feat_dim)
        if self.pos_emb_dropout:
            pos_emb = rf.dropout(pos_emb, self.pos_emb_dropout)
        if (self.linear_pos is not None):
            pos_emb = self.linear_pos(pos_emb)
        if self.separate_pos_emb_per_head:
            pos_emb = rf.split_dims(pos_emb, axis=self.key_dim_total, dims=(self.num_heads, self.key_dim_per_head))
        q_with_bias_u = ((q + self.pos_bias_u) if (self.pos_bias_u is not None) else q)
        q_with_bias_v = ((q + self.pos_bias_v) if (self.pos_bias_v is not None) else q)
        # Content-based term (a)+(c) and position-based term (b)+(d) in Transformer-XL notation.
        matrix_ac = rf.matmul(q_with_bias_u, k, reduce=self.key_dim_per_head)
        matrix_bd = rf.matmul(q_with_bias_v, pos_emb, reduce=self.key_dim_per_head)
        matrix_bd = _rel_pos_enc_shift(matrix_bd, axis, pos_emb_spatial_dim, hist_dim)
        scores = (matrix_ac + matrix_bd)
        scores *= (self.key_dim_per_head.dimension ** (- 0.5))
        att_weights = rf.softmax(scores, axis=hist_dim)
        att_weights = rf.dropout(att_weights, self.att_dropout, axis=(self.att_dropout_broadcast and hist_dim))
        att = rf.matmul(att_weights, v, reduce=hist_dim, use_mask=False)
        (output, _) = rf.merge_dims(att, dims=(self.num_heads, self.value_dim_per_head), out_dim=self.value_dim_total)
        if self.proj:
            output = self.proj(output)
        # Bug fix: previously only `output` was returned, although the declared return type
        # and the :class:`CausalSelfAttention` interface require an (output, state) tuple;
        # without the state, stepwise decoding cannot continue.
        return (output, new_state)
|
class CrossAttention(rf.Module):
    """
    Cross attention

    It uses :func:`dot_attention` for multi-headed dot-attention.
    """

    def __init__(self, encoder_dim: Dim, query_in_dim: Dim, proj_dim: Optional[Dim], *, key_dim_total: Dim, value_dim_total: Dim, num_heads: Union[(int, Dim)], with_bias: bool=True, att_dropout: float=0.1, att_dropout_broadcast: Optional[bool]=None):
        """
        :param encoder_dim: encoder output dim = input dim for key-value
        :param query_in_dim: input dim for query
        :param proj_dim: if given, will add a final linear projection to this dim.
            otherwise no projection after the attention
        :param key_dim_total: total key dim. should be a multiple of num_heads
        :param value_dim_total: total value dim. should be a multiple of num_heads
        :param num_heads: number of heads
        :param with_bias: whether to add bias to qkv and proj linear projections.
            Was False in original Transformer, but many recent implementations use True by default.
            Also see: https://github.com/rwth-i6/returnn_common/issues/234.
        :param att_dropout: dropout for attention weights
        :param att_dropout_broadcast: whether to broadcast over all but ``axis``.
            normally not wanted. disabled by default since behavior version 19.
        """
        super().__init__()
        self.encoder_dim = encoder_dim
        self.query_in_dim = query_in_dim
        self.out_dim = (proj_dim if proj_dim else value_dim_total)
        if isinstance(num_heads, int):
            num_heads = Dim(num_heads, name='num_heads')
        self.key_dim_total = key_dim_total
        self.key_dim_per_head = key_dim_total.div_left(num_heads)
        self.value_dim_total = value_dim_total
        self.value_dim_per_head = value_dim_total.div_left(num_heads)
        self.num_heads = num_heads
        # k and v are computed by one combined projection from the encoder output.
        self.kv_dim_total = (key_dim_total + value_dim_total)
        self.kv_dim_per_head = (self.key_dim_per_head + self.value_dim_per_head)
        self.kv = rf.Linear(encoder_dim, self.kv_dim_total, with_bias=with_bias)
        self.q = rf.Linear(query_in_dim, self.key_dim_total, with_bias=with_bias)
        # NOTE(review): nonstandard Glorot scales, presumably to match the init of a
        # combined qkv projection as in the self-attention modules -- confirm.
        self.kv.weight.initial = rf.init.Glorot(scale=(3 / 4))
        self.q.weight.initial = rf.init.Glorot(scale=(1 / 2))
        if proj_dim:
            self.proj = rf.Linear(value_dim_total, proj_dim, with_bias=with_bias)
        else:
            self.proj = None
        self.att_dropout = att_dropout
        if (att_dropout_broadcast is None):
            att_dropout_broadcast = _att_dropout_broadcast_default()
        self.att_dropout_broadcast = att_dropout_broadcast

    def transform_encoder(self, encoder: Tensor, *, axis: Dim) -> rf.State:
        """
        Transformer encoder output. This is intended as an initial API suggestion.

        The returned state keys (k, v, kv_axis) match the keyword args of
        :func:`attention`, so ``__call__`` can simply unpack it.
        """
        (k, v) = self.forward_kv(encoder)
        return rf.State(k=k, v=v, kv_axis=axis)

    def forward_kv(self, source: Tensor) -> Tuple[(Tensor, Tensor)]:
        """
        This would be calculated once for the whole sequence (batch)
        and then always reused for :func:`attention`.

        :return: k,v
        """
        # Note: despite the local name, this holds only keys and values (no query).
        qkv = self.kv(source)
        qkv = rf.split_dims(qkv, axis=self.kv_dim_total, dims=(self.num_heads, self.kv_dim_per_head))
        (k, v) = rf.split(qkv, axis=self.kv_dim_per_head, out_dims=(self.key_dim_per_head, self.value_dim_per_head))
        return (k, v)

    def forward_query(self, source: Tensor) -> Tensor:
        """
        This is calculated for every different query.

        :return: q, with separate head and per-head dims
        """
        q = self.q(source)
        q = rf.split_dims(q, axis=self.key_dim_total, dims=(self.num_heads, self.key_dim_per_head))
        return q

    def __call__(self, q: Tensor, encoder: rf.State) -> Tensor:
        """
        :param q: raw query input (feature dim ``query_in_dim``)
        :param encoder: as returned by :func:`transform_encoder`
        """
        q = self.forward_query(q)
        return self.attention(q=q, **encoder)

    def attention(self, q: Tensor, k: Tensor, v: Tensor, *, kv_axis: Dim) -> Tensor:
        """apply attention"""
        att = dot_attention(q, k, v, key_dim=self.key_dim_per_head, axis=kv_axis, att_dropout=self.att_dropout, att_dropout_broadcast=self.att_dropout_broadcast)
        (output, _) = rf.merge_dims(att, dims=(self.num_heads, self.value_dim_per_head), out_dim=self.value_dim_total)
        if self.proj:
            output = self.proj(output)
        return output
|
class LearnedRelativePositionalEncoding(rf.Module):
    """
    Learnable relative positional encoding.

    E.g. as used in Shawn et al, 2018 (https://arxiv.org/abs/1803.02155).

    https://github.com/rwth-i6/returnn_common/wiki/Relative-positional-encoding
    """

    def __init__(self, feat_dim: Dim, *, clipping: int=16, dtype: Optional[str]=None, causal: bool=False):
        """
        :param feat_dim: feature dim, for the emb matrix and output
        :param clipping: max distance to consider. emb matrix shape is [2 * clipping + 1, feat_dim] if not causal,
            else [clipping + 1, feat].
            The first and last frame will be the clipping frames.
        :param dtype: for the emb matrix and output
        :param causal: if True, only non-positive distances (key at or before query) get
            distinct embeddings; positive distances are clipped to 0
        """
        super(LearnedRelativePositionalEncoding, self).__init__()
        self.feat_dim = feat_dim
        self.clipping = clipping
        self.clipped_spatial_dim = Dim((((1 if causal else 2) * clipping) + 1), name='learned-rel-pos')
        self.causal = causal
        self.pos_emb = rf.Parameter((self.clipped_spatial_dim, self.feat_dim), dtype=dtype)

    def __call__(self, *, query_spatial_dim: Dim, key_value_spatial_dim: Dim, query_offset: Optional[Union[(int, Tensor)]]=None) -> Tuple[(Tensor, Dim)]:
        """
        same interface as :func:`relative_positional_encoding`

        :return: tensor of shape [spatial_dim * 2 - 1, feat_dim], and the out spatial dim (spatial_dim * 2 - 1).
            In the center is the rel pos i-j=0. All to the right are for i-j>0, all to the left for i-j<0.
        """
        (indices, out_spatial_dim) = _make_indices(query_spatial_dim, key_value_spatial_dim, query_offset)
        # Clip distances to the learned range, then shift into [0, num_embeddings).
        indices = rf.clip_by_value(indices, (- self.clipping), (0 if self.causal else self.clipping))
        indices = (indices + self.clipping)
        encoding = rf.gather(self.pos_emb, indices=indices, axis=self.clipped_spatial_dim)
        return (encoding, out_spatial_dim)

    def full_matrix(self, *, query_spatial_dim: Dim, key_value_spatial_dim: Dim, query_offset: Optional[Union[(int, Tensor)]]=None) -> Tensor:
        """
        :return: as full matrix [query_spatial_dim,key_value_spatial_dim,feat_dim].
            however, note that __call__ is usually to be preferred, as this gives a more efficient format.
        """
        kv_pos_vec = rf.range_over_dim(key_value_spatial_dim)
        if (query_spatial_dim == single_step_dim):
            assert (query_offset is None)
            # The single query is at the last key/value position.
            query_offset = (key_value_spatial_dim.get_size_tensor() - 1)
            indices = (kv_pos_vec - query_offset)
        else:
            if query_offset is None:
                # Bug fix: previously a missing offset crashed here (None + tensor).
                query_offset = 0
            q_pos_vec = rf.range_over_dim(query_spatial_dim)
            # indices[i,j] = j - (i + offset), i.e. relative distance key - query.
            indices = rf.combine_bc(kv_pos_vec, '-', (q_pos_vec + query_offset))
        indices = rf.clip_by_value(indices, (- self.clipping), (0 if self.causal else self.clipping))
        indices = (indices + self.clipping)
        encoding = rf.gather(self.pos_emb, indices=indices, axis=self.clipped_spatial_dim)
        return encoding
|
def _make_indices(query_spatial_dim: Dim, key_value_spatial_dim: Dim, query_offset: Optional[Union[(int, Tensor)]]=None) -> Tuple[(Tensor, Dim)]:
    """
    Build the relative-distance index vector used by the rel pos encodings.

    :param query_spatial_dim: T (query axis), or ``single_step_dim``
    :param key_value_spatial_dim: T' (key/value axis)
    :param query_offset: added to the query positions (i.e. subtracted from the distances)
    :return: (indices, out_spatial_dim), indices = key_pos - query_pos.
        Single-step case: shape [T'], values kv_pos - (T'-1), all <= 0.
        Otherwise: shape [T+T'-1], running from -(T-1) up to T'-1, with 0 (i-j=0) in between.
    """
    kv_pos_vec = rf.range_over_dim(key_value_spatial_dim)
    if (query_spatial_dim == single_step_dim):
        indices = kv_pos_vec
        out_spatial_dim = key_value_spatial_dim
        assert (query_offset is None)
        # The single query is at the last key/value position.
        query_offset = (key_value_spatial_dim.get_size_tensor() - 1)
    else:
        # Concat the negative distances [-(T-1)..-1] with the non-negative part [0..T'-1].
        query_spatial_dim_m1 = (query_spatial_dim - 1)
        q_pos_vec = rf.range_over_dim(query_spatial_dim_m1)
        (indices, out_spatial_dim) = rf.concat(((q_pos_vec - query_spatial_dim_m1.get_dim_value_tensor()), query_spatial_dim_m1), (kv_pos_vec, key_value_spatial_dim))
    if (query_offset is not None):
        indices = (indices - query_offset)
    return (indices, out_spatial_dim)
|
def relative_positional_encoding(*, query_spatial_dim: Dim, key_value_spatial_dim: Dim, feat_dim: Dim, query_offset: int=0, dtype: Optional[str]=None) -> Tuple[(Tensor, Dim)]:
    """
    Implements relative positional encoding, Transformer-XL style (https://arxiv.org/abs/1901.02860),
    as used for example by :class:`RelPosSelfAttention`.

    Code references, partly adapted from there:
    https://github.com/espnet/espnet/blob/4138010fb66ad27a43e8bee48a4932829a0847ae/espnet/nets/pytorch_backend/transformer/embedding.py#L260
    https://github.com/kimiyoung/transformer-xl/blob/44781ed21dbaec88b280f74d9ae2877f52b492a5/tf/model.py#L4

    Note that this encoding is stored in a cache so that it is only calculated once
    and then reused.

    Note that we could extend the implementation later to also buffer it
    even across mini-batches, like the ESPnet implementation does,
    e.g. by storing it in an auxiliary variable and increasing its size when needed.
    But this is not done yet, to keep the code simple.

    :return: tensor of shape [spatial_dim * 2 - 1, feat_dim], and the out spatial dim (spatial_dim * 2 - 1).
        In the center is the rel pos i-j=0. All to the right are for i-j>0, all to the left for i-j<0.
    """
    if (not dtype):
        dtype = rf.get_default_float_dtype()
    # Cache per run ctx, so the encoding is computed only once per run for the same dims/dtype.
    cache = _relative_positional_encoding_cache.setdefault(rf.get_run_ctx(), {})
    cache_key = (query_spatial_dim, key_value_spatial_dim, feat_dim, query_offset, dtype)
    if (cache_key in cache):
        return cache[cache_key]
    import math
    # Compute outside any conditional/loop context, so the cached tensor is usable anywhere.
    with rf.control_flow_ctx(None):
        (indices, out_spatial_dim) = _make_indices(query_spatial_dim, key_value_spatial_dim, query_offset)
        feat2_dim = feat_dim.div_left(2)
        # Geometric frequency ladder over half the feature dim.
        div_term = rf.exp((rf.range_over_dim(feat2_dim, dtype=dtype) * (- ((2.0 * math.log(10000.0)) / feat_dim.dimension))))
        arg_sin = rf.combine_bc(rf.cast(indices, dtype), '*', div_term)
        # cos(x) == sin(x + pi/2), so both halves can be computed with a single sin below.
        arg_cos = (arg_sin + (math.pi / 2.0))
        (arg, feat_dim_) = rf.concat((arg_sin, feat2_dim), (arg_cos, feat2_dim))
        (arg, feat_dim_) = rf.replace_dim(arg, in_dim=feat_dim_, out_dim=feat_dim)
        emb = rf.sin(arg)
        emb.verify_out_shape(({out_spatial_dim, feat_dim} if (out_spatial_dim != single_step_dim) else {feat_dim}), allow_missing_implicit_dims=True)
        emb.feature_dim = feat_dim
    cache[cache_key] = (emb, out_spatial_dim)
    return (emb, out_spatial_dim)
|
def sinusoidal_positional_encoding(*, spatial_dim: Dim, feat_dim: Dim, offset: Optional[Union[(int, Tensor)]]=None, dtype: Optional[str]=None, device: Optional[str]=None) -> Tensor:
    """
    Implements absolute sinusoidal positional encoding.

    Code adopted from :func:`relative_positional_encoding`
    and our TF util :func:`get_positional_encoding`.

    Note that this encoding is stored in a cache so that it is only calculated once
    and then reused.

    Note that we could extend the implementation later to also buffer it
    even across mini-batches, like the ESPnet implementation does,
    e.g. by storing it in an auxiliary variable and increasing its size when needed.
    But this is not done yet, to keep the code simple.

    :param spatial_dim: positions axis, or ``single_step_dim`` (then ``offset`` is the position)
    :param feat_dim: output feature dim
    :param offset: added to the positions
    :param dtype: defaults to the default float dtype
    :param device: defaults to the default device
    :return: tensor of shape [spatial_dim, feat_dim] if spatial_dim != single_step_dim else [feat_dim]
    """
    if (not dtype):
        dtype = rf.get_default_float_dtype()
    if (not device):
        device = rf.get_default_device()
    # Cache per run ctx, keyed on everything that influences the result.
    cache = _positional_encoding_cache.setdefault(rf.get_run_ctx(), {})
    cache_key = (spatial_dim, feat_dim, offset, dtype, device)
    if (cache_key in cache):
        return cache[cache_key]
    import math
    # Compute outside any conditional/loop context, so the cached tensor is usable anywhere.
    with rf.control_flow_ctx(None):
        if (spatial_dim == single_step_dim):
            assert (offset is not None)
            indices = rf.convert_to_tensor(offset, device=device)
        else:
            indices = rf.range_over_dim(spatial_dim, device=device)
            if (offset is not None):
                indices = (indices + offset)
            indices = rf.copy_to_device(indices, device)
        feat2_dim = feat_dim.div_left(2)
        # Geometric frequency ladder over half the feature dim
        # (note: different normalization than in relative_positional_encoding, following the TF util).
        div_term = rf.exp((rf.range_over_dim(feat2_dim, dtype=dtype, device=device) * (- (math.log(10000.0) / (feat2_dim.dimension - 1)))))
        arg_sin = rf.combine_bc(rf.cast(indices, dtype), '*', div_term)
        # cos(x) == sin(x + pi/2), so both halves can be computed with a single sin below.
        arg_cos = (arg_sin + (math.pi / 2.0))
        (arg, feat_dim_) = rf.concat((arg_sin, feat2_dim), (arg_cos, feat2_dim))
        (arg, feat_dim_) = rf.replace_dim(arg, in_dim=feat_dim_, out_dim=feat_dim)
        emb = rf.sin(arg)
        emb.verify_out_shape(({spatial_dim, feat_dim} if (spatial_dim != single_step_dim) else {feat_dim}), allow_missing_implicit_dims=True)
        emb.feature_dim = feat_dim
    cache[cache_key] = emb
    return emb
|
def _att_dropout_broadcast_default() -> bool:
    """
    Default for whether attention dropout broadcasts over all but the attention axis.

    Config options ``rf_att_dropout_broadcast`` / ``rf_dropout_broadcast`` take precedence
    (in that order); otherwise behavior version <= 18 keeps the old broadcasting
    behavior and emits a one-time warning.
    """
    from returnn.config import get_global_config
    from returnn.util.basic import BehaviorVersion

    config = get_global_config(raise_exception=False)
    if config:
        for opt_name in ('rf_att_dropout_broadcast', 'rf_dropout_broadcast'):
            value = config.bool(opt_name, None)
            if value is not None:
                return value
    if BehaviorVersion.get() > 18:
        return False
    # Old behavior: broadcast, but warn once about it.
    global _att_dropout_broadcast_shown_warning
    if not _att_dropout_broadcast_shown_warning:
        _att_dropout_broadcast_shown_warning = True
        logging.getLogger('returnn.frontend').warning("Attention dropout uses broadcasting. This is old behavior and likely not what you want. Set config option 'rf_att_dropout_broadcast' to False to disable this, or switch to a new behavior version >= 19. (This warning is only printed once.)")
    return True
|
def mel_filterbank(x: Tensor, *, in_dim: Dim, out_dim: Dim, sampling_rate: Union[(int, float)], fft_length: Optional[int]=None, f_min: Optional[Union[(int, float)]]=None, f_max: Optional[Union[(int, float)]]=None):
    """
    Applies the Mel filterbank to the input.

    :param x: input spectrum, e.g. magnitude or power spectrogram
    :param in_dim: expected to be fft_length // 2 + 1. E.g. via :func:`stft`.
    :param out_dim: nr of mel filters.
    :param sampling_rate:
    :param fft_length: fft_size, n_fft. Should match fft_length from :func:`stft`.
        If not given, infer this from in_dim, as (in_dim - 1) * 2.
    :param f_min: lower frequency bound, defaults to 0
    :param f_max: upper frequency bound, defaults to the Nyquist frequency
    :return: x projected onto the mel filters, feature dim ``out_dim``
    """
    if (not fft_length):
        fft_length = ((in_dim.dimension - 1) * 2)
    f_min = (f_min or 0)
    f_max = (f_max or (sampling_rate / 2.0))
    # Cache the (constant) filter matrix per backend/dtype/device to avoid recomputing it.
    backend = x._raw_backend
    cache_key = (f_min, f_max, sampling_rate, fft_length, out_dim.dimension, backend, x.dtype, x.device)
    if (cache_key in _mel_filter_bank_matrix_cache):
        filter_bank_matrix_: Tensor = _mel_filter_bank_matrix_cache[cache_key]
        # Re-wrap the cached raw tensor with the caller's dims
        # (the cache key does not include the dim identities).
        filter_bank_matrix = Tensor('mel-filter-bank', dims=(in_dim, out_dim), dtype=x.dtype)
        filter_bank_matrix.raw_tensor = filter_bank_matrix_.raw_tensor
    else:
        filter_bank_matrix_np = _mel_filter_bank_matrix_np(f_min=f_min, f_max=f_max, sampling_rate=sampling_rate, fft_size=fft_length, nr_of_filters=out_dim.dimension)
        filter_bank_matrix_np = filter_bank_matrix_np.astype(x.dtype)
        filter_bank_matrix = rf.convert_to_tensor(filter_bank_matrix_np, dims=(in_dim, out_dim), _backend=backend)
        filter_bank_matrix = rf.copy_to_device(filter_bank_matrix, x.device)
        if backend.executing_eagerly():
            # Only cache concrete (eager) tensors; keep the cache bounded.
            if (len(_mel_filter_bank_matrix_cache) > 100):
                _mel_filter_bank_matrix_cache.clear()
            _mel_filter_bank_matrix_cache[cache_key] = filter_bank_matrix
    out = rf.matmul(x, filter_bank_matrix, reduce=in_dim)
    out.feature_dim = out_dim
    return out
|
@functools.lru_cache()
def _mel_filter_bank_matrix_np(*, f_min: Union[(int, float)], f_max: Union[(int, float)], sampling_rate: Union[(int, float)], fft_size: int, nr_of_filters: int) -> numpy.ndarray:
'\n Returns the filter matrix which yields the mel filter bank features, when applied to the spectrum as\n matmul(freqDom, filterMatrix), where freqDom has dimension (time, frequency)\n and filterMatrix is the matrix returned\n by this function.\n The filter matrix is computed according to equation 6.141 in\n [Huang & Acero+, 2001] "Spoken Language Processing - A Guide to Theroy, Algorithm, and System Development"\n\n :param float|int f_min: minimum frequency\n :param float|int f_max: maximum frequency\n :param float sampling_rate: sampling rate of audio signal\n :param int fft_size: dimension of discrete fourier transformation\n :param int nr_of_filters: number of mel frequency filter banks to be created\n :return: shape=(fft_size // 2 + 1, nr_of_filters), matrix yielding the mel frequency cepstral coefficients\n '
def mel_scale(freq):
'\n returns the respective value on the mel scale\n\n :param float freq: frequency value to transform onto mel scale\n :rtype: float\n '
return (1125.0 * numpy.log((1 + (float(freq) / 700))))
def inv_mel_scale(mel_val):
'\n returns the respective value in the frequency domain\n\n :param float mel_val: value in mel domain\n :rtype: float\n '
return (700.0 * (numpy.exp((float(mel_val) / 1125)) - 1))
def filter_center(filter_id, f_min, f_max, sampling_rate, fft_size, nr_of_filters):
'\n :param int filter_id: filter to compute the center frequency for\n :param float|int f_min: minimum frequency\n :param float|int f_max: maximum frequency\n :param float|int sampling_rate: sampling rate of audio signal\n :param int fft_size: dimension of discrete fourier transformation\n :param int nr_of_filters: number of mel frequency filter banks to be created\n :rtype: float\n :return: center frequency of filter\n '
return ((float(fft_size) / sampling_rate) * inv_mel_scale((mel_scale(f_min) + (filter_id * ((mel_scale(f_max) - mel_scale(f_min)) / (nr_of_filters + 1))))))
filt_cent = numpy.zeros(shape=((nr_of_filters + 2),), dtype=numpy.float32)
for i1 in range((nr_of_filters + 2)):
filt_cent[i1] = filter_center(i1, f_min, f_max, sampling_rate, fft_size, nr_of_filters)
f_mat = numpy.zeros(shape=(((fft_size // 2) + 1), nr_of_filters))
for i1 in range(f_mat.shape[0]):
for i2 in range(1, (nr_of_filters + 1)):
if ((i1 > filt_cent[(i2 - 1)]) and (i1 < filt_cent[(i2 + 1)])):
if (i1 < filt_cent[i2]):
num = (i1 - filt_cent[(i2 - 1)])
denom = (filt_cent[i2] - filt_cent[(i2 - 1)])
else:
num = (filt_cent[(i2 + 1)] - i1)
denom = (filt_cent[(i2 + 1)] - filt_cent[i2])
el_val = (num / denom)
else:
el_val = 0
f_mat[(i1, (i2 - 1))] = el_val
return f_mat
|
def log_mel_filterbank_from_raw(raw_audio: Tensor, *, in_spatial_dim: Dim, out_dim: Dim, sampling_rate: int=16000, window_len: float=0.025, step_len: float=0.01, n_fft: Optional[int]=None, log_base: Union[(int, float)]=10) -> Tuple[Tensor, Dim]:
    """
    log mel filterbank features

    :param raw_audio: (..., in_spatial_dim, ...). if it has a feature_dim with dimension 1, it is squeezed away.
    :param in_spatial_dim:
    :param out_dim: nr of mel filters.
    :param sampling_rate: samples per second
    :param window_len: in seconds
    :param step_len: in seconds
    :param n_fft: fft_size, n_fft. Should match fft_length from :func:`stft`.
        If not provided, next power-of-two from window_num_frames.
    :param log_base: e.g. 10 or math.e
    :return: (log mel features, out spatial dim)
    """
    # A dummy feature dim of size 1 carries no information -- drop it.
    if raw_audio.feature_dim and raw_audio.feature_dim.dimension == 1:
        raw_audio = rf.squeeze(raw_audio, axis=raw_audio.feature_dim)
    frame_length = int(window_len * sampling_rate)
    frame_step = int(step_len * sampling_rate)
    fft_length = n_fft or util_math.next_power_of_two(frame_length)
    spectrogram, out_spatial_dim, freq_dim = rf.stft(
        raw_audio, in_spatial_dim=in_spatial_dim,
        frame_step=frame_step, frame_length=frame_length, fft_length=fft_length)
    power_spec = rf.abs(spectrogram) ** 2.0
    mel_feats = mel_filterbank(power_spec, in_dim=freq_dim, out_dim=out_dim, sampling_rate=sampling_rate)
    log_mel_feats = rf.safe_log(mel_feats, eps=1e-10)
    if log_base != math.e:
        # safe_log is the natural log; rescale to the requested base.
        log_mel_feats = log_mel_feats * (1.0 / math.log(log_base))
    return log_mel_feats, out_spatial_dim
|
def specaugment(x: Tensor, *, spatial_dim: Dim, feature_dim: Optional[Dim]=None, global_train_step_dependent: bool=True, only_on_train: bool=True, max_consecutive_spatial_dims: int=20, max_consecutive_feature_dims: Optional[int]=None, num_spatial_mask_factor: int=100, steps: Tuple[(int, int, int)]=(0, 1000, 2000)) -> Tensor:
    """
    SpecAugment, https://arxiv.org/abs/1904.08779

    :param x: input features
    :param spatial_dim: time axis to mask over
    :param feature_dim: feature axis to mask over; defaults to ``x.feature_dim``
    :param global_train_step_dependent: if True, masking strength ramps up at the given ``steps``
    :param only_on_train: if True, masking is skipped outside training (identity)
    :param max_consecutive_spatial_dims: max width (inclusive) of one spatial mask
    :param max_consecutive_feature_dims: max width of one feature mask; defaults to feature_dim // 5
    :param num_spatial_mask_factor: spatial length divided by this bounds the number of spatial masks
    :param steps: global train steps at which masking strength increases
    :return: masked (or unchanged) x
    """
    if (not feature_dim):
        assert x.feature_dim
        feature_dim = x.feature_dim
    if (not max_consecutive_feature_dims):
        max_consecutive_feature_dims = (feature_dim.dimension // 5)
    if global_train_step_dependent:
        with rf.set_default_device_ctx('cpu'):
            step = rf.get_run_ctx().step
            # step{0,1,2} are 0/1 flags for whether each ramp-up boundary has been passed.
            step0 = rf.where((step >= steps[0]), 1, 0)
            step1 = rf.where((step >= steps[1]), 1, 0)
            step2 = rf.where((step >= steps[2]), 1, 0)
    else:
        step0 = step1 = step2 = 1
    def _mask_branch():
        # Only evaluated in the training branch of rf.cond below.
        x_masked = x
        spatial_len = spatial_dim.get_dim_value_tensor()
        # Time masking (broadcast over features); mask counts scale with the step flags
        # and are capped by the sequence length.
        x_masked = random_mask(x_masked, mask_axis=spatial_dim, broadcast_axis=feature_dim, min_num=rf.minimum((step1 + step2), spatial_len), max_num=rf.minimum((rf.maximum((spatial_len // num_spatial_mask_factor), 2) * ((step0 + step1) + (step2 * 2))), spatial_len), max_dims=max_consecutive_spatial_dims)
        # Frequency masking (broadcast over time).
        x_masked = random_mask(x_masked, mask_axis=feature_dim, broadcast_axis=spatial_dim, min_num=(step1 + step2), max_num=(((step0 * 2) + step1) + (step2 * 2)), max_dims=max_consecutive_feature_dims)
        return x_masked
    return rf.cond((rf.get_run_ctx().train_flag | (not only_on_train)), _mask_branch, (lambda : x))
|
def random_mask(x: Tensor, *, mask_axis: Dim, broadcast_axis: Union[(Dim, Collection[Dim])], min_num: Union[(int, Tensor)], max_num: Union[(int, Tensor)], max_dims: Union[(int, Tensor)], mask_value: Union[(int, float, Tensor)]=0.0) -> Tensor:
    """
    Apply a random number of random-length masks along ``mask_axis``.

    :param x: e.g. (batch,time,feature)
    :param mask_axis: axis to mask
    :param broadcast_axis: one or multiple, which should be broadcasted over.
        The remaining axes not specified by mask_axis and broadcast_axis are not broadcasted over
        and treated as batch dims, i.e. get independent masks.
        E.g. in [B,T,D], with mask_axis=F, broadcast_axis=T, it creates masks [B,F].
    :param min_num: minimum number of masks (inclusive)
    :param max_num: maximum number of masks (inclusive)
    :param max_dims: maximum width of a single mask (inclusive)
    :param mask_value: value written into the masked positions
    :return: x with random spans along mask_axis replaced by mask_value
    """
    batch_dims = list(x.dims)
    batch_dims.remove(mask_axis)
    if isinstance(broadcast_axis, Dim):
        batch_dims.remove(broadcast_axis)
    else:
        for a in broadcast_axis:
            batch_dims.remove(a)
    if (isinstance(min_num, int) and isinstance(max_num, int) and (min_num == max_num)):
        # Static number of masks, identical for every batch entry.
        num = min_num
        max_num = num
    else:
        # Per-batch-entry random number of masks in [min_num, max_num].
        num = rf.random_uniform(batch_dims, minval=min_num, maxval=(max_num + 1), dtype='int32', device='cpu')
        max_num = rf.reduce_max(num, axis=num.dims)
    # Sample mask start positions without replacement: top-k over i.i.d. uniform noise
    # yields k distinct indices along mask_axis.
    # After the branch above, max_num is always the upper bound on num (int or scalar
    # tensor), so it is reused as k here instead of recomputing reduce_max(num).
    (_, indices, k_dim) = rf.top_k(
        rf.random_uniform((batch_dims + [mask_axis]), minval=0.0, maxval=1.0, device=x.device),
        axis=mask_axis,
        k=max_num,
    )
    if isinstance(num, int):
        for i in range(num):
            x = mask(x, mask_axis=mask_axis, pos=rf.gather(indices, axis=k_dim, indices=i), max_amount=max_dims, mask_value=mask_value)
    else:
        def _body(s):
            # Loop state: (current mask index, current tensor).
            (i_, x_) = s
            y = mask(x_, mask_axis=mask_axis, pos=rf.gather(indices, axis=k_dim, indices=rf.copy_to_device(i_, indices.device)), max_amount=max_dims, mask_value=mask_value)
            # Batch entries which already received their `num` masks keep the old value.
            y = rf.where(rf.copy_to_device(rf.less(i_, num), y.device), y, x_)
            return ((i_ + 1), y)
        (_, x) = rf.while_loop(cond=(lambda s: (s[0] < max_num)), body=_body, initial=(rf.constant(0, dims=(), device='cpu'), x))
    return x
|
def mask(x: Tensor, *, mask_axis: Dim, pos: Tensor, max_amount: Union[(int, Tensor)], mask_value: Union[(int, float, Tensor)]=0.0) -> Tensor:
    """
    Mask one contiguous span of random length along ``mask_axis``, starting at ``pos``.

    :param x: (batch,time,[feature]). any dim not mask_axis or in pos.shape will be broadcasted over
    :param mask_axis: axis along which the span is masked
    :param pos: span start positions, (batch,) (or multiple batch dims)
    :param max_amount: maximum span length (inclusive)
    :param mask_value: value written into the span
    :return: x with [pos, pos+len) along mask_axis replaced by mask_value (clipped at the axis size)
    """
    axis_size = rf.copy_to_device(mask_axis.get_size_tensor(), pos.device)
    pos = rf.cast(pos, dtype=axis_size.dtype)
    # Random span length in [1, max_amount].
    span_len = rf.random_uniform(pos.dims, minval=1, maxval=max_amount + 1, dtype=pos.dtype, device=pos.device)
    # Span end, clipped to the (possibly per-sequence) axis size.
    span_end = rf.minimum(pos + span_len, axis_size)
    positions = rf.range_over_dim(mask_axis, dtype=pos.dtype, device=pos.device)
    in_span = rf.compare_bc(positions, '>=', pos) & rf.compare_bc(positions, '<', span_end)
    return rf.where(in_span, mask_value, x)
|
def is_backend_raw_tensor_dim_tag_independent() -> bool:
    """
    :return: whether raw tensors of the backend are independent of :class:`Dim`
        (Usually yes, e.g. :class:`tf.Tensor` or :class:`torch.Tensor`,
        but the TF-layers backend is an exception.)
    """
    backend = _backend.global_backend
    return backend.is_backend_raw_tensor_dim_tag_independent
|
def cond(pred: Union[(bool, Tensor)], true_fn: Callable[([], T)], false_fn: Callable[([], T)]) -> T:
    """
    Branch between two zero-arg functions based on a scalar boolean predicate.
    Only the selected branch is evaluated when the predicate is known
    (Python bool, or a tensor under an eagerly-executing backend);
    otherwise the decision is deferred to the backend's symbolic cond.

    :param pred: Python bool, or scalar bool Tensor
    :param true_fn: evaluated if pred is true
    :param false_fn: evaluated if pred is false
    :return: true_fn() if pred else false_fn()
    """
    if isinstance(pred, bool):
        return true_fn() if pred else false_fn()
    assert isinstance(pred, Tensor) and pred.dims == () and pred.dtype == 'bool'
    backend = pred._raw_backend
    if backend.executing_eagerly():
        return true_fn() if bool(pred.raw_tensor) else false_fn()
    return backend.cond(pred, true_fn, false_fn)
|
def full(*, dims: Sequence[Dim], fill_value: Union[(RawTensorTypes, Tensor)], dtype: Optional[str]=None, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None) -> Tensor:
    """
    full, fill, constant: create a tensor of the given shape filled with a scalar value.

    https://data-apis.org/array-api/latest/API_specification/generated/array_api.full.html

    Also see :func:`convert_to_tensor`.

    :param dims: shape
    :param fill_value: scalar to fill the tensor
    :param dtype: inferred from fill_value if not given
    :param device:
    :param sparse_dim:
    :param feature_dim:
    """
    if dtype is None:
        # Infer dtype from the fill value. Order matters: bool is a subclass of int,
        # so it must be matched first (similarly numpy scalars subclass int/float).
        for scalar_type, infer_dtype in (
            (bool, lambda _v: 'bool'),
            (int, lambda _v: rf.get_default_int_dtype()),
            (float, lambda _v: rf.get_default_float_dtype()),
            (Tensor, lambda v: v.dtype),
        ):
            if isinstance(fill_value, scalar_type):
                dtype = infer_dtype(fill_value)
                break
        else:
            raise ValueError(f'cannot infer dtype from {fill_value!r} or type ({type(fill_value)})')
    if isinstance(fill_value, numpy.ndarray):
        assert (fill_value.shape == ()), f'''full/fill/constant: expect scalar fill_value, got array with shape {fill_value.shape}.
Use rf.convert_to_tensor to convert an arbitrary array to a tensor.'''
    if isinstance(fill_value, Tensor):
        assert (fill_value.dims == ()), f'full/fill/constant: expect scalar fill_value, got tensor with shape {fill_value.dims}.'
    return global_backend.full(dims, fill_value, dtype=dtype, device=device, sparse_dim=sparse_dim, feature_dim=feature_dim)
|
def constant(fill_value: RawTensorTypes, *, dims: Sequence[Dim], dtype: Optional[str]=None, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None) -> Tensor:
    """Alias to :func:`full`, mapping `value` to `fill_value`. Also see :func:`convert_to_tensor`."""
    return full(
        dims=dims,
        fill_value=fill_value,
        dtype=dtype,
        device=device,
        sparse_dim=sparse_dim,
        feature_dim=feature_dim,
    )
|
def zeros(dims: Sequence[Dim], *, dtype: Optional[str]=None, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None) -> Tensor:
    """
    Tensor of the given shape filled with zeros. Uses the default float dtype when none is given.
    """
    if not dtype:
        dtype = rf.get_default_float_dtype()
    return full(dims=dims, fill_value=0, dtype=dtype, device=device, sparse_dim=sparse_dim, feature_dim=feature_dim)
|
def ones(dims: Sequence[Dim], *, dtype: Optional[str]=None, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None) -> Tensor:
    """
    Tensor of the given shape filled with ones. Uses the default float dtype when none is given.
    """
    if not dtype:
        dtype = rf.get_default_float_dtype()
    return full(dims=dims, fill_value=1, dtype=dtype, device=device, sparse_dim=sparse_dim, feature_dim=feature_dim)
|
def zeros_like(other: Tensor) -> Tensor:
    """Zero-filled tensor with the same dims, dtype, device, sparse_dim and feature_dim as `other`."""
    return zeros(
        dims=other.dims,
        dtype=other.dtype,
        device=other.device,
        sparse_dim=other.sparse_dim,
        feature_dim=other.feature_dim,
    )
|
def ones_like(other: Tensor) -> Tensor:
    """One-filled tensor with the same dims, dtype, device, sparse_dim and feature_dim as `other`."""
    return ones(
        dims=other.dims,
        dtype=other.dtype,
        device=other.device,
        sparse_dim=other.sparse_dim,
        feature_dim=other.feature_dim,
    )
|
class ModuleList(rf.Module, Generic[__ModT]):
    """
    Module list, getting passed an Iterable of Modules and creates a list of Modules in that order.

    Modules are stored as attributes named by their (string) list index, so they are
    registered as submodules of this :class:`rf.Module` like any other attribute.
    """
    def __init__(self, *modules: Union[(__ModT, Iterable[__ModT], Dict[(str, __ModT)], ModuleList)]):
        """
        :param modules: a single dict (key -> module), a single :class:`ModuleList`,
            a single iterable of modules, or multiple modules as separate args.
        """
        super().__init__()
        if ((len(modules) == 1) and isinstance(modules[0], dict)):
            for (i, (key, module)) in enumerate(modules[0].items()):
                # Int-like keys are renumbered to keep indices contiguous.
                if _is_int_str(key):
                    key = str(i)
                setattr(self, key, _convert_to_module(module))
        elif ((len(modules) == 1) and isinstance(modules[0], ModuleList)):
            # Copy another ModuleList, preserving its keys.
            for (key, module) in modules[0]._get_modules().items():
                setattr(self, key, _convert_to_module(module))
        elif ((len(modules) == 1) and _is_iterable(modules[0])):
            for (idx, module) in enumerate(modules[0]):
                setattr(self, str(idx), _convert_to_module(module))
        else:
            for (idx, module) in enumerate(modules):
                setattr(self, str(idx), _convert_to_module(module))
    def _get_modules(self) -> Dict[(str, __ModT)]:
        # All attributes which are modules, in attribute insertion order.
        return {key: value for (key, value) in vars(self).items() if isinstance(value, rf.Module)}
    def append(self, module: __ModT) -> ModuleList[__ModT]:
        """
        appends one module to the list
        """
        setattr(self, str(len(self)), _convert_to_module(module))
        return self
    def extend(self, modules: Iterable[__ModT]) -> ModuleList[__ModT]:
        """
        appends multiple modules to the list
        """
        for module in modules:
            self.append(module)
        return self
    def __len__(self) -> int:
        """number of modules in the list"""
        return len(self._get_modules())
    def __iter__(self) -> Iterator[__ModT]:
        """iterate over the modules in list order"""
        return iter(self._get_modules().values())
    def items(self) -> Iterable[Tuple[(str, __ModT)]]:
        'module items'
        return self._get_modules().items()
    def __getitem__(self, idx: Union[(slice, int)]) -> Union[(ModuleList[__ModT], __ModT)]:
        """
        :param idx: int returns the single module; a slice returns a new ModuleList
            holding the selected modules.
        """
        if isinstance(idx, slice):
            return self.__class__(dict(list(self._get_modules().items())[idx]))
        else:
            return list(self._get_modules().values())[idx]
    def __setitem__(self, idx: Union[(slice, int)], module: Union[(__ModT, Iterable[__ModT])]) -> None:
        """
        :param idx: int replaces one module in place (keeping its key);
            a slice (step 1 only) replaces the range with the given iterable of modules,
            renumbering the trailing int-like keys.
        """
        if isinstance(idx, slice):
            # NOTE(review): assumes idx.start is None or non-negative; a negative start
            # would renumber from a negative index -- confirm callers never pass one.
            assert ((not idx.step) or (idx.step == 1))
            mod_items = list(self._get_modules().items())
            if (idx.stop is not None):
                remaining = mod_items[idx.stop:]
            else:
                remaining = []
            # Drop everything from idx.start on; the new modules and then the remaining
            # tail are re-added below with contiguous indices.
            for (k, _) in mod_items[idx.start:]:
                delattr(self, k)
            i = (idx.start or 0)
            for mod_ in module:
                assert (not hasattr(self, str(i)))
                setattr(self, str(i), _convert_to_module(mod_))
                i += 1
            for (k, v) in remaining:
                if _is_int_str(k):
                    k = str(i)
                assert (not hasattr(self, k))
                setattr(self, k, v)
                i += 1
        else:
            # Replace in place, reusing the existing key at that position.
            key = list(self._get_modules().keys())[idx]
            setattr(self, key, _convert_to_module(module))
    def __delitem__(self, key: Union[(slice, int)]):
        """Remove the module(s) at the given index or slice, renumbering the rest."""
        if isinstance(key, slice):
            self[key] = []
        else:
            # Normalize a negative index first: mapping e.g. -1 directly to the slice
            # (-1:0) would be empty / inconsistent instead of deleting the last module.
            if key < 0:
                key += len(self)
            self[key:(key + 1)] = []
    __call__ = rf.Module.__call__
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.