code
stringlengths
17
6.64M
class Sequential(ModuleList):
    """
    Sequential module: takes callables / modules and executes them in order,
    feeding each module's output into the next.
    """

    def __call__(self, inp, *, collected_outputs: Optional[Dict[str, Tensor]] = None, **kwargs) -> Tensor:
        """
        Forward.

        :param inp: input to the first module
        :param collected_outputs: if given, each intermediate output is stored here under the module name
        :param kwargs: passed through to every module
        :return: output of the last module
        """
        out = inp
        for name, module in self.items():
            out = module(out, **kwargs)
            if collected_outputs is not None:
                collected_outputs[name] = out
        return out
def sequential(source: Tensor, *modules) -> Tensor:
    """
    Wraps ``Sequential(*modules)(source)``
    """
    seq = Sequential(*modules)
    return seq(source)
def _convert_to_module(obj: _ModT) -> rf.Module:
    """
    :param obj: module or plain callable
    :return: ``obj`` itself if it is already a module, otherwise wrapped via :class:`rf.Functional`
    """
    if isinstance(obj, rf.Module):
        return obj
    if callable(obj):
        # Plain function/lambda: wrap so it behaves like a module.
        return rf.Functional(obj)
    raise TypeError(f'Expected rf.Module or callable, did not expect {obj!r} ({type(obj)})')
def _is_iterable(obj) -> bool: try: iter(obj) return True except TypeError: return False
class ParameterList(rf.Module):
    """
    Parameter list, getting passed an Iterable of Parameters and creates a list of Parameters in that order
    """

    def __init__(
        self,
        *parameters: Union[rf.Parameter, Iterable[rf.Parameter], Dict[str, rf.Parameter], ParameterList],
    ):
        """
        :param parameters: individual parameters, or a single iterable / dict / ParameterList of parameters
        """
        super().__init__()
        if len(parameters) == 1 and isinstance(parameters[0], dict):
            # Dict: keep non-numeric keys; keys which parse as ints are renumbered by position.
            for i, (key, parameter) in enumerate(parameters[0].items()):
                if _is_int_str(key):
                    key = str(i)
                setattr(self, key, parameter)
        elif len(parameters) == 1 and isinstance(parameters[0], ParameterList):
            # Copy entries (with their keys) from another ParameterList.
            for key, parameter in parameters[0]._get_parameters().items():
                setattr(self, key, parameter)
        elif len(parameters) == 1 and _is_iterable(parameters[0]):
            for idx, parameter in enumerate(parameters[0]):
                setattr(self, str(idx), parameter)
        else:
            for idx, parameter in enumerate(parameters):
                setattr(self, str(idx), parameter)

    def _get_parameters(self) -> Dict[str, rf.Parameter]:
        # All attributes which are parameters, in insertion order.
        return {key: value for key, value in vars(self).items() if isinstance(value, rf.Parameter)}

    def append(self, parameter: rf.Parameter) -> ParameterList:
        """
        appends one Parameter to the list
        """
        setattr(self, str(len(self)), parameter)
        return self

    def extend(self, parameters: Iterable[rf.Parameter]) -> ParameterList:
        """
        appends multiple Parameters to the list
        """
        for parameter in parameters:
            self.append(parameter)
        return self

    def __len__(self) -> int:
        return len(self._get_parameters())

    def __iter__(self) -> Iterator[rf.Parameter]:
        return iter(self._get_parameters().values())

    def __getitem__(self, idx) -> Union[ParameterList, rf.Parameter]:
        if isinstance(idx, slice):
            # Slice: return a new ParameterList with the selected entries.
            return self.__class__(dict(list(self._get_parameters().items())[idx]))
        else:
            return list(self._get_parameters().values())[idx]

    def __setitem__(self, idx: int, parameter: rf.Parameter) -> None:
        key = list(self._get_parameters().keys())[idx]
        # Bug fix: previously this assigned the class ``rf.Parameter`` itself
        # instead of the given ``parameter`` argument.
        return setattr(self, key, parameter)

    __call__ = rf.Module.__call__
def _is_int_str(s: str) -> bool: try: int(s) return True except ValueError: return False
@contextmanager
def control_flow_ctx(ctx: Optional[ControlFlowContext] = None):
    """
    Activates the given control flow context.
    Restores the previously active context on exit.
    """
    global _ctx
    prev_ctx = _ctx
    try:
        _ctx = ctx
        yield ctx
    finally:
        _ctx = prev_ctx
def get_current_control_flow_ctx() -> Optional[ControlFlowContext]:
    """
    :return: current control flow context
    """
    return _ctx
class _ConvOrTransposedConv(rf.Module):
    """
    Base class for both convolution and transposed convolution.
    """

    nd: Optional[int] = None  # number of spatial dims; fixed by subclasses (1/2/3) or inferred from filter_size
    _transposed: bool
    groups: Optional[int] = None  # set by _Conv before super().__init__; read below for grouped conv

    def __init__(
        self,
        in_dim: Dim,
        out_dim: Dim,
        filter_size: Union[Sequence[Union[int, Dim]], int, Dim],
        *,
        padding: str,
        with_bias: bool,
    ):
        """
        :param in_dim:
        :param out_dim:
        :param filter_size: (width,), (height,width) or (depth,height,width) for 1D/2D/3D conv.
            the input data ndim must match, or you can add dimensions via input_expand_dims
            or input_add_feature_dim. it will automatically swap the batch-dim to the first
            axis of the input data.
        :param padding: "same" or "valid"
        :param with_bias:
        """
        super().__init__()
        assert isinstance(in_dim, Dim) and isinstance(out_dim, Dim)
        self.in_dim = rf.dim_match_priority_when_needed(in_dim, out_dim)
        self.out_dim = out_dim
        if isinstance(filter_size, (int, Dim)):
            # Single scalar: broadcast over all spatial dims (nd unknown counts as 1D).
            filter_size = [filter_size] if self.nd in (None, 1) else [filter_size] * self.nd
        assert isinstance(filter_size, (tuple, list))
        if self.nd:
            assert self.nd == len(filter_size)
        else:
            self.nd = len(filter_size)
        self.filter_size = [
            s if isinstance(s, Dim) else Dim(s, name=f'filter-dim{i}') for i, s in enumerate(filter_size)
        ]
        self.padding = padding
        filter_in_dim = in_dim
        if self.groups is not None and self.groups > 1:
            # Grouped conv: each group only sees in_dim/groups input channels.
            assert not self._transposed
            filter_in_dim //= self.groups
        filter_in_dim = rf.dim_match_priority_when_needed(filter_in_dim, self.out_dim)
        self.filter_in_dim = filter_in_dim
        # Filter layout differs between conv and transposed conv.
        if self._transposed:
            filter_dims = [self.in_dim, self.out_dim]
        else:
            filter_dims = [self.out_dim, self.filter_in_dim]
        self.filter = rf.Parameter(filter_dims + self.filter_size)
        self.filter.initial = rf.init.Glorot()
        self.with_bias = with_bias
        self.bias = None
        if self.with_bias:
            self.bias = rf.Parameter([self.out_dim])
            self.bias.initial = 0.0

    def _call_nd1(
        self, source: Tensor, *, in_spatial_dim: Dim, out_spatial_dim: Optional[Dim] = None
    ) -> Tuple[Tensor, Dim]:
        # 1D convenience wrapper around the subclass' N-D __call__ (used as __call__ by the 1D subclasses).
        assert self.nd == 1
        out, (out_spatial_dim,) = self.__class__.__base__.__call__(
            self,
            source,
            in_spatial_dims=[in_spatial_dim],
            out_spatial_dims=[out_spatial_dim] if out_spatial_dim else None,
        )
        return out, out_spatial_dim
class _Conv(_ConvOrTransposedConv):
    """
    A generic convolution layer which supports 1D, 2D and 3D convolution.
    Base class for :class:`Conv1d`, :class:`Conv2d`, :class:`Conv3d`.
    """

    _transposed = False

    def __init__(
        self,
        in_dim: Dim,
        out_dim: Dim,
        filter_size: Union[Sequence[Union[int, Dim]], int, Dim],
        *,
        padding: str,
        strides: Optional[Union[int, Sequence[int]]] = None,
        dilation_rate: Optional[Union[int, Sequence[int]]] = None,
        groups: Optional[int] = None,
        with_bias: bool = True,
    ):
        """
        :param in_dim:
        :param out_dim:
        :param filter_size: (width,), (height,width) or (depth,height,width) for 1D/2D/3D conv
        :param padding: "same" or "valid"
        :param strides: strides for the spatial dims; same length as filter_size, or a single int
        :param dilation_rate: dilation for the spatial dims
        :param groups: grouped convolution
        :param with_bias: if True, will add a bias to the output features
        """
        # groups must be set before the base init, which reads it to derive the filter input dim.
        self.groups = groups
        super().__init__(in_dim=in_dim, out_dim=out_dim, filter_size=filter_size, padding=padding, with_bias=with_bias)
        if isinstance(strides, int):
            strides = [strides] * self.nd
        self.strides = strides
        self.dilation_rate = dilation_rate

    def __call__(
        self, source: Tensor, *, in_spatial_dims: Sequence[Dim], out_spatial_dims: Optional[Sequence[Dim]] = None
    ) -> Tuple[Tensor, Sequence[Dim]]:
        return conv(
            source,
            in_dim=self.in_dim,
            out_dim=self.out_dim,
            in_spatial_dims=in_spatial_dims,
            out_spatial_dims=out_spatial_dims,
            filter=self.filter,
            filter_size=self.filter_size,
            padding=self.padding,
            strides=self.strides,
            dilation_rate=self.dilation_rate,
            groups=self.groups,
            bias=self.bias if self.with_bias else None,
        )
def conv(
    source: Tensor,
    *,
    in_dim: Dim,
    out_dim: Dim,
    in_spatial_dims: Sequence[Dim],
    out_spatial_dims: Optional[Sequence[Dim]] = None,
    filter: Tensor,
    filter_size: Sequence[Dim],
    padding: str,
    strides: Optional[Union[int, Sequence[int]]] = None,
    dilation_rate: Optional[Union[int, Sequence[int]]] = None,
    groups: Optional[int] = None,
    bias: Optional[Tensor] = None,
) -> Tuple[Tensor, Sequence[Dim]]:
    """convolution"""
    # Validate that all requested spatial dims actually occur in the source.
    for spatial_dim in in_spatial_dims:
        if spatial_dim not in source.dims:
            raise ValueError(f'conv: source {source} does not have spatial dim {spatial_dim}')
    out, out_spatial_dims = source._raw_backend.conv(
        source,
        in_dim=in_dim,
        out_dim=out_dim,
        in_spatial_dims=in_spatial_dims,
        out_spatial_dims=out_spatial_dims,
        filter=filter,
        filter_size=filter_size,
        padding=padding,
        strides=strides,
        dilation_rate=dilation_rate,
        groups=groups,
        bias=bias,
    )
    return out, out_spatial_dims
class Conv1d(_Conv):
    """
    1D convolution
    """

    nd = 1

    def __init__(
        self,
        in_dim: Dim,
        out_dim: Dim,
        filter_size: Union[int, Dim],
        *,
        padding: str,
        strides: Optional[int] = None,
        dilation_rate: Optional[int] = None,
        groups: Optional[int] = None,
        with_bias: bool = True,
    ):
        """
        :param in_dim:
        :param out_dim:
        :param filter_size:
        :param padding: "same" or "valid"
        :param strides: stride for the spatial dim
        :param dilation_rate: dilation for the spatial dim
        :param groups: grouped convolution
        :param with_bias: if True, will add a bias to the output features
        """
        super().__init__(
            in_dim=in_dim,
            out_dim=out_dim,
            filter_size=[filter_size],
            padding=padding,
            strides=strides,
            dilation_rate=dilation_rate,
            groups=groups,
            with_bias=with_bias,
        )

    # 1D interface: single in/out spatial dim instead of sequences.
    __call__ = _ConvOrTransposedConv._call_nd1
class Conv2d(_Conv):
    """
    2D convolution
    """

    nd = 2
class Conv3d(_Conv):
    """
    3D convolution
    """

    nd = 3
class _TransposedConv(_ConvOrTransposedConv):
    """
    Transposed convolution, sometimes also called deconvolution.
    See :func:`tf.nn.conv2d_transpose` (currently we support 1D/2D).
    """

    nd: Optional[int] = None
    _transposed = True

    def __init__(
        self,
        in_dim: Dim,
        out_dim: Dim,
        filter_size: Sequence[Union[int, Dim]],
        *,
        padding: str,
        remove_padding: Union[Sequence[int], int] = 0,
        output_padding: Optional[Union[Sequence[Optional[int]], int]] = None,
        strides: Optional[Sequence[int]] = None,
        with_bias: bool = True,
    ):
        """
        :param in_dim:
        :param out_dim:
        :param filter_size:
        :param padding: "same" or "valid"
        :param remove_padding:
        :param output_padding:
        :param strides: specifies the upscaling. by default, same as filter_size
        :param with_bias: whether to add a bias. enabled by default
        """
        super().__init__(in_dim=in_dim, out_dim=out_dim, filter_size=filter_size, padding=padding, with_bias=with_bias)
        self.strides = strides
        self.remove_padding = remove_padding
        self.output_padding = output_padding

    def __call__(
        self, source: Tensor, *, in_spatial_dims: Sequence[Dim], out_spatial_dims: Optional[Sequence[Dim]] = None
    ) -> Tuple[Tensor, Sequence[Dim]]:
        return transposed_conv(
            source,
            in_dim=self.in_dim,
            out_dim=self.out_dim,
            in_spatial_dims=in_spatial_dims,
            out_spatial_dims=out_spatial_dims,
            filter=self.filter,
            filter_size=self.filter_size,
            padding=self.padding,
            remove_padding=self.remove_padding,
            output_padding=self.output_padding,
            strides=self.strides,
            bias=self.bias if self.with_bias else None,
        )
def transposed_conv(
    source: Tensor,
    *,
    in_dim: Dim,
    out_dim: Dim,
    in_spatial_dims: Sequence[Dim],
    out_spatial_dims: Optional[Sequence[Dim]] = None,
    filter: Tensor,
    filter_size: Sequence[Dim],
    padding: str,
    remove_padding: Union[Sequence[int], int] = 0,
    output_padding: Optional[Union[Sequence[Optional[int]], int]] = None,
    strides: Optional[Sequence[int]] = None,
    bias: Optional[Tensor] = None,
) -> Tuple[Tensor, Sequence[Dim]]:
    """transposed conv"""
    # Thin delegation to the active backend.
    out, out_spatial_dims = source._raw_backend.transposed_conv(
        source=source,
        in_dim=in_dim,
        out_dim=out_dim,
        in_spatial_dims=in_spatial_dims,
        out_spatial_dims=out_spatial_dims,
        filter=filter,
        filter_size=filter_size,
        padding=padding,
        remove_padding=remove_padding,
        output_padding=output_padding,
        strides=strides,
        bias=bias,
    )
    return out, out_spatial_dims
class TransposedConv1d(_TransposedConv):
    """
    1D transposed convolution
    """

    nd = 1

    # 1D interface: single in/out spatial dim instead of sequences.
    __call__ = _ConvOrTransposedConv._call_nd1
class TransposedConv2d(_TransposedConv):
    """
    2D transposed convolution
    """

    nd = 2
class TransposedConv3d(_TransposedConv):
    """
    3D transposed convolution
    """

    nd = 3
def pool(
    source: Tensor,
    *,
    mode: str,
    pool_size: Union[Sequence[int], int],
    padding: str = 'valid',
    dilation_rate: Union[Sequence[int], int] = 1,
    strides: Optional[Union[Sequence[int], int]] = None,
    in_spatial_dims: Union[Sequence[Dim], Dim],
    out_spatial_dims: Optional[Union[Sequence[Dim], Dim]] = None,
    nd: Optional[int] = None,
) -> Tuple[Tensor, Sequence[Dim]]:
    """
    A generic N-D pooling layer.
    This would usually be done after a convolution for down-sampling.

    :param source:
    :param mode: "max" or "avg"
    :param pool_size: shape of the window of each reduce
    :param padding: "valid" or "same"
    :param dilation_rate:
    :param strides: in contrast to tf.nn.pool, the default (if it is None) will be set to pool_size
    :param in_spatial_dims:
    :param out_spatial_dims:
    :param nd: number of spatial dims; inferred from in_spatial_dims if not given
    :return: layer, out_spatial_dims
    """
    # Normalize in_spatial_dims to a sequence of Dim.
    if isinstance(in_spatial_dims, Dim):
        in_spatial_dims = [in_spatial_dims]
    assert isinstance(in_spatial_dims, (list, tuple))
    assert all(isinstance(d, Dim) for d in in_spatial_dims)
    if nd is None:
        nd = len(in_spatial_dims)
    else:
        assert nd == len(in_spatial_dims)
    if out_spatial_dims is not None and isinstance(out_spatial_dims, Dim):
        out_spatial_dims = [out_spatial_dims]
    # Broadcast scalar pool_size / strides over all spatial dims.
    if isinstance(pool_size, int):
        pool_size = [pool_size] * nd
    assert isinstance(pool_size, (list, tuple))
    assert len(pool_size) == nd
    if not strides:
        strides = pool_size  # default: non-overlapping windows
    elif isinstance(strides, int):
        strides = [strides] * nd
    assert isinstance(strides, (list, tuple))
    assert len(strides) == nd
    out, out_spatial_dims = source._raw_backend.pool(
        source=source,
        mode=mode,
        pool_size=pool_size,
        padding=padding,
        dilation_rate=dilation_rate,
        strides=strides,
        in_spatial_dims=in_spatial_dims,
        out_spatial_dims=out_spatial_dims,
    )
    return out, out_spatial_dims
def max_pool(
    source: Tensor,
    *,
    pool_size: Union[Sequence[int], int],
    padding: str = 'valid',
    dilation_rate: Union[Sequence[int], int] = 1,
    strides: Optional[Union[Sequence[int], int]] = None,
    in_spatial_dims: Union[Sequence[Dim], Dim],
    out_spatial_dims: Optional[Union[Sequence[Dim], Dim]] = None,
) -> Tuple[Tensor, Sequence[Dim]]:
    """max-pool"""
    return pool(
        source=source,
        mode='max',
        pool_size=pool_size,
        padding=padding,
        dilation_rate=dilation_rate,
        strides=strides,
        in_spatial_dims=in_spatial_dims,
        out_spatial_dims=out_spatial_dims,
    )
def max_pool1d(
    source: Tensor,
    *,
    pool_size: int,
    padding: str = 'valid',
    dilation_rate: int = 1,
    strides: Optional[int] = None,
    in_spatial_dim: Dim,
    out_spatial_dim: Optional[Dim] = None,
) -> Tuple[Tensor, Dim]:
    """max pool"""
    return pool1d(
        source=source,
        mode='max',
        pool_size=pool_size,
        padding=padding,
        dilation_rate=dilation_rate,
        strides=strides,
        in_spatial_dim=in_spatial_dim,
        out_spatial_dim=out_spatial_dim,
    )
def pool1d(
    source: Tensor,
    *,
    mode: str,
    pool_size: int,
    padding: str = 'valid',
    dilation_rate: int = 1,
    strides: Optional[int] = None,
    in_spatial_dim: Dim,
    out_spatial_dim: Optional[Dim] = None,
) -> Tuple[Tensor, Dim]:
    """
    1D pooling.

    :param source:
    :param mode: "max" or "avg"
    :param pool_size: shape of the window of each reduce
    :param padding: "valid" or "same"
    :param dilation_rate:
    :param strides: in contrast to tf.nn.pool, the default (if it is None) will be set to pool_size
    :param in_spatial_dim:
    :param out_spatial_dim:
    :return: layer, out_spatial_dim
    """
    assert isinstance(in_spatial_dim, Dim)
    # Delegate to the N-D pool with singleton dim lists, unpack the single out dim.
    out, (out_spatial_dim,) = pool(
        source=source,
        mode=mode,
        pool_size=pool_size,
        padding=padding,
        dilation_rate=dilation_rate,
        strides=strides,
        in_spatial_dims=[in_spatial_dim],
        out_spatial_dims=[out_spatial_dim] if out_spatial_dim is not None else None,
    )
    return out, out_spatial_dim
def pool2d(
    source: Tensor,
    *,
    mode: str,
    pool_size: Union[Sequence[int], int],
    padding: str = 'valid',
    dilation_rate: Union[Sequence[int], int] = 1,
    strides: Optional[Union[Sequence[int], int]] = None,
    in_spatial_dims: Sequence[Dim],
    out_spatial_dims: Optional[Sequence[Dim]] = None,
) -> Tuple[Tensor, Sequence[Dim]]:
    """
    2D pooling.

    :param source:
    :param mode: "max" or "avg"
    :param pool_size: shape of the window of each reduce
    :param padding: "valid" or "same"
    :param dilation_rate:
    :param strides: in contrast to tf.nn.pool, the default (if it is None) will be set to pool_size
    :param in_spatial_dims: exactly two spatial dims
    :param out_spatial_dims:
    :return: layer, out_spatial_dims
    """
    assert len(in_spatial_dims) == 2
    return pool(
        source=source,
        mode=mode,
        pool_size=pool_size,
        padding=padding,
        dilation_rate=dilation_rate,
        strides=strides,
        in_spatial_dims=in_spatial_dims,
        out_spatial_dims=out_spatial_dims,
    )
def pool3d(
    source: Tensor,
    *,
    mode: str,
    pool_size: Union[Sequence[int], int],
    padding: str = 'valid',
    dilation_rate: Union[Sequence[int], int] = 1,
    strides: Optional[Union[Sequence[int], int]] = None,
    in_spatial_dims: Sequence[Dim],
    out_spatial_dims: Optional[Sequence[Dim]] = None,
) -> Tuple[Tensor, Sequence[Dim]]:
    """
    3D pooling.

    :param source:
    :param mode: "max" or "avg"
    :param pool_size: shape of the window of each reduce
    :param padding: "valid" or "same"
    :param dilation_rate:
    :param strides: in contrast to tf.nn.pool, the default (if it is None) will be set to pool_size
    :param in_spatial_dims: exactly three spatial dims
    :param out_spatial_dims:
    :return: layer, out_spatial_dims
    """
    assert len(in_spatial_dims) == 3
    return pool(
        source=source,
        mode=mode,
        pool_size=pool_size,
        padding=padding,
        dilation_rate=dilation_rate,
        strides=strides,
        in_spatial_dims=in_spatial_dims,
        out_spatial_dims=out_spatial_dims,
    )
def make_conv_out_spatial_dims(
    in_spatial_dims: Sequence[Dim],
    *,
    filter_size: Union[Sequence[Union[int, Dim]], int, Dim],
    padding: str,
    strides: Union[Sequence[int], int] = 1,
    dilation_rate: Union[Sequence[int], int] = 1,
    description_prefix: Optional[str] = None,
) -> Sequence[Dim]:
    """create out spatial dims from in spatial dims"""
    nd = len(in_spatial_dims)
    # Normalize all scalar args to per-spatial-dim lists; filter sizes become plain ints.
    if isinstance(filter_size, (int, Dim)):
        filter_size = [filter_size] * nd
    filter_size = [d.dimension if isinstance(d, Dim) else d for d in filter_size]
    assert all(isinstance(s, int) for s in filter_size)
    if isinstance(strides, int):
        strides = [strides] * nd
    if isinstance(dilation_rate, int):
        dilation_rate = [dilation_rate] * nd
    assert nd == len(in_spatial_dims) == len(filter_size) == len(strides) == len(dilation_rate)
    assert padding.lower() in ('valid', 'same')
    out_spatial_dims = []
    for i, in_spatial_dim in enumerate(in_spatial_dims):
        # Identity cases: no filtering/striding effect on this axis.
        if filter_size[i] == strides[i] == 1 or (strides[i] == 1 and padding.lower() == 'same'):
            out_spatial_dims.append(in_spatial_dim)
            continue
        out_spatial_dim = _calc_out_dim(
            in_dim=in_spatial_dim,
            filter_size=filter_size[i],
            stride=strides[i],
            dilation_rate=dilation_rate[i],
            padding=padding,
        )
        assert isinstance(out_spatial_dim, Dim)
        if description_prefix and out_spatial_dim != in_spatial_dim:
            out_spatial_dim.name = f'{description_prefix}:spatial{i}'
        if in_spatial_dim.dyn_size_ext and not out_spatial_dim.dyn_size_ext:
            # Also derive the dynamic (per-seq) sizes when the input axis is dynamic.
            out_spatial_dim.dyn_size_ext = _calc_out_dim(
                in_dim=in_spatial_dim.dyn_size_ext,
                filter_size=filter_size[i],
                stride=strides[i],
                dilation_rate=dilation_rate[i],
                padding=padding,
            )
        out_spatial_dims.append(out_spatial_dim)
    return out_spatial_dims
def _calc_out_dim(in_dim, filter_size, stride, padding, dilation_rate=1):
    """
    Copied and adapted from TF ConvLayer.calc_out_dim.

    :param T|int|Tensor|torch.Tensor|tensorflow.Tensor|Dim in_dim: dimension in some axis
    :param int filter_size: e.g. 2, for the corresponding axis
    :param int stride: e.g. 1, for the corresponding axis
    :param str padding: "valid" or "same"
    :param int dilation_rate: e.g. 1
    :return: the output dimension
    :rtype: T
    """

    def ceildiv(a, b):
        """
        :param T|int|Tensor|torch.Tensor|tensorflow.Tensor a:
        :param T|int|Tensor|torch.Tensor|tensorflow.Tensor b:
        :rtype: T
        """
        if isinstance(b, int) and b == 1:
            return a
        if isinstance(a, Tensor):
            return rf.ceil_divide(a, b)
        # integer ceil-division via negated floor-division
        return -(-a // b)

    padding = padding.upper()
    if padding == 'SAME':
        if isinstance(in_dim, Dim):
            return in_dim.ceildiv_right(stride)
        return ceildiv(in_dim, stride)
    if padding == 'VALID':
        if isinstance(in_dim, Dim):
            # Split the dilated filter extent between left and right trimming.
            filter_left_dilated = (filter_size - 1) * dilation_rate // 2
            filter_right_dilated = (filter_size - 1) * dilation_rate - filter_left_dilated
            valid_part = in_dim.sub_left(filter_left_dilated).sub_right(filter_right_dilated)
            return valid_part.ceildiv_right(stride)
        return ceildiv(in_dim - (filter_size - 1) * dilation_rate, stride)
    raise Exception('invalid padding %r' % padding)
class TransformerDecoder(rf.Module):
    """
    Represents Transformer decoder architecture
    """

    def __init__(
        self,
        encoder_dim: Dim,
        vocab_dim: Dim,
        model_dim: Dim = Dim(512, name='transformer-dec-default-model-dim'),
        *,
        num_layers: int,
        ff_dim: Dim = NotSpecified,
        ff_activation: Callable[[Tensor], Tensor] = rf.relu,
        dropout: float = 0.1,
        num_heads: int = 8,
        att_dropout: float = 0.1,
        decoder_layer: Optional[Union[TransformerDecoderLayer, rf.Module, type, Any]] = None,
        decoder_layer_opts: Optional[Dict[str, Any]] = None,
        share_embedding: bool = False,
        sequential=rf.Sequential,
    ):
        """
        :param encoder_dim:
        :param vocab_dim:
        :param model_dim: the output feature dimension
        :param num_layers: the number of encoder layers
        :param ff_dim: the dimension of feed-forward layers. 2048 originally, or 4 times out_dim
        :param ff_activation: activation function for feed-forward network
        :param dropout: the dropout value for the FF block
        :param num_heads: the number of attention heads
        :param att_dropout: attention dropout value
        :param decoder_layer: an instance of :class:`TransformerDecoderLayer` or similar
        :param decoder_layer_opts: options for the encoder layer
        :param share_embedding: tie the output projection weights to the input embedding
        :param sequential: container type for the layer stack
        """
        super().__init__()
        self.encoder_dim = encoder_dim
        self.vocab_dim = vocab_dim
        self.model_dim = model_dim
        self.input_embedding = rf.Embedding(vocab_dim, model_dim)
        self.pos_enc = functools.partial(
            rf.sinusoidal_positional_encoding, feat_dim=model_dim, dtype=self.input_embedding.weight.dtype
        )
        if not decoder_layer or isinstance(decoder_layer, type):
            # Build the prototype layer from defaults plus user-supplied opts.
            layer_opts = dict(
                encoder_dim=encoder_dim,
                out_dim=model_dim,
                ff_dim=ff_dim,
                ff_activation=ff_activation,
                dropout=dropout,
                num_heads=num_heads,
                att_dropout=att_dropout,
            )
            if decoder_layer_opts:
                layer_opts.update(decoder_layer_opts)
            if not decoder_layer:
                decoder_layer = TransformerDecoderLayer(**layer_opts)
            elif isinstance(decoder_layer, type):
                decoder_layer = decoder_layer(**layer_opts)
            else:
                raise TypeError(f'unexpected decoder_layer {decoder_layer!r}')
        # The prototype layer is deep-copied per layer to get independent parameters.
        self.layers = sequential(_copy.deepcopy(decoder_layer) for _ in range(num_layers))
        self.final_layer_norm = rf.LayerNorm(model_dim)
        self.logits = rf.Linear(model_dim, vocab_dim, with_bias=False)
        if share_embedding:
            self.logits.weight = self.input_embedding.weight

    def default_initial_state(self, *, batch_dims: Sequence[Dim]) -> rf.State:
        """default initial state"""
        state = rf.State({k: v.default_initial_state(batch_dims=batch_dims) for k, v in self.layers.items()})
        state.pos = rf.zeros((), dtype='int32', device='cpu')
        return state

    def transform_encoder(self, encoder: Tensor, *, axis: Dim) -> rf.State:
        """
        Transform encoder output.
        Note that the Transformer decoder usually expects that layer-norm was applied already on the encoder output.
        """
        return rf.State({k: v.transform_encoder(encoder, axis=axis) for k, v in self.layers.items()})

    def __call__(
        self,
        source: Tensor,
        *,
        spatial_dim: Dim,
        state: rf.State,
        encoder: rf.State,
        collected_outputs: Optional[Dict[str, Tensor]] = None,
    ) -> Tuple[Tensor, rf.State]:
        """
        forward, single step or whole sequence.

        :param source: labels
        :param spatial_dim: single_step_dim or spatial dim of source
        :param state: e.g. via :func:`default_initial_state`
        :param encoder: via :func:`transform_encoder`
        :param collected_outputs: if given, each layer output is stored here under the layer name
        :return: logits, new state
        """
        new_state = rf.State()
        decoded = self.input_embedding(source)
        decoded = decoded + self.pos_enc(spatial_dim=spatial_dim, offset=state.pos)
        # Advance the position counter by 1 (single step) or by the full sequence length.
        new_state.pos = state.pos + (1 if spatial_dim == single_step_dim else spatial_dim.get_size_tensor())
        for layer_name, layer in self.layers.items():
            layer: TransformerDecoderLayer
            decoded, new_state[layer_name] = layer(
                decoded, spatial_dim=spatial_dim, state=state[layer_name], encoder=encoder[layer_name]
            )
            if collected_outputs is not None:
                collected_outputs[layer_name] = decoded
        decoded = self.final_layer_norm(decoded)
        logits = self.logits(decoded)
        return logits, new_state
class TransformerDecoderLayer(rf.Module):
    """
    A single Transformer decoder layer:
    causal self-attention, cross-attention on the encoder, and a feed-forward block,
    each with pre-layer-norm, dropout and a residual connection.
    """

    def __init__(
        self,
        encoder_dim: Dim,
        out_dim: Dim = Dim(512, name='transformer-dec-default-out-dim'),
        *,
        ff_dim: Dim = NotSpecified,
        ff_activation: Callable[[Tensor], Tensor] = rf.relu,
        dropout: float = 0.1,
        num_heads: int = 8,
        self_att: Optional[Union[rf.CausalSelfAttention, rf.RelPosCausalSelfAttention, rf.Module, type, Any]] = None,
        self_att_opts: Optional[Dict[str, Any]] = None,
        att_dropout: float = 0.1,
    ):
        """
        :param encoder_dim:
        :param out_dim: the output feature dimension
        :param ff_dim: the dimension of feed-forward layers. 2048 originally, or 4 times out_dim
        :param ff_activation: activation function for feed-forward network
        :param dropout: the dropout value for the FF block
        :param num_heads: the number of attention heads
        :param self_att: the self-attention layer. RelPosSelfAttention originally and default
        :param self_att_opts: options for the self-attention layer, for :class:`nn.RelPosSelfAttention`
        :param att_dropout: attention dropout value
        """
        super().__init__()
        self.encoder_dim = encoder_dim
        self.dropout = dropout
        self.dropout_broadcast = rf.dropout_broadcast_default()
        self.out_dim = out_dim
        if ff_dim is None:
            # NOTE(review): only an explicit None is resolved here; the default NotSpecified
            # is passed through and resolved inside FeedForward itself (to out_dim * 4).
            ff_dim = 4 * out_dim
        self.ff = FeedForward(out_dim=out_dim, ff_dim=ff_dim, dropout=dropout, activation=ff_activation)
        self.ff_layer_norm = rf.LayerNorm(out_dim)
        if self_att is None or isinstance(self_att, type):
            att_opts = dict(
                in_dim=out_dim,
                proj_dim=out_dim,
                key_dim_total=out_dim,
                value_dim_total=out_dim,
                num_heads=num_heads,
                att_dropout=att_dropout,
            )
            if self_att_opts:
                att_opts.update(self_att_opts)
            if self_att is None:
                self.self_att = rf.CausalSelfAttention(**att_opts)
            else:
                self.self_att = self_att(**att_opts)
        else:
            # Already-instantiated attention module: use as-is.
            self.self_att = self_att
        self.self_att_layer_norm = rf.LayerNorm(out_dim)
        self.cross_att = rf.CrossAttention(
            encoder_dim=self.encoder_dim,
            query_in_dim=out_dim,
            proj_dim=out_dim,
            key_dim_total=out_dim,
            value_dim_total=out_dim,
            num_heads=num_heads,
            att_dropout=att_dropout,
        )
        self.cross_att_layer_norm = rf.LayerNorm(out_dim)

    def default_initial_state(self, *, batch_dims: Sequence[Dim]) -> rf.State:
        """default initial state"""
        return rf.State(self_att=self.self_att.default_initial_state(batch_dims=batch_dims))

    def transform_encoder(self, encoder: Tensor, *, axis: Dim) -> rf.State:
        """Transform the encoder output."""
        return rf.State(cross_att=self.cross_att.transform_encoder(encoder, axis=axis))

    def __call__(self, inp: Tensor, *, spatial_dim: Dim, state: rf.State, encoder: rf.State) -> Tuple[Tensor, rf.State]:
        """forward"""
        new_state = rf.State()
        # Self-attention sub-block (pre-norm, dropout, residual).
        sa_in = self.self_att_layer_norm(inp)
        sa, new_state.self_att = self.self_att(sa_in, axis=spatial_dim, state=state.self_att)
        sa = rf.dropout(sa, self.dropout, axis=self.dropout_broadcast and self.out_dim)
        sa_res = sa + inp
        # Cross-attention sub-block.
        ca_in = self.cross_att_layer_norm(sa_res)
        ca = self.cross_att(ca_in, encoder.cross_att)
        ca = rf.dropout(ca, self.dropout, axis=self.dropout_broadcast and self.out_dim)
        ca_res = ca + sa_res
        # Feed-forward sub-block.
        ff_in = self.ff_layer_norm(ca_res)
        ff = self.ff(ff_in)
        ff = rf.dropout(ff, self.dropout, axis=self.dropout_broadcast and self.out_dim)
        return ff + ca_res, new_state
class FeedForward(rf.Module):
    """
    Position-wise feed-forward block:
    FF -> Activation -> Dropout -> FF
    """

    def __init__(
        self,
        out_dim: Dim,
        *,
        ff_dim: Optional[Dim] = NotSpecified,
        dropout: float,
        activation: Callable[[Tensor], Tensor],
    ):
        """
        :param out_dim: output feature dimension
        :param ff_dim: dimension of the feed-forward layers; defaults to out_dim * 4
        :param dropout: dropout value
        :param activation: activation function
        """
        super().__init__()
        if ff_dim is NotSpecified:
            ff_dim = out_dim * 4
        self.out_dim = out_dim
        self.dropout = dropout
        self.dropout_broadcast = rf.dropout_broadcast_default()
        self.activation = activation
        self.linear_ff = rf.Linear(out_dim, ff_dim)
        self.linear_out = rf.Linear(ff_dim, out_dim)

    def __call__(self, inp: Tensor) -> Tensor:
        """forward"""
        hidden = self.linear_ff(inp)
        hidden = self.activation(hidden)
        hidden = rf.dropout(hidden, self.dropout, axis=self.dropout_broadcast and self.linear_ff.out_dim)
        return self.linear_out(hidden)
def copy_to_device(x: Tensor, device: Optional[str] = None) -> Tensor:
    """
    Copy tensor to device.

    :param x: tensor
    :param device: target device; falls back to :func:`get_default_device`
    :return: tensor on device (or ``x`` unchanged if no move is needed/possible)
    """
    device = device or get_default_device()
    if not device:
        return x
    if x.raw_tensor is None:
        # Nothing materialized yet, nothing to move.
        return x
    if x.device == device:
        return x
    return x._raw_backend.copy_to_device(x, device)
def get_default_device() -> Optional[str]:
    """
    :return: default device, where to put new tensors (via random number generators, constant, range_over_dim, etc)
    """
    return _default_device
@contextmanager
def set_default_device_ctx(device: Optional[str]):
    """
    Context manager which sets the default device, restoring the previous one on exit.

    :param device: see :func:`get_default_device`
    """
    global _default_device
    prev_device = _default_device
    try:
        _default_device = device
        yield
    finally:
        _default_device = prev_device
def range_over_dim(dim: Dim, *, dtype: Optional[str] = None, device: Optional[str] = None) -> Tensor[T]:
    """
    :param dim:
    :param dtype:
    :param device:
    :return: tensor with shape [dim]
    """
    # Prefer the backend of the dynamic size tensor, if the dim is dynamic.
    backend = (
        get_backend_by_tensor(dim.dyn_size_ext, fallback=global_backend) if dim.dyn_size_ext else global_backend
    )
    return backend.range_over_dim(dim, dtype=dtype, device=device)
def range_over_dim_strided(
    dim: Dim,
    *,
    stride: Union[int, Tensor],
    out_dim: Optional[Dim] = None,
    dtype: Optional[str] = None,
    device: Optional[str] = None,
) -> Tuple[Tensor[T], Dim]:
    """
    :param dim:
    :param stride:
    :param out_dim:
    :param dtype:
    :param device:
    :return: tensor with shape [out_dim] containing 0, stride, 2*stride, ...; and out_dim
    """
    if out_dim is None:
        out_dim = dim.ceildiv_right(stride)
    indices = rf.range_over_dim(out_dim, dtype=dtype, device=device)
    return indices * stride, out_dim
def range_over_merged_dims(
    dims: Sequence[Dim], *, dtype: Optional[str] = None, device: Optional[str] = None
) -> Tensor[T]:
    """
    This is if you want to index into a merged dim.
    Related: :func:`rf.merge_dims`.

    :param dims:
    :param dtype:
    :param device:
    :return: tensor with shape [dim_0, ..., dim_n] -> sparse_dim = merged_dim, where merged_dim = dim_0 * ... * dim_n
    """
    assert len(dims) >= 1
    merged_dim = dims[0]
    for extra_dim in dims[1:]:
        merged_dim *= extra_dim
    indices = rf.range_over_dim(merged_dim, dtype=dtype, device=device)
    if len(dims) > 1:
        # Reshape the flat range back into the original axes.
        indices = rf.split_dims(indices, axis=merged_dim, dims=dims)
    return indices
def replace_dim(source: Tensor, *, in_dim: Dim, out_dim: Optional[Dim] = None) -> Tuple[Tensor, Dim]:
    """
    Also see: :func:`rf.merge_dims`, :func:`rf.split_dims`.

    :param source:
    :param in_dim:
    :param out_dim: created if not given
    :return: source with in_dim replaced by out_dim, and new out_dim.
        this does not work for the sparse_dim. see :func:`set_sparse_dim` for that case.
    """
    if not out_dim:
        out_dim = in_dim.copy(same_as_self=False, description='new-dim')
    result = source._raw_backend.replace_dim(source, in_dim=in_dim, out_dim=out_dim)
    return result, out_dim
def dim_match_priority_when_needed(dim: Dim, *other_dims: Dim) -> Dim:
    """
    :return: maybe copy of dim with higher match_priority if needed to distinguish from other_dims

    Why or when is this needed?

    For activation values, this should never be needed,
    and all dims should be unique.

    In case of self-attention, the standard way is to create a separate distinct dim
    to perform the attention reduction over.
    See :class:`SelfAttention`.

    However, in case of weight matrices, it is not unusual to have the same dim for both the input and output,
    so a square weight matrix.
    When reduction is performed in :func:`matmul`, we want to match the input feature dim
    to the dim in the weight matrix with higher match priority.

    So :func:`dim_match_priority_when_needed` would be applied on the input feature dim.

    https://github.com/rwth-i6/returnn/pull/871
    https://github.com/rwth-i6/returnn_common/issues/17#issuecomment-997463222
    """
    if dim not in other_dims:
        return dim
    return dim.copy(match_priority=1)
def num_elements_of_shape(dims: Sequence[Dim]) -> Union[int, Tensor]:
    """
    :param dims:
    :return: number of elements of a tensor of shape ``dims``, properly considering masking:
        for dynamic dims, the actual (masked) sequence lengths are summed up,
        so the result can be a scalar :class:`Tensor` instead of an int.
    """
    # Fast path: all dims static -> plain integer product.
    if all((dim.is_static() for dim in dims)):
        n = 1
        for dim in dims:
            n *= dim.dimension
        return n
    n = 1
    dims = list(dims)
    # Process dims with the most complex (highest-rank) dyn_size_ext first,
    # so that the dims their sizes depend on are still in the list below.
    dims.sort(key=(lambda dim: ((- dim.dyn_size_ext.batch_ndim) if dim.dyn_size_ext else 0)))
    while dims:
        dim = dims.pop(0)
        if dim.is_static():
            n *= dim.dimension
            continue
        # Dynamic dim: the dims its size tensor depends on are consumed here
        # (removed from the list), since summing the size tensor already covers them.
        for dim_ in dim.dyn_size_ext.dims:
            assert (dim_ in dims)
            # NOTE(review): assumes those outer dims themselves need no masking — TODO confirm
            assert (not dim_.need_masking())
            dims.remove(dim_)
        # Sum of the actual sequence lengths over all its axes.
        n_ = rf.reduce_sum(dim.dyn_size_ext, axis=dim.dyn_size_ext.dims)
        n *= n_
    return n
def dropout(
    source: Tensor,
    drop_prob: Union[float, Tensor],
    *,
    axis: Optional[Union[Dim, Sequence[Dim], bool]] = None,
    on_forward: bool = False,
) -> Tensor:
    """
    Applies dropout.

    Dropout will only be applied during training (unless you set on_forward=True).
    When dropout is applied, the output will be scaled by 1/dropout.

    :param source:
    :param drop_prob: 0.0 means to apply no dropout. 100% would mask everything.
        For every value in the tensor, the probability of it being dropped
        is drawn independently given this probability.
        The broadcasted axes are those not specified in ``axis``.
    :param axis: axis to apply dropout on. multiple axes can be specified.
        This defines the set of axes where the dropout mask is not broadcasted to.
        If None (default), it will not broadcast on any axis.
        False is the same as None, and allows to write ``axis=use_dropout_broadcast and ...feature_dim``.
        (RETURNN also has the ``noise_shape`` option but the ``axis`` option provides the same functionality.)
    :param on_forward: apply dropout during inference and training (so just always).
        otherwise only during training.
    """
    keep = 1.0 - drop_prob
    # Resolve which dims carry independent noise (the mask broadcasts over all others).
    if axis is None or axis is False:
        mask_dims = source.dims
    elif axis is True:
        raise ValueError('dropout axis=True is not valid')
    elif isinstance(axis, Dim):
        mask_dims = (axis,)
    else:
        mask_dims = axis
    if not set(mask_dims).issubset(source.dims):
        raise ValueError(f'dropout axis {axis} not in source {source}')
    if isinstance(keep, (float, int)):
        if not (0 < keep <= 1):
            raise ValueError('keep_prob must be a scalar tensor or a float in the range (0, 1], got %g' % keep)
        if keep == 1:
            # No dropout at all -> identity.
            return source
    if on_forward:
        return _dropout(source, keep, noise_dims=mask_dims)
    return rf.cond(
        pred=rf.get_run_ctx().train_flag,
        true_fn=lambda: _dropout(source, keep, noise_dims=mask_dims),
        false_fn=lambda: source,
    )
def _dropout(
    x: Tensor,
    keep_prob: Union[float, Tensor],
    noise_dims: Sequence[Dim],
    seed=None,
    apply_correction_factor=True,
) -> Tensor:
    """
    Computes dropout.

    Adopted from tf_util.dropout.
    Like :func:`tf.nn.dropout` but avoid :func:`tf.div` if possible.

    Note that in tf_util.dropout, we had special logic for recurrent loops:
    the mask would be created outside the loop and then reused for every frame.
    We cannot really support such logic for eager-based frameworks.

    :param x:
    :param keep_prob:
    :param noise_dims: other dims would broadcast
    :param seed: passed on to :func:`random` for the mask
    :param bool apply_correction_factor: scale kept values by 1/keep_prob
    """
    noise = rf.random_uniform(dims=noise_dims, seed=seed, dtype=x.dtype, minval=0.0, maxval=1.0)
    # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0.
    mask = rf.floor(keep_prob + noise)
    if apply_correction_factor:
        mask *= 1.0 / keep_prob
    return x * mask
def dropout_broadcast_default() -> bool:
    """
    Check the global RETURNN config
    whether we should broadcast on non-related dropout dimensions.

    Historically in RETURNN, when we did dropout in the feature dimension,
    we broadcasted the dropout mask over the other dimensions (e.g. time and batch).

    This function provides an easy global-config-controllable way to control this,
    via the option ``rf_dropout_broadcast``.

    The default for now: keep same as historical RETURNN,
    unless we find that this is really not a good idea.
    Then we might change the default via a new behavior version.

    Also see the option ``rf_att_dropout_broadcast``, which does the same for attention dropout.
    (The default for attention dropout broadcasting was already changed with behavior version 19.)

    :return: whether broadcasting should be used for dropout.
        Note that this does not actually affect :func:`dropout`.
        Any user of :func:`dropout` should check this explicitly.
    """
    from returnn.config import get_global_config

    config = get_global_config(raise_exception=False)
    if not config:
        return True  # historic RETURNN default: broadcast
    return config.bool('rf_dropout_broadcast', True)
def get_default_float_dtype() -> str:
    """
    https://data-apis.org/array-api/latest/API_specification/data_types.html#default-data-types

    :return: default dtype for float tensors (the module-level ``_default_float_dtype``)
    """
    return _default_float_dtype
def get_default_int_dtype() -> str:
    """
    https://data-apis.org/array-api/latest/API_specification/data_types.html#default-data-types

    :return: default dtype for int tensors (the module-level ``_default_int_dtype``)
    """
    return _default_int_dtype
def get_default_array_index_dtype() -> str:
    """
    https://data-apis.org/array-api/latest/API_specification/data_types.html#default-data-types

    :return: default dtype for array indices - currently just the same as :func:`get_default_int_dtype`
    """
    return get_default_int_dtype()
def is_float_dtype(dtype: str) -> bool:
    """
    :param dtype: dtype name, e.g. "float32", "bfloat16", "int64"
    :return: whether the dtype is a floating-point type, e.g. it supports backprop etc
    """
    # "bfloat16" does not start with "float" but is a floating-point type as well,
    # so a plain startswith("float") check would wrongly return False for it.
    return dtype.startswith('float') or dtype.startswith('bfloat')
class IEncoder(rf.Module, ABC):
    """
    Generic encoder interface.

    The encoder is a function x -> y.
    The input can potentially be sparse or dense.
    The output is dense with feature dim `out_dim`.
    """

    # Output feature dimension; must be set by implementations.
    out_dim: Dim

    def __call__(self, source: Tensor) -> Tensor:
        """
        Encode the input

        :param source: input (sparse or dense)
        :return: dense output with feature dim ``out_dim``
        """
        raise NotImplementedError
class ISeqFramewiseEncoder(rf.Module, ABC):
    """
    This specializes IEncoder in that it operates on a sequence.
    The output sequence length here is the same as the input.
    """

    # Output feature dimension; must be set by implementations.
    out_dim: Dim

    def __call__(self, source: Tensor, *, spatial_dim: Dim) -> Tensor:
        """
        :param source: input sequence
        :param spatial_dim: sequence axis; unchanged in the output
        :return: encoded sequence with feature dim ``out_dim``
        """
        raise NotImplementedError
class ISeqDownsamplingEncoder(rf.Module, ABC):
    """
    This is more specific than IEncoder in that it operates on a sequence.
    The output sequence length here is shorter than the input.

    This is a common scenario for speech recognition
    where the input might be on 10ms/frame
    and the output might cover 30ms/frame or 60ms/frame or so.
    """

    # Output feature dimension; must be set by implementations.
    out_dim: Dim
    # Approximate ratio input_len / output_len; must be set by implementations.
    downsample_factor: Union[int, float]

    def __call__(self, source: Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
        """
        :param source: input sequence
        :param in_spatial_dim: input sequence axis
        :return: (encoded sequence with feature dim ``out_dim``, downsampled spatial dim)
        """
        raise NotImplementedError
class ConformerPositionwiseFeedForward(rf.Module):
    """
    Conformer position-wise feed-forward block:
    Linear (expand) -> activation -> dropout -> Linear (project back).
    """

    def __init__(self, out_dim: Dim, *, ff_dim: Dim, dropout: float, activation: Callable[[Tensor], Tensor]):
        """
        :param out_dim: input and output feature dimension
        :param ff_dim: hidden dimension of the feed-forward expansion
        :param dropout: dropout probability applied after the activation
        :param activation: nonlinearity between the two linear layers
        """
        super().__init__()
        self.out_dim = out_dim
        self.dropout = dropout
        self.dropout_broadcast = rf.dropout_broadcast_default()
        self.activation = activation
        self.linear_ff = rf.Linear(out_dim, ff_dim)
        self.linear_out = rf.Linear(ff_dim, out_dim)

    def __call__(self, inp: Tensor) -> Tensor:
        """Apply the feed-forward block."""
        hidden = self.activation(self.linear_ff(inp))
        hidden = rf.dropout(hidden, self.dropout, axis=self.dropout_broadcast and self.linear_ff.out_dim)
        return self.linear_out(hidden)
class ConformerConvBlock(rf.Module):
    """
    Conformer convolution block:
    pointwise FF -> GLU -> depthwise conv -> norm (BN originally) -> Swish -> pointwise FF.
    """

    def __init__(self, out_dim: Dim, *, kernel_size: int, norm: Union[rf.BatchNorm, Any]):
        """
        :param out_dim: output feature dimension
        :param kernel_size: kernel size of the depthwise convolution
        :param norm: normalization applied after the depthwise conv (batch norm originally)
        """
        super().__init__()
        self.out_dim = out_dim
        self.positionwise_conv1 = rf.Linear(out_dim, 2 * out_dim)
        self.depthwise_conv = rf.Conv1d(
            out_dim, out_dim, filter_size=kernel_size, groups=out_dim.dimension, padding='same'
        )
        self.positionwise_conv2 = rf.Linear(out_dim, out_dim)
        self.norm = norm

    def __call__(self, inp: Tensor, *, spatial_dim: Dim) -> Tensor:
        """Apply the convolution block along ``spatial_dim``."""
        gated, _ = rf.gating(self.positionwise_conv1(inp))
        convolved, _ = self.depthwise_conv(gated, in_spatial_dim=spatial_dim)
        activated = rf.swish(self.norm(convolved))
        return self.positionwise_conv2(activated)
class ConformerConvSubsample(ISeqDownsamplingEncoder):
    """
    Conv 2D block with optional max-pooling or striding.

    References:

      https://github.com/espnet/espnet/blob/4138010fb66ad27a43e8bee48a4932829a0847ae/espnet/nets/pytorch_backend/transformer/subsampling.py#L162
      https://github.com/rwth-i6/returnn-experiments/blob/5852e21f44d5450909dee29d89020f1b8d36aa68/2022-swb-conformer-hybrid-sat/table_1_and_2/reduced_dim.config#L226
      (actually the latter is different...)

    To get the ESPnet case, for example Conv2dSubsampling6, use these options
    (out_dim is the model dim of the encoder)::

        out_dims=[out_dim, out_dim],  # ESPnet standard, but this might be too large
        filter_sizes=[3, 5],
        strides=[2, 3],
        padding="valid",
    """

    def __init__(
        self,
        in_dim: Dim,
        *,
        out_dims: List[Dim],
        filter_sizes: List[Union[int, Tuple[int, int]]],
        strides: Optional[List[Union[int, Tuple[int, int]]]] = None,
        pool_sizes: Optional[List[Tuple[int, int]]] = None,
        activation: Callable[[Tensor], Tensor] = rf.relu,
        padding: str = 'same',
    ):
        """
        :param in_dim: input feature dimension (treated as a second spatial axis for the 2D convs)
        :param out_dims: the number of output channels per conv layer;
            the last element determines the output feature dimension
        :param filter_sizes: a list of filter sizes for the conv layers
        :param strides: a list of strides for the conv layers (default: all 1)
        :param pool_sizes: a list of pooling factors applied after each conv layer
        :param activation: the activation function
        :param padding: 'same' or 'valid'
        """
        super().__init__()
        self.pool_sizes = pool_sizes
        self.activation = activation
        self.conv_layers: rf.ModuleList[rf.Conv2d] = rf.ModuleList()
        if (strides is None):
            strides = ([1] * len(out_dims))
        assert (len(out_dims) == len(filter_sizes) == len(strides) > 0)
        # The 2D convs need a channel axis; the input initially has only one channel.
        self._dummy_in_dim = Dim(1, name='dummy-input-feature-dim')
        self.in_dim = in_dim
        prev_out_dim = self._dummy_in_dim
        # Track how the feature axis (used as second spatial axis) shrinks through convs/pools.
        second_spatial_dim = in_dim
        for (i, (filter_size, stride, out_dim)) in enumerate(zip(filter_sizes, strides, out_dims)):
            conv = rf.Conv2d(prev_out_dim, out_dim, filter_size=filter_size, strides=stride, padding=padding)
            self.conv_layers.append(conv)
            # Index [1]: only the second spatial axis (the former feature axis) matters here.
            (second_spatial_dim,) = rf.make_conv_out_spatial_dims(
                [second_spatial_dim], filter_size=conv.filter_size[1], strides=conv.strides[1], padding=padding
            )
            if (self.pool_sizes and (i < len(self.pool_sizes))):
                (second_spatial_dim,) = rf.make_conv_out_spatial_dims(
                    [second_spatial_dim],
                    filter_size=self.pool_sizes[i][1],
                    strides=self.pool_sizes[i][1],
                    padding='same',
                )
            prev_out_dim = out_dim
        self._final_second_spatial_dim = second_spatial_dim
        # Output feature dim = (reduced feature axis) * (last conv channels), merged in __call__.
        self.out_dim = (second_spatial_dim * prev_out_dim)

    def __call__(self, source: Tensor, *, in_spatial_dim: Dim) -> Tuple[Tensor, Dim]:
        """
        :param source: [..., in_spatial_dim, in_dim]
        :param in_spatial_dim: time axis
        :return: (output [..., out_spatial_dim, out_dim], downsampled out_spatial_dim)
        """
        assert (self.in_dim in source.dims)
        # Treat (time, feature) as the two spatial axes for the 2D convolution.
        in_spatial_dims = [in_spatial_dim, self.in_dim]
        in_dim = self._dummy_in_dim
        x = rf.expand_dim(source, dim=in_dim)
        for (i, conv_layer) in enumerate(self.conv_layers):
            (x, in_spatial_dims) = conv_layer(x, in_spatial_dims=in_spatial_dims)
            in_dim = conv_layer.out_dim
            x = self.activation(x)
            if (self.pool_sizes and (i < len(self.pool_sizes))):
                (x, in_spatial_dims) = rf.pool2d(
                    x, in_spatial_dims=in_spatial_dims, pool_size=self.pool_sizes[i], padding='same', mode='max'
                )
        # Rename the reduced second spatial axis to the dim precomputed in __init__,
        # then merge it with the channels into the final feature dim.
        (x, in_spatial_dims[(- 1)]) = rf.replace_dim(
            x, out_dim=self._final_second_spatial_dim, in_dim=in_spatial_dims[(- 1)]
        )
        (out, _) = rf.merge_dims(x, dims=[self._final_second_spatial_dim, in_dim])
        return (out, in_spatial_dims[0])
class ConformerEncoderLayer(rf.Module):
    """
    Represents one Conformer block:
    half-step FFN -> self-attention -> convolution block -> half-step FFN -> final layer norm,
    each sub-block with pre-layer-norm, dropout and a residual connection.
    """

    def __init__(
        self,
        out_dim: Dim = Dim(512, name='conformer-enc-default-out-dim'),
        *,
        ff_dim: Dim = NotSpecified,
        ff_activation: Callable[[Tensor], Tensor] = rf.swish,
        dropout: float = 0.1,
        conv_kernel_size: int = 32,
        conv_norm: Union[rf.BatchNorm, type, Any] = NotSpecified,
        conv_norm_opts: Optional[Dict[str, Any]] = None,
        num_heads: int = 4,
        self_att: Optional[Union[rf.RelPosSelfAttention, rf.Module, type, Any]] = None,
        self_att_opts: Optional[Dict[str, Any]] = None,
        att_dropout: float = 0.1,
    ):
        """
        :param out_dim: the output feature dimension
        :param ff_dim: the dimension of feed-forward layers. 2048 originally, or 4 times out_dim
        :param ff_activation: activation function for feed-forward network
        :param dropout: the dropout value for the FF block
        :param conv_kernel_size: the kernel size of depthwise convolution in the conv block
        :param conv_norm: used for the conv block. Batch norm originally
        :param conv_norm_opts: for nn.BatchNorm or other conv_norm type.
            In case of nn.BatchNorm, uses use_mask=False by default.
            use_mask means whether to properly mask the spatial dim in batch norm.
            Most existing implementations don't do this. Except of RETURNN.
            It's faster when you don't do this.
        :param num_heads: the number of attention heads
        :param self_att: the self-attention layer. RelPosSelfAttention originally and default
        :param self_att_opts: options for the self-attention layer, for :class:`nn.RelPosSelfAttention`
        :param att_dropout: attention dropout value
        """
        super().__init__()
        self.dropout = dropout
        self.dropout_broadcast = rf.dropout_broadcast_default()
        self.out_dim = out_dim
        # Bug fix: the default value of ff_dim is NotSpecified (not None),
        # so the previous `if ff_dim is None:` check never resolved the default,
        # and NotSpecified would have been passed through to the FF blocks.
        # Accept both sentinels (backward compatible).
        if ff_dim is None or ff_dim is NotSpecified:
            ff_dim = 4 * out_dim
        self.ffn1 = ConformerPositionwiseFeedForward(
            out_dim=out_dim, ff_dim=ff_dim, dropout=dropout, activation=ff_activation
        )
        self.ffn1_layer_norm = rf.LayerNorm(out_dim)
        self.ffn2 = ConformerPositionwiseFeedForward(
            out_dim=out_dim, ff_dim=ff_dim, dropout=dropout, activation=ff_activation
        )
        self.ffn2_layer_norm = rf.LayerNorm(out_dim)
        if conv_norm is NotSpecified or conv_norm is rf.BatchNorm:
            conv_norm_opts = conv_norm_opts.copy() if conv_norm_opts else {}
            conv_norm_opts.setdefault('use_mask', False)
            conv_norm = rf.BatchNorm(out_dim, **conv_norm_opts)
        elif isinstance(conv_norm, type):
            # Some other norm class: instantiate it with out_dim and the given opts.
            conv_norm = conv_norm(out_dim, **(conv_norm_opts or {}))
        self.conv_block = ConformerConvBlock(out_dim=out_dim, kernel_size=conv_kernel_size, norm=conv_norm)
        self.conv_layer_norm = rf.LayerNorm(out_dim)
        if self_att is None or isinstance(self_att, type):
            self_att_opts_ = dict(
                in_dim=out_dim,
                proj_dim=out_dim,
                key_dim_total=out_dim,
                value_dim_total=out_dim,
                num_heads=num_heads,
                att_dropout=att_dropout,
            )
            if self_att_opts:
                self_att_opts_.update(self_att_opts)
            if self_att is None:
                self.self_att = rf.RelPosSelfAttention(**self_att_opts_)
            else:
                self.self_att = self_att(**self_att_opts_)
        else:
            # Already-instantiated module: use as-is.
            self.self_att = self_att
        self.self_att_layer_norm = rf.LayerNorm(out_dim)
        self.final_layer_norm = rf.LayerNorm(out_dim)

    def __call__(self, inp: Tensor, *, spatial_dim: Dim) -> Tensor:
        """Apply the Conformer block on the input sequence along ``spatial_dim``."""
        # FFN module 1 (half-step residual).
        x_ffn1_ln = self.ffn1_layer_norm(inp)
        x_ffn1 = self.ffn1(x_ffn1_ln)
        x_ffn1_out = 0.5 * rf.dropout(x_ffn1, self.dropout, axis=self.dropout_broadcast and self.out_dim) + inp
        # Multi-head self-attention module.
        x_mhsa_ln = self.self_att_layer_norm(x_ffn1_out)
        x_mhsa = self.self_att(x_mhsa_ln, axis=spatial_dim)
        x_mhsa = rf.dropout(x_mhsa, self.dropout, axis=self.dropout_broadcast and self.out_dim)
        x_mhsa_out = x_mhsa + x_ffn1_out
        # Convolution module.
        x_conv_ln = self.conv_layer_norm(x_mhsa_out)
        x_conv = self.conv_block(x_conv_ln, spatial_dim=spatial_dim)
        x_conv_out = rf.dropout(x_conv, self.dropout, axis=self.dropout_broadcast and self.out_dim) + x_mhsa_out
        # FFN module 2 (half-step residual).
        x_ffn2_ln = self.ffn2_layer_norm(x_conv_out)
        x_ffn2 = self.ffn2(x_ffn2_ln)
        x_ffn2_out = 0.5 * rf.dropout(x_ffn2, self.dropout, axis=self.dropout_broadcast and self.out_dim) + x_conv_out
        return self.final_layer_norm(x_ffn2_out)
class ConformerEncoder(ISeqDownsamplingEncoder):
    """
    Represents the Conformer encoder architecture:
    input layer (frontend, potential subsampling) -> input projection -> stack of Conformer blocks.
    """

    def __init__(
        self,
        in_dim: Dim,
        out_dim: Dim = Dim(512, name='conformer-enc-default-out-dim'),
        *,
        num_layers: int,
        input_layer: Union[ConformerConvSubsample, ISeqDownsamplingEncoder, rf.Module, Any],
        input_dropout: float = 0.1,
        ff_dim: Dim = NotSpecified,
        ff_activation: Callable[[Tensor], Tensor] = rf.swish,
        dropout: float = 0.1,
        conv_kernel_size: int = 32,
        conv_norm: Union[rf.BatchNorm, type, Any] = NotSpecified,
        num_heads: int = 4,
        att_dropout: float = 0.1,
        encoder_layer: Optional[Union[ConformerEncoderLayer, rf.Module, type, Any]] = None,
        encoder_layer_opts: Optional[Dict[str, Any]] = None,
        sequential=rf.Sequential,
    ):
        """
        :param in_dim: the input feature dimension
        :param out_dim: the output feature dimension
        :param num_layers: the number of encoder layers
        :param input_layer: input/frontend/prenet with potential subsampling.
            (x, in_spatial_dim) -> (y, out_spatial_dim)
        :param input_dropout: applied after input_projection(input_layer(x))
        :param ff_dim: the dimension of feed-forward layers. 2048 originally, or 4 times out_dim
        :param ff_activation: activation function for feed-forward network
        :param dropout: the dropout value for the FF block
        :param conv_kernel_size: the kernel size of depthwise convolution in the conv block
        :param conv_norm: used for the conv block. Batch norm originally
        :param num_heads: the number of attention heads
        :param att_dropout: attention dropout value
        :param encoder_layer: an instance of :class:`ConformerEncoderLayer` or similar
        :param encoder_layer_opts: options for the encoder layer
        :param sequential: container type used to stack the layers (default :class:`rf.Sequential`)
        """
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.dropout = dropout
        self.dropout_broadcast = rf.dropout_broadcast_default()
        self.input_layer = input_layer
        # Project the frontend output (or the raw input if no frontend) to the model dim.
        self.input_projection = rf.Linear(
            (self.input_layer.out_dim if self.input_layer else self.in_dim), self.out_dim, with_bias=False
        )
        self.input_dropout = input_dropout
        if ((not encoder_layer) or isinstance(encoder_layer, type)):
            encoder_layer_opts_ = dict(
                out_dim=out_dim,
                ff_dim=ff_dim,
                ff_activation=ff_activation,
                dropout=dropout,
                conv_kernel_size=conv_kernel_size,
                conv_norm=conv_norm,
                num_heads=num_heads,
                att_dropout=att_dropout,
            )
            if encoder_layer_opts:
                encoder_layer_opts_.update(encoder_layer_opts)
            if (not encoder_layer):
                encoder_layer = ConformerEncoderLayer(**encoder_layer_opts_)
            elif isinstance(encoder_layer, type):
                encoder_layer = encoder_layer(**encoder_layer_opts_)
            else:
                # Defensive; unreachable given the enclosing condition.
                raise TypeError(f'unexpected encoder_layer {encoder_layer!r}')
        # Each layer is a deep copy of the template layer (independent parameters).
        self.layers = sequential((_copy.deepcopy(encoder_layer) for _ in range(num_layers)))

    def __call__(
        self,
        source: Tensor,
        *,
        in_spatial_dim: Dim,
        collected_outputs: Optional[Dict[str, Tensor]] = None,
    ) -> Tuple[Tensor, Dim]:
        """
        :param source: input sequence [..., in_spatial_dim, in_dim]
        :param in_spatial_dim: time axis
        :param collected_outputs: if given, per-layer outputs are stored here (keyed by layer name)
        :return: (encoded sequence, downsampled spatial dim)
        """
        if self.input_layer:
            (x_subsample, out_spatial_dim) = self.input_layer(source, in_spatial_dim=in_spatial_dim)
        else:
            (x_subsample, out_spatial_dim) = (source, in_spatial_dim)
        x_linear = self.input_projection(x_subsample)
        x = rf.dropout(x_linear, self.input_dropout, axis=(self.dropout_broadcast and self.input_projection.out_dim))
        x = self.layers(x, spatial_dim=out_spatial_dim, collected_outputs=collected_outputs)
        return (x, out_spatial_dim)
def set_requires_gradient(source: Tensor):
    """
    Mark ``source`` such that its gradient can be requested later (see :func:`gradient`).
    Modifies ``source`` in-place via the backend.

    :param source:
    :return: the backend call's result is passed through
        (presumably None — the docstring previously said "nothing"; verify per backend)
    """
    return source._raw_backend.set_requires_gradient(source)
def gradient(y: Tensor, x: Tensor) -> Tensor:
    """
    Compute the gradient of ``y`` w.r.t. ``x`` via the backend.

    :param y: some scalar
    :param x: some tensor
    :return: gradient of y w.r.t. x
    """
    backend = y._raw_backend
    return backend.gradient(y, x)
def stop_gradient(source: Tensor) -> Tensor:
    """Block backprop through ``source``. Wraps ``tf.stop_gradient`` or Torch ``detach``."""
    backend = source._raw_backend
    return backend.stop_gradient(source)
def scaled_gradient(source: Tensor, scale: Union[float, Tensor]) -> Tensor:
    """
    Identity in the forward pass; scales the gradient in backprop.

    :param source:
    :param scale: if constant 0., will use :func:`stop_gradient`.
        Can be used as gradient reversal layer (with negative factor).
    :return: source with scaled gradient
    """
    if isinstance(scale, Tensor):
        return source._raw_backend.scaled_gradient(source, scale)
    if scale == 0.0:
        # Scaling by a constant zero is just cutting the gradient entirely.
        return stop_gradient(source)
    return source._raw_backend.scaled_gradient(source, scale)
def scaled_gradient_ext(
    source: Tensor,
    *,
    scale: Union[float, Tensor],
    shift: Optional[Union[float, Tensor]] = None,
    scale_shift_by_sum_over_axis: Optional[Dim] = None,
) -> Tensor:
    """
    Just `identity` in the forward pass.
    Scales the gradient by some factor in backprop.
    Can be used as gradient reversal layer (with negative factor).
    For TF, uses :func:`returnn.tf.util.basic.scaled_gradient`, or :func:`tf.stop_gradient`.

    :param source:
    :param scale: if constant 0. and no shift, will use :func:`stop_gradient`
    :param shift:
    :param scale_shift_by_sum_over_axis: if given, calculates the sum over this axis (absolute values)
        and multiplies the shift value by this sum.
    :return: source with transformed gradient
    """
    return source._raw_backend.scaled_gradient_ext(
        source, scale=scale, shift=shift, scale_shift_by_sum_over_axis=scale_shift_by_sum_over_axis
    )
def get_tensor_dependencies(x: Tensor) -> Sequence[Tensor]:
    """
    :param x: tensor
    :return: list of tensors which are inputs to x (as reported by the backend's graph)
    """
    return x._raw_backend.get_tensor_dependencies(x)
def get_tensor_consumers(x: Tensor) -> Sequence[Tensor]:
    """
    :param x: tensor
    :return: list of tensors which consume x (as reported by the backend's graph)
    """
    return x._raw_backend.get_tensor_consumers(x)
def walk_tensor_consumers(
    seed: Union[Tensor, Sequence[Tensor]],
    *,
    filter_outputs: Optional[Callable[[Tensor], bool]] = None,
    ending_condition: Optional[Callable[[Tensor], bool]] = None,
) -> List[Tensor]:
    """
    Breadth-first walk over the consumers of ``seed`` (and their consumers, recursively).

    :param seed: start tensor(s)
    :param filter_outputs: if given, this function will be called with each tensor,
        and if it returns False, the tensor is skipped (not added to the result,
        but its consumers are still visited)
    :param ending_condition: if given, this function will be called with each tensor,
        and if it returns True, the walk stops
    :return: all visited tensors, in BFS order
    """
    from collections import deque  # local import: only needed here

    seen = set()
    # deque gives O(1) popleft; the previous list.pop(0) was O(n) per step.
    queue = deque([seed] if isinstance(seed, Tensor) else seed)
    outputs = []
    while queue:
        x = queue.popleft()
        if x in seen:
            continue
        seen.add(x)
        if not filter_outputs or filter_outputs(x):
            outputs.append(x)
        if ending_condition and ending_condition(x):
            break
        queue.extend(get_tensor_consumers(x))
    return outputs
class ParamInit():
    """
    API for parameter initialization schemes
    (see e.g. :class:`Normal`, :class:`VarianceScaling` and its subclasses).
    """

    def __call__(
        self,
        dims: Sequence[Dim],
        dtype: str,
        *,
        sparse_dim: Optional[Dim] = None,
        device: Optional[str] = None,
        out: Optional[Tensor] = None,
    ) -> Union[Tensor, rf.RawTensorTypes]:
        """
        :param dims: shape of the parameter
        :param dtype:
        :param sparse_dim:
        :param device:
        :param out: if given, write the values into this tensor
        :return: the initial parameter values
        """
        raise NotImplementedError
class Normal(ParamInit):
    """
    Initialization by normal distribution (truncated by default),
    independent of the dimensions (fan in/out).

    See :class:`VarianceScaling` and derivatives for variants which depend on fan in/out.
    """

    def __init__(self, stddev: float, *, truncated: bool = True, dtype: str = None):
        """
        :param stddev: standard deviation of the distribution; must be > 0
        :param truncated: whether to use a truncated normal distribution
        :param dtype: defaults to :func:`rf.get_default_float_dtype`
        """
        if stddev <= 0.0:
            raise ValueError(f'Argument `stddev` must be a positive float. Received: {stddev}')
        self.stddev = stddev
        self.truncated = truncated
        self.dtype = rf.get_default_float_dtype() if dtype is None else dtype

    def __call__(
        self,
        dims: Sequence[Dim],
        dtype: str,
        *,
        sparse_dim: Optional[Dim] = None,
        device: Optional[str] = None,
        out: Optional[Tensor] = None,
    ) -> Tensor:
        distribution = 'truncated_normal' if self.truncated else 'normal'
        return rf.random(
            distribution=distribution,
            static=True,
            dims=dims,
            mean=0.0,
            stddev=self.stddev,
            dtype=dtype if dtype is not None else self.dtype,
            sparse_dim=sparse_dim,
            device=device,
            out=out,
        )
class VarianceScaling(ParamInit):
    """
    Provides a generalized way for initializing weights.
    All the common initialization methods are special cases
    such as Xavier Glorot and Kaiming He.

    Code adopted from TensorFlow VarianceScaling.
    """

    # Class-level defaults; subclasses (e.g. :class:`Glorot`, :class:`He`) override these.
    scale = 1.0
    mode = 'fan_in'
    distribution = 'truncated_normal'
    dtype: str

    def __init__(self, scale: float = None, mode: str = None, distribution: str = None, dtype: str = None):
        """
        :param scale: variance scale; defaults to the class attribute
        :param mode: "fan_in", "fan_out" or "fan_avg"; defaults to the class attribute
        :param distribution: "normal", "uniform", "truncated_normal" or "untruncated_normal";
            defaults to the class attribute
        :param dtype: defaults to :func:`rf.get_default_float_dtype`
        """
        if (scale is not None):
            self.scale = scale
        if (mode is not None):
            self.mode = mode
        if (distribution is not None):
            self.distribution = distribution
        if (dtype is None):
            dtype = rf.get_default_float_dtype()
        self.dtype = dtype
        if (self.scale <= 0.0):
            raise ValueError(f'Argument `scale` must be a positive float. Received: {self.scale}')
        if (self.mode not in {'fan_in', 'fan_out', 'fan_avg'}):
            raise ValueError(f"Argument `mode` should be one of ('fan_in', 'fan_out', 'fan_avg'). Received: {self.mode}")
        if (self.distribution not in {'normal', 'uniform', 'truncated_normal', 'untruncated_normal'}):
            raise ValueError(f"Argument `distribution` should be one of ('normal', 'uniform', 'truncated_normal', 'untruncated_normal'). Received: {self.distribution}")

    def __call__(
        self,
        dims: Sequence[Dim],
        dtype: str,
        *,
        sparse_dim: Optional[Dim] = None,
        device: Optional[str] = None,
        out: Optional[Tensor] = None,
    ) -> Tensor:
        if (dtype is None):
            dtype = self.dtype
        scale = self.scale
        (fan_in, fan_out) = _compute_fans(dims)
        # Scale the variance by the chosen fan (input units, output units, or their average).
        if (self.mode == 'fan_in'):
            scale /= max(1.0, fan_in)
        elif (self.mode == 'fan_out'):
            scale /= max(1.0, fan_out)
        else:
            scale /= max(1.0, ((fan_in + fan_out) / 2.0))
        return self._random(dims=dims, dtype=dtype, scale=scale, sparse_dim=sparse_dim, device=device, out=out)

    def _random(
        self,
        dims: Sequence[Dim],
        dtype: Optional[str] = None,
        *,
        scale: float,
        sparse_dim: Optional[Dim] = None,
        device: Optional[str] = None,
        out: Optional[Tensor] = None,
    ) -> Tensor:
        # Note: 'normal' also maps to the truncated variant here
        # (mirrors TF VarianceScaling, where "normal" was a legacy alias — verify against TF docs).
        if (self.distribution in {'truncated_normal', 'normal'}):
            # 0.8796256610342398 == scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
            # (as in TF VarianceScaling); dividing corrects the stddev after truncation at 2 stddevs.
            stddev = (math.sqrt(scale) / 0.8796256610342398)
            return rf.random(distribution='truncated_normal', static=True, dims=dims, mean=0.0, stddev=stddev, dtype=dtype, sparse_dim=sparse_dim, device=device, out=out)
        elif (self.distribution == 'untruncated_normal'):
            stddev = math.sqrt(scale)
            return rf.random(distribution='normal', static=True, dims=dims, mean=0.0, stddev=stddev, dtype=dtype, sparse_dim=sparse_dim, device=device, out=out)
        elif (self.distribution == 'uniform'):
            # Uniform on [-limit, limit] has variance limit^2 / 3 == scale.
            limit = math.sqrt((3.0 * scale))
            return rf.random(distribution='uniform', static=True, dims=dims, minval=(- limit), maxval=limit, dtype=dtype, sparse_dim=sparse_dim, device=device, out=out)
        else:
            raise ValueError(f'invalid distribution {self.distribution!r}')
class Glorot(VarianceScaling):
    """
    Xavier Glorot (http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf).
    scale 1, fan_avg, uniform
    """

    # Overrides of the VarianceScaling class defaults.
    scale = 1.0
    mode = 'fan_avg'
    distribution = 'uniform'
class He(VarianceScaling):
    """
    Kaiming He (https://arxiv.org/pdf/1502.01852.pdf).
    scale 2, fan_in, normal
    """

    # Overrides of the VarianceScaling class defaults.
    scale = 2.0
    mode = 'fan_in'
    distribution = 'normal'
class HeUniform(He):
    """
    He-init (:class:`He`) but using a uniform distribution.
    scale 2, fan_in, uniform
    """

    # Only the distribution differs from :class:`He`.
    distribution = 'uniform'
def _compute_fans(dims: Sequence[Dim]):
    """
    Compute the number of input and output units for a weight shape.

    :param dims: weight shape
    :return: tuple of integer scalars (fan_in, fan_out)
    """
    shape = [d.dimension for d in dims]
    if not shape:
        # Scalar parameter: both fans are 1.
        return (1, 1)
    if len(shape) == 1:
        return (shape[0], shape[0])
    if len(shape) == 2:
        return (shape[0], shape[1])
    # Conv-like shape: trailing dims form the receptive field.
    receptive_field = math.prod(shape[2:])
    return (shape[1] * receptive_field, shape[0] * receptive_field)
def label_smoothing(prob: Tensor, smoothing: Union[Tensor, float], *, axis: Optional[Dim] = None) -> Tensor:
    """
    Label smoothing, often used for cross entropy.

    In case of sparse data, it will become dense (via :func:`smooth_one_hot`)
    and the target label will get probability (1 - smoothing).
    """
    if not axis:
        axis = prob.feature_dim or prob.sparse_dim
        assert axis
    if prob.sparse_dim:
        # Sparse targets: expand to a smoothed dense distribution.
        assert prob.sparse_dim == axis
        return rf.smooth_one_hot(prob, label_prob=1.0 - smoothing)
    assert axis in prob.dims
    # Dense probs: rescale and shift so the total mass stays 1.
    num_labels = axis.dimension
    floor_prob = smoothing / (num_labels - 1)
    scale = 1.0 - num_labels * floor_prob
    return prob * scale + floor_prob
def smooth_one_hot(source: Tensor, *, label_prob: Union[Tensor, float]) -> Tensor:
    """
    Smooth variant of :func:`one_hot`.
    Uses ``label_prob`` for the labels and ``(1 - label_prob) / (dim - 1)`` for the remaining values.
    This is used for label smoothing.
    """
    assert source.sparse_dim
    num_labels = source.sparse_dim.dimension
    if num_labels is None:
        raise NotImplementedError(f'smooth_one_hot({source}) not implemented for dynamic dims')
    rest_prob = (1.0 - label_prob) / (num_labels - 1)
    return rf.sparse_to_dense(source, label_value=label_prob, other_value=rest_prob)
def label_smoothed_log_prob_gradient(
    log_prob: Tensor,
    smoothing: Union[Tensor, float],
    *,
    axis: Optional[Dim] = None,
    exclude_labels: Optional[Sequence[int]] = None,
) -> Tensor:
    """
    :param log_prob: shape [...,D] (not necessarily the same as loss)
    :param smoothing: smoothing factor, for :func:`label_smoothing`
    :param axis: label axis. uses feature_dim by default
    :param exclude_labels: list of labels to exclude from smoothing (e.g. blank)

    Assume some cross-entropy-like loss:

      loss = - sum_i target_prob[i] * log_prob[i] .

    The sum is over the label indices i (corresponding to the ``axis`` argument).
    Then the gradient of loss w.r.t. log_prob[i] is:

      grad_logprob[i] loss = -target_prob[i] .

    We assume that the negative gradient is a probability distribution,
    and apply :func:`label_smoothing` on it.
    More specifically, we apply the same scale and shift as in the :func:`label_smoothing` function
    via :func:`scaled_gradient`.

    Just as a side remark: assume

      log_prob = log_softmax(z) .

    The gradient of log_softmax is:

      grad_z[j] log_prob[i] = delta(i==j) - softmax(z)[j] .

    Then the gradient w.r.t. z[j] is:

      grad_z[j] loss = sum_i (grad_logprob[i] loss) (grad_z[j] logprob[i])
                     = sum_i -target_prob[i] delta(i==j) + target_prob[i] softmax(z)[j]
                     = -target_prob[j] + (sum_i target_prob[i]) softmax(z)[j]
                     = softmax(z)[j] - target_prob[j]  # assuming (sum_i target_prob[i]) == 1
    """
    if (not axis):
        assert log_prob.feature_dim
        axis = log_prob.feature_dim
    dim = axis.dimension
    # Same scale/shift as in :func:`label_smoothing`, here applied to the gradient.
    floor_prob = (smoothing / (dim - 1))
    factor = (1.0 - (dim * floor_prob))
    if exclude_labels:
        # For excluded labels, keep the gradient unchanged (scale 1, shift 0).
        indices = rf.range_over_dim(axis)
        mask = True
        for label in exclude_labels:
            mask = (mask & (indices != label))
        factor = rf.where(mask, factor, 1.0)
        floor_prob = rf.where(mask, floor_prob, 0.0)
    return rf.scaled_gradient_ext(log_prob, scale=factor, shift=(- floor_prob), scale_shift_by_sum_over_axis=axis)
class Linear(rf.Module):
    """
    Linear transformation ``in_dim -> out_dim``, optionally with bias.
    """

    def __init__(self, in_dim: Dim, out_dim: Dim, *, with_bias=True):
        """
        :param in_dim: input feature dim
        :param out_dim: output feature dim
        :param with_bias: whether to add a bias term
        """
        super().__init__()
        assert isinstance(in_dim, Dim) and isinstance(out_dim, Dim)
        self.in_dim = in_dim
        self.out_dim = out_dim
        # Raise the match priority of in_dim if it equals out_dim (square weight matrix),
        # so matmul reduces over the right axis.
        self.weight = rf.Parameter((rf.dim_match_priority_when_needed(self.in_dim, self.out_dim), self.out_dim))
        self.weight.initial = rf.init.Glorot()
        self.with_bias = with_bias
        self.bias = None
        if with_bias:
            self.bias = rf.Parameter((self.out_dim,))
            self.bias.initial = 0.0

    def __call__(self, source: Tensor) -> Tensor:
        if not isinstance(source, Tensor):
            raise TypeError(f'{self}: source must be a Tensor but got {type(source)}')
        if self.in_dim not in source.dims:
            raise ValueError(f'{self}: input {source} does not have in_dim {self.in_dim}')
        out = rf.matmul(source, self.weight, reduce=self.in_dim)
        out.feature_dim = self.out_dim
        if self.with_bias:
            out += self.bias
        return out
class Embedding(rf.Module):
    """
    Embedding lookup: maps sparse indices (``in_dim``) to dense vectors (``out_dim``).
    """

    def __init__(self, in_dim: Dim, out_dim: Dim):
        """
        :param in_dim: sparse input dim (vocabulary)
        :param out_dim: dense output feature dim
        """
        super().__init__()
        assert isinstance(in_dim, Dim) and isinstance(out_dim, Dim)
        self.in_dim = in_dim
        self.out_dim = out_dim
        # Raise the match priority of in_dim if it equals out_dim,
        # so the gather indexes the right axis.
        self.weight = rf.Parameter((rf.dim_match_priority_when_needed(self.in_dim, self.out_dim), self.out_dim))
        self.weight.initial = rf.init.Glorot()

    def __call__(self, source: Tensor) -> Tensor:
        if not isinstance(source, Tensor):
            raise TypeError(f'{self}: source must be a Tensor but got {type(source)}')
        if self.in_dim != source.sparse_dim:
            raise ValueError(f'{self}: input {source} does not have in_dim {self.in_dim}')
        out = rf.gather(self.weight, indices=source, axis=self.in_dim)
        out.feature_dim = self.out_dim
        return out
def while_loop(cond: Callable[[S], Union[bool, Tensor]], body: Callable[[S], S], initial: S) -> S:
    """
    It executes::

        while cond(loop_vars):
            loop_vars = body(loop_vars)

    And then it returns the final loop vars.

    If you want to iterate over some axis, maybe of an existing tensor,
    or if you want to accumulate frames in each iteration, see :func:`scan`.

    :param cond:
    :param body:
    :param initial: initial loop vars
    :return: final loop vars
    """
    # Pick the backend from the first Tensor in the loop vars (if any).
    init_tensors = [v for v in tree.flatten(initial) if isinstance(v, Tensor)]
    if (not init_tensors):
        backend = global_backend
    else:
        v = init_tensors[0]
        backend = v._raw_backend
    if backend.executing_eagerly():
        # Eager: just run the Python loop, but check each iteration
        # that the loop vars keep a consistent structure/template
        # (dims may legitimately change; _DimUpdatesEager tracks those).
        loop_vars = initial
        loop_var_templates = _templates_for_loop_vars(loop_vars)
        dim_updates = _DimUpdatesEager(initial_state=loop_vars)
        while _get_bool_value_eager(cond(loop_vars)):
            loop_vars = body(loop_vars)
            tree.assert_same_structure(loop_var_templates, loop_vars)
            loop_var_templates = dim_updates.update_template_from_new(loop_var_templates, loop_vars)
            _check_matching_loop_var_templates(loop_var_templates, loop_vars)
        return loop_vars
    # Graph mode: delegate to the backend's native while_loop.
    return backend.while_loop(cond, body, initial)
def _get_bool_value_eager(v: Union[Tensor, bool]) -> bool:
    """
    Convert the eager `cond` result (scalar bool Tensor or plain Python bool) to a bool.
    """
    if isinstance(v, Tensor):
        # Must be a scalar bool, and on CPU so reading the value is cheap.
        assert v.dims == () and v.dtype == "bool"
        assert v.device in (None, "cpu"), f"while_loop: cond should be on CPU, got {v} on device {v.device}"
        return bool(v.raw_tensor)
    if isinstance(v, bool):
        return v
    raise TypeError(f"while_loop: cond: unexpected return type {type(v)}")
def scan(
    *,
    spatial_dim: Optional[Dim] = None,
    cond_dims: Optional[Sequence[Dim]] = None,
    cond_before_body: bool = True,
    initial: S = None,
    xs: X = None,
    ys: Y = None,
    cond: Optional[Callable[[X, S], Tensor]] = None,
    body: Callable[[X, S], Tuple[Y, S]],
    max_seq_len: Optional[Union[int, Tensor]] = None,
    return_tensor_arrays: bool = False,
) -> Tuple[Y, S, Dim]:
    """
    Extended variant of :func:`while_loop`.

    Supports iterating over a given axis (spatial_dim),
    supports iterating over some input tensors (xs: X) on the given axis,
    and supports returning some frame-accumulated output tensors (ys: Y).

    https://github.com/rwth-i6/returnn/issues/1324

    :param spatial_dim: if None or unknown, need to provide cond. must be given if xs is not None.
    :param cond_dims: if spatial_dim is not given, this must be given to know what shape to expect from cond.
        This will also be the shape of the dyn_size_ext of the resulting spatial_dim.
    :param cond_before_body: if True, will execute cond before body, otherwise after.
        If True, corresponds to ``while cond(...): body(...)``,
        otherwise ``while True: body(...); if not cond(...): break``.
        Note that `cond` is executed in any case at the end of the loop
        but with `cond_before_body=True` this final value would be ignored.
        Be careful though that you do not have any side-effects in `cond`.
    :param initial: state/carry
    :param xs: input, will be unstacked over spatial_dim. can be None.
    :param ys: output, as templates, per iteration (excluding spatial_dim)
    :param cond: if spatial_dim is None/unknown, need to provide this.
        The shape will be the same as the dyn_size_ext of the resulting spatial_dim.
        Unlike while_loop cond, does not need to be scalar. E.g. some shape like [B] is normal.
        Once it returns False for all entries, the loop will stop.
        Once it returns False for some entry, further values in further iterations for this entry will be ignored.
        We do not expect any side-effects in `cond`.
    :param body:
    :param max_seq_len: If given, it is checked in addition to `cond`, and when reached, it stops the loop.
    :param return_tensor_arrays: if True, will return TensorArray instead of Tensor for ys.
        Internally, we work with TensorArray anyway, so this avoids the final stack().
        In case of beam search, it might make more sense
        to perform some post-processing on the TensorArray per entry,
        like selecting the right beam entries.
    :return: outputs ys, final state, and the new spatial_dim
    """
    if (spatial_dim is None) or (not spatial_dim.is_dim_known()):
        # Dynamic-length case: length is determined by `cond` (and optionally max_seq_len).
        assert cond is not None, f"scan: spatial_dim {spatial_dim} is None/unknown, need to provide `cond`"
        assert cond_dims is not None, f"scan: spatial_dim {spatial_dim} is None/unknown, need to provide `cond_dims`"
        assert xs is None, f"scan: spatial_dim {spatial_dim} is None/unknown, cannot use input `xs` {xs}"
        if spatial_dim is None:
            spatial_dim = Dim(None, name="scan_dim")

        # Loop state tuple: (iteration index i, per-entry seq_len, per-entry cond, user state, accumulated ys).
        def _cond(_s: Tuple[Tensor, Tensor, Tensor, S, Y]) -> Tensor:
            (i, _, c, _, _) = _s
            # Keep looping while any entry is still active.
            c = rf.reduce_any(c, axis=c.dims)
            if max_seq_len is not None:
                c = rf.logical_and(c, (i < max_seq_len))
            return c

        def _body(_s: Tuple[Tensor, Tensor, Tensor, S, Y]) -> Tuple[Tensor, Tensor, Tensor, S, Y]:
            (i, seq_len_, prev_cond, s, ys_) = _s
            # Entries still active in the previous iteration count one more frame.
            seq_len_ = seq_len_ + rf.cast(prev_cond, dtype=seq_len_.dtype)
            (y, s) = body(None, s)
            tree.assert_same_structure(ys_, y)
            ys_ = tree.map_structure(lambda ys__, y_: (ys__.push_back(y_) if ys__ is not None else None), ys_, y)
            c = cond(None, s)
            # Once an entry became False, it stays False (further values are ignored).
            c = rf.logical_and(c, prev_cond)
            return (i + 1, seq_len_, c, s, ys_)

        if cond_before_body:
            # `while cond: body` semantics: evaluate cond once on the initial state.
            initial_cond = cond(None, initial)
            assert (
                isinstance(initial_cond, Tensor)
                and (initial_cond.dtype == "bool")
                and (initial_cond.dims_set == set(cond_dims))
            )
        else:
            # do-while semantics: body always runs at least once.
            initial_cond = rf.constant(True, dtype="bool", dims=cond_dims)
        (_, seq_len, _, final_s, ys) = while_loop(
            _cond,
            _body,
            (
                rf.constant(0, dtype=rf.get_default_array_index_dtype(), dims=()),
                rf.constant(0, dtype=rf.get_default_array_index_dtype(), dims=cond_dims),
                initial_cond,
                initial,
                tree.map_structure(lambda y: (TensorArray(y) if y is not None else None), ys),
            ),
        )
        # The accumulated per-entry lengths define the new dynamic spatial dim.
        spatial_dim.dyn_size_ext = seq_len
    else:
        # Known-length case: iterate over the given spatial_dim, unstacking xs per frame.
        assert cond is None, f"scan: spatial_dim {spatial_dim} is known, cannot use `cond` {cond}"
        assert max_seq_len is None, f"scan: spatial_dim {spatial_dim} is known, cannot use `max_seq_len` {max_seq_len}"
        xs = tree.map_structure(lambda x: TensorArray.unstack(x, axis=spatial_dim), xs)

        # Loop state tuple: (iteration index i, user state, accumulated ys).
        def _cond(_s: Tuple[Tensor, S, Y]) -> Tensor:
            (i, *_) = _s
            return i < spatial_dim.get_dim_value_tensor()

        def _body(_s: Tuple[Tensor, S, Y]) -> Tuple[Tensor, S, Y]:
            (i, s, ys_) = _s
            (y, s) = body(tree.map_structure(lambda x: x[i], xs), s)
            tree.assert_same_structure(ys_, y)
            ys_ = tree.map_structure(lambda ys__, y_: (ys__.push_back(y_) if ys__ is not None else None), ys_, y)
            return (i + 1, s, ys_)

        (_, final_s, ys) = while_loop(
            _cond,
            _body,
            (
                rf.constant(0, dtype=rf.get_default_array_index_dtype(), dims=(), device="cpu"),
                initial,
                tree.map_structure(lambda y: (TensorArray(y) if y is not None else None), ys),
            ),
        )
    if not return_tensor_arrays:
        # Stack the per-frame outputs into tensors over the (new) spatial dim.
        ys = tree.map_structure(lambda ys_: (ys_.stack(axis=spatial_dim) if ys_ is not None else None), ys)
    return (ys, final_s, spatial_dim)
def _templates_for_loop_vars(loop_vars: S) -> S:
    """
    Build a template structure for the loop vars:
    Tensors become detached templates; Dims, TensorArrays and None are kept as-is.
    """

    def _to_template(value):
        if isinstance(value, Tensor):
            return value.copy_template()
        if isinstance(value, (Dim, TensorArray)) or value is None:
            return value
        raise TypeError(f"unexpected type {type(value)} for loop var {value}")

    return tree.map_structure(_to_template, loop_vars)
def _check_matching_loop_var_templates(loop_var_templates: S, loop_vars: S):
    """
    Verify that the loop vars produced by body() still match their templates
    (same dims regardless of order, same sparse/feature dims, same types).
    Raises AssertionError on any mismatch.
    """

    def _check(path, template, x):
        if isinstance(template, Tensor):
            assert isinstance(x, Tensor), f"loop var {path} is not a Tensor but {type(x)}"
            assert template.batch_ndim == x.batch_ndim, (
                f"loop var {path} template {template} does not match var {x},"
                f" different batch_ndim {template.batch_ndim} vs {x.batch_ndim}"
            )
            # Dim order may differ between iterations; only the set of dims must match.
            assert template.dims_set == x.dims_set, (
                f"loop var {path} template {template} does not match var {x},"
                f" different dims (no matter the order) {template.dims} vs {x.dims}"
            )
            assert template.sparse_dim == x.sparse_dim, (
                f"loop var {path} template {template} does not match var {x},"
                f" different sparse_dim {template.sparse_dim} vs {x.sparse_dim}"
            )
            assert template.feature_dim == x.feature_dim, (
                f"loop var {path} template {template} does not match var {x},"
                f" different feature_dim {template.feature_dim} vs {x.feature_dim}"
            )
        elif isinstance(template, Dim):
            assert isinstance(x, Dim), f"loop var {path} is not a Dim but {type(x)}"
            assert template == x, f"loop var {path} template dim {template} does not match var dim {x}"
        elif isinstance(template, TensorArray):
            assert isinstance(x, TensorArray), f"loop var {path} is not a TensorArray but {type(x)}"
            # Recursively check the per-frame template of the array.
            _check(path, template.tensor_template, x.tensor_template)
            # Run any delayed push_back consistency checks now that dims are updated.
            x._push_back_delayed_check()
        else:
            assert type(template) is type(x), (
                f"loop var {path} template type {type(template)} does not match var type {type(x)}"
            )
            assert not isinstance(x, Tensor), f"loop var {path} is a Tensor but should not be"

    tree.map_structure_with_path(_check, loop_var_templates, loop_vars)
class _DimUpdatesEager:
    """
    In case dims are updated in the loop body, we need to keep track of this.

    This implementation is for eager-based backends.

    A graph-based backend would need to distinguish:
    - initial dim
    - dim in loop body (temporarily), input from prev iteration state
    - dim in loop body (temporarily), output for new state
    - final dim

    When collecting tensors with such dims in a :class:`TensorArray`,
    we must later be able to iterate through it again, maybe backwards,
    and then also iterate over the collected dims,
    and be able to match it.

    See here for some discussion and motivation:
    https://github.com/rwth-i6/returnn/issues/1327
    """

    def __init__(self, *, initial_state: S):
        # initial dim -> list of dim variants, one appended per iteration
        self.dim_variants: Dict[Dim, List[Dim]] = {}
        # any seen dim (initial or per-iteration variant) -> its initial dim
        self._initial_dim: Dict[Dim, Dim] = {}
        self._init_initial(initial_state)

    def _init_initial(self, state: S):
        """init"""
        for x in tree.flatten(state):
            if isinstance(x, Dim):
                assert x not in self.dim_variants, f"dim {x} already in dim_variants, assumed to be unique in state"
                self.dim_variants[x] = []
                self._initial_dim[x] = x
        # TensorArrays whose per-frame template uses a tracked dim cannot be checked
        # eagerly at push_back time -- enable delayed checks for them.
        for x in tree.flatten(state):
            if isinstance(x, TensorArray):
                if any((d in self._initial_dim) for d in x.tensor_template.dims):
                    x._set_enable_delayed_check()

    def update_template_from_new(self, template_state: S, new_state: S) -> S:
        """
        :param template_state: via :func:`_templates_for_loop_vars`
        :param new_state: output from body(). Warning: we update TensorArray.tensor_template inplace here.
        :return: updated template state
        """
        if not self.dim_variants:
            # No tracked dims, nothing can have changed.
            return template_state

        # First pass: register the new dim variants from this iteration.
        def _visit_add_dim_variants(template, x):
            if isinstance(x, Dim):
                assert isinstance(template, Dim)
                initial_dim = self._initial_dim[template]
                self.dim_variants[initial_dim].append(x)
                self._initial_dim[x] = initial_dim

        tree.map_structure(_visit_add_dim_variants, template_state, new_state)

        # Second pass: rewrite templates so they refer to the latest dim variants.
        def _visit_update_template(template, x):
            if isinstance(x, Dim):
                assert isinstance(template, Dim)
                return x
            if isinstance(x, Tensor):
                assert isinstance(template, Tensor)
                return self.template_for_current_iteration(template)
            if isinstance(x, TensorArray):
                assert isinstance(template, TensorArray)
                # Update both the live array and the template array in-place.
                x.tensor_template = self.template_for_current_iteration(x.tensor_template)
                template.tensor_template = self.template_for_current_iteration(template.tensor_template)
                return template
            return template

        return tree.map_structure(_visit_update_template, template_state, new_state)

    def template_for_current_iteration(self, template: Tensor) -> Tensor:
        """
        :param template: template for the current iteration
        :return: template for the current iteration, with updated dims
        """
        if any((d in self._initial_dim) for d in template.dims):
            # Replace each tracked dim by its most recent variant.
            return template.copy_template_new_dim_tags(
                [
                    (self.dim_variants[self._initial_dim[d]][-1] if d in self._initial_dim else d)
                    for d in template.dims
                ]
            )
        return template
def cross_entropy(*, estimated: Tensor, target: Tensor, axis: Dim, estimated_type: str) -> Tensor:
    """
    ``target`` is supposed to be in probability space (normalized). It can also be sparse, i.e. contain class indices.
    ``estimated`` can be probs, log-probs or logits, specified via ``estimated_type``.

    Assuming both are in probability space, the cross entropy is:

        H(target,estimated) = -reduce_sum(target * log(estimated), axis=axis)
                            = -matmul(target, log(estimated), reduce=axis)

    In case you want label smoothing, you can use e.g.::

        ce = nn.cross_entropy(
            target=nn.label_smoothing(target, 0.1),
            estimated=estimated)

    :param estimated: probs, log-probs or logits, specified via ``estimated_type``
    :param target: probs, normalized, can also be sparse
    :param axis: class labels dim over which softmax is computed
    :param estimated_type: "probs", "log-probs" or "logits"
    :return: cross entropy (same Dims as 'estimated' but without 'axis')
    """
    if estimated_type == "logits":
        # Fused, numerically stable backend implementation.
        return estimated._raw_backend.softmax_cross_entropy_with_logits(logits=estimated, targets=target, axis=axis)
    if estimated_type == "log-probs":
        log_prob = estimated
    elif estimated_type == "probs":
        log_prob = rf.log(estimated)
    else:
        raise ValueError("estimated_type must be 'probs', 'log-probs' or 'logits'")
    if target.sparse_dim:
        # Sparse targets: CE reduces to picking the log-prob of the target class.
        return -rf.gather(log_prob, indices=target, axis=axis)
    return -rf.matmul(target, log_prob, reduce=axis)
def ctc_loss(
    *,
    logits: Tensor,
    targets: Tensor,
    input_spatial_dim: Dim,
    targets_spatial_dim: Dim,
    blank_index: int,
    max_approx: bool = False,
) -> Tensor:
    """
    Calculates the CTC loss.

    Internally, this uses :func:`returnn.tf.native_op.ctc_loss`
    which is equivalent to tf.nn.ctc_loss but more efficient.

    Output is of shape [B].

    :param logits: (before softmax). shape [B...,input_spatial,C]
    :param targets: sparse. shape [B...,targets_spatial] -> C
    :param input_spatial_dim: spatial dim of input logits
    :param targets_spatial_dim: spatial dim of targets
    :param blank_index: vocab index of the blank symbol
    :param max_approx: if True, use max instead of sum over alignments (max approx, Viterbi)
    :return: loss shape [B...]
    """
    backend = logits._raw_backend
    return backend.ctc_loss(
        logits=logits,
        targets=targets,
        input_spatial_dim=input_spatial_dim,
        targets_spatial_dim=targets_spatial_dim,
        blank_index=blank_index,
        max_approx=max_approx,
    )
# Overload stub only (no body): declares the two-Tensor signature for type checkers.
# The actual implementation of `compare` is defined elsewhere.
@typing.overload
def compare(
    a: Tensor,
    kind: str,
    b: Tensor,
    *,
    allow_broadcast_all_sources: Optional[bool] = None,
    dim_order: Optional[Sequence[Dim]] = None,
) -> Tensor:
    """compare with two tensors"""
def compare_bc(a: Tensor, kind: str, b: Tensor, *, dim_order: Optional[Sequence[Dim]] = None) -> Tensor:
    """Shorthand for :func:`compare` with ``allow_broadcast_all_sources=True``."""
    return compare(a, kind, b, dim_order=dim_order, allow_broadcast_all_sources=True)
# Overload stub only (no body): declares the two-Tensor signature for type checkers.
# The actual implementation of `combine` is defined elsewhere.
@typing.overload
def combine(
    a: Tensor,
    kind: str,
    b: Tensor,
    *,
    allow_broadcast_all_sources: Optional[bool] = None,
    dim_order: Optional[Sequence[Dim]] = None,
) -> Tensor:
    """combine with two tensors"""
def combine_bc(a: Tensor, kind: str, b: Tensor, *, dim_order: Optional[Sequence[Dim]] = None) -> Tensor:
    """Shorthand for :func:`combine` with ``allow_broadcast_all_sources=True``."""
    return combine(a, kind, b, dim_order=dim_order, allow_broadcast_all_sources=True)
def equal(a: Tensor, b: Tensor) -> Tensor:
    """Element-wise ``a == b`` via :func:`compare`."""
    kind = "equal"
    return compare(a, kind, b)
def less(a: Tensor, b: Tensor) -> Tensor:
    """Element-wise ``a < b`` via :func:`compare`."""
    kind = "less"
    return compare(a, kind, b)
def less_equal(a: Tensor, b: Tensor) -> Tensor:
    """Element-wise ``a <= b`` via :func:`compare`."""
    kind = "less_equal"
    return compare(a, kind, b)
def greater(a: Tensor, b: Tensor) -> Tensor:
    """Element-wise ``a > b`` via :func:`compare`."""
    kind = "greater"
    return compare(a, kind, b)
def greater_equal(a: Tensor, b: Tensor) -> Tensor:
    """Element-wise ``a >= b`` via :func:`compare`."""
    kind = "greater_equal"
    return compare(a, kind, b)
def not_equal(a: Tensor, b: Tensor) -> Tensor:
    """Element-wise ``a != b`` via :func:`compare`."""
    kind = "not_equal"
    return compare(a, kind, b)
def add(a: Tensor, b: Tensor) -> Tensor:
    """Element-wise ``a + b`` via :func:`combine`."""
    kind = "add"
    return combine(a, kind, b)
def sub(a: Tensor, b: Tensor) -> Tensor:
    """Element-wise ``a - b`` via :func:`combine`."""
    kind = "sub"
    return combine(a, kind, b)
def mul(a: Tensor, b: Tensor) -> Tensor:
    """Element-wise ``a * b`` via :func:`combine`."""
    kind = "mul"
    return combine(a, kind, b)
def true_divide(a: Tensor, b: Tensor) -> Tensor:
    """Element-wise true division ``a / b`` via :func:`combine`."""
    kind = "truediv"
    return combine(a, kind, b)
def floor_divide(a: Tensor, b: Tensor) -> Tensor:
    """Element-wise floor division ``a // b`` via :func:`combine`."""
    kind = "floordiv"
    return combine(a, kind, b)
def ceil_divide(a: Tensor, b: Tensor) -> Tensor: 'ceildiv' return (- ((- a) // b))
def neg(a: Tensor) -> Tensor:
    """Element-wise negation ``-a``."""
    backend = a._raw_backend
    return backend.activation(a, "neg")
def reciprocal(a: Tensor) -> Tensor:
    """Element-wise reciprocal (multiplicative inverse), i.e. ``1 / a``."""
    backend = a._raw_backend
    return backend.activation(a, "reciprocal")