id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
22,624 | import dataclasses
import functools
from typing import Any, Iterable, Optional, Tuple
import jax
import jax.numpy as jnp
from jax import lax
from jax.nn import initializers
from flax.linen import dtypes, module, transforms
from flax.typing import (
Array,
PRNGKey as PRNGKey,
Dtype,
Shape as Shape,
Initializer,
Axes,
)
canonicalize_dtype = dtypes.canonicalize_dtype
Module = module.Module
def _canonicalize_axes(rank: int, axes: Axes) -> Tuple[int, ...]:
  """Returns a tuple of deduplicated, sorted, and positive axes.

  Args:
    rank: Rank of the array the axes refer to; used to resolve negative axes.
    axes: A single axis or an iterable of axes; entries may be negative.

  Returns:
    A sorted tuple of unique, non-negative axis indices.
  """
  if not isinstance(axes, Iterable):
    axes = (axes,)
  # sorted() guarantees the ordering the docstring promises; iterating a bare
  # set would rely on an implementation detail of small-int hashing.
  return tuple(sorted({rank + axis if axis < 0 else axis for axis in axes}))
Array = Union[jax.Array, Any]
The provided code snippet includes necessary dependencies for implementing the `_normalize` function. Write a Python function `def _normalize( mdl: Module, x: Array, mean: Array, var: Array, reduction_axes: Axes, feature_axes: Axes, dtype: Optional[Dtype], param_dtype: Dtype, epsilon: float, use_bias: bool, use_scale: bool, bias_init: Initializer, scale_init: Initializer, )` to solve the following problem:
Normalizes the input of a normalization layer and optionally applies a learned scale and bias. Arguments: mdl: Module to apply the normalization in (normalization params will reside in this module). x: The input. mean: Mean to use for normalization. var: Variance to use for normalization. reduction_axes: The axes in ``x`` to reduce. feature_axes: Axes containing features. A separate bias and scale is learned for each specified feature. dtype: The dtype of the result (default: infer from input and params). param_dtype: The dtype of the parameters. epsilon: Normalization epsilon. use_bias: If true, add a bias term to the output. use_scale: If true, scale the output. bias_init: Initialization function for the bias term. scale_init: Initialization function for the scaling function. Returns: The normalized input.
Here is the function:
def _normalize(
  mdl: Module,
  x: Array,
  mean: Array,
  var: Array,
  reduction_axes: Axes,
  feature_axes: Axes,
  dtype: Optional[Dtype],
  param_dtype: Dtype,
  epsilon: float,
  use_bias: bool,
  use_scale: bool,
  bias_init: Initializer,
  scale_init: Initializer,
):
  """Normalizes ``x`` given precomputed statistics, with optional affine params.

  Arguments:
    mdl: Module that owns the normalization parameters.
    x: The input.
    mean: Mean to use for normalization.
    var: Variance to use for normalization.
    reduction_axes: The axes in ``x`` that were reduced over.
    feature_axes: Axes containing features; a separate bias and scale is
      learned for each specified feature.
    dtype: The dtype of the result (default: infer from input and params).
    param_dtype: The dtype of the parameters.
    epsilon: Normalization epsilon.
    use_bias: If true, add a bias term to the output.
    use_scale: If true, scale the output.
    bias_init: Initialization function for the bias term.
    scale_init: Initialization function for the scale.

  Returns:
    The normalized input.
  """
  reduction_axes = _canonicalize_axes(x.ndim, reduction_axes)
  feature_axes = _canonicalize_axes(x.ndim, feature_axes)

  # Broadcastable shape for the learned params: 1 on every non-feature axis.
  broadcast_shape = [1] * x.ndim
  for axis in feature_axes:
    broadcast_shape[axis] = x.shape[axis]
  # Compact shape the params are actually stored in (feature dims only).
  param_shape = [x.shape[axis] for axis in feature_axes]

  mean = jnp.expand_dims(mean, reduction_axes)
  var = jnp.expand_dims(var, reduction_axes)
  centered = x - mean
  inv_stddev = lax.rsqrt(var + epsilon)

  dtype_args = [x]
  if use_scale:
    scale = mdl.param(
      'scale', scale_init, param_shape, param_dtype
    ).reshape(broadcast_shape)
    # Folding the scale into the multiplier saves one elementwise pass.
    inv_stddev = inv_stddev * scale
    dtype_args.append(scale)
  centered = centered * inv_stddev
  if use_bias:
    bias = mdl.param(
      'bias', bias_init, param_shape, param_dtype
    ).reshape(broadcast_shape)
    centered = centered + bias
    dtype_args.append(bias)

  dtype = dtypes.canonicalize_dtype(*dtype_args, dtype=dtype)
  return jnp.asarray(centered, dtype)
22,625 | import dataclasses
import functools
from typing import Any, Iterable, Optional, Tuple
import jax
import jax.numpy as jnp
from jax import lax
from jax.nn import initializers
from flax.linen import dtypes, module, transforms
from flax.typing import (
Array,
PRNGKey as PRNGKey,
Dtype,
Shape as Shape,
Initializer,
Axes,
)
The provided code snippet includes necessary dependencies for implementing the `_l2_normalize` function. Write a Python function `def _l2_normalize(x, axis=None, eps=1e-12)` to solve the following problem:
Normalizes along dimension `axis` using an L2 norm. This specialized function exists for numerical stability reasons. Args: x: An input ndarray. axis: Dimension along which to normalize, e.g. `1` to separately normalize vectors in a batch. Passing `None` views `t` as a flattened vector when calculating the norm (equivalent to Frobenius norm). eps: Epsilon to avoid dividing by zero. Returns: An array of the same shape as 'x' L2-normalized along 'axis'.
Here is the function:
def _l2_normalize(x, axis=None, eps=1e-12):
"""Normalizes along dimension `axis` using an L2 norm.
This specialized function exists for numerical stability reasons.
Args:
x: An input ndarray.
axis: Dimension along which to normalize, e.g. `1` to separately normalize
vectors in a batch. Passing `None` views `t` as a flattened vector when
calculating the norm (equivalent to Frobenius norm).
eps: Epsilon to avoid dividing by zero.
Returns:
An array of the same shape as 'x' L2-normalized along 'axis'.
"""
return x * jax.lax.rsqrt((x * x).sum(axis=axis, keepdims=True) + eps) | Normalizes along dimension `axis` using an L2 norm. This specialized function exists for numerical stability reasons. Args: x: An input ndarray. axis: Dimension along which to normalize, e.g. `1` to separately normalize vectors in a batch. Passing `None` views `t` as a flattened vector when calculating the norm (equivalent to Frobenius norm). eps: Epsilon to avoid dividing by zero. Returns: An array of the same shape as 'x' L2-normalized along 'axis'. |
22,626 | import jax, jax.numpy as jnp
import numpy as np
def ndim_at_least(x, num_dims):
  """Returns True iff ``x`` has at least ``num_dims`` dimensions.

  Non-array inputs are first converted to a jnp array (scalars get rank 0).
  """
  if not isinstance(x, (jax.Array, np.ndarray)):
    x = jnp.asarray(x)
  return x.ndim >= num_dims
def arbitrary_mergeable_leaf(min_num_dims, args, kwargs):
  """Returns any leaf of ``args``/``kwargs`` with >= ``min_num_dims`` dims.

  Scans the leaves of ``args`` first, then ``kwargs``; returns ``None`` when
  no leaf is large enough.
  """
  candidates = jax.tree_util.tree_leaves(args) + jax.tree_util.tree_leaves(
    kwargs
  )
  for leaf in candidates:
    if ndim_at_least(leaf, min_num_dims):
      return leaf
  # Couldn't find a satisfactory leaf.
  return None
22,627 | import jax, jax.numpy as jnp
import numpy as np
def ndim_at_least(x, num_dims):
  """True iff ``x`` (converted to an array if necessary) has rank >= ``num_dims``."""
  is_array = isinstance(x, jax.Array) or isinstance(x, np.ndarray)
  arr = x if is_array else jnp.asarray(x)
  return arr.ndim >= num_dims
The provided code snippet includes necessary dependencies for implementing the `merge_leading_dims` function. Write a Python function `def merge_leading_dims(x, num_dims)` to solve the following problem:
Merge leading dimensions.
Here is the function:
def merge_leading_dims(x, num_dims):
  """Merges the first ``num_dims`` dimensions of ``x`` into a single one."""
  if not ndim_at_least(x, num_dims):
    # Fewer dims than requested: nothing to merge, return unchanged.
    return x
  merged = (np.prod(x.shape[:num_dims]),)
  return x.reshape(merged + x.shape[num_dims:])
22,628 | import jax, jax.numpy as jnp
import numpy as np
def split_leading_dim(x, to_dim):
  """Reshapes the leading dimension of ``x`` into the shape ``to_dim``."""
  return x.reshape(to_dim + x.shape[1:])
22,629 | import jax.numpy as jnp
import numpy as np
from jax import lax
def pool(inputs, init, reduce_fn, window_shape, strides, padding):
  """Applies a windowed reduction via the ReduceWindow XLA op.

  .. note::
    Pooling is not generally differentiable: a differentiable ``reduce_fn``
    does not make ``pool`` differentiable.

  Args:
    inputs: input data with dimensions (batch, window dims..., features).
    init: the initial value for the reduction.
    reduce_fn: a reduce function of the form ``(T, T) -> T``.
    window_shape: a shape tuple defining the window to reduce over.
    strides: a sequence of ``n`` integers, the inter-window strides
      (default: ``(1, ..., 1)``).
    padding: either ``'SAME'``, ``'VALID'``, or a sequence of ``n``
      ``(low, high)`` integer pairs of per-dimension padding.

  Returns:
    The output of the reduction for each window slice.
  """
  num_batch_dims = inputs.ndim - (len(window_shape) + 1)
  strides = strides or (1,) * len(window_shape)
  assert len(window_shape) == len(
    strides
  ), f'len({window_shape}) must equal len({strides})'
  strides = (1,) * num_batch_dims + strides + (1,)
  dims = (1,) * num_batch_dims + window_shape + (1,)

  # lax.reduce_window always needs a batch dimension; add a singleton one for
  # unbatched inputs and strip it again afterwards.
  is_single_input = num_batch_dims == 0
  if is_single_input:
    inputs = inputs[None]
    strides = (1,) + strides
    dims = (1,) + dims

  assert inputs.ndim == len(dims), f'len({inputs.shape}) != len({dims})'
  if not isinstance(padding, str):
    padding = tuple(map(tuple, padding))
    assert len(padding) == len(window_shape), (
      f'padding {padding} must specify pads for same number of dims as '
      f'window_shape {window_shape}'
    )
    assert all(
      [len(x) == 2 for x in padding]
    ), f'each entry in padding {padding} must be length 2'
    padding = ((0, 0),) + padding + ((0, 0),)

  out = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
  return jnp.squeeze(out, axis=0) if is_single_input else out
The provided code snippet includes necessary dependencies for implementing the `avg_pool` function. Write a Python function `def avg_pool( inputs, window_shape, strides=None, padding='VALID', count_include_pad=True )` to solve the following problem:
Pools the input by taking the average over a window. Args: inputs: input data with dimensions (batch, window dims..., features). window_shape: a shape tuple defining the window to reduce over. strides: a sequence of ``n`` integers, representing the inter-window strides (default: ``(1, ..., 1)``). padding: either the string ``'SAME'``, the string ``'VALID'``, or a sequence of ``n`` ``(low, high)`` integer pairs that give the padding to apply before and after each spatial dimension (default: ``'VALID'``). count_include_pad: a boolean whether to include padded tokens in the average calculation (default: ``True``). Returns: The average for each window slice.
Here is the function:
def avg_pool(
  inputs, window_shape, strides=None, padding='VALID', count_include_pad=True
):
  """Pools the input by averaging over a window.

  Args:
    inputs: input data with dimensions (batch, window dims..., features).
    window_shape: a shape tuple defining the window to reduce over.
    strides: a sequence of ``n`` integers, the inter-window strides
      (default: ``(1, ..., 1)``).
    padding: either ``'SAME'``, ``'VALID'``, or a sequence of ``n``
      ``(low, high)`` integer pairs of per-dimension padding
      (default: ``'VALID'``).
    count_include_pad: whether padded tokens count toward the average
      (default: ``True``).

  Returns:
    The average for each window slice.
  """
  summed = pool(inputs, 0.0, lax.add, window_shape, strides, padding)
  if count_include_pad:
    return summed / np.prod(window_shape)
  # Divide each window's sum by its number of *valid* (non-padded) elements,
  # computed by pooling an all-ones array with the same window/stride/pad.
  div_shape = inputs.shape[:-1] + (1,)
  if len(div_shape) - 2 == len(window_shape):
    div_shape = (1,) + div_shape[1:]
  window_counts = pool(
    jnp.ones(div_shape), 0.0, lax.add, window_shape, strides, padding
  )
  return summed / window_counts
22,630 | import jax.numpy as jnp
import numpy as np
from jax import lax
def pool(inputs, init, reduce_fn, window_shape, strides, padding):
  """Applies a windowed reduction via the ReduceWindow XLA op.

  .. note::
    Pooling is not generally differentiable: a differentiable ``reduce_fn``
    does not make ``pool`` differentiable.

  Args:
    inputs: input data with dimensions (batch, window dims..., features).
    init: the initial value for the reduction.
    reduce_fn: a reduce function of the form ``(T, T) -> T``.
    window_shape: a shape tuple defining the window to reduce over.
    strides: a sequence of ``n`` integers, the inter-window strides
      (default: ``(1, ..., 1)``).
    padding: either ``'SAME'``, ``'VALID'``, or a sequence of ``n``
      ``(low, high)`` integer pairs of per-dimension padding.

  Returns:
    The output of the reduction for each window slice.
  """
  num_batch_dims = inputs.ndim - (len(window_shape) + 1)
  strides = strides or (1,) * len(window_shape)
  assert len(window_shape) == len(
    strides
  ), f'len({window_shape}) must equal len({strides})'
  strides = (1,) * num_batch_dims + strides + (1,)
  dims = (1,) * num_batch_dims + window_shape + (1,)

  # lax.reduce_window always needs a batch dimension; add a singleton one for
  # unbatched inputs and strip it again afterwards.
  is_single_input = num_batch_dims == 0
  if is_single_input:
    inputs = inputs[None]
    strides = (1,) + strides
    dims = (1,) + dims

  assert inputs.ndim == len(dims), f'len({inputs.shape}) != len({dims})'
  if not isinstance(padding, str):
    padding = tuple(map(tuple, padding))
    assert len(padding) == len(window_shape), (
      f'padding {padding} must specify pads for same number of dims as '
      f'window_shape {window_shape}'
    )
    assert all(
      [len(x) == 2 for x in padding]
    ), f'each entry in padding {padding} must be length 2'
    padding = ((0, 0),) + padding + ((0, 0),)

  out = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
  return jnp.squeeze(out, axis=0) if is_single_input else out
The provided code snippet includes necessary dependencies for implementing the `max_pool` function. Write a Python function `def max_pool(inputs, window_shape, strides=None, padding='VALID')` to solve the following problem:
Pools the input by taking the maximum of a window slice. Args: inputs: input data with dimensions (batch, window dims..., features). window_shape: a shape tuple defining the window to reduce over. strides: a sequence of ``n`` integers, representing the inter-window strides (default: ``(1, ..., 1)``). padding: either the string ``'SAME'``, the string ``'VALID'``, or a sequence of ``n`` ``(low, high)`` integer pairs that give the padding to apply before and after each spatial dimension (default: ``'VALID'``). Returns: The maximum for each window slice.
Here is the function:
def max_pool(inputs, window_shape, strides=None, padding='VALID'):
  """Pools the input by taking the maximum over each window slice.

  Args:
    inputs: input data with dimensions (batch, window dims..., features).
    window_shape: a shape tuple defining the window to reduce over.
    strides: a sequence of ``n`` integers, the inter-window strides
      (default: ``(1, ..., 1)``).
    padding: either ``'SAME'``, ``'VALID'``, or a sequence of ``n``
      ``(low, high)`` integer pairs of per-dimension padding
      (default: ``'VALID'``).

  Returns:
    The maximum for each window slice.
  """
  # -inf is the identity for max, so padded positions never win.
  return pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)
22,631 | import jax.numpy as jnp
import numpy as np
from jax import lax
def pool(inputs, init, reduce_fn, window_shape, strides, padding):
  """Applies a windowed reduction via the ReduceWindow XLA op.

  .. note::
    Pooling is not generally differentiable: a differentiable ``reduce_fn``
    does not make ``pool`` differentiable.

  Args:
    inputs: input data with dimensions (batch, window dims..., features).
    init: the initial value for the reduction.
    reduce_fn: a reduce function of the form ``(T, T) -> T``.
    window_shape: a shape tuple defining the window to reduce over.
    strides: a sequence of ``n`` integers, the inter-window strides
      (default: ``(1, ..., 1)``).
    padding: either ``'SAME'``, ``'VALID'``, or a sequence of ``n``
      ``(low, high)`` integer pairs of per-dimension padding.

  Returns:
    The output of the reduction for each window slice.
  """
  num_batch_dims = inputs.ndim - (len(window_shape) + 1)
  strides = strides or (1,) * len(window_shape)
  assert len(window_shape) == len(
    strides
  ), f'len({window_shape}) must equal len({strides})'
  strides = (1,) * num_batch_dims + strides + (1,)
  dims = (1,) * num_batch_dims + window_shape + (1,)

  # lax.reduce_window always needs a batch dimension; add a singleton one for
  # unbatched inputs and strip it again afterwards.
  is_single_input = num_batch_dims == 0
  if is_single_input:
    inputs = inputs[None]
    strides = (1,) + strides
    dims = (1,) + dims

  assert inputs.ndim == len(dims), f'len({inputs.shape}) != len({dims})'
  if not isinstance(padding, str):
    padding = tuple(map(tuple, padding))
    assert len(padding) == len(window_shape), (
      f'padding {padding} must specify pads for same number of dims as '
      f'window_shape {window_shape}'
    )
    assert all(
      [len(x) == 2 for x in padding]
    ), f'each entry in padding {padding} must be length 2'
    padding = ((0, 0),) + padding + ((0, 0),)

  out = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
  return jnp.squeeze(out, axis=0) if is_single_input else out
The provided code snippet includes necessary dependencies for implementing the `min_pool` function. Write a Python function `def min_pool(inputs, window_shape, strides=None, padding='VALID')` to solve the following problem:
Pools the input by taking the minimum of a window slice. Args: inputs: Input data with dimensions (batch, window dims..., features). window_shape: A shape tuple defining the window to reduce over. strides: A sequence of ``n`` integers, representing the inter-window strides (default: ``(1, ..., 1)``). padding: Either the string ``'SAME'``, the string ``'VALID'``, or a sequence of ``n`` ``(low, high)`` integer pairs that give the padding to apply before and after each spatial dimension (default: ``'VALID'``). Returns: The minimum for each window slice.
Here is the function:
def min_pool(inputs, window_shape, strides=None, padding='VALID'):
  """Pools the input by taking the minimum over each window slice.

  Args:
    inputs: Input data with dimensions (batch, window dims..., features).
    window_shape: A shape tuple defining the window to reduce over.
    strides: A sequence of ``n`` integers, the inter-window strides
      (default: ``(1, ..., 1)``).
    padding: Either ``'SAME'``, ``'VALID'``, or a sequence of ``n``
      ``(low, high)`` integer pairs of per-dimension padding
      (default: ``'VALID'``).

  Returns:
    The minimum for each window slice.
  """
  # +inf is the identity for min, so padded positions never win.
  return pool(inputs, jnp.inf, lax.min, window_shape, strides, padding)
22,632 | import dataclasses
import numpy as np
import warnings
from functools import partial
from jax import custom_jvp, custom_vjp, lax, random
from jax import numpy as jnp
from jax._src import core
from jax._src import dtypes
from flax.linen import initializers, module
# NOTE(review): this row appears truncated — ``qdq_and_return`` has no body
# here (likely an extraction artifact); a full definition of the same name
# appears later in this file. Kept as-is pending confirmation.
def qdq_and_return(x, q_dtype, scale, amax_history, compute_dtype):
# Quantize-dequantize the input to float8 e4m3, discarding the updated
# scale / amax-history (those are threaded through the custom-VJP rules).
def in_qdq(compute_dtype, inp, scale, amax_history):
  qin, _, _ = qdq_and_return(
    inp, jnp.float8_e4m3fn, scale, amax_history, compute_dtype
  )
  return qin
22,633 | import dataclasses
import numpy as np
import warnings
from functools import partial
from jax import custom_jvp, custom_vjp, lax, random
from jax import numpy as jnp
from jax._src import core
from jax._src import dtypes
from flax.linen import initializers, module
def qdq_and_return(x, q_dtype, scale, amax_history, compute_dtype):
  """Fake-quantizes ``x`` to ``q_dtype`` and updates the fp8 metadata.

  Args:
    x: Tensor to quantize-dequantize.
    q_dtype: Target float8 dtype (e.g. e4m3 or e5m2).
    scale: Current quantization scale; may carry the ``fm32`` marker dtype.
    amax_history: Rolling history of absolute-max values; same dtype rules.
    compute_dtype: Dtype in which the dequantized result is produced.

  Returns:
    A ``(qx, new_scale, new_history)`` tuple.
  """
  # ``fm32`` and the helpers below (get_fp8_max, compute_scale,
  # quantize_dequantize, compute_amax_history) are defined elsewhere in this
  # module — presumably fm32 marks fp8-meta tensors for special autodiff
  # accumulation; TODO(review): confirm.
  is_fm32 = scale.dtype == fm32 and amax_history.dtype == fm32
  # convert fm32->f32 so we can do math
  if is_fm32:
    amax_history = lax.convert_element_type(amax_history, jnp.float32)
    scale = lax.convert_element_type(scale, jnp.float32)
  dtype_max = get_fp8_max(q_dtype, jnp.float32)
  amax_from_history = jnp.max(amax_history, axis=0)
  new_scale = compute_scale(amax_from_history, scale, dtype_max)
  qx = quantize_dequantize(x, q_dtype, new_scale, compute_dtype)
  new_history = compute_amax_history(x, amax_history)
  # convert f32->fm32 so the autodiff system accumulates fp8 meta correctly
  if is_fm32:
    new_history = lax.convert_element_type(new_history, fm32)
    new_scale = lax.convert_element_type(new_scale, fm32)
  return qx, new_scale, new_history
def in_qdq_fwd(compute_dtype, inp, scale, amax_history):
  """Forward rule: quantize-dequantize ``inp`` to e4m3, saving updated fp8 meta as residuals."""
  qin, updated_scale, updated_history = qdq_and_return(
    inp, jnp.float8_e4m3fn, scale, amax_history, compute_dtype
  )
  return qin, (updated_scale, updated_history)
22,634 | import dataclasses
import numpy as np
import warnings
from functools import partial
from jax import custom_jvp, custom_vjp, lax, random
from jax import numpy as jnp
from jax._src import core
from jax._src import dtypes
from flax.linen import initializers, module
def in_qdq_bwd(compute_dtype, res, g):
  """Backward rule: the cotangent passes through unchanged; saved scale/history are forwarded."""
  updated_scale, updated_history = res
  return g, updated_scale, updated_history
22,635 | import dataclasses
import numpy as np
import warnings
from functools import partial
from jax import custom_jvp, custom_vjp, lax, random
from jax import numpy as jnp
from jax._src import core
from jax._src import dtypes
from flax.linen import initializers, module
def out_qdq(compute_dtype, out, scale, amax_history):
  """Identity in the primal computation; quantization happens in the custom backward rule."""
  return out
22,636 | import dataclasses
import numpy as np
import warnings
from functools import partial
from jax import custom_jvp, custom_vjp, lax, random
from jax import numpy as jnp
from jax._src import core
from jax._src import dtypes
from flax.linen import initializers, module
def out_qdq_fwd(compute_dtype, out, scale, amax_history):
  """Forward rule: identity on ``out``; stashes the fp8 meta for the backward pass."""
  residuals = (scale, amax_history)
  return out, residuals
22,637 | import dataclasses
import numpy as np
import warnings
from functools import partial
from jax import custom_jvp, custom_vjp, lax, random
from jax import numpy as jnp
from jax._src import core
from jax._src import dtypes
from flax.linen import initializers, module
def qdq_and_return(x, q_dtype, scale, amax_history, compute_dtype):
  """Fake-quantizes ``x`` to ``q_dtype`` and updates the fp8 metadata.

  Args:
    x: Tensor to quantize-dequantize.
    q_dtype: Target float8 dtype (e.g. e4m3 or e5m2).
    scale: Current quantization scale; may carry the ``fm32`` marker dtype.
    amax_history: Rolling history of absolute-max values; same dtype rules.
    compute_dtype: Dtype in which the dequantized result is produced.

  Returns:
    A ``(qx, new_scale, new_history)`` tuple.
  """
  # ``fm32`` and the helpers below (get_fp8_max, compute_scale,
  # quantize_dequantize, compute_amax_history) are defined elsewhere in this
  # module — presumably fm32 marks fp8-meta tensors for special autodiff
  # accumulation; TODO(review): confirm.
  is_fm32 = scale.dtype == fm32 and amax_history.dtype == fm32
  # convert fm32->f32 so we can do math
  if is_fm32:
    amax_history = lax.convert_element_type(amax_history, jnp.float32)
    scale = lax.convert_element_type(scale, jnp.float32)
  dtype_max = get_fp8_max(q_dtype, jnp.float32)
  amax_from_history = jnp.max(amax_history, axis=0)
  new_scale = compute_scale(amax_from_history, scale, dtype_max)
  qx = quantize_dequantize(x, q_dtype, new_scale, compute_dtype)
  new_history = compute_amax_history(x, amax_history)
  # convert f32->fm32 so the autodiff system accumulates fp8 meta correctly
  if is_fm32:
    new_history = lax.convert_element_type(new_history, fm32)
    new_scale = lax.convert_element_type(new_scale, fm32)
  return qx, new_scale, new_history
def out_qdq_bwd(compute_dtype, res, g):
  """Backward rule: quantize-dequantize the cotangent to float8 e5m2, updating fp8 meta."""
  scale, amax_history = res
  # qdq_and_return already yields (q_g, new_scale, new_history).
  return qdq_and_return(g, jnp.float8_e5m2, scale, amax_history, compute_dtype)
22,638 | import dataclasses
import numpy as np
import warnings
from functools import partial
from jax import custom_jvp, custom_vjp, lax, random
from jax import numpy as jnp
from jax._src import core
from jax._src import dtypes
from flax.linen import initializers, module
def dot_general_with_precision(
    lhs, rhs, dimension_numbers, precision=None, preferred_element_type=None
):
  """Wraps ``lax.dot_general``, always forcing default precision.

  Any caller-supplied ``precision`` or ``preferred_element_type`` is ignored;
  a warning is emitted so the caller knows their value was discarded.

  Args:
    lhs: Left-hand operand.
    rhs: Right-hand operand.
    dimension_numbers: ``lax.dot_general`` dimension-numbers tuple.
    precision: Ignored (warned about if set).
    preferred_element_type: Ignored (warned about if set).

  Returns:
    ``lax.dot_general(lhs, rhs, dimension_numbers)`` at default precision.
  """
  # PEP 8: compare to None with `is not`, never `!=`.
  if precision is not None or preferred_element_type is not None:
    warnings.warn(
        'The function dot_general_with_precision will set the '
        'precision/preferred_element_type and disregard any provided '
        'values.'
    )
  return lax.dot_general(
      lhs, rhs, dimension_numbers, precision=lax.Precision.DEFAULT
  )
22,639 | import dataclasses
import numpy as np
import warnings
from functools import partial
from jax import custom_jvp, custom_vjp, lax, random
from jax import numpy as jnp
from jax._src import core
from jax._src import dtypes
from flax.linen import initializers, module
def dot_general_with_precision_jvp(
    dimension_numbers, precision, preferred_element_type, primals, tangents
):
  """JVP rule: default precision for the primal, highest precision for the tangent."""
  lhs, rhs = primals
  lhs_dot, rhs_dot = tangents
  primal_out = lax.dot_general(
      lhs, rhs, dimension_numbers, precision=lax.Precision.DEFAULT
  )
  # The tangent is accumulated at the highest precision so gradients stay
  # accurate even when the forward pass runs at a reduced precision.
  tangent_out = lax.dot_general(
      lhs_dot, rhs, dimension_numbers, precision=lax.Precision.HIGHEST
  ) + lax.dot_general(
      lhs, rhs_dot, dimension_numbers, precision=lax.Precision.HIGHEST
  )
  return primal_out, tangent_out
22,640 | from functools import partial
from typing import (
Any,
Callable,
Dict,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import jax
import numpy as np
from absl import logging
from jax import numpy as jnp
from jax import random
from typing_extensions import Protocol
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import CollectionFilter, PRNGSequenceFilter
from flax.linen import initializers, transforms
from flax.linen.activation import sigmoid, tanh
from flax.linen.dtypes import promote_dtype
from flax.linen.linear import Conv, Dense, default_kernel_init
from flax.linen.module import Module, compact, nowrap
from flax.typing import (
Array,
PRNGKey,
Dtype,
InOutScanAxis,
Initializer,
PrecisionLike,
)
A = TypeVar('A')
def _select_last_carry(sequence: A, seq_lengths: jnp.ndarray) -> A:
  """Picks, for each batch entry, the carry at its last valid timestep.

  Assumes every leaf of ``sequence`` is time-major: (time, batch, ...).
  """
  last_idx = seq_lengths - 1

  def _take_last(arr: jnp.ndarray):
    # Advanced indexing: row last_idx[b], column b, for every batch index b.
    return arr[last_idx, jnp.arange(arr.shape[1])]

  return jax.tree_util.tree_map(_take_last, sequence)
22,641 | from functools import partial
from typing import (
Any,
Callable,
Dict,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import jax
import numpy as np
from absl import logging
from jax import numpy as jnp
from jax import random
from typing_extensions import Protocol
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import CollectionFilter, PRNGSequenceFilter
from flax.linen import initializers, transforms
from flax.linen.activation import sigmoid, tanh
from flax.linen.dtypes import promote_dtype
from flax.linen.linear import Conv, Dense, default_kernel_init
from flax.linen.module import Module, compact, nowrap
from flax.typing import (
Array,
PRNGKey,
Dtype,
InOutScanAxis,
Initializer,
PrecisionLike,
)
def _expand_dims_like(x, target):
"""Expands the shape of `x` to match `target`'s shape by adding singleton dimensions."""
return x.reshape(list(x.shape) + [1] * (target.ndim - x.ndim))
Array = Union[jax.Array, Any]
The provided code snippet includes necessary dependencies for implementing the `flip_sequences` function. Write a Python function `def flip_sequences( inputs: Array, seq_lengths: Optional[Array], num_batch_dims: int, time_major: bool, ) -> Array` to solve the following problem:
Flips a sequence of inputs along the time axis. This function can be used to prepare inputs for the reverse direction of a bidirectional LSTM. It solves the issue that, when naively flipping multiple padded sequences stored in a matrix, the first elements would be padding values for those sequences that were padded. This function keeps the padding at the end, while flipping the rest of the elements. Example: ```python inputs = [[1, 0, 0], [2, 3, 0] [4, 5, 6]] lengths = [1, 2, 3] flip_sequences(inputs, lengths) = [[1, 0, 0], [3, 2, 0], [6, 5, 4]] ``` Args: inputs: An array of input IDs <int>[batch_size, seq_length]. lengths: The length of each sequence <int>[batch_size]. Returns: An ndarray with the flipped inputs.
Here is the function:
def flip_sequences(
  inputs: Array,
  seq_lengths: Optional[Array],
  num_batch_dims: int,
  time_major: bool,
) -> Array:
  """Reverses padded sequences along the time axis, keeping padding at the end.

  Naively flipping a matrix of padded sequences would move the padding to the
  front for sequences shorter than the maximum length. This function instead
  reverses only the valid prefix of each sequence, which is what the reverse
  direction of a bidirectional RNN needs.

  Example:
  ```python
  inputs = [[1, 0, 0],
            [2, 3, 0]
            [4, 5, 6]]
  lengths = [1, 2, 3]
  flip_sequences(inputs, lengths) = [[1, 0, 0],
                                     [3, 2, 0],
                                     [6, 5, 4]]
  ```

  Args:
    inputs: An array of input IDs <int>[batch_size, seq_length].
    lengths: The length of each sequence <int>[batch_size].

  Returns:
    An ndarray with the flipped inputs.
  """
  time_axis = 0 if time_major else num_batch_dims
  num_steps = inputs.shape[time_axis]

  if seq_lengths is None:
    # No per-example lengths: a plain reverse along time is correct.
    return jnp.flip(inputs, axis=time_axis)

  seq_lengths = jnp.expand_dims(seq_lengths, axis=time_axis)

  # Descending indices [num_steps-1, ..., 0], shaped to broadcast over batch.
  idxs = jnp.arange(num_steps - 1, -1, -1)  # [max_steps]
  if time_major:
    idxs = jnp.reshape(idxs, [num_steps] + [1] * num_batch_dims)
  else:
    idxs = jnp.reshape(
      idxs, [1] * num_batch_dims + [num_steps]
    )  # [1, ..., max_steps]
  # Rotating the descending indices by each sequence's length maps valid
  # elements to their flipped positions while leaving padding at the tail.
  idxs = (idxs + seq_lengths) % num_steps  # [*batch, max_steps]
  idxs = _expand_dims_like(
    idxs, target=inputs
  )  # [*batch, max_steps, *features]
  return jnp.take_along_axis(inputs, idxs, axis=time_axis)
22,642 | from functools import partial
from typing import (
Any,
Callable,
Dict,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import jax
import numpy as np
from absl import logging
from jax import numpy as jnp
from jax import random
from typing_extensions import Protocol
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import CollectionFilter, PRNGSequenceFilter
from flax.linen import initializers, transforms
from flax.linen.activation import sigmoid, tanh
from flax.linen.dtypes import promote_dtype
from flax.linen.linear import Conv, Dense, default_kernel_init
from flax.linen.module import Module, compact, nowrap
from flax.typing import (
Array,
PRNGKey,
Dtype,
InOutScanAxis,
Initializer,
PrecisionLike,
)
Array = Union[jax.Array, Any]
The provided code snippet includes necessary dependencies for implementing the `_concatenate` function. Write a Python function `def _concatenate(a: Array, b: Array) -> Array` to solve the following problem:
Concatenates two arrays along the last dimension.
Here is the function:
def _concatenate(a: Array, b: Array) -> Array:
  """Joins ``a`` and ``b`` along their trailing axis."""
  return jnp.concatenate((a, b), axis=-1)
22,643 | import dataclasses
from typing import Any, Callable, Iterable, Optional, Tuple, Sequence
import jax.numpy as jnp
from jax import lax
from flax import linen as nn
from flax.linen import initializers
from flax.linen.partitioning import param_with_axes, with_sharding_constraint
from flax.typing import (
Array,
Dtype,
Axes,
Initializer,
PrecisionLike,
DotGeneralT,
)
def _abs_sq(x):
  """Returns the elementwise squared magnitude |x|^2 (real for complex x)."""
  if not jnp.iscomplexobj(x):
    return lax.square(x)
  # For complex inputs, |x|^2 = Re(x)^2 + Im(x)^2 keeps the result real.
  real_part, imag_part = lax.real(x), lax.imag(x)
  return lax.square(real_part) + lax.square(imag_part)
Array = Union[jax.Array, Any]
Axes = Union[int, Sequence[int]]
The provided code snippet includes necessary dependencies for implementing the `_compute_stats` function. Write a Python function `def _compute_stats(x: Array, axes: Axes)` to solve the following problem:
Computes mean and variance statistics. This implementation takes care of a few important details: - Computes in float32 precision for half-precision inputs - mean and variance are computable in a single XLA fusion, by using Var = E[|x|^2] - |E[x]|^2 instead of Var = E[|x - E[x]|^2]. - Clips negative variances to zero, which can happen due to roundoff errors. This avoids downstream NaNs. - Supports averaging across a parallel axis and subgroups of a parallel axis with a single `lax.pmean` call to avoid latency.
Here is the function:
def _compute_stats(x: Array, axes: Axes):
  """Computes mean and variance of ``x`` over ``axes``.

  Implementation notes:

  - computes in (at least) float32 precision for half-precision inputs,
    while preserving float64 and complex inputs;
  - derives the variance as Var = E[|x|^2] - |E[x]|^2 so both statistics
    fit into a single XLA fusion;
  - clamps the variance at zero, since round-off can make the difference
    above slightly negative and produce NaNs downstream.
  """
  # Promote to float32 or wider; double/complex inputs keep their type.
  compute_dtype = jnp.promote_types(jnp.float32, jnp.result_type(x))
  x = jnp.asarray(x, compute_dtype)
  mean = jnp.mean(x, axes)
  second_moment = jnp.mean(_abs_sq(x), axes)
  # E[|x|^2] - |E[x]|^2 may dip below zero due to round-off; clip it.
  var = jnp.maximum(0.0, second_moment - _abs_sq(mean))
  return mean, var
22,644 | import dataclasses
from typing import Any, Callable, Iterable, Optional, Tuple, Sequence
import jax.numpy as jnp
from jax import lax
from flax import linen as nn
from flax.linen import initializers
from flax.linen.partitioning import param_with_axes, with_sharding_constraint
from flax.typing import (
Array,
Dtype,
Axes,
Initializer,
PrecisionLike,
DotGeneralT,
)
def _canonicalize_axes(rank: int, axes: Axes) -> Sequence[int]:
  """Returns a tuple of deduplicated, non-negative axes for a given rank."""
  if not isinstance(axes, Iterable):
    axes = (axes,)
  # Map negative indices to their positive counterparts and deduplicate.
  positive_axes = {rank + axis if axis < 0 else axis for axis in axes}
  return tuple(positive_axes)
def param_with_axes(
    name: str,
    init_fn,
    *init_args,
    axes: Optional[Tuple[str, ...]] = None,
    module: Optional['nn.Module'] = None,
    **init_kwargs,
):
  """Declares and returns a parameter with logical axes in the current Module.

  See :mod:`flax.linen.module.param` for original docstring.

  Args:
    name: The parameter name.
    init_fn: Called to compute the initial value; only invoked the first
      time this parameter is used in this module.
    *init_args: Positional arguments forwarded to ``init_fn``.
    axes: A tuple of logical axis names matching the parameter's rank.
    module: Explicit module to attach the parameter to; defaults to the
      innermost module on the dynamic module context stack.
    **init_kwargs: Keyword arguments forwarded to ``init_fn``.

  Returns:
    The value of the initialized parameter.
  """
  if module is None:
    # No explicit module: use the innermost module in the dynamic context.
    module = nn.module._context.module_stack[-1]  # pylint: disable=protected-access
    assert module is not None
  value = module.param(name, init_fn, *init_args, **init_kwargs)
  if axes is None:
    return value
  # Apply the logical-axis sharding constraint immediately ...
  value = with_sharding_constraint(value, jax.sharding.PartitionSpec(*axes))
  # ... and record the constraint so global axis metadata can be collected.
  module.sow(
      'params_axes',
      f'{name}_axes',
      AxisMetadata(axes),  # type: ignore
      reduce_fn=_param_with_axes_sow_reduce_fn,
  )
  return value
# Permissive runtime aliases for the flax.typing names used in the
# annotations above.
Array = Union[jax.Array, Any]
Dtype = Union[jax.typing.DTypeLike, Any]
Initializer = Union[jax.nn.initializers.Initializer, Callable[..., Any]]
Axes = Union[int, Sequence[int]]  # a single axis index or a collection of them
The provided code snippet includes necessary dependencies for implementing the `_normalize` function. Write a Python function `def _normalize( mdl: nn.Module, x: Array, mean: Array, var: Array, reduction_axes: Axes, feature_axes: Axes, dtype: Dtype, param_dtype: Dtype, epsilon: float, use_bias: bool, use_scale: bool, bias_init: Initializer, scale_init: Initializer, )` to solve the following problem:
"Normalizes the input of a normalization layer and optionally applies a learned scale and bias. A seperate bias and scale is learned for each feature as specified by feature_axes.
Here is the function:
def _normalize(
    mdl: nn.Module,
    x: Array,
    mean: Array,
    var: Array,
    reduction_axes: Axes,
    feature_axes: Axes,
    dtype: Dtype,
    param_dtype: Dtype,
    epsilon: float,
    use_bias: bool,
    use_scale: bool,
    bias_init: Initializer,
    scale_init: Initializer,
):
  """Normalizes the input of a normalization layer, optionally applying a
  learned scale and bias.

  A separate scale and bias is learned for each feature, as specified by
  ``feature_axes``. Parameters are created on ``mdl`` via
  ``param_with_axes`` under the logical axis name ``'embed'``.
  """
  reduction_axes = _canonicalize_axes(x.ndim, reduction_axes)
  feature_axes = _canonicalize_axes(x.ndim, feature_axes)
  # Statistics broadcast against ``x`` with size-1 reduced dimensions.
  stats_shape = [
      1 if axis in reduction_axes else dim for axis, dim in enumerate(x.shape)
  ]
  # Parameters broadcast against ``x`` with size-1 non-feature dimensions,
  # but are stored with only the feature dimensions.
  feature_shape = [
      dim if axis in feature_axes else 1 for axis, dim in enumerate(x.shape)
  ]
  reduced_feature_shape = [x.shape[axis] for axis in feature_axes]
  centered = x - mean.reshape(stats_shape)
  multiplier = lax.rsqrt(var.reshape(stats_shape) + epsilon)
  if use_scale:
    scale = mdl.param_with_axes(
        'scale', scale_init, reduced_feature_shape, param_dtype, axes=('embed',)
    ).reshape(feature_shape)
    multiplier *= scale
  y = centered * multiplier
  if use_bias:
    bias = mdl.param_with_axes(
        'bias', bias_init, reduced_feature_shape, param_dtype, axes=('embed',)
    ).reshape(feature_shape)
    y += bias
  return jnp.asarray(y, dtype)
22,645 | import dataclasses
from typing import (
Any,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import jax
import jax.numpy as jnp
import numpy as np
from jax import eval_shape, lax
from jax.core import ShapedArray
import opt_einsum
from flax.core import meta
from flax.linen import initializers
from flax.linen.dtypes import promote_dtype
from flax.linen import module
from flax.linen.module import Module, compact
from flax.typing import (
Array,
PRNGKey as PRNGKey,
Dtype,
Shape as Shape,
Initializer,
PrecisionLike,
DotGeneralT,
ConvGeneralDilatedT,
PaddingLike,
LaxPadding,
)
def _normalize_axes(axes: Tuple[int, ...], ndim: int) -> Tuple[int, ...]:
  """Returns ``axes`` as sorted, non-negative indices for a rank-``ndim`` array."""
  # A tuple by convention; len() of the result also gives the rank cheaply.
  return tuple(sorted(ndim + ax if ax < 0 else ax for ax in axes))
22,646 | import dataclasses
from typing import (
Any,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import jax
import jax.numpy as jnp
import numpy as np
from jax import eval_shape, lax
from jax.core import ShapedArray
import opt_einsum
from flax.core import meta
from flax.linen import initializers
from flax.linen.dtypes import promote_dtype
from flax.linen import module
from flax.linen.module import Module, compact
from flax.typing import (
Array,
PRNGKey as PRNGKey,
Dtype,
Shape as Shape,
Initializer,
PrecisionLike,
DotGeneralT,
ConvGeneralDilatedT,
PaddingLike,
LaxPadding,
)
def _canonicalize_tuple(x: Union[Sequence[int], int]) -> Tuple[int, ...]:
  """Wraps a lone int in a 1-tuple; converts any iterable to a tuple."""
  return tuple(x) if isinstance(x, Iterable) else (x,)
22,647 | import dataclasses
from typing import (
Any,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import jax
import jax.numpy as jnp
import numpy as np
from jax import eval_shape, lax
from jax.core import ShapedArray
import opt_einsum
from flax.core import meta
from flax.linen import initializers
from flax.linen.dtypes import promote_dtype
from flax.linen import module
from flax.linen.module import Module, compact
from flax.typing import (
Array,
PRNGKey as PRNGKey,
Dtype,
Shape as Shape,
Initializer,
PrecisionLike,
DotGeneralT,
ConvGeneralDilatedT,
PaddingLike,
LaxPadding,
)
The provided code snippet includes necessary dependencies for implementing the `_conv_dimension_numbers` function. Write a Python function `def _conv_dimension_numbers(input_shape)` to solve the following problem:
Computes the dimension numbers based on the input shape.
Here is the function:
def _conv_dimension_numbers(input_shape):
  """Builds channel-last dimension numbers from the input's rank.

  Input/output layout is (batch, *spatial, features); the kernel layout is
  (*spatial, in_features, out_features).
  """
  ndim = len(input_shape)
  spatial = tuple(range(1, ndim - 1))
  lhs_spec = (0, ndim - 1) + spatial
  rhs_spec = (ndim - 1, ndim - 2) + tuple(range(ndim - 2))
  # The output uses the same layout as the input.
  return lax.ConvDimensionNumbers(lhs_spec, rhs_spec, lhs_spec)
22,648 | import dataclasses
from typing import (
Any,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import jax
import jax.numpy as jnp
import numpy as np
from jax import eval_shape, lax
from jax.core import ShapedArray
import opt_einsum
from flax.core import meta
from flax.linen import initializers
from flax.linen.dtypes import promote_dtype
from flax.linen import module
from flax.linen.module import Module, compact
from flax.typing import (
Array,
PRNGKey as PRNGKey,
Dtype,
Shape as Shape,
Initializer,
PrecisionLike,
DotGeneralT,
ConvGeneralDilatedT,
PaddingLike,
LaxPadding,
)
The provided code snippet includes necessary dependencies for implementing the `canonicalize_padding` function. Write a Python function `def canonicalize_padding(padding: PaddingLike, rank: int) -> LaxPadding` to solve the following problem:
"Canonicalizes conv padding to a jax.lax supported format.
Here is the function:
def canonicalize_padding(padding: PaddingLike, rank: int) -> LaxPadding:
  """Canonicalizes conv padding to a jax.lax supported format.

  Accepts a padding mode string, a single int (applied symmetrically to all
  spatial dimensions), or a length-``rank`` sequence of ints / (lo, hi)
  pairs. Anything else raises ``ValueError``.
  """
  if isinstance(padding, str):
    return padding
  if isinstance(padding, int):
    return [(padding, padding)] * rank
  if isinstance(padding, Sequence) and len(padding) == rank:
    pairs = []
    for entry in padding:
      if isinstance(entry, int):
        pairs.append((entry, entry))
      elif isinstance(entry, tuple) and len(entry) == 2:
        pairs.append(entry)
      else:
        # Malformed element: fall through to the error below.
        break
    else:
      return pairs
  raise ValueError(
      f'Invalid padding format: {padding}, should be str, int,'
      f' or a sequence of len {rank} where each element is an'
      ' int or pair of ints.'
  )
22,649 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
def _indent(x: str, num_spaces: int):
  """Indents every line of ``x`` by ``num_spaces`` spaces.

  ``x`` must end with a newline; the empty segment after the trailing
  newline is intentionally left unindented.
  """
  pad = ' ' * num_spaces
  *body, tail = x.split('\n')
  # The final split element comes after the trailing newline and must be
  # empty; it should not be indented.
  assert not tail
  return '\n'.join(pad + line for line in body) + '\n'
def _attr_repr(value: Any):
  """Returns a short representation of an attribute value.

  Callables are shown by ``__name__`` when one is available; for Module
  instances only an instance-level ``__name__`` counts. Everything else
  falls back to ``repr``.
  """
  if callable(value):
    if isinstance(value, nn.Module):
      # Only look at the instance dict so class-level names are ignored.
      display_name = value.__dict__.get('__name__', None)
    else:
      display_name = getattr(value, '__name__', None)
    if display_name:
      return display_name
  return repr(value)
class Module(ModuleBase):
"""Base class for all neural network modules.
Layers and models should subclass this class.
All Flax Modules are Python 3.7
`dataclasses <https://docs.python.org/3/library/dataclasses.html>`_. Since
dataclasses take over ``__init__``, you should instead override :meth:`setup`,
which is automatically called to initialize the module.
Modules can contain submodules, and in this way can be nested in a tree
structure. Submodels can be assigned as regular attributes inside the
:meth:`setup` method.
You can define arbitrary "forward pass" methods on your Module subclass.
While no methods are special-cased, ``__call__`` is a popular choice because
it allows you to use module instances as if they are functions::
>>> from flax import linen as nn
>>> from typing import Tuple
>>> class Module(nn.Module):
... features: Tuple[int, ...] = (16, 4)
... def setup(self):
... self.dense1 = nn.Dense(self.features[0])
... self.dense2 = nn.Dense(self.features[1])
... def __call__(self, x):
... return self.dense2(nn.relu(self.dense1(x)))
Optionally, for more concise module implementations where submodules
definitions are co-located with their usage, you can use the
:meth:`compact` wrapper.
"""
if typing.TYPE_CHECKING:
  # Static-analysis-only declarations: tell type checkers about the
  # `name`/`parent` fields and constructor/call signatures injected by the
  # dataclass transform below. None of this code runs at runtime.
  name: Optional[str] = module_field(kw_only=True, default=None)
  parent: Union['Module', _Sentinel, None] = module_field(
      kw_only=True, default=None
  )

  def __init__(self, *args, **kwargs):
    # this stub makes sure pytype accepts constructor arguments.
    pass

  def __call__(self, *args, **kwargs) -> Any:
    # this stub allows pytype to accept Modules as Callables.
    pass
def __init_subclass__(cls, kw_only: bool = False, **kwargs: Any) -> None:
  """Automatically initializes all subclasses as custom dataclasses."""
  super().__init_subclass__(**kwargs)
  # All Flax Modules are dataclasses.  We force this convention since
  # it encourages the stateless behavior needed to clone module instances for
  # functional transformation. Instead of using a python metaclass, we
  # automatically transform Modules into dataclasses at subclass creation
  # time, and we set the last dataclass arguments to `parent` and `name`.
  cls._customized_dataclass_transform(kw_only)
  # We wrap user-defined methods including setup and __call__ to enforce
  # a number of different checks and to provide clear error messages.
  cls._verify_single_or_no_compact()
  cls._find_compact_name_scope_methods()
  cls._wrap_module_attributes()
  # Set empty class defaults.
  cls._state = _uninitialized_module_internal_state  # type: ignore[attr-defined]
  cls.scope: Optional[Scope] = None  # type: ignore
  # Handles weak referencing of parent Modules to prevent reference cycles.
  cls._parent_ref = None  # type: ignore[attr-defined]
  cls.parent = ParentDescriptor()  # type: ignore[assignment]
def _customized_dataclass_transform(cls, kw_only: bool):
  """Transforms `cls` into a dataclass, with custom additional behavior.

  (Called from ``__init_subclass__``, so ``cls`` is bound like ``self``.)

  1. Inject `parent` and `name` fields. (If they are already present,
     then check that they have the expected types.)
  2. Set compare, hash, and repr to False for non-init fields.
  3. Generate a hash function (if not provided by cls).
  """
  # Check reserved attributes have expected type annotations.
  annotations = dict(cls.__dict__.get('__annotations__', {}))
  if annotations.get('parent', _ParentType) != _ParentType:
    raise errors.ReservedModuleAttributeError(annotations)
  if annotations.get('name', str) not in ('str', str, Optional[str]):
    raise errors.ReservedModuleAttributeError(annotations)
  # any non-init field will only be set in setup
  # During __hash__ and __eq__ the field is not set yet
  # so it should not be used in compare, hash or repr.
  for field in annotations:
    field_meta = getattr(cls, field, None)
    if isinstance(field_meta, dataclasses.Field) and not field_meta.init:
      field_meta.compare = False
      field_meta.hash = False
      field_meta.repr = False
  # `parent` and `name` are appended as the last two keyword-only fields.
  extra_fields = [
      (
          'parent',
          _ParentType,
          kw_only_dataclasses.field(
              repr=False, default=_unspecified_parent, kw_only=True
          ),
      ),
      (
          'name',
          Optional[str],
          kw_only_dataclasses.field(default=None, kw_only=True),
      ),
  ]
  if kw_only:
    # Native keyword-only dataclass support exists only on Python >= 3.10.
    if tuple(sys.version_info)[:3] >= (3, 10, 0):
      for (
          name,
          annotation,  # pytype: disable=invalid-annotation
          default,
      ) in extra_fields:
        setattr(cls, name, default)
        cls.__annotations__[name] = annotation
      dataclasses.dataclass(  # type: ignore[call-overload]
          unsafe_hash='__hash__' not in cls.__dict__,
          repr=False,
          kw_only=True,
      )(cls)
    else:
      raise TypeError('`kw_only` is not available before Py 3.10.')
  else:
    # Now apply dataclass transform (which operates in-place).
    # Do generate a hash function only if not provided by the class.
    kw_only_dataclasses.dataclass(
        cls,
        unsafe_hash='__hash__' not in cls.__dict__,
        repr=False,
        extra_fields=extra_fields,
    )  # pytype: disable=wrong-keyword-args

  cls.__hash__ = _wrap_hash(cls.__hash__)  # type: ignore[method-assign]
def _verify_single_or_no_compact(cls):
  """Statically verifies that at most a single method is labelled compact."""
  # Count callables carrying the `compact` marker set by the decorator.
  compact_count = sum(
      1
      for method_name, _ in inspect.getmembers(cls, predicate=callable)
      if hasattr(getattr(cls, method_name), 'compact')
  )
  if compact_count > 1:
    raise errors.MultipleMethodsCompactError()
def _find_compact_name_scope_methods(cls):
  """Finds all compact_name_scope methods in the class."""
  # Collect callables carrying the `compact_name_scope` marker and stash
  # their names on the class for later module construction.
  callables = inspect.getmembers(cls, predicate=callable)
  cls._compact_name_scope_methods = tuple(
      method_name
      for method_name, _ in callables
      if hasattr(getattr(cls, method_name), 'compact_name_scope')
  )
def _wrap_module_attributes(cls):
  """Wraps user-defined non-inherited methods and descriptors with state
  management functions.
  """
  # wrap methods; dataclass fields and dunder plumbing are excluded.
  method_exclusions = [f.name for f in dataclasses.fields(cls)] + [
      '__eq__',
      '__repr__',
      '__init__',
      '__hash__',
      '__post_init__',
  ]
  for key in _get_local_method_names(cls, exclude=method_exclusions):
    method = getattr(cls, key)
    if hasattr(method, 'nowrap'):
      # The method explicitly opted out of wrapping (e.g. via @nowrap).
      continue
    setattr(cls, key, wrap_method_once(method))
  # wrap descriptors
  descriptor_exclusions = [f.name for f in dataclasses.fields(cls)] + [
      'parent',
      '__dict__',
  ]
  for key in _get_local_descriptor_names(cls, descriptor_exclusions):
    # don't use getattr here, since it will call the descriptor
    descriptor = cls.__dict__[key]
    if hasattr(descriptor, 'nowrap'):
      continue
    setattr(cls, key, wrap_descriptor_once(descriptor))
  return cls
def _call_wrapped_method(self, fun, args, kwargs):
  """Calls a wrapped method.

  This function is responsible for setting up the thread local state
  correctly before calling the method and cleaning up afterwards.
  This includes storing intermediates, setup of the compact scope,
  and making sure setup is called before any other method.

  Args:
    fun: The wrapped method.
    args: Named arguments passed to ``fun``.
    kwargs: Keyword arguments passed to ``fun``.

  Returns:
    The results of calling ``fun``.
  """
  is_compact_method = hasattr(fun, 'compact')
  fun_name = _get_fn_name(fun)
  is_setup_method = fun_name == 'setup'
  add_call_info = not is_setup_method and len(_context.call_info_stack) > 0
  # We lazily call setup() only when needed.
  if is_setup_method:
    if self.scope is None:
      raise errors.CallSetupUnboundModuleError()
    is_recurrent = self._state.in_setup
    self._state.in_setup = True
  else:
    self._try_setup()

  if is_compact_method:
    if self.scope is None:
      raise errors.CallCompactUnboundModuleError()
    is_recurrent = self._state.in_compact_method
    self._state.in_compact_method = True
  _context.module_stack.append(self)
  try:
    # get call info
    if add_call_info:
      assert self.scope is not None
      call_index = _context.call_info_stack[-1].get_call_index()

    # Interceptors (if any) wrap the call so they can observe/modify it.
    if _global_interceptor_stack:
      run_fun = functools.partial(run_interceptors, fun)
    else:
      run_fun = fun

    # call method
    if _use_named_call:
      # Attach a profiler name scope around the actual call.
      with jax.named_scope(_derive_profiling_name(self, fun)):
        y = run_fun(self, *args, **kwargs)
    else:
      y = run_fun(self, *args, **kwargs)

    if _context.capture_stack:
      filter_fn = _context.capture_stack[-1]
      if filter_fn and filter_fn(self, fun_name):
        self.sow('intermediates', fun_name, y)
    if add_call_info:
      _args, _kwargs, _y = flax.linen.summary._represent_tree(
          (args, kwargs, y)
      )
      _context.call_info_stack[-1].calls.append(
          _CallInfo(
              call_index,
              self.path,
              self.clone(),
              self.scope.rngs,
              self.scope.mutable,
              fun.__name__,
              _args,
              _kwargs,
              _y,
          )
      )
    return y
  finally:
    _context.module_stack.pop()
    if is_compact_method:
      object.__setattr__(self, 'scope', self.scope.rewound())
    # setup or compact calls can be recurrent for example due to super calls
    # resetting the state would cause is compact/setup method
    # to be set to False prematurely.
    if (is_compact_method or is_setup_method) and not is_recurrent:
      self._state.reset()
def __setattr__(self, name: str, val: Any):
  """Sets an attribute on this Module.

  We overload setattr solely to support pythonic naming via assignment of
  submodules in the special :meth:`setup` function::

    self.submodule_name = MyModule(...)

  We also support lists and other general pytrees, e.g.::

    self.submodules = [MyModule0(..), MyModule1(..), ...]

  Args:
    name: Attribute to set.
    val: Value of the attribute.
  """
  fields = self.__dataclass_fields__  # pytype: disable=attribute-error
  is_dataclass_attr = name in fields and fields[name].init

  if not self._state.in_setup:
    if not self._state.is_initialized:
      # Setting attributes before end of Module.__post_init__()
      object.__setattr__(self, name, val)
      return
    else:
      # We're past all initialization and setup logic:
      # Raises a TypeError just like frozen python dataclasses.
      raise errors.SetAttributeFrozenModuleError(
          self.__class__.__name__, name, val
      )

  # We're inside the setup() method:
  if is_dataclass_attr:
    # These names are specified as dataclass fields. They should not be
    # initialized within the setup() method, but can be modified freely
    # before it.
    raise errors.SetAttributeInModuleSetupError()

  # Values (that may be variables or submodules) are being defined and
  # attached in setup(), we run some extra logic in that case.
  self._register_submodules(name, val)
def __getattr__(self, name: str) -> Any:
  """Call setup() before getting any setup-defined attributes."""
  # We don't want to return anything for python copy / pickle methods.
  if name in _UNDEFINED_COPY_PICKLE_METHODS:
    raise AttributeError()
  self._try_setup()
  if name in self.__dict__:
    return self.__dict__[name]
  else:
    msg = f'"{self.__class__.__name__}" object has no attribute "{name}".'
    if self.scope is None:
      # Unbound module: setup() never ran, so hint at the likely cause.
      msg += (
          f' If "{name}" is defined in \'.setup()\', remember these fields '
          "are only accessible from inside 'init' or 'apply'."
      )
    raise AttributeError(msg)
def __dir__(self) -> List[str]:
  """Call setup() before listing attributes."""
  # Ensures setup-defined attributes appear in dir() / autocomplete.
  self._try_setup()
  return object.__dir__(self)  # type: ignore
def __post_init__(self) -> None:
  # DO NOT REMOVE - Marker for internal logging.
  # In dataclasses, __init__ is overridden to process dataclass arguments,
  # and __post_init__ is called immediately afterwards. Here, depending on the
  # type of `parent` passed to initialize the Module, we either defer
  # initialization, attach this Module as a submodule of a parent, or bind
  # this Module at the top-level to variables and rngs.

  # Unique instance id; also used to key sharing-by-reference caches.
  object.__setattr__(self, '_id', uuid())
  object.__setattr__(self, '_state', _ModuleInternalState())

  # Typically we set the parent based on the dynamic module context.
  if self.parent is _unspecified_parent:  # pytype: disable=attribute-error
    object.__setattr__(self, 'parent', _context.module_stack[-1])

  # Initialization is deferred for top level Modules or any other "orphan"
  # Modules until attachment by __setattr__ i.e. MyModule(..., parent=None)
  if self.parent is None:
    return

  # Register submodule on parent Module.
  if isinstance(self.parent, Module):
    # When initializing an unnamed Module inside setup()
    # initialization is deferred until attachment by __setattr__
    # i.e. self.mymodule = MyModule(...)
    self.name: Optional[str]
    if (
        self.parent._state.in_setup and self.name is None
    ):  # pytype: disable=attribute-error
      return
    if not self.parent._initialization_allowed:
      raise errors.AssignSubModuleError(self.__class__.__name__)
    # Autonaming of submodules.
    if self.name is None:  # pytype: disable=attribute-error
      prefix = f'{self.__class__.__name__}'
      cursor = self.parent._state.autoname_cursor.get(prefix, 0)
      self.name = f'{prefix}_{cursor}'
      self.parent._state.autoname_cursor[prefix] = cursor + 1
    # Allow scope aliasing under transforms for submodules defined in setup.
    reuse_scopes = (
        self.parent._state.in_setup
        and self.parent._state.setup_called == SetupState.TRANSFORMED
    )
    # Perform name-collision check.
    if self.parent._name_taken(self.name, reuse_scopes=reuse_scopes):
      parent_class = self.parent.__class__.__name__
      raise errors.NameInUseError('submodule', self.name, parent_class)
    # Finalize attachment to parent and scope initialization.
    self.parent._state.children[self.name] = self
    assert self.parent.scope is not None
    object.__setattr__(
        self, 'scope', self.parent.scope.push(self.name, reuse=reuse_scopes)
    )

  # Top-level invocation with a functional Scope.
  elif isinstance(self.parent, Scope):
    object.__setattr__(self, 'scope', self.parent)
  else:
    raise ValueError('parent must be None, Module or Scope')

  # eagerly bind submodules if scope is available
  if self.scope is not None:
    for field in dataclasses.fields(self):
      if field.name not in ('parent', 'name') and field.init:
        self._register_submodules(field.name, getattr(self, field.name))

  self._state.is_initialized = True
def __repr__(self) -> str:
  # Delegates to the shared module pretty-printer.
  return _module_repr(self)
def setup(self) -> None:
  """Initializes a Module lazily (similar to a lazy ``__init__``).

  ``setup`` is called once lazily on a module instance when a module
  is bound, immediately before any other methods like ``__call__`` are
  invoked, or before a ``setup``-defined attribute on ``self`` is accessed.

  This can happen in three cases:

  1. Immediately when invoking :meth:`apply`, :meth:`init` or
     :meth:`init_and_output`.

  2. Once the module is given a name by being assigned to an attribute of
     another module inside the other module's ``setup`` method
     (see :meth:`__setattr__`)::

       >>> class MyModule(nn.Module):
       ...   def setup(self):
       ...     submodule = nn.Conv(...)
       ...     # Accessing `submodule` attributes does not yet work here.
       ...     # The following line invokes `self.__setattr__`, which gives
       ...     # `submodule` the name "conv1".
       ...     self.conv1 = submodule
       ...     # Accessing `submodule` attributes or methods is now safe and
       ...     # either causes setup() to be called once.

  3. Once a module is constructed inside a method wrapped with
     :meth:`compact`, immediately before another method is called or
     ``setup`` defined attribute is accessed.
  """
  pass  # Intentionally a no-op; subclasses override to define submodules.
def _register_submodules(self, name, val):
  """Registers a submodule (or pytree of values containing submodules).

  Modules found in ``val`` that were constructed outside this module are
  cloned ("adopted"), named, and re-initialized so they attach to ``self``;
  a cache keyed by instance id preserves sharing-by-reference.
  """
  assert self.scope, 'Trying to register submodules on unbound scope.'
  root = self.scope.root
  cache = _caches.get(root, weakref.WeakValueDictionary())
  _caches[root] = cache
  queue = []
  preserve_adopted_names = config.flax_preserve_adopted_names
  if hasattr(type(self), 'preserve_adopted_names'):
    preserve_adopted_names = type(self).preserve_adopted_names

  def adopt_attr_modules(cache, queue, suffix, subvalue):
    if isinstance(subvalue, Module):
      current_name = subvalue.name
      adopted_name = None
      if subvalue.parent is None:
        # Preserve sharing-by-reference relationships during adoption
        # via cache keyed on unique instance ids.
        key = subvalue._id
        # Module was passed from outside. It needs to be cloned.
        # Outside modules are named by attachment, not an outer name,
        # UNLESS we're using new adopted name policy, in which case an existing
        # name will be used, as is often supplied by config systems.
        if preserve_adopted_names:
          adopted_name = object.__getattribute__(subvalue, 'name')
        if key in cache:
          subvalue = cache[key]
        else:
          subvalue = subvalue.clone(name=None)
          cache[key] = subvalue
      if subvalue.name is None:
        object.__setattr__(subvalue, 'parent', self)
        if adopted_name is None:
          adopted_name = (
              f'{name}{suffix}'
              if not isinstance(subvalue, CompactNameScope)
              else current_name
          )
        object.__setattr__(subvalue, 'name', adopted_name)
        queue.append(subvalue)
    return subvalue

  val = _freeze_attr(
      _map_over_modules_in_tree(
          functools.partial(adopt_attr_modules, cache, queue), val
      )
  )
  object.__setattr__(self, name, val)
  # Re-run post-init so freshly adopted modules bind to this module's scope.
  for x in queue:
    x.__post_init__()
def _try_setup(self, shallow: bool = False) -> None:
  """Tries to setup module if scope is available and setup has not been called yet."""
  if (
      self.scope
      and not self._state.in_setup
      and self._state.setup_called != SetupState.DONE
  ):
    try:
      self._state.in_setup = True
      # A shallow setup will only register attribute submodules but it does
      # not call the user's setup. This avoids running before a
      # transformation.
      for field in dataclasses.fields(self):
        if field.name not in ('parent', 'name') and field.init:
          self._register_submodules(field.name, getattr(self, field.name))
      if not shallow:
        self.setup()
        # create NonTransparent Modules
        self._compact_name_scope_modules = {
            name: CompactNameScope(
                getattr(type(self), name).inner_fun, lambda: self, name=name
            )
            for name in self._compact_name_scope_methods
        }
      # We run static checks abstractly once for setup before any transforms
      # to detect name collisions and other python errors.
      elif self._state.setup_called == SetupState.NEW:
        self._validate_setup()
    finally:
      self._state.in_setup = False
      if not shallow:
        self._state.setup_called = SetupState.DONE
def _validate_setup(self) -> None:
  """Abstractly evaluates setup only to run static checks."""

  def run_setup_only(x):
    # Identity pass through a throwaway bound clone; only setup() runs.
    wrapped_id = wrap_method_once(lambda m, x: x)
    with TestScope({}, rngs={}, mutable=True).temporary() as root:
      return wrapped_id(self.clone(parent=root), x)

  # eval_shape traces abstractly, so no real computation is performed.
  _ = jax.eval_shape(run_setup_only, 0)
def _name_taken(
self,
name: str,
reuse_scopes: bool = False,
collection: Optional[str] = None,
) -> bool:
assert self.scope is not None
if reuse_scopes:
return False
return self.scope.name_reserved(name, collection)
def _initialization_allowed(self):
return (
not self._state.is_initialized # allow eager attachment in post-init
or self._state.in_setup
or self._state.in_compact_method
)
def path(self):
    """Returns this Module's path within the variable tree.

    Raises:
      ValueError: if the module is unbound (has no scope).
    """
    scope = self.scope
    if scope is None:
        raise ValueError("Can't access module paths on unbound modules.")
    return scope.path
def clone(
    self: M,
    *,
    parent: Optional[Union[Scope, 'Module', _Sentinel]] = None,
    _deep_clone: Union[bool, weakref.WeakValueDictionary] = False,
    _reset_names: bool = False,
    **updates,
) -> M:
    """Creates a clone of this Module, with optionally updated arguments.

    NOTE: end users are encouraged to use the ``copy`` method. ``clone`` is used
    primarily for internal routines, and ``copy`` offers simpler arguments and
    better defaults.

    Args:
      parent: The parent of the clone. The clone will have no parent if no
        explicit parent is specified.
      _deep_clone: A boolean or a weak value dictionary to control deep cloning
        of submodules. If True, submodules will be cloned recursively. If a weak
        value dictionary is passed, it will be used to cache cloned submodules.
        This flag is used by init/apply/bind to avoid scope leakage.
      _reset_names: If True, ``name=None`` is also passed to submodules when
        cloning. Resetting names in submodules is necessary when calling ``.unbind``.
      **updates: Attribute updates.

    Returns:
      A clone of the this Module with the updated attributes and parent.
    """
    # Gather every dataclass-init field as the attribute set of the clone.
    attrs = {
        f.name: getattr(self, f.name) for f in dataclasses.fields(self) if f.init
    }
    attrs.update(parent=parent, **updates)
    # Here we implement deep cloning of submodules, this is necessary to avoid scope leakage
    # from external submodules into init/apply/bind while preserving sharing-by-reference
    # relationships between submodules.
    if _deep_clone != False:
        # We use a weak value dictionary to cache cloned submodules. When a shared
        # submodule is cloned, its only cloned once else its fetched from the cache.
        cache = (
            weakref.WeakValueDictionary()
            if isinstance(_deep_clone, bool)
            else _deep_clone
        )

        def clone_fn(m: Module) -> Module:
            if hasattr(m, '_id'):
                key = m._id
                if key in cache:
                    # Sharing-by-reference preserved: same original -> same clone.
                    return cache[key]
                else:
                    if _reset_names:
                        clone = m.clone(
                            _deep_clone=cache, _reset_names=_reset_names, name=None
                        )
                    else:
                        clone = m.clone(_deep_clone=cache)
                    cache[key] = clone
                    return clone
            else:
                # If the module doesn't have an _id attribute it could be a mock object
                # so we return it as is.
                return m

        # _map_submodules will map over all submodules inside attrs
        # value here can be any pytree, non-module values are ignored
        for field_name, value in attrs.items():
            if field_name == 'parent':
                continue
            attrs[field_name] = _map_submodules(clone_fn, value)

    module = self.__class__(**attrs)
    return module
def copy(
    self: M,
    *,
    parent: Optional[Union[Scope, 'Module', _Sentinel]] = _unspecified_parent,
    name: Optional[str] = None,
    **updates,
) -> M:
    """Creates a copy of this Module, with optionally updated arguments.

    Args:
      parent: The parent of the copy. By default the current module is taken
        as parent if not explicitly specified.
      name: A new name for the copied Module, by default a new automatic name
        will be given.
      **updates: Attribute updates.

    Returns:
      A copy of the this Module with the updated name, parent, and attributes.
    """
    # ``copy`` is a thin convenience wrapper over ``clone`` that always deep
    # clones and never resets submodule names.
    overrides = dict(updates, parent=parent, name=name)
    return self.clone(_deep_clone=True, _reset_names=False, **overrides)
# NOTE(review): the following ``...``-bodied signatures are typing overload
# stubs for ``variable``; their ``@overload`` decorators appear to be missing
# in this copy — TODO confirm against upstream.
def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
) -> Variable[T]:
    ...

# Overload: unbox=True returns a Variable over the unboxed value.
def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: Literal[True],
    **init_kwargs,
) -> Variable[T]:
    ...

# Overload: unbox=False keeps AxisMetadata boxing in the returned Variable.
def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: Literal[False],
    **init_kwargs,
) -> Variable[meta.AxisMetadata[T]]:
    ...

# Overload: unbox given as a plain bool yields the union of both results.
def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: bool = True,
    **init_kwargs,
) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
    ...
def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: bool = True,
    **init_kwargs,
) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
    """Declares and returns a variable in this Module.

    See :mod:`flax.core.variables` for more information. See also :meth:`param`
    for a shorthand way to define read-only variables in the "params"
    collection.

    Contrary to :meth:`param`, all arguments passing using ``init_fn`` should be
    passed on explicitly::

      >>> class Foo(nn.Module):
      ...   @nn.compact
      ...   def __call__(self, x):
      ...     x = nn.Dense(4)(x)
      ...     key = self.make_rng('stats')
      ...     mean = self.variable('stats', 'mean', nn.initializers.lecun_normal(), key, x.shape)
      ...     ...
      ...     return x * mean.value
      >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3)))
      >>> jax.tree_util.tree_map(jnp.shape, variables)
      {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}}, 'stats': {'mean': (2, 4)}}

    In the example above, the function ``lecun_normal`` expects two arguments:
    ``key`` and ``shape``, and both have to be passed on. The PRNG for ``stats``
    has to be provided explicitly when calling :meth:`init` and :meth:`apply`.

    Args:
      col: The variable collection name.
      name: The variable name.
      init_fn: The function that will be called to compute the initial value of
        this variable. This function will only be called the first time this
        variable is used in this module. If None, the variable must already be
        initialized otherwise an error is raised.
      *init_args: The positional arguments to pass to init_fn.
      unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
        value, see ``flax.nn.meta.unbox`` (default: True).
      **init_kwargs: The key-word arguments to pass to init_fn

    Returns:
      A :class:`flax.core.variables.Variable` that can be read or set via
      ".value" attribute. Throws an error if the variable exists already.
    """
    # BUG FIX: ``_initialization_allowed`` is a method. The previous code
    # tested the bound-method object itself (`if not self._initialization_allowed:`),
    # which is always truthy, so this guard could never fire. It must be called.
    if not self._initialization_allowed():
        raise ValueError(
            'Variables must be initialized in `setup()` or in a method '
            'wrapped in `@compact`'
        )
    if self._name_taken(name, collection=col):
        raise errors.NameInUseError('variable', name, self.__class__.__name__)
    assert self.scope is not None
    v = self.scope.variable(
        col, name, init_fn, *init_args, unbox=unbox, **init_kwargs
    )
    # Record which collection owns this child for module introspection.
    self._state.children[name] = col
    return v
# NOTE(review): the following ``...``-bodied signatures are typing overload
# stubs for ``param``; their ``@overload`` decorators appear to be missing
# in this copy — TODO confirm against upstream.
def param(
    self, name: str, init_fn: Callable[..., T], *init_args,
) -> T:
    ...

# Overload: unbox=True returns the raw (unboxed) parameter value.
def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: Literal[True],
    **init_kwargs,
) -> T:
    ...

# Overload: unbox=False returns the AxisMetadata-boxed parameter.
def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: Literal[False],
    **init_kwargs,
) -> meta.AxisMetadata[T]:
    ...

# Overload: unbox given as a plain bool yields the union of both results.
def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: bool,
    **init_kwargs,
) -> Union[T, meta.AxisMetadata[T]]:
    ...
def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: bool = True,
    **init_kwargs,
) -> Union[T, meta.AxisMetadata[T]]:
    """Declares and returns a parameter in this Module.

    Parameters are read-only variables in the collection named "params". See
    :mod:`flax.core.variables` for more details on variables.

    The first argument of ``init_fn`` is assumed to be a PRNG key, which is
    provided automatically and does not have to be passed using ``init_args``
    or ``init_kwargs``::

      >>> class Foo(nn.Module):
      ...   @nn.compact
      ...   def __call__(self, x):
      ...     x = nn.Dense(4)(x)
      ...     mean = self.param('mean', nn.initializers.lecun_normal(), x.shape)
      ...     ...
      ...     return x * mean
      >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3)))
      >>> jax.tree_util.tree_map(jnp.shape, variables)
      {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}, 'mean': (2, 4)}}

    In the example above, the function ``lecun_normal`` expects two arguments:
    ``key`` and ``shape``, but only ``shape`` has to be provided explicitly;
    ``key`` is set automatically using the PRNG for ``params`` that is passed
    when initializing the module using :meth:`init`.

    Args:
      name: The parameter name.
      init_fn: The function that will be called to compute the initial value of
        this variable. This function will only be called the first time this
        parameter is used in this module.
      *init_args: The positional arguments to pass to init_fn.
      unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
        value, see ``flax.nn.meta.unbox`` (default: True).
      **init_kwargs: The key-word arguments to pass to init_fn.

    Returns:
      The value of the initialized parameter. Throws an error if the parameter
      exists already.
    """
    # BUG FIX: ``_initialization_allowed`` is a method. The previous code
    # tested the bound-method object itself (`if not self._initialization_allowed:`),
    # which is always truthy, so this guard could never fire. It must be called.
    if not self._initialization_allowed():
        raise ValueError(
            'Parameters must be initialized in `setup()` or in a method '
            'wrapped in `@compact`'
        )
    if self._name_taken(name, collection='params'):
        raise errors.NameInUseError('param', name, self.__class__.__name__)
    assert self.scope is not None
    v = self.scope.param(name, init_fn, *init_args, unbox=unbox, **init_kwargs)
    # Parameters always live in the 'params' collection.
    self._state.children[name] = 'params'
    return v
def has_variable(self, col: str, name: str) -> bool:
    """Checks if a variable of given collection and name exists in this Module.

    See :mod:`flax.core.variables` for more explanation on variables and
    collections.

    Args:
      col: The variable collection name.
      name: The name of the variable.

    Returns:
      True if the variable exists.
    """
    scope = self.scope
    if scope is None:
        raise ValueError("Can't access variables on unbound modules")
    return scope.has_variable(col, name)
def is_mutable_collection(self, col: str) -> bool:
    """Returns true if the collection ``col`` is mutable."""
    scope = self.scope
    if scope is None:
        raise ValueError("Can't check mutability on unbound modules")
    return scope.is_mutable_collection(col)
def has_rng(self, name: str) -> bool:
    """Returns true if a PRNGSequence with name ``name`` exists."""
    scope = self.scope
    if scope is None:
        raise ValueError("Can't query for RNGs on unbound modules")
    return scope.has_rng(name)
def make_rng(self, name: str = 'params') -> PRNGKey:
    """Returns a new RNG key from a given RNG sequence for this Module.

    The new RNG key is split from the previous one. Thus, every call to
    ``make_rng`` returns a new RNG key, while still guaranteeing full
    reproducibility.

    .. note::
      If an invalid name is passed (i.e. no RNG key was passed by
      the user in ``.init`` or ``.apply`` for this name), then ``name``
      will default to ``'params'``.

    Example::

      >>> import jax
      >>> import flax.linen as nn

      >>> class ParamsModule(nn.Module):
      ...   def __call__(self):
      ...     return self.make_rng('params')
      >>> class OtherModule(nn.Module):
      ...   def __call__(self):
      ...     return self.make_rng('other')

      >>> key = jax.random.key(0)
      >>> params_out, _ = ParamsModule().init_with_output({'params': key})
      >>> # self.make_rng('other') will default to using the 'params' RNG stream
      >>> other_out, _ = OtherModule().init_with_output({'params': key})
      >>> assert params_out == other_out

    Learn more about RNG's by reading the Flax RNG guide:
    https://flax.readthedocs.io/en/latest/guides/flax_fundamentals/rng_guide.html

    Args:
      name: The RNG sequence name.

    Returns:
      The newly generated RNG key.
    """
    scope = self.scope
    if scope is None:
        raise ValueError("Can't use RNGs on unbound modules")
    return scope.make_rng(name)
def is_initializing(self) -> bool:
    """Returns True if running under self.init(...) or nn.init(...)().

    This is a helper method to handle the common case of simple initialization
    where we wish to have setup logic occur when only called under
    ``module.init`` or ``nn.init``. For more complicated multi-phase
    initialization scenarios it is better to test for the mutability of
    particular variable collections or for the presence of particular
    variables that potentially need to be initialized.
    """
    scope = self.scope
    if scope is None:
        raise ValueError("Can't check if running under init() on unbound modules")
    # The 'initializing' flag is set by the init machinery on the root scope.
    return scope.get_flag('initializing', False)
def _module_checks(self):
    """Run standard runtime checks."""
    # Invoked as Module._module_checks(self), so a ``self`` that is not a
    # Module instance at all is detected here rather than failing obscurely.
    if not isinstance(self, Module):
        raise errors.InvalidInstanceModuleError()
    # Subclasses overriding __post_init__ must chain to super().__post_init__;
    # the absence of ``_id`` (assigned during base post-init — TODO confirm,
    # assignment not visible in this chunk) signals an incorrect override.
    overridden_post_init = self.__post_init__ != Module.__post_init__
    if overridden_post_init and not hasattr(self, '_id'):
        raise errors.IncorrectPostInitOverrideError()
def bind(
    self: M,
    variables: VariableDict,
    *args,
    rngs: Optional[RNGSequences] = None,
    mutable: CollectionFilter = False,
) -> M:
    """Creates an interactive Module instance by binding variables and RNGs.

    ``bind`` provides an "interactive" instance of a Module directly without
    transforming a function with ``apply``. This is particularly useful for
    debugging and interactive use cases like notebooks where a function would
    limit the ability to split up code into different cells.

    Once the variables (and optionally RNGs) are bound to a ``Module`` it
    becomes a stateful object. Note that idiomatic JAX is functional and
    therefore an interactive instance does not mix well with vanilla JAX APIs.
    ``bind()`` should only be used for interactive experimentation, and in all
    other cases we strongly encourage users to use ``apply()`` instead.

    Example::

      >>> import jax
      >>> import jax.numpy as jnp
      >>> import flax.linen as nn

      >>> class AutoEncoder(nn.Module):
      ...   def setup(self):
      ...     self.encoder = nn.Dense(3)
      ...     self.decoder = nn.Dense(5)
      ...
      ...   def __call__(self, x):
      ...     return self.decoder(self.encoder(x))

      >>> x = jnp.ones((16, 9))
      >>> ae = AutoEncoder()
      >>> variables = ae.init(jax.random.key(0), x)
      >>> model = ae.bind(variables)
      >>> z = model.encoder(x)
      >>> x_reconstructed = model.decoder(z)

    Args:
      variables: A dictionary containing variables keyed by variable
        collections. See :mod:`flax.core.variables` for more details about
        variables.
      *args: Named arguments (not used).
      rngs: a dict of PRNGKeys to initialize the PRNG sequences.
      mutable: Can be bool, str, or list. Specifies which collections should be
        treated as mutable: ``bool``: all/no collections are mutable. ``str``:
        The name of a single mutable collection. ``list``: A list of names of
        mutable collections.

    Returns:
      A copy of this instance with bound variables and RNGs.
    """
    Module._module_checks(self)

    del args
    # Build a bound scope from the supplied variables/rngs and deep-clone the
    # module into it, so external submodules never leak their own scopes in.
    scope = core.bind(variables, rngs=rngs, mutable=mutable)
    return self.clone(parent=scope, _deep_clone=True)
def unbind(self: M) -> Tuple[M, VariableDict]:
    """Returns an unbound copy of a Module and its variables.

    ``unbind`` helps create a stateless version of a bound Module.

    An example of a common use case: to extract a sub-Module defined inside
    ``setup()`` and its corresponding variables: 1) temporarily ``bind`` the
    parent Module; and then 2) ``unbind`` the desired sub-Module. (Recall that
    ``setup()`` is only called when the Module is bound.)::

      >>> class Encoder(nn.Module):
      ...   @nn.compact
      ...   def __call__(self, x):
      ...     ...
      ...     return nn.Dense(256)(x)

      >>> class Decoder(nn.Module):
      ...   @nn.compact
      ...   def __call__(self, x):
      ...     ...
      ...     return nn.Dense(784)(x)

      >>> class AutoEncoder(nn.Module):
      ...   def setup(self):
      ...     self.encoder = Encoder()
      ...     self.decoder = Decoder()
      ...
      ...   def __call__(self, x):
      ...     return self.decoder(self.encoder(x))

      >>> module = AutoEncoder()
      >>> variables = module.init(jax.random.key(0), jnp.ones((1, 784)))

      >>> # Extract the Encoder sub-Module and its variables
      >>> encoder, encoder_vars = module.bind(variables).encoder.unbind()

    Returns:
      A tuple with an unbound copy of this Module and its variables.
    """
    Module._module_checks(self)

    if self.scope is None:
        raise errors.CallUnbindOnUnboundModuleError()

    # NOTE(review): ``variables`` is read as an attribute here; upstream it is
    # a @property — confirm the decorator was not lost in this copy.
    variables = self.variables
    # Deep-clone with names reset so the returned module is fully detached.
    module = self.clone(_deep_clone=True, _reset_names=True, name=None)
    return module, variables
def apply(
    self,
    variables: VariableDict,
    *args,
    rngs: Optional[Union[PRNGKey, RNGSequences]] = None,
    method: Union[Callable[..., Any], str, None] = None,
    mutable: CollectionFilter = False,
    capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False,
    **kwargs,
) -> Union[Any, Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]]:
    """Applies a module method to variables and returns output and modified variables.

    Note that ``method`` should be set if one would like to call ``apply`` on a
    different class method than ``__call__``. For instance, suppose a
    Transformer modules has a method called ``encode``, then the following calls
    ``apply`` on that method::

      >>> import flax.linen as nn
      >>> import jax, jax.numpy as jnp
      >>> import numpy as np

      >>> class Transformer(nn.Module):
      ...   def encode(self, x):
      ...     ...

      >>> x = jnp.ones((16, 9))
      >>> model = Transformer()
      >>> variables = model.init(jax.random.key(0), x, method=Transformer.encode)

      >>> encoded = model.apply(variables, x, method=Transformer.encode)

    If a function instance is provided, the unbound function is used. For
    instance, the example below is equivalent to the one above::

      >>> encoded = model.apply(variables, x, method=model.encode)

    You can also pass a string to a callable attribute of the module. For
    example, the previous can be written as::

      >>> encoded = model.apply(variables, x, method='encode')

    Note ``method`` can also be a function that is not defined in
    ``Transformer``. In that case, the function should have at least one
    argument representing an instance of the Module class::

      >>> def other_fn(instance, x):
      ...   # instance.some_module_attr(...)
      ...   instance.encode
      ...   ...

      >>> model.apply(variables, x, method=other_fn)

    If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'``
    RNG stream. If you want to use a different RNG stream or need to use
    multiple streams, you can pass a dictionary mapping each RNG stream name
    to its corresponding ``PRNGKey`` to ``apply``. If ``self.make_rng(name)``
    is called on an RNG stream name that isn't passed by the user, it will
    default to using the ``'params'`` RNG stream.

    Example::

      >>> class Foo(nn.Module):
      ...   @nn.compact
      ...   def __call__(self, x, add_noise=False):
      ...     x = nn.Dense(16)(x)
      ...     x = nn.relu(x)
      ...
      ...     if add_noise:
      ...       # Add gaussian noise
      ...       noise_key = self.make_rng('noise')
      ...       x = x + jax.random.normal(noise_key, x.shape)
      ...
      ...     return nn.Dense(1)(x)

      >>> x = jnp.empty((1, 7))
      >>> module = Foo()
      >>> rngs = {'params': jax.random.key(0), 'noise': jax.random.key(1)}
      >>> variables = module.init(rngs, x)
      >>> out0 = module.apply(variables, x, add_noise=True, rngs=rngs)
      >>> rngs['noise'] = jax.random.key(0)
      >>> out1 = module.apply(variables, x, add_noise=True, rngs=rngs)
      >>> # different output (key(1) vs key(0))
      >>> np.testing.assert_raises(AssertionError, np.testing.assert_allclose, out0, out1)
      >>> del rngs['noise']
      >>> # self.make_rng('noise') will default to using the 'params' RNG stream
      >>> out2 = module.apply(variables, x, add_noise=True, rngs=rngs)
      >>> # same output (key(0))
      >>> np.testing.assert_allclose(out1, out2)
      >>> # passing in a single key is equivalent to passing in {'params': key}
      >>> out3 = module.apply(variables, x, add_noise=True, rngs=jax.random.key(0))
      >>> # same output (key(0))
      >>> np.testing.assert_allclose(out2, out3)

    Args:
      variables: A dictionary containing variables keyed by variable
        collections. See :mod:`flax.core.variables` for more details about
        variables.
      *args: Named arguments passed to the specified apply method.
      rngs: a dict of PRNGKeys to initialize the PRNG sequences. The "params"
        PRNG sequence is used to initialize parameters.
      method: A function to call apply on. This is generally a function in the
        module. If provided, applies this method. If not provided, applies the
        ``__call__`` method of the module. A string can also be provided to
        specify a method by name.
      mutable: Can be bool, str, or list. Specifies which collections should be
        treated as mutable: ``bool``: all/no collections are mutable. ``str``:
        The name of a single mutable collection. ``list``: A list of names of
        mutable collections.
      capture_intermediates: If ``True``, captures intermediate return values of
        all Modules inside the "intermediates" collection. By default, only the
        return values of all ``__call__`` methods are stored. A function can be
        passed to change the filter behavior. The filter function takes the
        Module instance and method name and returns a bool indicating whether
        the output of that method invocation should be stored.
      **kwargs: Keyword arguments passed to the specified apply method.

    Returns:
      If ``mutable`` is False, returns output. If any collections are
      mutable, returns ``(output, vars)``, where ``vars`` are is a dict
      of the modified collections.
    """
    Module._module_checks(self)

    # A bare PRNGKey is shorthand for {'params': key}.
    if rngs is not None and not isinstance(rngs, dict):
        if not core.scope._is_valid_rng(rngs):
            raise errors.InvalidRngError(
                'RNGs should be of shape (2,) or PRNGKey in Module '
                f'{self.__class__.__name__}, but rngs are: {rngs}'
            )
        rngs = {'params': rngs}

    # Resolve ``method``: string attribute name, None (-> __call__), or a
    # function; the resolved callable is unbound before application.
    if isinstance(method, str):
        attribute_name = method
        method = getattr(self, attribute_name)
        if not callable(method):
            class_name = type(self).__name__
            raise TypeError(
                f"'{class_name}.{attribute_name}' must be a callable, got"
                f' {type(method)}.'
            )
        # if the `method` string is a submodule, we create a lambda function
        # that calls the submodule, forwarding all arguments.
        if isinstance(method, Module):
            method = lambda self, *args, **kwargs: getattr(self, attribute_name)(
                *args, **kwargs
            )
    elif method is None:
        method = self.__call__
    method = _get_unbound_fn(method)
    return apply(
        method,
        self,
        mutable=mutable,
        capture_intermediates=capture_intermediates,
    )(variables, *args, **kwargs, rngs=rngs)
def init_with_output(
    self,
    rngs: Union[PRNGKey, RNGSequences],
    *args,
    method: Union[Callable[..., Any], str, None] = None,
    mutable: CollectionFilter = DenyList('intermediates'),
    capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False,
    **kwargs,
) -> Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]:
    """Initializes a module method with variables and returns output and modified variables.

    Args:
      rngs: The rngs for the variable collections.
      *args: Named arguments passed to the init function.
      method: An optional method. If provided, applies this method. If not
        provided, applies the ``__call__`` method. A string can also be
        provided to specify a method by name.
      mutable: Can be bool, str, or list. Specifies which collections should be
        treated as mutable: ``bool``: all/no collections are mutable. ``str``:
        The name of a single mutable collection. ``list``: A list of names of
        mutable collections. By default, all collections except "intermediates"
        are mutable.
      capture_intermediates: If ``True``, captures intermediate return values of
        all Modules inside the "intermediates" collection. By default only the
        return values of all ``__call__`` methods are stored. A function can be
        passed to change the filter behavior. The filter function takes the
        Module instance and method name and returns a bool indicating whether
        the output of that method invocation should be stored.
      **kwargs: Keyword arguments passed to the init function.

    Returns:
      ``(output, vars)``, where ``vars`` are is a dict of the modified
      collections.
    """
    Module._module_checks(self)

    # A bare PRNGKey is shorthand for {'params': key}.
    if not isinstance(rngs, dict):
        if not core.scope._is_valid_rng(rngs):
            raise errors.InvalidRngError(
                'RNGs should be of shape (2,) or PRNGKey in Module '
                f'{self.__class__.__name__}, but rngs are: {rngs}'
            )
        rngs = {'params': rngs}

    # Resolve ``method``: string attribute name, None (-> __call__), or a
    # function; the resolved callable is unbound before initialization.
    if isinstance(method, str):
        attribute_name = method
        method = getattr(self, attribute_name)
        if not callable(method):
            class_name = type(self).__name__
            raise TypeError(
                f"'{class_name}.{attribute_name}' must be a callable, got"
                f' {type(method)}.'
            )
    elif method is None:
        method = self.__call__
    method = _get_unbound_fn(method)
    return init_with_output(
        method,
        self,
        mutable=mutable,
        capture_intermediates=capture_intermediates,
    )(rngs, *args, **kwargs)
def init(
    self,
    rngs: Union[PRNGKey, RNGSequences],
    *args,
    method: Union[Callable[..., Any], str, None] = None,
    mutable: CollectionFilter = DenyList('intermediates'),
    capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False,
    **kwargs,
) -> Union[FrozenVariableDict, Dict[str, Any]]:
    """Initializes a module method with variables and returns modified variables.

    ``init`` takes as first argument either a single ``PRNGKey``, or a
    dictionary mapping variable collections names to their ``PRNGKeys``, and
    will call ``method`` (which is the module's ``__call__`` function by
    default) passing ``*args`` and ``**kwargs``, and returns
    a dictionary of initialized variables.

    Example::

      >>> import flax.linen as nn
      >>> import jax, jax.numpy as jnp
      >>> import numpy as np

      >>> class Foo(nn.Module):
      ...   @nn.compact
      ...   def __call__(self, x, train):
      ...     x = nn.Dense(16)(x)
      ...     x = nn.BatchNorm(use_running_average=not train)(x)
      ...     x = nn.relu(x)
      ...     return nn.Dense(1)(x)

      >>> x = jnp.empty((1, 7))
      >>> module = Foo()
      >>> key = jax.random.key(0)
      >>> variables = module.init(key, x, train=False)

    If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'``
    RNG stream. If you want to use a different RNG stream or need to use
    multiple streams, you can pass a dictionary mapping each RNG stream name
    to its corresponding ``PRNGKey`` to ``init``. If ``self.make_rng(name)``
    is called on an RNG stream name that isn't passed by the user, it will
    default to using the ``'params'`` RNG stream.

    Example::

      >>> class Foo(nn.Module):
      ...   @nn.compact
      ...   def __call__(self, x):
      ...     x = nn.Dense(16)(x)
      ...     x = nn.relu(x)
      ...
      ...     other_variable = self.variable(
      ...       'other_collection',
      ...       'other_variable',
      ...       lambda x: jax.random.normal(self.make_rng('other_rng'), x.shape),
      ...       x,
      ...     )
      ...     x = x + other_variable.value
      ...
      ...     return nn.Dense(1)(x)

      >>> module = Foo()
      >>> rngs = {'params': jax.random.key(0), 'other_rng': jax.random.key(1)}
      >>> variables0 = module.init(rngs, x)
      >>> rngs['other_rng'] = jax.random.key(0)
      >>> variables1 = module.init(rngs, x)
      >>> # equivalent params (key(0))
      >>> _ = jax.tree_util.tree_map(
      ...   np.testing.assert_allclose, variables0['params'], variables1['params']
      ... )
      >>> # different other_variable (key(1) vs key(0))
      >>> np.testing.assert_raises(
      ...   AssertionError,
      ...   np.testing.assert_allclose,
      ...   variables0['other_collection']['other_variable'],
      ...   variables1['other_collection']['other_variable'],
      ... )
      >>> del rngs['other_rng']
      >>> # self.make_rng('other_rng') will default to using the 'params' RNG stream
      >>> variables2 = module.init(rngs, x)
      >>> # equivalent params (key(0))
      >>> _ = jax.tree_util.tree_map(
      ...   np.testing.assert_allclose, variables1['params'], variables2['params']
      ... )
      >>> # equivalent other_variable (key(0))
      >>> np.testing.assert_allclose(
      ...   variables1['other_collection']['other_variable'],
      ...   variables2['other_collection']['other_variable'],
      ... )
      >>> # passing in a single key is equivalent to passing in {'params': key}
      >>> variables3 = module.init(jax.random.key(0), x)
      >>> # equivalent params (key(0))
      >>> _ = jax.tree_util.tree_map(
      ...   np.testing.assert_allclose, variables2['params'], variables3['params']
      ... )
      >>> # equivalent other_variable (key(0))
      >>> np.testing.assert_allclose(
      ...   variables2['other_collection']['other_variable'],
      ...   variables3['other_collection']['other_variable'],
      ... )

    Jitting ``init`` initializes a model lazily using only the shapes of the
    provided arguments, and avoids computing the forward pass with actual
    values. Example::

      >>> module = nn.Dense(1)
      >>> init_jit = jax.jit(module.init)
      >>> variables = init_jit(jax.random.key(0), x)

    ``init`` is a light wrapper over ``apply``, so other ``apply`` arguments
    like ``method``, ``mutable``, and ``capture_intermediates`` are also
    available.

    Args:
      rngs: The rngs for the variable collections.
      *args: Named arguments passed to the init function.
      method: An optional method. If provided, applies this method. If not
        provided, applies the ``__call__`` method. A string can also be provided
        to specify a method by name.
      mutable: Can be bool, str, or list. Specifies which collections should be
        treated as mutable: ``bool``: all/no collections are mutable. ``str``:
        The name of a single mutable collection. ``list``: A list of names of
        mutable collections. By default all collections except "intermediates"
        are mutable.
      capture_intermediates: If ``True``, captures intermediate return values of
        all Modules inside the "intermediates" collection. By default only the
        return values of all ``__call__`` methods are stored. A function can be
        passed to change the filter behavior. The filter function takes the
        Module instance and method name and returns a bool indicating whether
        the output of that method invocation should be stored.
      **kwargs: Keyword arguments passed to the init function.

    Returns:
      The initialized variable dict.
    """
    Module._module_checks(self)

    # Delegate to init_with_output and discard the method's return value;
    # only the initialized variables are wanted here.
    _, v_out = self.init_with_output(
        rngs,
        *args,
        method=method,
        mutable=mutable,
        capture_intermediates=capture_intermediates,
        **kwargs,
    )
    return v_out
def lazy_init(
    self,
    rngs: Union[PRNGKey, RNGSequences],
    *args,
    method: Optional[Callable[..., Any]] = None,
    mutable: CollectionFilter = DenyList('intermediates'),
    **kwargs,
) -> FrozenVariableDict:
    """Initializes a module without computing on an actual input.

    lazy_init will initialize the variables without doing unnecessary compute.
    The input data should be passed as a ``jax.ShapeDtypeStruct`` which
    specifies the shape and dtype of the input but no concrete data.

    Example::

      >>> model = nn.Dense(features=256)
      >>> variables = model.lazy_init(
      ...     jax.random.key(0), jax.ShapeDtypeStruct((1, 128), jnp.float32))

    The args and kwargs args passed to ``lazy_init`` can be a mix of
    concrete (jax arrays, scalars, bools) and abstract (ShapeDtypeStruct)
    values. Concrete values are only necessary for arguments that affect
    the initialization of variables. For example, the model might expect
    a keyword arg that enables/disables a subpart of the model.
    In this case, an explicit value (True/Flase) should be passed otherwise
    ``lazy_init`` cannot infer which variables should be initialized.

    Args:
      rngs: The rngs for the variable collections.
      *args: arguments passed to the init function.
      method: An optional method. If provided, applies this method. If not
        provided, applies the ``__call__`` method.
      mutable: Can be bool, str, or list. Specifies which collections should be
        treated as mutable: ``bool``: all/no collections are mutable. ``str``:
        The name of a single mutable collection. ``list``: A list of names of
        mutable collections. By default all collections except "intermediates"
        are mutable.
      **kwargs: Keyword arguments passed to the init function.

    Returns:
      The initialized variable dict.
    """
    Module._module_checks(self)

    def lazy_wrapper(rngs, *args, **kwargs):
        # Plain init wrapped so partial_eval can trace it abstractly.
        return self.init(rngs, *args, method=method, mutable=mutable, **kwargs)

    return partial_eval.lazy_init(lazy_wrapper)(rngs, *args, **kwargs)
def variables(self) -> VariableDict:
    """Returns the variables in this module."""
    scope = self.scope
    if scope is None:
        raise ValueError("Can't access variables on unbound modules")
    return scope.variables()
def get_variable(self, col: str, name: str, default: Optional[T] = None) -> T:
    """Retrieves the value of a Variable.

    Args:
      col: the variable collection.
      name: the name of the variable.
      default: the default value to return if the variable does not exist in
        this scope.

    Returns:
      The value of the input variable, of the default value if the variable
      doesn't exist in this scope.
    """
    scope = self.scope
    if scope is None:
        raise ValueError("Can't access variables on unbound modules")
    return scope.get_variable(col, name, default)
def put_variable(self, col: str, name: str, value: Any):
    """Updates the value of the given variable if it is mutable, or an error otherwise.

    Args:
      col: the variable collection.
      name: the name of the variable.
      value: the new value of the variable.
    """
    scope = self.scope
    if scope is None:
        raise ValueError("Can't access variables on unbound modules")
    scope.put_variable(col, name, value)
# NOTE(review): the following ``...``-bodied signatures are typing overload
# stubs for ``sow``; their ``@overload`` decorators appear to be missing in
# this copy — TODO confirm against upstream.
def sow(self, col: str, name: str, value: Any) -> bool:
    ...

# Overload: custom reduce/init functions control how repeated sows combine.
def sow(
    self,
    col: str,
    name: str,
    value: T,
    reduce_fn: Callable[[K, T], K] = tuple_reduce,
    init_fn: Callable[[], K] = tuple_init,  # type: ignore
) -> bool:
    ...
def sow(
self,
col: str,
name: str,
value: T,
reduce_fn: Callable[[K, T], K] = tuple_reduce,
init_fn: Callable[[], K] = tuple_init, # type: ignore
) -> bool:
"""Stores a value in a collection.
Collections can be used to collect intermediate values without
the overhead of explicitly passing a container through each Module call.
If the target collection is not mutable ``sow`` behaves like a no-op
and returns ``False``.
Example::
>>> import jax
>>> import jax.numpy as jnp
>>> import flax.linen as nn
>>> class Foo(nn.Module):
... @nn.compact
... def __call__(self, x):
... h = nn.Dense(4)(x)
... self.sow('intermediates', 'h', h)
... return nn.Dense(2)(h)
>>> x = jnp.ones((16, 9))
>>> model = Foo()
>>> variables = model.init(jax.random.key(0), x)
>>> y, state = model.apply(variables, x, mutable=['intermediates'])
>>> print(state['intermediates'])
{'h': (Array([[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ]], dtype=float32),)}
By default the values are stored in a tuple and each stored value
is appended at the end. This way all intermediates can be tracked when
the same module is called multiple times. Alternatively, a custom
init/reduce function can be passed::
>>> class Foo2(nn.Module):
... @nn.compact
... def __call__(self, x):
... init_fn = lambda: 0
... reduce_fn = lambda a, b: a + b
... self.sow('intermediates', 'h', x,
... init_fn=init_fn, reduce_fn=reduce_fn)
... self.sow('intermediates', 'h', x * 2,
... init_fn=init_fn, reduce_fn=reduce_fn)
... return x
>>> x = jnp.ones((1, 1))
>>> model = Foo2()
>>> variables = model.init(jax.random.key(0), x)
>>> y, state = model.apply(
... variables, x, mutable=['intermediates'])
>>> print(state['intermediates'])
{'h': Array([[3.]], dtype=float32)}
Args:
col: The name of the variable collection.
name: The name of the variable.
value: The value of the variable.
reduce_fn: The function used to combine the existing value with the new
value. The default is to append the value to a tuple.
init_fn: For the first value stored, ``reduce_fn`` will be passed the result
of ``init_fn`` together with the value to be stored. The default is an
empty tuple.
Returns:
``True`` if the value has been stored successfully, ``False`` otherwise.
"""
if self.scope is None:
raise ValueError("Can't store variables on unbound modules")
if not self.scope.is_mutable_collection(col):
return False
if self.scope.has_variable(col, name):
xs = self.scope.get_variable(col, name)
else:
self.scope.reserve(name, col)
self._state.children[name] = col
xs = init_fn()
xs = reduce_fn(xs, value)
self.scope.put_variable(col, name, xs)
return True
def perturb(
self, name: str, value: T, collection: str = 'perturbations'
) -> T:
    """Add a zero-value variable ('perturbation') to the intermediate value.
The gradient of ``value`` would be the same as the gradient of this
perturbation variable. Therefore, if you define your loss function with
both params and perturbations as standalone arguments, you can get the
intermediate gradients of ``value`` by running ``jax.grad`` on the perturbation
argument.
.. note::
This is an experimental API and may be tweaked later for better
performance and usability.
      At its current stage, it creates extra dummy variables that occupy extra
memory space. Use it only to debug gradients in training.
Example::
>>> class Foo(nn.Module):
... @nn.compact
... def __call__(self, x):
... x = nn.Dense(3)(x)
... x = self.perturb('dense3', x)
... return nn.Dense(2)(x)
>>> def loss(variables, inputs, targets):
... preds = model.apply(variables, inputs)
... return jnp.square(preds - targets).mean()
>>> x = jnp.ones((2, 9))
>>> y = jnp.ones((2, 2))
>>> model = Foo()
>>> variables = model.init(jax.random.key(0), x)
>>> intm_grads = jax.grad(loss, argnums=0)(variables, x, y)
>>> print(intm_grads['perturbations']['dense3'])
[[-1.456924 -0.44332537 0.02422847]
[-1.456924 -0.44332537 0.02422847]]
If perturbations are not passed to ``apply``, ``perturb`` behaves like a no-op
so you can easily disable the behavior when not needed::
>>> model.apply(variables, x) # works as expected
Array([[-1.0980128 , -0.67961735],
[-1.0980128 , -0.67961735]], dtype=float32)
>>> model.apply({'params': variables['params']}, x) # behaves like a no-op
Array([[-1.0980128 , -0.67961735],
[-1.0980128 , -0.67961735]], dtype=float32)
>>> intm_grads = jax.grad(loss, argnums=0)({'params': variables['params']}, x, y)
>>> 'perturbations' not in intm_grads
True
"""
if self.scope is None:
raise ValueError("Can't store variables on unbound modules")
if self.is_mutable_collection(collection):
if not self.scope.has_variable(collection, name):
self.scope.reserve(name, collection)
self._state.children[name] = collection
self.scope.put_variable(collection, name, jnp.zeros_like(value)) # type: ignore
if collection in self.scope.root._variables:
if self.scope.has_variable(collection, name):
value += self.scope.get_variable(collection, name) # type: ignore
else:
raise ValueError(f"Perturbation collection {collection} present, but "
f"missing perturbation variable {name}")
return value
def tabulate(
self,
rngs: Union[PRNGKey, RNGSequences],
*args,
depth: Optional[int] = None,
show_repeated: bool = False,
mutable: CollectionFilter = DenyList('intermediates'),
console_kwargs: Optional[Mapping[str, Any]] = None,
table_kwargs: Mapping[str, Any] = MappingProxyType({}),
column_kwargs: Mapping[str, Any] = MappingProxyType({}),
compute_flops: bool = False,
compute_vjp_flops: bool = False,
**kwargs,
) -> str:
"""Creates a summary of the Module represented as a table.
This method has the same signature and internally calls ``Module.init``,
but instead of returning the variables, it returns the string summarizing
the Module in a table. ``tabulate`` uses ``jax.eval_shape`` to run the forward
computation without consuming any FLOPs or allocating memory.
Additional arguments can be passed into the ``console_kwargs`` argument, for
example, ``{'width': 120}``. For a full list of ``console_kwargs`` arguments,
see:
https://rich.readthedocs.io/en/stable/reference/console.html#rich.console.Console
Example::
>>> import flax.linen as nn
>>> import jax, jax.numpy as jnp
>>> class Foo(nn.Module):
... @nn.compact
... def __call__(self, x):
... h = nn.Dense(4)(x)
... return nn.Dense(2)(h)
>>> x = jnp.ones((16, 9))
>>> # print(Foo().tabulate(
>>> # jax.random.key(0), x, compute_flops=True, compute_vjp_flops=True))
This gives the following output::
Foo Summary
┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ path ┃ module ┃ inputs ┃ outputs ┃ flops ┃ vjp_flops ┃ params ┃
┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ │ Foo │ float32[16,9] │ float32[16,2] │ 1504 │ 4460 │ │
├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤
│ Dense_0 │ Dense │ float32[16,9] │ float32[16,4] │ 1216 │ 3620 │ bias: │
│ │ │ │ │ │ │ float32[4] │
│ │ │ │ │ │ │ kernel: │
│ │ │ │ │ │ │ float32[9,4] │
│ │ │ │ │ │ │ │
│ │ │ │ │ │ │ 40 (160 B) │
├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤
│ Dense_1 │ Dense │ float32[16,4] │ float32[16,2] │ 288 │ 840 │ bias: │
│ │ │ │ │ │ │ float32[2] │
│ │ │ │ │ │ │ kernel: │
│ │ │ │ │ │ │ float32[4,2] │
│ │ │ │ │ │ │ │
│ │ │ │ │ │ │ 10 (40 B) │
├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤
│ │ │ │ │ │ Total │ 50 (200 B) │
└─────────┴────────┴───────────────┴───────────────┴───────┴───────────┴─────────────────┘
Total Parameters: 50 (200 B)
    **Note**: the row order in the table does not represent execution order,
instead it aligns with the order of keys in ``variables`` which are sorted
alphabetically.
**Note**: ``vjp_flops`` returns ``0`` if the module is not differentiable.
Args:
rngs: The rngs for the variable collections as passed to ``Module.init``.
*args: The arguments to the forward computation.
depth: controls how many submodule deep the summary can go. By default,
its ``None`` which means no limit. If a submodule is not shown because of
the depth limit, its parameter count and bytes will be added to the row
of its first shown ancestor such that the sum of all rows always adds
up to the total number of parameters of the Module.
show_repeated: If ``True``, repeated calls to the same module will be shown
in the table, otherwise only the first call will be shown. Default is
``False``.
mutable: Can be bool, str, or list. Specifies which collections should be
treated as mutable: ``bool``: all/no collections are mutable. ``str``:
The name of a single mutable collection. ``list``: A list of names of
mutable collections. By default, all collections except 'intermediates'
are mutable.
console_kwargs: An optional dictionary with additional keyword arguments
that are passed to ``rich.console.Console`` when rendering the table.
Default arguments are ``{'force_terminal': True, 'force_jupyter':
False}``.
table_kwargs: An optional dictionary with additional keyword arguments
that are passed to ``rich.table.Table`` constructor.
column_kwargs: An optional dictionary with additional keyword arguments
that are passed to ``rich.table.Table.add_column`` when adding columns to
the table.
compute_flops: whether to include a ``flops`` column in the table listing
the estimated FLOPs cost of each module forward pass. Does incur actual
on-device computation / compilation / memory allocation, but still
introduces overhead for large modules (e.g. extra 20 seconds for a
Stable Diffusion's UNet, whereas otherwise tabulation would finish in 5
seconds).
compute_vjp_flops: whether to include a ``vjp_flops`` column in the table
listing the estimated FLOPs cost of each module backward pass.
Introduces a compute overhead of about 2-3X of ``compute_flops``.
**kwargs: keyword arguments to pass to the forward computation.
Returns:
A string summarizing the Module.
"""
from flax.linen import summary
tabulate_fn = summary.tabulate(
self,
rngs,
depth=depth,
show_repeated=show_repeated,
mutable=mutable,
console_kwargs=console_kwargs,
table_kwargs=table_kwargs,
column_kwargs=column_kwargs,
compute_flops=compute_flops,
compute_vjp_flops=compute_vjp_flops,
)
return tabulate_fn(*args, **kwargs)
def module_paths(
self,
rngs: Union[PRNGKey, RNGSequences],
*args,
show_repeated: bool = False,
mutable: CollectionFilter = DenyList('intermediates'),
**kwargs,
) -> dict[str, 'Module']:
"""Returns a dictionary mapping module paths to module instances.
This method has the same signature and internally calls ``Module.init``,
but instead of returning the variables, it returns a dictionary mapping
module paths to unbounded copies of module instances that were used
at runtime. ``module_paths`` uses ``jax.eval_shape`` to run the forward
computation without consuming any FLOPs or allocating memory.
Example::
>>> import flax.linen as nn
>>> import jax, jax.numpy as jnp
>>> class Foo(nn.Module):
... @nn.compact
... def __call__(self, x):
... h = nn.Dense(4)(x)
... return nn.Dense(2)(h)
>>> x = jnp.ones((16, 9))
>>> modules = Foo().module_paths(jax.random.key(0), x)
>>> print({
... p: type(m).__name__ for p, m in modules.items()
... })
{'': 'Foo', 'Dense_0': 'Dense', 'Dense_1': 'Dense'}
Args:
rngs: The rngs for the variable collections as passed to ``Module.init``.
*args: The arguments to the forward computation.
show_repeated: If ``True``, repeated calls to the same module will be
shown in the table, otherwise only the first call will be shown.
Default is ``False``.
mutable: Can be bool, str, or list. Specifies which collections should
be treated as mutable: ``bool``: all/no collections are mutable.
``str``: The name of a single mutable collection. ``list``: A list of
names of mutable collections. By default, all collections except
'intermediates' are mutable.
**kwargs: keyword arguments to pass to the forward computation.
Returns:
      A dictionary mapping module paths to module instances.
"""
from flax.linen import summary
table = summary._get_module_table(
module=self,
depth=None,
show_repeated=show_repeated,
compute_flops=False,
compute_vjp_flops=False,
)(rngs, *args, **kwargs, mutable=mutable)
return {'/'.join(row.path): row.module_copy for row in table}
The provided code snippet includes necessary dependencies for implementing the `_module_repr` function. Write a Python function `def _module_repr(module: 'Module', num_spaces: int = 4)` to solve the following problem:
Returns a pretty printed representation of the module.
Here is the function:
def _module_repr(module: 'Module', num_spaces: int = 4):
"""Returns a pretty printed representation of the module."""
cls = type(module)
cls_name = cls.__name__
rep = ''
attributes = {
f.name: f.type
for f in dataclasses.fields(cls)
if f.name not in ('parent', 'name') and f.repr
}
child_modules = {
k: v
for k, v in module._state.children.items() # pytype: disable=attribute-error
if isinstance(v, Module)
}
if attributes:
rep += '# attributes\n'
for attr in attributes.keys():
# TODO(jheek): can we get a nice string representation of attribute types?
value = module.__dict__.get(attr, None)
value_rep = _attr_repr(value)
rep += f'{attr} = {value_rep}\n'
if child_modules:
rep += '# children\n'
for name, child in child_modules.items():
child_rep = _module_repr(child, num_spaces)
rep += f'{name} = {child_rep}\n'
if rep:
return f'{cls_name}(\n{_indent(rep, num_spaces)})'
else:
return f'{cls_name}()' | Returns a pretty printed representation of the module. |
22,650 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
_use_named_call = config.flax_profile
The provided code snippet includes necessary dependencies for implementing the `enable_named_call` function. Write a Python function `def enable_named_call()` to solve the following problem:
Enables named call wrapping for labelling profile traces. When named call wrapping is enabled all JAX ops executed in a Module will be run under ``jax.named_scope``. The ``Module`` class name will show up around the operations belonging to that Module in the Tensorboard profiling UI, simplifying the profiling process. Note that ``jax.named_scope`` only works for compiled functions (e.g.: using jax.jit or jax.pmap).
Here is the function:
def enable_named_call():
"""Enables named call wrapping for labelling profile traces.
When named call wrapping is enabled all JAX ops executed in a Module
will be run under ``jax.named_scope``. The ``Module`` class name will
show up around the operations belonging to that Module in the
Tensorboard profiling UI, simplifying the profiling process.
Note that ``jax.named_scope`` only works for
compiled functions (e.g.: using jax.jit or jax.pmap).
"""
global _use_named_call
_use_named_call = True | Enables named call wrapping for labelling profile traces. When named call wrapping is enabled all JAX ops executed in a Module will be run under ``jax.named_scope``. The ``Module`` class name will show up around the operations belonging to that Module in the Tensorboard profiling UI, simplifying the profiling process. Note that ``jax.named_scope`` only works for compiled functions (e.g.: using jax.jit or jax.pmap). |
22,651 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
_use_named_call = config.flax_profile
The provided code snippet includes necessary dependencies for implementing the `disable_named_call` function. Write a Python function `def disable_named_call()` to solve the following problem:
Disables named call wrapping. See ``enable_named_call``
Here is the function:
def disable_named_call():
"""Disables named call wrapping.
See ``enable_named_call``
"""
global _use_named_call
_use_named_call = False | Disables named call wrapping. See ``enable_named_call`` |
22,652 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
_use_named_call = config.flax_profile
The provided code snippet includes necessary dependencies for implementing the `override_named_call` function. Write a Python function `def override_named_call(enable: bool = True)` to solve the following problem:
Returns a context manager that enables/disables named call wrapping. Args: enable: If true, enables named call wrapping for labelling profile traces. (see ``enabled_named_call``).
Here is the function:
def override_named_call(enable: bool = True):
# pylint: disable=g-doc-return-or-yield
"""Returns a context manager that enables/disables named call wrapping.
Args:
enable: If true, enables named call wrapping for labelling profile traces.
(see ``enabled_named_call``).
"""
# pylint: enable=g-doc-return-or-yield
global _use_named_call
use_named_call_prev = _use_named_call
_use_named_call = enable
try:
yield
finally:
_use_named_call = use_named_call_prev | Returns a context manager that enables/disables named call wrapping. Args: enable: If true, enables named call wrapping for labelling profile traces. (see ``enabled_named_call``). |
22,653 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
Interceptor = Callable[[NextGetter, Args, Kwargs, InterceptorContext], Any]
_global_interceptor_stack = ThreadLocalStack()
The provided code snippet includes necessary dependencies for implementing the `intercept_methods` function. Write a Python function `def intercept_methods(interceptor: Interceptor)` to solve the following problem:
r"""Registers a new method interceptor. Method interceptors allow you to (at a distance) intercept method calls to modules. It works similarly to decorators. You could modify args/kwargs before calling the underlying method and/or modify the result returning from calling the underlying method. Or you could completely skip calling the underlying method and decide to do something differently. For example:: >>> import flax.linen as nn >>> import jax.numpy as jnp ... >>> class Foo(nn.Module): ... def __call__(self, x): ... return x ... >>> def my_interceptor1(next_fun, args, kwargs, context): ... print('calling my_interceptor1') ... return next_fun(*args, **kwargs) ... >>> foo = Foo() >>> with nn.intercept_methods(my_interceptor1): ... _ = foo(jnp.ones([1])) calling my_interceptor1 You could also register multiple interceptors on the same method. Interceptors will run in order. For example:: >>> def my_interceptor2(next_fun, args, kwargs, context): ... print('calling my_interceptor2') ... return next_fun(*args, **kwargs) ... >>> with nn.intercept_methods(my_interceptor1), \ ... nn.intercept_methods(my_interceptor2): ... _ = foo(jnp.ones([1])) calling my_interceptor1 calling my_interceptor2 You could skip other interceptors by directly calling the ``context.orig_method``. For example:: >>> def my_interceptor3(next_fun, args, kwargs, context): ... print('calling my_interceptor3') ... return context.orig_method(*args, **kwargs) >>> with nn.intercept_methods(my_interceptor3), \ ... nn.intercept_methods(my_interceptor1), \ ... nn.intercept_methods(my_interceptor2): ... _ = foo(jnp.ones([1])) calling my_interceptor3 The following methods couldn't be intercepted: 1. Methods decoratored with ``nn.nowrap``. 2. Dunder methods including ``__eq__``, ``__repr__``, ``__init__``, ``__hash__``, and ``__post_init__``. 3. Module dataclass fields. 4. Module descriptors. Args: interceptor: A method interceptor.
Here is the function:
def intercept_methods(interceptor: Interceptor):
# pylint: disable=g-doc-return-or-yield
r"""Registers a new method interceptor.
Method interceptors allow you to (at a distance) intercept method calls to
modules. It works similarly to decorators. You could modify args/kwargs before
calling the underlying method and/or modify the result returning from calling
the underlying method. Or you could completely skip calling the underlying
method and decide to do something differently. For example::
>>> import flax.linen as nn
>>> import jax.numpy as jnp
...
>>> class Foo(nn.Module):
... def __call__(self, x):
... return x
...
>>> def my_interceptor1(next_fun, args, kwargs, context):
... print('calling my_interceptor1')
... return next_fun(*args, **kwargs)
...
>>> foo = Foo()
>>> with nn.intercept_methods(my_interceptor1):
... _ = foo(jnp.ones([1]))
calling my_interceptor1
You could also register multiple interceptors on the same method. Interceptors
will run in order. For example::
>>> def my_interceptor2(next_fun, args, kwargs, context):
... print('calling my_interceptor2')
... return next_fun(*args, **kwargs)
...
>>> with nn.intercept_methods(my_interceptor1), \
... nn.intercept_methods(my_interceptor2):
... _ = foo(jnp.ones([1]))
calling my_interceptor1
calling my_interceptor2
You could skip other interceptors by directly calling the
``context.orig_method``. For example::
>>> def my_interceptor3(next_fun, args, kwargs, context):
... print('calling my_interceptor3')
... return context.orig_method(*args, **kwargs)
>>> with nn.intercept_methods(my_interceptor3), \
... nn.intercept_methods(my_interceptor1), \
... nn.intercept_methods(my_interceptor2):
... _ = foo(jnp.ones([1]))
calling my_interceptor3
The following methods couldn't be intercepted:
  1. Methods decorated with ``nn.nowrap``.
2. Dunder methods including ``__eq__``, ``__repr__``, ``__init__``, ``__hash__``, and ``__post_init__``.
3. Module dataclass fields.
4. Module descriptors.
Args:
interceptor: A method interceptor.
"""
_global_interceptor_stack.push(interceptor)
try:
yield
finally:
assert _global_interceptor_stack.pop() is interceptor | r"""Registers a new method interceptor. Method interceptors allow you to (at a distance) intercept method calls to modules. It works similarly to decorators. You could modify args/kwargs before calling the underlying method and/or modify the result returning from calling the underlying method. Or you could completely skip calling the underlying method and decide to do something differently. For example:: >>> import flax.linen as nn >>> import jax.numpy as jnp ... >>> class Foo(nn.Module): ... def __call__(self, x): ... return x ... >>> def my_interceptor1(next_fun, args, kwargs, context): ... print('calling my_interceptor1') ... return next_fun(*args, **kwargs) ... >>> foo = Foo() >>> with nn.intercept_methods(my_interceptor1): ... _ = foo(jnp.ones([1])) calling my_interceptor1 You could also register multiple interceptors on the same method. Interceptors will run in order. For example:: >>> def my_interceptor2(next_fun, args, kwargs, context): ... print('calling my_interceptor2') ... return next_fun(*args, **kwargs) ... >>> with nn.intercept_methods(my_interceptor1), \ ... nn.intercept_methods(my_interceptor2): ... _ = foo(jnp.ones([1])) calling my_interceptor1 calling my_interceptor2 You could skip other interceptors by directly calling the ``context.orig_method``. For example:: >>> def my_interceptor3(next_fun, args, kwargs, context): ... print('calling my_interceptor3') ... return context.orig_method(*args, **kwargs) >>> with nn.intercept_methods(my_interceptor3), \ ... nn.intercept_methods(my_interceptor1), \ ... nn.intercept_methods(my_interceptor2): ... _ = foo(jnp.ones([1])) calling my_interceptor3 The following methods couldn't be intercepted: 1. Methods decoratored with ``nn.nowrap``. 2. Dunder methods including ``__eq__``, ``__repr__``, ``__init__``, ``__hash__``, and ``__post_init__``. 3. Module dataclass fields. 4. Module descriptors. Args: interceptor: A method interceptor. |
22,654 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
def _get_fn_name(fn):
if isinstance(fn, functools.partial):
return _get_fn_name(fn.func)
return getattr(fn, '__name__', 'unnamed_function')
class InterceptorContext:
"""Read only state showing the calling context for method interceptors.
Attributes:
module: The Module instance whose method is being called.
method_name: The name of the method being called on the module.
orig_method: The original method defined on the module. Calling it will
short circuit all other interceptors.
"""
module: 'Module'
method_name: str
orig_method: Callable[..., Any]
_global_interceptor_stack = ThreadLocalStack()
The provided code snippet includes necessary dependencies for implementing the `run_interceptors` function. Write a Python function `def run_interceptors( orig_method: Callable[..., Any], module: 'Module', *args, **kwargs, ) -> Any` to solve the following problem:
Runs method interceptors.
Here is the function:
def run_interceptors(
orig_method: Callable[..., Any],
module: 'Module',
*args,
**kwargs,
) -> Any:
"""Runs method interceptors."""
method_name = _get_fn_name(orig_method)
fun = functools.partial(orig_method, module)
context = InterceptorContext(module, method_name, fun)
def wrap_interceptor(interceptor, fun):
"""Wraps `fun` with `interceptor`."""
@functools.wraps(fun)
def wrapped(*args, **kwargs):
return interceptor(fun, args, kwargs, context)
return wrapped
# Wraps interceptors around the original method. The innermost interceptor is
# the last one added and directly wrapped around the original bound method.
for interceptor in _global_interceptor_stack:
fun = wrap_interceptor(interceptor, fun)
return fun(*args, **kwargs) | Runs method interceptors. |
22,655 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
def _sorted_items(x):
"""Returns items of a dict ordered by keys."""
return sorted(x.items(), key=lambda x: x[0])
The provided code snippet includes necessary dependencies for implementing the `_get_suffix_value_pairs` function. Write a Python function `def _get_suffix_value_pairs( tree_or_leaf: Any, ) -> List[Tuple[str, Type['Module']]]` to solve the following problem:
Helper for naming pytrees of submodules.
Here is the function:
def _get_suffix_value_pairs(
tree_or_leaf: Any,
) -> List[Tuple[str, Type['Module']]]:
"""Helper for naming pytrees of submodules."""
dict_or_leaf = serialization.to_state_dict(tree_or_leaf)
if not isinstance(dict_or_leaf, dict) or not dict_or_leaf:
return [('', tree_or_leaf)]
else:
flat_dict = traverse_util.flatten_dict(dict_or_leaf)
return [('_' + '_'.join(k), v) for k, v in _sorted_items(flat_dict)] | Helper for naming pytrees of submodules. |
22,656 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
_CallableT = TypeVar('_CallableT', bound=Callable)
The provided code snippet includes necessary dependencies for implementing the `compact` function. Write a Python function `def compact(fun: _CallableT) -> _CallableT` to solve the following problem:
Marks the given module method allowing inlined submodules. Methods wrapped in @compact can define submodules directly within the method. For instance:: >>> import flax.linen as nn >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, features): ... x = nn.Dense(features)(x) ... ... ... return x At most one method in each Module may be wrapped with @compact. Args: fun: The Module method to mark as compact. Returns: The given function ``fun`` marked as compact.
Here is the function:
def compact(fun: _CallableT) -> _CallableT:
  """Marks the given module method allowing inlined submodules.

  A method wrapped in ``@compact`` may construct submodules directly inside
  its body instead of in ``setup``::

    >>> import flax.linen as nn
    >>> class Foo(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x, features):
    ...     x = nn.Dense(features)(x)
    ...     ...
    ...     return x

  Each Module may have at most one method marked compact.

  Args:
    fun: The Module method to mark as compact.

  Returns:
    ``fun`` itself, tagged as compact.
  """
  # The wrapping machinery only checks for the presence of this attribute.
  setattr(fun, 'compact', True)  # type: ignore[attr-defined]
  return fun
import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
_CallableT = TypeVar('_CallableT', bound=Callable)
def nowrap(fun: _CallableT) -> _CallableT:
  """Marks the given module method as a helper that needn't be wrapped.

  Methods tagged with ``@nowrap`` are private helpers that are excluded from
  the state handler wrapping and from any separate named_call transform.
  This is needed in several concrete situations:

  - subclassing a core method such as ``Module.param`` without having the
    override decorated with the state management wrapper;
  - making a method callable from an unbound Module (e.g. a constructor
    helper that depends on neither params nor RNGs).

  To learn how Flax Modules manage their state, read the
  [The Flax Module lifecycle](https://flax.readthedocs.io/en/latest/developer_notes/module_lifecycle.html)
  guide.

  Example::

    >>> import flax.linen as nn
    >>> import jax, jax.numpy as jnp

    >>> class Foo(nn.Module):
    ...   num_features: int
    ...   @nn.nowrap
    ...   def _make_dense(self, num_features):
    ...     return nn.Dense(num_features)
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     # now safe to use constructor helper even if using named_call
    ...     dense = self._make_dense(self.num_features)
    ...     return dense(x)

  Args:
    fun: The Module method to mark as nowrap.

  Returns:
    ``fun`` itself, tagged as nowrap.
  """
  # Like `compact`, only the attribute's presence matters to the wrapper.
  setattr(fun, 'nowrap', True)  # type: ignore[attr-defined]
  return fun
class Module(ModuleBase):
"""Base class for all neural network modules.
Layers and models should subclass this class.
All Flax Modules are Python 3.7
`dataclasses <https://docs.python.org/3/library/dataclasses.html>`_. Since
dataclasses take over ``__init__``, you should instead override :meth:`setup`,
which is automatically called to initialize the module.
Modules can contain submodules, and in this way can be nested in a tree
structure. Submodels can be assigned as regular attributes inside the
:meth:`setup` method.
You can define arbitrary "forward pass" methods on your Module subclass.
While no methods are special-cased, ``__call__`` is a popular choice because
it allows you to use module instances as if they are functions::
>>> from flax import linen as nn
>>> from typing import Tuple
>>> class Module(nn.Module):
... features: Tuple[int, ...] = (16, 4)
... def setup(self):
... self.dense1 = nn.Dense(self.features[0])
... self.dense2 = nn.Dense(self.features[1])
... def __call__(self, x):
... return self.dense2(nn.relu(self.dense1(x)))
Optionally, for more concise module implementations where submodules
definitions are co-located with their usage, you can use the
:meth:`compact` wrapper.
"""
if typing.TYPE_CHECKING:
  # Static-analysis-only stubs: declare the `name`/`parent` keyword-only
  # dataclass fields (actually injected at runtime by
  # `_customized_dataclass_transform`) and make Module look constructible
  # and callable to type checkers.
  name: Optional[str] = module_field(kw_only=True, default=None)
  parent: Union['Module', _Sentinel, None] = module_field(
    kw_only=True, default=None
  )

  def __init__(self, *args, **kwargs):
    # this stub makes sure pytype accepts constructor arguments.
    pass

  def __call__(self, *args, **kwargs) -> Any:
    # this stub allows pytype to accept Modules as Callables.
    pass
def __init_subclass__(cls, kw_only: bool = False, **kwargs: Any) -> None:
  """Automatically initializes all subclasses as custom dataclasses."""
  super().__init_subclass__(**kwargs)
  # All Flax Modules are dataclasses.  We force this convention since
  # it encourages the stateless behavior needed to clone module instances for
  # functional transformation. Instead of using a python metaclass, we
  # automatically transform Modules into dataclasses at subclass creation
  # time, and we set the last dataclass arguments to `parent` and `name`.
  cls._customized_dataclass_transform(kw_only)
  # We wrap user-defined methods including setup and __call__ to enforce
  # a number of different checks and to provide clear error messages.
  cls._verify_single_or_no_compact()
  cls._find_compact_name_scope_methods()
  cls._wrap_module_attributes()
  # Set empty class defaults.
  cls._state = _uninitialized_module_internal_state  # type: ignore[attr-defined]
  cls.scope: Optional[Scope] = None  # type: ignore
  # Handles weak referencing of parent Modules to prevent reference cycles.
  cls._parent_ref = None  # type: ignore[attr-defined]
  cls.parent = ParentDescriptor()  # type: ignore[assignment]
@classmethod  # restored: invoked as cls._customized_dataclass_transform(kw_only)
def _customized_dataclass_transform(cls, kw_only: bool):
  """Transforms `cls` into a dataclass, with custom additional behavior.

  1. Inject `parent` and `name` fields. (If they are already present,
     then check that they have the expected types.)
  2. Set compare, hash, and repr to False for non-init fields.
  3. Generate a hash function (if not provided by cls).

  Args:
    kw_only: If True (Py 3.10+ only), make all dataclass fields
      keyword-only.
  """
  # Check reserved attributes have expected type annotations.
  annotations = dict(cls.__dict__.get('__annotations__', {}))
  if annotations.get('parent', _ParentType) != _ParentType:
    raise errors.ReservedModuleAttributeError(annotations)
  if annotations.get('name', str) not in ('str', str, Optional[str]):
    raise errors.ReservedModuleAttributeError(annotations)
  # any non-init field will only be set in setup
  # During __hash__ and __eq__ the field is not set yet
  # so it should not be used in compare, hash or repr.
  for field in annotations:
    field_meta = getattr(cls, field, None)
    if isinstance(field_meta, dataclasses.Field) and not field_meta.init:
      field_meta.compare = False
      field_meta.hash = False
      field_meta.repr = False
  extra_fields = [
    (
      'parent',
      _ParentType,
      kw_only_dataclasses.field(
        repr=False, default=_unspecified_parent, kw_only=True
      ),
    ),
    (
      'name',
      Optional[str],
      kw_only_dataclasses.field(default=None, kw_only=True),
    ),
  ]
  if kw_only:
    if tuple(sys.version_info)[:3] >= (3, 10, 0):
      # Native kw_only support: attach the extra fields directly and use
      # the stdlib dataclass transform.
      for (
        name,
        annotation,  # pytype: disable=invalid-annotation
        default,
      ) in extra_fields:
        setattr(cls, name, default)
        cls.__annotations__[name] = annotation
      dataclasses.dataclass(  # type: ignore[call-overload]
        unsafe_hash='__hash__' not in cls.__dict__,
        repr=False,
        kw_only=True,
      )(cls)
    else:
      raise TypeError('`kw_only` is not available before Py 3.10.')
  else:
    # Now apply dataclass transform (which operates in-place).
    # Do generate a hash function only if not provided by the class.
    kw_only_dataclasses.dataclass(
      cls,
      unsafe_hash='__hash__' not in cls.__dict__,
      repr=False,
      extra_fields=extra_fields,
    )  # pytype: disable=wrong-keyword-args

  cls.__hash__ = _wrap_hash(cls.__hash__)  # type: ignore[method-assign]
@classmethod  # restored: invoked as cls._verify_single_or_no_compact()
def _verify_single_or_no_compact(cls):
  """Statically verifies that at most a single method is labelled compact.

  Raises:
    errors.MultipleMethodsCompactError: if more than one method carries
      the `compact` tag set by the @compact decorator.
  """
  methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)]
  # Count methods tagged by @compact (which sets a `compact` attribute).
  n_compact_fns = sum(
    1 for method_name in methods
    if hasattr(getattr(cls, method_name), 'compact')
  )
  if n_compact_fns > 1:
    raise errors.MultipleMethodsCompactError()
@classmethod  # restored: invoked as cls._find_compact_name_scope_methods()
def _find_compact_name_scope_methods(cls):
  """Finds all compact_name_scope methods in the class.

  Stores their names on `cls._compact_name_scope_methods` for later use by
  `_try_setup` when building CompactNameScope wrappers.
  """
  methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)]
  compact_name_scope_fns = tuple(
    method_name
    for method_name in methods
    if hasattr(getattr(cls, method_name), 'compact_name_scope')
  )
  cls._compact_name_scope_methods = compact_name_scope_fns
@classmethod  # restored: invoked as cls._wrap_module_attributes()
def _wrap_module_attributes(cls):
  """Wraps user-defined non-inherited methods and descriptors with state
  management functions.

  Methods/descriptors tagged with @nowrap are left untouched.

  Returns:
    The (mutated) class, for convenience.
  """
  # wrap methods
  method_exclusions = [f.name for f in dataclasses.fields(cls)] + [
    '__eq__',
    '__repr__',
    '__init__',
    '__hash__',
    '__post_init__',
  ]
  for key in _get_local_method_names(cls, exclude=method_exclusions):
    method = getattr(cls, key)
    if hasattr(method, 'nowrap'):
      continue
    setattr(cls, key, wrap_method_once(method))
  # wrap descriptors
  descriptor_exclusions = [f.name for f in dataclasses.fields(cls)] + [
    'parent',
    '__dict__',
  ]
  for key in _get_local_descriptor_names(cls, descriptor_exclusions):
    # don't use getattr here, since it will call the descriptor
    descriptor = cls.__dict__[key]
    if hasattr(descriptor, 'nowrap'):
      continue
    setattr(cls, key, wrap_descriptor_once(descriptor))
  return cls
def _call_wrapped_method(self, fun, args, kwargs):
  """Calls a wrapped method.

  This function is responsible for setting up the thread local state
  correctly before calling the method and cleaning up afterwards.
  This includes storing intermediates, setup of the compact scope,
  and making sure setup is called before any other method.

  Args:
    fun: The wrapped method.
    args: Named arguments passed to ``fun``.
    kwargs: Keyword arguments passed to ``fun``.

  Returns:
    The results of calling ``fun``.
  """
  is_compact_method = hasattr(fun, 'compact')
  fun_name = _get_fn_name(fun)
  is_setup_method = fun_name == 'setup'
  # Call-info tracking is only active when a summary/tabulate context pushed
  # onto the call_info_stack, and never for setup itself.
  add_call_info = not is_setup_method and len(_context.call_info_stack) > 0
  # We lazily call setup() only when needed.
  if is_setup_method:
    if self.scope is None:
      raise errors.CallSetupUnboundModuleError()
    is_recurrent = self._state.in_setup
    self._state.in_setup = True
  else:
    self._try_setup()

  if is_compact_method:
    if self.scope is None:
      raise errors.CallCompactUnboundModuleError()
    is_recurrent = self._state.in_compact_method
    self._state.in_compact_method = True
  _context.module_stack.append(self)
  try:
    # get call info
    if add_call_info:
      assert self.scope is not None
      call_index = _context.call_info_stack[-1].get_call_index()

    # Interceptors (if any are installed globally) wrap the user method.
    if _global_interceptor_stack:
      run_fun = functools.partial(run_interceptors, fun)
    else:
      run_fun = fun

    # call method
    if _use_named_call:
      with jax.named_scope(_derive_profiling_name(self, fun)):
        y = run_fun(self, *args, **kwargs)
    else:
      y = run_fun(self, *args, **kwargs)

    # capture_intermediates support: sow the output if the active filter
    # selects this (module, method) pair.
    if _context.capture_stack:
      filter_fn = _context.capture_stack[-1]
      if filter_fn and filter_fn(self, fun_name):
        self.sow('intermediates', fun_name, y)
    if add_call_info:
      _args, _kwargs, _y = flax.linen.summary._represent_tree(
        (args, kwargs, y)
      )
      _context.call_info_stack[-1].calls.append(
        _CallInfo(
          call_index,
          self.path,
          self.clone(),
          self.scope.rngs,
          self.scope.mutable,
          fun.__name__,
          _args,
          _kwargs,
          _y,
        )
      )
    return y
  finally:
    _context.module_stack.pop()
    if is_compact_method:
      # Rewind the scope so a subsequent compact call re-creates submodule
      # names deterministically.
      object.__setattr__(self, 'scope', self.scope.rewound())
    # setup or compact calls can be recurrent for example due to super calls
    # resetting the state would cause is compact/setup method
    # to be set to False prematurely.
    if (is_compact_method or is_setup_method) and not is_recurrent:
      self._state.reset()
def __setattr__(self, name: str, val: Any):
  """Sets an attribute on this Module.

  We overload setattr solely to support pythonic naming via assignment of
  submodules in the special :meth:`setup` function::

    self.submodule_name = MyModule(...)

  We also support lists and other general pytrees, e.g.::

    self.submodules = [MyModule0(..), MyModule1(..), ...]

  Args:
    name: Attribute to set.
    val: Value of the attribute.

  Raises:
    errors.SetAttributeFrozenModuleError: if assigning after initialization
      and outside of setup (Modules are frozen like frozen dataclasses).
    errors.SetAttributeInModuleSetupError: if assigning to a dataclass
      field from within setup().
  """
  fields = self.__dataclass_fields__  # pytype: disable=attribute-error
  is_dataclass_attr = name in fields and fields[name].init

  if not self._state.in_setup:
    if not self._state.is_initialized:
      # Setting attributes before end of Module.__post_init__()
      object.__setattr__(self, name, val)
      return
    else:
      # We're past all initialization and setup logic:
      # Raises a TypeError just like frozen python dataclasses.
      raise errors.SetAttributeFrozenModuleError(
        self.__class__.__name__, name, val
      )

  # We're inside the setup() method:
  if is_dataclass_attr:
    # These names are specified as dataclass fields. They should not be
    # initialized within the setup() method, but can be modified freely
    # before it.
    raise errors.SetAttributeInModuleSetupError()

  # Values (that may be variables or submodules) are being defined and
  # attached in setup(), we run some extra logic in that case.
  self._register_submodules(name, val)
def __getattr__(self, name: str) -> Any:
  """Call setup() before getting any setup-defined attributes."""
  # We don't want to return anything for python copy / pickle methods.
  if name in _UNDEFINED_COPY_PICKLE_METHODS:
    raise AttributeError()
  self._try_setup()
  if name in self.__dict__:
    return self.__dict__[name]
  else:
    # Mirror the default AttributeError, plus a hint when the module is
    # unbound (setup-defined fields only exist under init/apply).
    msg = f'"{self.__class__.__name__}" object has no attribute "{name}".'
    if self.scope is None:
      msg += (
        f' If "{name}" is defined in \'.setup()\', remember these fields '
        "are only accessible from inside 'init' or 'apply'."
      )
    raise AttributeError(msg)
def __dir__(self) -> List[str]:
  """Call setup() before listing attributes."""
  # Ensures setup-defined attributes appear in dir()/autocomplete.
  self._try_setup()
  return object.__dir__(self)  # type: ignore
def __post_init__(self) -> None:
  # DO NOT REMOVE - Marker for internal logging.
  # In dataclasses, __init__ is overridden to process dataclass arguments,
  # and __post_init__ is called immediately afterwards. Here, depending on the
  # type of `parent` passed to initialize the Module, we either defer
  # initialization, attach this Module as a submodule of a parent, or bind
  # this Module at the top-level to variables and rngs.

  object.__setattr__(self, '_id', uuid())
  object.__setattr__(self, '_state', _ModuleInternalState())

  # Typically we set the parent based on the dynamic module context.
  if self.parent is _unspecified_parent:  # pytype: disable=attribute-error
    object.__setattr__(self, 'parent', _context.module_stack[-1])

  # Initialization is deferred for top level Modules or any other "orphan"
  # Modules until attachment by __setattr__ i.e. MyModule(..., parent=None)
  if self.parent is None:
    return

  # Register submodule on parent Module.
  if isinstance(self.parent, Module):
    # When initializing an unnamed Module inside setup()
    # initialization is deferred until attachment by __setattr__
    # i.e. self.mymodule = MyModule(...)
    self.name: Optional[str]
    if (
      self.parent._state.in_setup and self.name is None
    ):  # pytype: disable=attribute-error
      return
    if not self.parent._initialization_allowed:
      raise errors.AssignSubModuleError(self.__class__.__name__)
    # Autonaming of submodules.
    if self.name is None:  # pytype: disable=attribute-error
      # e.g. the second anonymous Dense child becomes "Dense_1".
      prefix = f'{self.__class__.__name__}'
      cursor = self.parent._state.autoname_cursor.get(prefix, 0)
      self.name = f'{prefix}_{cursor}'
      self.parent._state.autoname_cursor[prefix] = cursor + 1
    # Allow scope aliasing under transforms for submodules defined in setup.
    reuse_scopes = (
      self.parent._state.in_setup
      and self.parent._state.setup_called == SetupState.TRANSFORMED
    )
    # Perform name-collision check.
    if self.parent._name_taken(self.name, reuse_scopes=reuse_scopes):
      parent_class = self.parent.__class__.__name__
      raise errors.NameInUseError('submodule', self.name, parent_class)
    # Finalize attachment to parent and scope initialization.
    self.parent._state.children[self.name] = self
    assert self.parent.scope is not None
    object.__setattr__(
      self, 'scope', self.parent.scope.push(self.name, reuse=reuse_scopes)
    )

  # Top-level invocation with a functional Scope.
  elif isinstance(self.parent, Scope):
    object.__setattr__(self, 'scope', self.parent)
  else:
    raise ValueError('parent must be None, Module or Scope')

  # eagerly bind submodules if scope is available
  if self.scope is not None:
    for field in dataclasses.fields(self):
      if field.name not in ('parent', 'name') and field.init:
        self._register_submodules(field.name, getattr(self, field.name))

  self._state.is_initialized = True
def __repr__(self) -> str:
  # Delegates to the shared module pretty-printer helper.
  return _module_repr(self)
def setup(self) -> None:
  """Initializes a Module lazily (similar to a lazy ``__init__``).

  ``setup`` is called once lazily on a module instance when a module
  is bound, immediately before any other methods like ``__call__`` are
  invoked, or before a ``setup``-defined attribute on ``self`` is accessed.

  This can happen in three cases:

  1. Immediately when invoking :meth:`apply`, :meth:`init` or
     :meth:`init_and_output`.

  2. Once the module is given a name by being assigned to an attribute of
     another module inside the other module's ``setup`` method
     (see :meth:`__setattr__`)::

      >>> class MyModule(nn.Module):
      ...   def setup(self):
      ...     submodule = nn.Conv(...)

      ...     # Accessing `submodule` attributes does not yet work here.

      ...     # The following line invokes `self.__setattr__`, which gives
      ...     # `submodule` the name "conv1".
      ...     self.conv1 = submodule

      ...     # Accessing `submodule` attributes or methods is now safe and
      ...     # either causes setup() to be called once.

  3. Once a module is constructed inside a method wrapped with
     :meth:`compact`, immediately before another method is called or
     ``setup`` defined attribute is accessed.
  """
  # Intentionally a no-op; subclasses override this to declare submodules
  # and variables.
  pass
def _register_submodules(self, name, val):
  """Registers a submodule.

  Adopts any unbound Modules found inside the pytree ``val`` (cloning them
  and naming them after the attribute ``name``), freezes the attribute, and
  finalizes each adopted child via ``__post_init__``.

  Args:
    name: Attribute name the value is being assigned to.
    val: Arbitrary pytree possibly containing Module instances.
  """
  assert self.scope, 'Trying to register submodules on unbound scope.'
  root = self.scope.root
  # Per-root weak cache so shared (by-reference) submodules are cloned once.
  cache = _caches.get(root, weakref.WeakValueDictionary())
  _caches[root] = cache
  queue = []
  preserve_adopted_names = config.flax_preserve_adopted_names
  if hasattr(type(self), 'preserve_adopted_names'):
    preserve_adopted_names = type(self).preserve_adopted_names

  def adopt_attr_modules(cache, queue, suffix, subvalue):
    if isinstance(subvalue, Module):
      current_name = subvalue.name
      adopted_name = None
      if subvalue.parent is None:
        # Preserve sharing-by-reference relationships during adoption
        # via cache keyed on unique instance ids.
        key = subvalue._id
        # Module was passed from outside. It needs to be cloned.
        # Outside modules are named by attachment, not an outer name,
        # UNLESS we're using new adopted name policy, in which case an existing
        # name will be used, as is often supplied by config systems.
        if preserve_adopted_names:
          adopted_name = object.__getattribute__(subvalue, 'name')
        if key in cache:
          subvalue = cache[key]
        else:
          subvalue = subvalue.clone(name=None)
          cache[key] = subvalue
      if subvalue.name is None:
        object.__setattr__(subvalue, 'parent', self)
        if adopted_name is None:
          adopted_name = (
            f'{name}{suffix}'
            if not isinstance(subvalue, CompactNameScope)
            else current_name
          )
        object.__setattr__(subvalue, 'name', adopted_name)
        queue.append(subvalue)
    return subvalue

  val = _freeze_attr(
    _map_over_modules_in_tree(
      functools.partial(adopt_attr_modules, cache, queue), val
    )
  )
  object.__setattr__(self, name, val)
  # Finalize attachment of every newly adopted child (runs naming, scope
  # push, etc.).
  for x in queue:
    x.__post_init__()
def _try_setup(self, shallow: bool = False) -> None:
  """Tries to setup module if scope is available and setup has not been called yet.

  Args:
    shallow: If True, only register attribute submodules without running the
      user's setup() — used to avoid running it before a transformation.
  """
  if (
    self.scope
    and not self._state.in_setup
    and self._state.setup_called != SetupState.DONE
  ):
    try:
      self._state.in_setup = True
      # A shallow setup will only register attribute submodules but it does
      # not call the user's setup. This avoids running before a
      # transformation.
      for field in dataclasses.fields(self):
        if field.name not in ('parent', 'name') and field.init:
          self._register_submodules(field.name, getattr(self, field.name))
      if not shallow:
        self.setup()
        # create NonTransparent Modules
        self._compact_name_scope_modules = {
          name: CompactNameScope(
            getattr(type(self), name).inner_fun, lambda: self, name=name
          )
          for name in self._compact_name_scope_methods
        }
      # We run static checks abstractly once for setup before any transforms
      # to detect name collisions and other python errors.
      elif self._state.setup_called == SetupState.NEW:
        self._validate_setup()
    finally:
      self._state.in_setup = False
      if not shallow:
        self._state.setup_called = SetupState.DONE
def _validate_setup(self) -> None:
  """Abstractly evaluates setup only to run static checks."""

  def run_setup_only(x):
    # Bind a clone of this module to a throwaway scope and run a wrapped
    # identity function, which triggers setup() without real computation.
    wrapped_id = wrap_method_once(lambda m, x: x)
    with TestScope({}, rngs={}, mutable=True).temporary() as root:
      return wrapped_id(self.clone(parent=root), x)

  # eval_shape traces abstractly: no FLOPs are spent, only Python errors
  # (e.g. name collisions) surface.
  _ = jax.eval_shape(run_setup_only, 0)
def _name_taken(
  self,
  name: str,
  reuse_scopes: bool = False,
  collection: Optional[str] = None,
) -> bool:
  """Returns whether ``name`` is already reserved in this module's scope.

  When ``reuse_scopes`` is set (setup under a transform), collisions are
  expected and allowed, so this always reports False.
  """
  assert self.scope is not None
  return False if reuse_scopes else self.scope.name_reserved(name, collection)
@property  # restored: read as an attribute, e.g. `self._initialization_allowed`
def _initialization_allowed(self):
  """Whether variables/submodules may currently be attached to this Module.

  True before ``__post_init__`` completes (eager attachment), inside
  ``setup()``, or inside a ``@compact``-wrapped method.
  """
  return (
    not self._state.is_initialized  # allow eager attachment in post-init
    or self._state.in_setup
    or self._state.in_compact_method
  )
@property  # restored: read as an attribute, e.g. `self.path`
def path(self):
  """Returns this Module's path within the module tree.

  Raises:
    ValueError: if the module is unbound (no scope attached).
  """
  if self.scope is None:
    raise ValueError("Can't access module paths on unbound modules.")
  return self.scope.path
def clone(
  self: M,
  *,
  parent: Optional[Union[Scope, 'Module', _Sentinel]] = None,
  _deep_clone: Union[bool, weakref.WeakValueDictionary] = False,
  _reset_names: bool = False,
  **updates,
) -> M:
  """Creates a clone of this Module, with optionally updated arguments.

  NOTE: end users are encouraged to use the ``copy`` method. ``clone`` is used
  primarily for internal routines, and ``copy`` offers simpler arguments and
  better defaults.

  Args:
    parent: The parent of the clone. The clone will have no parent if no
      explicit parent is specified.
    _deep_clone: A boolean or a weak value dictionary to control deep cloning
      of submodules. If True, submodules will be cloned recursively. If a weak
      value dictionary is passed, it will be used to cache cloned submodules.
      This flag is used by init/apply/bind to avoid scope leakage.
    _reset_names: If True, ``name=None`` is also passed to submodules when
      cloning. Resetting names in submodules is necessary when calling ``.unbind``.
    **updates: Attribute updates.

  Returns:
    A clone of the this Module with the updated attributes and parent.
  """
  attrs = {
    f.name: getattr(self, f.name) for f in dataclasses.fields(self) if f.init
  }

  attrs.update(parent=parent, **updates)

  # Here we implement deep cloning of submodules, this is necessary to avoid scope leakage
  # from external submodules into init/apply/bind while preserving sharing-by-reference
  # relationships between submodules.
  if _deep_clone != False:
    # We use a weak value dictionary to cache cloned submodules. When a shared
    # submodule is cloned, its only cloned once else its fetched from the cache.
    cache = (
      weakref.WeakValueDictionary()
      if isinstance(_deep_clone, bool)
      else _deep_clone
    )

    def clone_fn(m: Module) -> Module:
      if hasattr(m, '_id'):
        key = m._id
        if key in cache:
          return cache[key]
        else:
          if _reset_names:
            clone = m.clone(
              _deep_clone=cache, _reset_names=_reset_names, name=None
            )
          else:
            clone = m.clone(_deep_clone=cache)
          cache[key] = clone
          return clone
      else:
        # If the module doesn't have an _id attribute it could be a mock object
        # so we return it as is.
        return m

    # _map_submodules will map over all submodules inside attrs
    # value here can be any pytree, non-module values are ignored
    for field_name, value in attrs.items():
      if field_name == 'parent':
        continue
      attrs[field_name] = _map_submodules(clone_fn, value)

  module = self.__class__(**attrs)
  return module
def copy(
  self: M,
  *,
  parent: Optional[Union[Scope, 'Module', _Sentinel]] = _unspecified_parent,
  name: Optional[str] = None,
  **updates,
) -> M:
  """Creates a copy of this Module, with optionally updated arguments.

  Args:
    parent: The parent of the copy. By default the current module is taken
      as parent if not explicitly specified.
    name: A new name for the copied Module, by default a new automatic name
      will be given.
    **updates: Attribute updates.

  Returns:
    A copy of the this Module with the updated name, parent, and attributes.
  """
  # Always deep-clones submodules (avoids scope leakage) but keeps their
  # existing names.
  return self.clone(
    parent=parent, name=name, _deep_clone=True, _reset_names=False, **updates
  )
# Typing-only overload stubs (restored @overload: without it these `...`
# bodies are dead definitions that merely shadow one another at runtime).
@overload
def variable(
  self,
  col: str,
  name: str,
  init_fn: Optional[Callable[..., T]] = None,
  *init_args,
) -> Variable[T]:
  ...

@overload
def variable(
  self,
  col: str,
  name: str,
  init_fn: Optional[Callable[..., T]] = None,
  *init_args,
  unbox: Literal[True],
  **init_kwargs,
) -> Variable[T]:
  ...

@overload
def variable(
  self,
  col: str,
  name: str,
  init_fn: Optional[Callable[..., T]] = None,
  *init_args,
  unbox: Literal[False],
  **init_kwargs,
) -> Variable[meta.AxisMetadata[T]]:
  ...

@overload
def variable(
  self,
  col: str,
  name: str,
  init_fn: Optional[Callable[..., T]] = None,
  *init_args,
  unbox: bool = True,
  **init_kwargs,
) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
  ...

def variable(
  self,
  col: str,
  name: str,
  init_fn: Optional[Callable[..., T]] = None,
  *init_args,
  unbox: bool = True,
  **init_kwargs,
) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
  """Declares and returns a variable in this Module.

  See :mod:`flax.core.variables` for more information. See also :meth:`param`
  for a shorthand way to define read-only variables in the "params"
  collection.

  Contrary to :meth:`param`, all arguments passing using ``init_fn`` should be
  passed on explicitly::

    >>> class Foo(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     x = nn.Dense(4)(x)
    ...     key = self.make_rng('stats')
    ...     mean = self.variable('stats', 'mean', nn.initializers.lecun_normal(), key, x.shape)
    ...     ...
    ...     return x * mean.value
    >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3)))
    >>> jax.tree_util.tree_map(jnp.shape, variables)
    {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}}, 'stats': {'mean': (2, 4)}}

  In the example above, the function ``lecun_normal`` expects two arguments:
  ``key`` and ``shape``, and both have to be passed on. The PRNG for ``stats``
  has to be provided explicitly when calling :meth:`init` and :meth:`apply`.

  Args:
    col: The variable collection name.
    name: The variable name.
    init_fn: The function that will be called to compute the initial value of
      this variable. This function will only be called the first time this
      variable is used in this module. If None, the variable must already be
      initialized otherwise an error is raised.
    *init_args: The positional arguments to pass to init_fn.
    unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
      value, see ``flax.nn.meta.unbox`` (default: True).
    **init_kwargs: The key-word arguments to pass to init_fn

  Returns:
    A :class:`flax.core.variables.Variable` that can be read or set via
    ".value" attribute. Throws an error if the variable exists already.
  """
  if not self._initialization_allowed:
    raise ValueError(
      'Variables must be initialized in `setup()` or in a method '
      'wrapped in `@compact`'
    )
  if self._name_taken(name, collection=col):
    raise errors.NameInUseError('variable', name, self.__class__.__name__)
  assert self.scope is not None
  v = self.scope.variable(
    col, name, init_fn, *init_args, unbox=unbox, **init_kwargs
  )
  self._state.children[name] = col
  return v
# Typing-only overload stubs (restored @overload: without it these `...`
# bodies are dead definitions that merely shadow one another at runtime).
@overload
def param(
  self, name: str, init_fn: Callable[..., T], *init_args,
) -> T:
  ...

@overload
def param(
  self,
  name: str,
  init_fn: Callable[..., T],
  *init_args,
  unbox: Literal[True],
  **init_kwargs,
) -> T:
  ...

@overload
def param(
  self,
  name: str,
  init_fn: Callable[..., T],
  *init_args,
  unbox: Literal[False],
  **init_kwargs,
) -> meta.AxisMetadata[T]:
  ...

@overload
def param(
  self,
  name: str,
  init_fn: Callable[..., T],
  *init_args,
  unbox: bool,
  **init_kwargs,
) -> Union[T, meta.AxisMetadata[T]]:
  ...

def param(
  self,
  name: str,
  init_fn: Callable[..., T],
  *init_args,
  unbox: bool = True,
  **init_kwargs,
) -> Union[T, meta.AxisMetadata[T]]:
  """Declares and returns a parameter in this Module.

  Parameters are read-only variables in the collection named "params". See
  :mod:`flax.core.variables` for more details on variables.

  The first argument of ``init_fn`` is assumed to be a PRNG key, which is
  provided automatically and does not have to be passed using ``init_args``
  or ``init_kwargs``::

    >>> class Foo(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     x = nn.Dense(4)(x)
    ...     mean = self.param('mean', nn.initializers.lecun_normal(), x.shape)
    ...     ...
    ...     return x * mean
    >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3)))
    >>> jax.tree_util.tree_map(jnp.shape, variables)
    {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}, 'mean': (2, 4)}}

  In the example above, the function ``lecun_normal`` expects two arguments:
  ``key`` and ``shape``, but only ``shape`` has to be provided explicitly;
  ``key`` is set automatically using the PRNG for ``params`` that is passed
  when initializing the module using :meth:`init`.

  Args:
    name: The parameter name.
    init_fn: The function that will be called to compute the initial value of
      this variable. This function will only be called the first time this
      parameter is used in this module.
    *init_args: The positional arguments to pass to init_fn.
    unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
      value, see ``flax.nn.meta.unbox`` (default: True).
    **init_kwargs: The key-word arguments to pass to init_fn.

  Returns:
    The value of the initialized parameter. Throws an error if the parameter
    exists already.
  """
  if not self._initialization_allowed:
    raise ValueError(
      'Parameters must be initialized in `setup()` or in a method '
      'wrapped in `@compact`'
    )
  if self._name_taken(name, collection='params'):
    raise errors.NameInUseError('param', name, self.__class__.__name__)
  assert self.scope is not None
  v = self.scope.param(name, init_fn, *init_args, unbox=unbox, **init_kwargs)
  self._state.children[name] = 'params'
  return v
def has_variable(self, col: str, name: str) -> bool:
  """Reports whether a variable named ``name`` exists in collection ``col``.

  See :mod:`flax.core.variables` for background on variables and
  collections.

  Args:
    col: The variable collection name.
    name: The name of the variable.

  Returns:
    True if the variable exists.
  """
  scope = self.scope
  if scope is None:
    raise ValueError("Can't access variables on unbound modules")
  return scope.has_variable(col, name)
def is_mutable_collection(self, col: str) -> bool:
  """Reports whether the collection ``col`` is mutable in this context."""
  scope = self.scope
  if scope is None:
    raise ValueError("Can't check mutability on unbound modules")
  return scope.is_mutable_collection(col)
def has_rng(self, name: str) -> bool:
  """Reports whether a PRNGSequence named ``name`` is available."""
  scope = self.scope
  if scope is None:
    raise ValueError("Can't query for RNGs on unbound modules")
  return scope.has_rng(name)
def make_rng(self, name: str = 'params') -> PRNGKey:
"""Returns a new RNG key from a given RNG sequence for this Module.
The new RNG key is split from the previous one. Thus, every call to
``make_rng`` returns a new RNG key, while still guaranteeing full
reproducibility.
.. note::
If an invalid name is passed (i.e. no RNG key was passed by
the user in ``.init`` or ``.apply`` for this name), then ``name``
will default to ``'params'``.
Example::
>>> import jax
>>> import flax.linen as nn
>>> class ParamsModule(nn.Module):
... def __call__(self):
... return self.make_rng('params')
>>> class OtherModule(nn.Module):
... def __call__(self):
... return self.make_rng('other')
>>> key = jax.random.key(0)
>>> params_out, _ = ParamsModule().init_with_output({'params': key})
>>> # self.make_rng('other') will default to using the 'params' RNG stream
>>> other_out, _ = OtherModule().init_with_output({'params': key})
>>> assert params_out == other_out
Learn more about RNG's by reading the Flax RNG guide:
https://flax.readthedocs.io/en/latest/guides/flax_fundamentals/rng_guide.html
Args:
name: The RNG sequence name.
Returns:
The newly generated RNG key.
"""
if self.scope is None:
raise ValueError("Can't use RNGs on unbound modules")
return self.scope.make_rng(name)
def is_initializing(self) -> bool:
"""Returns True if running under self.init(...) or nn.init(...)().
This is a helper method to handle the common case of simple initialization
where we wish to have setup logic occur when only called under
``module.init`` or ``nn.init``. For more complicated multi-phase
initialization scenarios it is better to test for the mutability of
particular variable collections or for the presence of particular
variables that potentially need to be initialized.
"""
if self.scope is None:
raise ValueError("Can't check if running under init() on unbound modules")
return self.scope.get_flag('initializing', False)
def _module_checks(self):
"""Run standard runtime checks."""
if not isinstance(self, Module):
raise errors.InvalidInstanceModuleError()
overridden_post_init = self.__post_init__ != Module.__post_init__
if overridden_post_init and not hasattr(self, '_id'):
raise errors.IncorrectPostInitOverrideError()
def bind(
self: M,
variables: VariableDict,
*args,
rngs: Optional[RNGSequences] = None,
mutable: CollectionFilter = False,
) -> M:
"""Creates an interactive Module instance by binding variables and RNGs.
``bind`` provides an "interactive" instance of a Module directly without
transforming a function with ``apply``. This is particularly useful for
debugging and interactive use cases like notebooks where a function would
limit the ability to split up code into different cells.
Once the variables (and optionally RNGs) are bound to a ``Module`` it
becomes a stateful object. Note that idiomatic JAX is functional and
therefore an interactive instance does not mix well with vanilla JAX APIs.
``bind()`` should only be used for interactive experimentation, and in all
other cases we strongly encourage users to use ``apply()`` instead.
Example::
>>> import jax
>>> import jax.numpy as jnp
>>> import flax.linen as nn
>>> class AutoEncoder(nn.Module):
... def setup(self):
... self.encoder = nn.Dense(3)
... self.decoder = nn.Dense(5)
...
... def __call__(self, x):
... return self.decoder(self.encoder(x))
>>> x = jnp.ones((16, 9))
>>> ae = AutoEncoder()
>>> variables = ae.init(jax.random.key(0), x)
>>> model = ae.bind(variables)
>>> z = model.encoder(x)
>>> x_reconstructed = model.decoder(z)
Args:
variables: A dictionary containing variables keyed by variable
collections. See :mod:`flax.core.variables` for more details about
variables.
*args: Named arguments (not used).
rngs: a dict of PRNGKeys to initialize the PRNG sequences.
mutable: Can be bool, str, or list. Specifies which collections should be
treated as mutable: ``bool``: all/no collections are mutable. ``str``:
The name of a single mutable collection. ``list``: A list of names of
mutable collections.
Returns:
A copy of this instance with bound variables and RNGs.
"""
Module._module_checks(self)
del args
scope = core.bind(variables, rngs=rngs, mutable=mutable)
return self.clone(parent=scope, _deep_clone=True)
def unbind(self: M) -> Tuple[M, VariableDict]:
"""Returns an unbound copy of a Module and its variables.
``unbind`` helps create a stateless version of a bound Module.
An example of a common use case: to extract a sub-Module defined inside
``setup()`` and its corresponding variables: 1) temporarily ``bind`` the
parent Module; and then 2) ``unbind`` the desired sub-Module. (Recall that
``setup()`` is only called when the Module is bound.)::
>>> class Encoder(nn.Module):
... @nn.compact
... def __call__(self, x):
... ...
... return nn.Dense(256)(x)
>>> class Decoder(nn.Module):
... @nn.compact
... def __call__(self, x):
... ...
... return nn.Dense(784)(x)
>>> class AutoEncoder(nn.Module):
... def setup(self):
... self.encoder = Encoder()
... self.decoder = Decoder()
...
... def __call__(self, x):
... return self.decoder(self.encoder(x))
>>> module = AutoEncoder()
>>> variables = module.init(jax.random.key(0), jnp.ones((1, 784)))
>>> # Extract the Encoder sub-Module and its variables
>>> encoder, encoder_vars = module.bind(variables).encoder.unbind()
Returns:
A tuple with an unbound copy of this Module and its variables.
"""
Module._module_checks(self)
if self.scope is None:
raise errors.CallUnbindOnUnboundModuleError()
variables = self.variables
module = self.clone(_deep_clone=True, _reset_names=True, name=None)
return module, variables
def apply(
self,
variables: VariableDict,
*args,
rngs: Optional[Union[PRNGKey, RNGSequences]] = None,
method: Union[Callable[..., Any], str, None] = None,
mutable: CollectionFilter = False,
capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False,
**kwargs,
) -> Union[Any, Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]]:
"""Applies a module method to variables and returns output and modified variables.
Note that ``method`` should be set if one would like to call ``apply`` on a
different class method than ``__call__``. For instance, suppose a
Transformer modules has a method called ``encode``, then the following calls
``apply`` on that method::
>>> import flax.linen as nn
>>> import jax, jax.numpy as jnp
>>> import numpy as np
>>> class Transformer(nn.Module):
... def encode(self, x):
... ...
>>> x = jnp.ones((16, 9))
>>> model = Transformer()
>>> variables = model.init(jax.random.key(0), x, method=Transformer.encode)
>>> encoded = model.apply(variables, x, method=Transformer.encode)
If a function instance is provided, the unbound function is used. For
instance, the example below is equivalent to the one above::
>>> encoded = model.apply(variables, x, method=model.encode)
You can also pass a string to a callable attribute of the module. For
example, the previous can be written as::
>>> encoded = model.apply(variables, x, method='encode')
Note ``method`` can also be a function that is not defined in
``Transformer``. In that case, the function should have at least one
argument representing an instance of the Module class::
>>> def other_fn(instance, x):
... # instance.some_module_attr(...)
... instance.encode
... ...
>>> model.apply(variables, x, method=other_fn)
If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'``
RNG stream. If you want to use a different RNG stream or need to use
multiple streams, you can pass a dictionary mapping each RNG stream name
to its corresponding ``PRNGKey`` to ``apply``. If ``self.make_rng(name)``
is called on an RNG stream name that isn't passed by the user, it will
default to using the ``'params'`` RNG stream.
Example::
>>> class Foo(nn.Module):
... @nn.compact
... def __call__(self, x, add_noise=False):
... x = nn.Dense(16)(x)
... x = nn.relu(x)
...
... if add_noise:
... # Add gaussian noise
... noise_key = self.make_rng('noise')
... x = x + jax.random.normal(noise_key, x.shape)
...
... return nn.Dense(1)(x)
>>> x = jnp.empty((1, 7))
>>> module = Foo()
>>> rngs = {'params': jax.random.key(0), 'noise': jax.random.key(1)}
>>> variables = module.init(rngs, x)
>>> out0 = module.apply(variables, x, add_noise=True, rngs=rngs)
>>> rngs['noise'] = jax.random.key(0)
>>> out1 = module.apply(variables, x, add_noise=True, rngs=rngs)
>>> # different output (key(1) vs key(0))
>>> np.testing.assert_raises(AssertionError, np.testing.assert_allclose, out0, out1)
>>> del rngs['noise']
>>> # self.make_rng('noise') will default to using the 'params' RNG stream
>>> out2 = module.apply(variables, x, add_noise=True, rngs=rngs)
>>> # same output (key(0))
>>> np.testing.assert_allclose(out1, out2)
>>> # passing in a single key is equivalent to passing in {'params': key}
>>> out3 = module.apply(variables, x, add_noise=True, rngs=jax.random.key(0))
>>> # same output (key(0))
>>> np.testing.assert_allclose(out2, out3)
Args:
variables: A dictionary containing variables keyed by variable
collections. See :mod:`flax.core.variables` for more details about
variables.
*args: Named arguments passed to the specified apply method.
rngs: a dict of PRNGKeys to initialize the PRNG sequences. The "params"
PRNG sequence is used to initialize parameters.
method: A function to call apply on. This is generally a function in the
module. If provided, applies this method. If not provided, applies the
``__call__`` method of the module. A string can also be provided to
specify a method by name.
mutable: Can be bool, str, or list. Specifies which collections should be
treated as mutable: ``bool``: all/no collections are mutable. ``str``:
The name of a single mutable collection. ``list``: A list of names of
mutable collections.
capture_intermediates: If ``True``, captures intermediate return values of
all Modules inside the "intermediates" collection. By default, only the
return values of all ``__call__`` methods are stored. A function can be
passed to change the filter behavior. The filter function takes the
Module instance and method name and returns a bool indicating whether
the output of that method invocation should be stored.
**kwargs: Keyword arguments passed to the specified apply method.
Returns:
If ``mutable`` is False, returns output. If any collections are
mutable, returns ``(output, vars)``, where ``vars`` are is a dict
of the modified collections.
"""
Module._module_checks(self)
if rngs is not None and not isinstance(rngs, dict):
if not core.scope._is_valid_rng(rngs):
raise errors.InvalidRngError(
'RNGs should be of shape (2,) or PRNGKey in Module '
f'{self.__class__.__name__}, but rngs are: {rngs}'
)
rngs = {'params': rngs}
if isinstance(method, str):
attribute_name = method
method = getattr(self, attribute_name)
if not callable(method):
class_name = type(self).__name__
raise TypeError(
f"'{class_name}.{attribute_name}' must be a callable, got"
f' {type(method)}.'
)
# if the `method` string is a submodule, we create a lambda function
# that calls the submodule, forwarding all arguments.
if isinstance(method, Module):
method = lambda self, *args, **kwargs: getattr(self, attribute_name)(
*args, **kwargs
)
elif method is None:
method = self.__call__
method = _get_unbound_fn(method)
return apply(
method,
self,
mutable=mutable,
capture_intermediates=capture_intermediates,
)(variables, *args, **kwargs, rngs=rngs)
def init_with_output(
self,
rngs: Union[PRNGKey, RNGSequences],
*args,
method: Union[Callable[..., Any], str, None] = None,
mutable: CollectionFilter = DenyList('intermediates'),
capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False,
**kwargs,
) -> Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]:
"""Initializes a module method with variables and returns output and modified variables.
Args:
rngs: The rngs for the variable collections.
*args: Named arguments passed to the init function.
method: An optional method. If provided, applies this method. If not
provided, applies the ``__call__`` method. A string can also be
provided to specify a method by name.
mutable: Can be bool, str, or list. Specifies which collections should be
treated as mutable: ``bool``: all/no collections are mutable. ``str``:
The name of a single mutable collection. ``list``: A list of names of
mutable collections. By default, all collections except "intermediates"
are mutable.
capture_intermediates: If ``True``, captures intermediate return values of
all Modules inside the "intermediates" collection. By default only the
return values of all ``__call__`` methods are stored. A function can be
passed to change the filter behavior. The filter function takes the
Module instance and method name and returns a bool indicating whether
the output of that method invocation should be stored.
**kwargs: Keyword arguments passed to the init function.
Returns:
``(output, vars)``, where ``vars`` are is a dict of the modified
collections.
"""
Module._module_checks(self)
if not isinstance(rngs, dict):
if not core.scope._is_valid_rng(rngs):
raise errors.InvalidRngError(
'RNGs should be of shape (2,) or PRNGKey in Module '
f'{self.__class__.__name__}, but rngs are: {rngs}'
)
rngs = {'params': rngs}
if isinstance(method, str):
attribute_name = method
method = getattr(self, attribute_name)
if not callable(method):
class_name = type(self).__name__
raise TypeError(
f"'{class_name}.{attribute_name}' must be a callable, got"
f' {type(method)}.'
)
elif method is None:
method = self.__call__
method = _get_unbound_fn(method)
return init_with_output(
method,
self,
mutable=mutable,
capture_intermediates=capture_intermediates,
)(rngs, *args, **kwargs)
def init(
self,
rngs: Union[PRNGKey, RNGSequences],
*args,
method: Union[Callable[..., Any], str, None] = None,
mutable: CollectionFilter = DenyList('intermediates'),
capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False,
**kwargs,
) -> Union[FrozenVariableDict, Dict[str, Any]]:
"""Initializes a module method with variables and returns modified variables.
``init`` takes as first argument either a single ``PRNGKey``, or a
dictionary mapping variable collections names to their ``PRNGKeys``, and
will call ``method`` (which is the module's ``__call__`` function by
default) passing ``*args`` and ``**kwargs``, and returns
a dictionary of initialized variables.
Example::
>>> import flax.linen as nn
>>> import jax, jax.numpy as jnp
>>> import numpy as np
>>> class Foo(nn.Module):
... @nn.compact
... def __call__(self, x, train):
... x = nn.Dense(16)(x)
... x = nn.BatchNorm(use_running_average=not train)(x)
... x = nn.relu(x)
... return nn.Dense(1)(x)
>>> x = jnp.empty((1, 7))
>>> module = Foo()
>>> key = jax.random.key(0)
>>> variables = module.init(key, x, train=False)
If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'``
RNG stream. If you want to use a different RNG stream or need to use
multiple streams, you can pass a dictionary mapping each RNG stream name
to its corresponding ``PRNGKey`` to ``init``. If ``self.make_rng(name)``
is called on an RNG stream name that isn't passed by the user, it will
default to using the ``'params'`` RNG stream.
Example::
>>> class Foo(nn.Module):
... @nn.compact
... def __call__(self, x):
... x = nn.Dense(16)(x)
... x = nn.relu(x)
...
... other_variable = self.variable(
... 'other_collection',
... 'other_variable',
... lambda x: jax.random.normal(self.make_rng('other_rng'), x.shape),
... x,
... )
... x = x + other_variable.value
...
... return nn.Dense(1)(x)
>>> module = Foo()
>>> rngs = {'params': jax.random.key(0), 'other_rng': jax.random.key(1)}
>>> variables0 = module.init(rngs, x)
>>> rngs['other_rng'] = jax.random.key(0)
>>> variables1 = module.init(rngs, x)
>>> # equivalent params (key(0))
>>> _ = jax.tree_util.tree_map(
... np.testing.assert_allclose, variables0['params'], variables1['params']
... )
>>> # different other_variable (key(1) vs key(0))
>>> np.testing.assert_raises(
... AssertionError,
... np.testing.assert_allclose,
... variables0['other_collection']['other_variable'],
... variables1['other_collection']['other_variable'],
... )
>>> del rngs['other_rng']
>>> # self.make_rng('other_rng') will default to using the 'params' RNG stream
>>> variables2 = module.init(rngs, x)
>>> # equivalent params (key(0))
>>> _ = jax.tree_util.tree_map(
... np.testing.assert_allclose, variables1['params'], variables2['params']
... )
>>> # equivalent other_variable (key(0))
>>> np.testing.assert_allclose(
... variables1['other_collection']['other_variable'],
... variables2['other_collection']['other_variable'],
... )
>>> # passing in a single key is equivalent to passing in {'params': key}
>>> variables3 = module.init(jax.random.key(0), x)
>>> # equivalent params (key(0))
>>> _ = jax.tree_util.tree_map(
... np.testing.assert_allclose, variables2['params'], variables3['params']
... )
>>> # equivalent other_variable (key(0))
>>> np.testing.assert_allclose(
... variables2['other_collection']['other_variable'],
... variables3['other_collection']['other_variable'],
... )
Jitting ``init`` initializes a model lazily using only the shapes of the
provided arguments, and avoids computing the forward pass with actual
values. Example::
>>> module = nn.Dense(1)
>>> init_jit = jax.jit(module.init)
>>> variables = init_jit(jax.random.key(0), x)
``init`` is a light wrapper over ``apply``, so other ``apply`` arguments
like ``method``, ``mutable``, and ``capture_intermediates`` are also
available.
Args:
rngs: The rngs for the variable collections.
*args: Named arguments passed to the init function.
method: An optional method. If provided, applies this method. If not
provided, applies the ``__call__`` method. A string can also be provided
to specify a method by name.
mutable: Can be bool, str, or list. Specifies which collections should be
treated as mutable: ``bool``: all/no collections are mutable. ``str``:
The name of a single mutable collection. ``list``: A list of names of
mutable collections. By default all collections except "intermediates"
are mutable.
capture_intermediates: If ``True``, captures intermediate return values of
all Modules inside the "intermediates" collection. By default only the
return values of all ``__call__`` methods are stored. A function can be
passed to change the filter behavior. The filter function takes the
Module instance and method name and returns a bool indicating whether
the output of that method invocation should be stored.
**kwargs: Keyword arguments passed to the init function.
Returns:
The initialized variable dict.
"""
Module._module_checks(self)
_, v_out = self.init_with_output(
rngs,
*args,
method=method,
mutable=mutable,
capture_intermediates=capture_intermediates,
**kwargs,
)
return v_out
def lazy_init(
self,
rngs: Union[PRNGKey, RNGSequences],
*args,
method: Optional[Callable[..., Any]] = None,
mutable: CollectionFilter = DenyList('intermediates'),
**kwargs,
) -> FrozenVariableDict:
"""Initializes a module without computing on an actual input.
lazy_init will initialize the variables without doing unnecessary compute.
The input data should be passed as a ``jax.ShapeDtypeStruct`` which
specifies the shape and dtype of the input but no concrete data.
Example::
>>> model = nn.Dense(features=256)
>>> variables = model.lazy_init(
... jax.random.key(0), jax.ShapeDtypeStruct((1, 128), jnp.float32))
The args and kwargs args passed to ``lazy_init`` can be a mix of
concrete (jax arrays, scalars, bools) and abstract (ShapeDtypeStruct)
values. Concrete values are only necessary for arguments that affect
the initialization of variables. For example, the model might expect
a keyword arg that enables/disables a subpart of the model.
In this case, an explicit value (True/Flase) should be passed otherwise
``lazy_init`` cannot infer which variables should be initialized.
Args:
rngs: The rngs for the variable collections.
*args: arguments passed to the init function.
method: An optional method. If provided, applies this method. If not
provided, applies the ``__call__`` method.
mutable: Can be bool, str, or list. Specifies which collections should be
treated as mutable: ``bool``: all/no collections are mutable. ``str``:
The name of a single mutable collection. ``list``: A list of names of
mutable collections. By default all collections except "intermediates"
are mutable.
**kwargs: Keyword arguments passed to the init function.
Returns:
The initialized variable dict.
"""
Module._module_checks(self)
def lazy_wrapper(rngs, *args, **kwargs):
return self.init(rngs, *args, method=method, mutable=mutable, **kwargs)
return partial_eval.lazy_init(lazy_wrapper)(rngs, *args, **kwargs)
  def variables(self) -> VariableDict:
    """Returns the variables in this module.

    Raises:
      ValueError: if the module is unbound (no scope attached).
    """
    # NOTE(review): upstream flax defines this as a @property (and `unbind`
    # above reads `self.variables` without calling it) — the decorator is not
    # visible in this extract; confirm against the original file.
    if self.scope is None:
      raise ValueError("Can't access variables on unbound modules")
    return self.scope.variables()
def get_variable(self, col: str, name: str, default: Optional[T] = None) -> T:
"""Retrieves the value of a Variable.
Args:
col: the variable collection.
name: the name of the variable.
default: the default value to return if the variable does not exist in
this scope.
Returns:
The value of the input variable, of the default value if the variable
doesn't exist in this scope.
"""
if self.scope is None:
raise ValueError("Can't access variables on unbound modules")
return self.scope.get_variable(col, name, default)
def put_variable(self, col: str, name: str, value: Any):
"""Updates the value of the given variable if it is mutable, or an error otherwise.
Args:
col: the variable collection.
name: the name of the variable.
value: the new value of the variable.
"""
if self.scope is None:
raise ValueError("Can't access variables on unbound modules")
self.scope.put_variable(col, name, value)
  # NOTE(review): the two stub signatures below look like typing @overload
  # declarations for the `sow` implementation that follows — the decorators
  # are not visible in this extract; confirm against the original file.
  def sow(self, col: str, name: str, value: Any) -> bool:
    ...
  def sow(
    self,
    col: str,
    name: str,
    value: T,
    reduce_fn: Callable[[K, T], K] = tuple_reduce,
    init_fn: Callable[[], K] = tuple_init,  # type: ignore
  ) -> bool:
    ...
def sow(
self,
col: str,
name: str,
value: T,
reduce_fn: Callable[[K, T], K] = tuple_reduce,
init_fn: Callable[[], K] = tuple_init, # type: ignore
) -> bool:
"""Stores a value in a collection.
Collections can be used to collect intermediate values without
the overhead of explicitly passing a container through each Module call.
If the target collection is not mutable ``sow`` behaves like a no-op
and returns ``False``.
Example::
>>> import jax
>>> import jax.numpy as jnp
>>> import flax.linen as nn
>>> class Foo(nn.Module):
... @nn.compact
... def __call__(self, x):
... h = nn.Dense(4)(x)
... self.sow('intermediates', 'h', h)
... return nn.Dense(2)(h)
>>> x = jnp.ones((16, 9))
>>> model = Foo()
>>> variables = model.init(jax.random.key(0), x)
>>> y, state = model.apply(variables, x, mutable=['intermediates'])
>>> print(state['intermediates'])
{'h': (Array([[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ],
[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ]], dtype=float32),)}
By default the values are stored in a tuple and each stored value
is appended at the end. This way all intermediates can be tracked when
the same module is called multiple times. Alternatively, a custom
init/reduce function can be passed::
>>> class Foo2(nn.Module):
... @nn.compact
... def __call__(self, x):
... init_fn = lambda: 0
... reduce_fn = lambda a, b: a + b
... self.sow('intermediates', 'h', x,
... init_fn=init_fn, reduce_fn=reduce_fn)
... self.sow('intermediates', 'h', x * 2,
... init_fn=init_fn, reduce_fn=reduce_fn)
... return x
>>> x = jnp.ones((1, 1))
>>> model = Foo2()
>>> variables = model.init(jax.random.key(0), x)
>>> y, state = model.apply(
... variables, x, mutable=['intermediates'])
>>> print(state['intermediates'])
{'h': Array([[3.]], dtype=float32)}
Args:
col: The name of the variable collection.
name: The name of the variable.
value: The value of the variable.
reduce_fn: The function used to combine the existing value with the new
value. The default is to append the value to a tuple.
init_fn: For the first value stored, ``reduce_fn`` will be passed the result
of ``init_fn`` together with the value to be stored. The default is an
empty tuple.
Returns:
``True`` if the value has been stored successfully, ``False`` otherwise.
"""
if self.scope is None:
raise ValueError("Can't store variables on unbound modules")
if not self.scope.is_mutable_collection(col):
return False
if self.scope.has_variable(col, name):
xs = self.scope.get_variable(col, name)
else:
self.scope.reserve(name, col)
self._state.children[name] = col
xs = init_fn()
xs = reduce_fn(xs, value)
self.scope.put_variable(col, name, xs)
return True
def perturb(
self, name: str, value: T, collection: str = 'perturbations'
) -> T:
"""Add an zero-value variable ('perturbation') to the intermediate value.
The gradient of ``value`` would be the same as the gradient of this
perturbation variable. Therefore, if you define your loss function with
both params and perturbations as standalone arguments, you can get the
intermediate gradients of ``value`` by running ``jax.grad`` on the perturbation
argument.
.. note::
This is an experimental API and may be tweaked later for better
performance and usability.
At its current stage, it creates extra dummy variables that occupies extra
memory space. Use it only to debug gradients in training.
Example::
>>> class Foo(nn.Module):
... @nn.compact
... def __call__(self, x):
... x = nn.Dense(3)(x)
... x = self.perturb('dense3', x)
... return nn.Dense(2)(x)
>>> def loss(variables, inputs, targets):
... preds = model.apply(variables, inputs)
... return jnp.square(preds - targets).mean()
>>> x = jnp.ones((2, 9))
>>> y = jnp.ones((2, 2))
>>> model = Foo()
>>> variables = model.init(jax.random.key(0), x)
>>> intm_grads = jax.grad(loss, argnums=0)(variables, x, y)
>>> print(intm_grads['perturbations']['dense3'])
[[-1.456924 -0.44332537 0.02422847]
[-1.456924 -0.44332537 0.02422847]]
If perturbations are not passed to ``apply``, ``perturb`` behaves like a no-op
so you can easily disable the behavior when not needed::
>>> model.apply(variables, x) # works as expected
Array([[-1.0980128 , -0.67961735],
[-1.0980128 , -0.67961735]], dtype=float32)
>>> model.apply({'params': variables['params']}, x) # behaves like a no-op
Array([[-1.0980128 , -0.67961735],
[-1.0980128 , -0.67961735]], dtype=float32)
>>> intm_grads = jax.grad(loss, argnums=0)({'params': variables['params']}, x, y)
>>> 'perturbations' not in intm_grads
True
"""
if self.scope is None:
raise ValueError("Can't store variables on unbound modules")
if self.is_mutable_collection(collection):
if not self.scope.has_variable(collection, name):
self.scope.reserve(name, collection)
self._state.children[name] = collection
self.scope.put_variable(collection, name, jnp.zeros_like(value)) # type: ignore
if collection in self.scope.root._variables:
if self.scope.has_variable(collection, name):
value += self.scope.get_variable(collection, name) # type: ignore
else:
raise ValueError(f"Perturbation collection {collection} present, but "
f"missing perturbation variable {name}")
return value
def tabulate(
self,
rngs: Union[PRNGKey, RNGSequences],
*args,
depth: Optional[int] = None,
show_repeated: bool = False,
mutable: CollectionFilter = DenyList('intermediates'),
console_kwargs: Optional[Mapping[str, Any]] = None,
table_kwargs: Mapping[str, Any] = MappingProxyType({}),
column_kwargs: Mapping[str, Any] = MappingProxyType({}),
compute_flops: bool = False,
compute_vjp_flops: bool = False,
**kwargs,
) -> str:
"""Creates a summary of the Module represented as a table.
This method has the same signature and internally calls ``Module.init``,
but instead of returning the variables, it returns the string summarizing
the Module in a table. ``tabulate`` uses ``jax.eval_shape`` to run the forward
computation without consuming any FLOPs or allocating memory.
Additional arguments can be passed into the ``console_kwargs`` argument, for
example, ``{'width': 120}``. For a full list of ``console_kwargs`` arguments,
see:
https://rich.readthedocs.io/en/stable/reference/console.html#rich.console.Console
Example::
>>> import flax.linen as nn
>>> import jax, jax.numpy as jnp
>>> class Foo(nn.Module):
... @nn.compact
... def __call__(self, x):
... h = nn.Dense(4)(x)
... return nn.Dense(2)(h)
>>> x = jnp.ones((16, 9))
>>> # print(Foo().tabulate(
>>> # jax.random.key(0), x, compute_flops=True, compute_vjp_flops=True))
This gives the following output::
Foo Summary
┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ path ┃ module ┃ inputs ┃ outputs ┃ flops ┃ vjp_flops ┃ params ┃
┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ │ Foo │ float32[16,9] │ float32[16,2] │ 1504 │ 4460 │ │
├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤
│ Dense_0 │ Dense │ float32[16,9] │ float32[16,4] │ 1216 │ 3620 │ bias: │
│ │ │ │ │ │ │ float32[4] │
│ │ │ │ │ │ │ kernel: │
│ │ │ │ │ │ │ float32[9,4] │
│ │ │ │ │ │ │ │
│ │ │ │ │ │ │ 40 (160 B) │
├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤
│ Dense_1 │ Dense │ float32[16,4] │ float32[16,2] │ 288 │ 840 │ bias: │
│ │ │ │ │ │ │ float32[2] │
│ │ │ │ │ │ │ kernel: │
│ │ │ │ │ │ │ float32[4,2] │
│ │ │ │ │ │ │ │
│ │ │ │ │ │ │ 10 (40 B) │
├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤
│ │ │ │ │ │ Total │ 50 (200 B) │
└─────────┴────────┴───────────────┴───────────────┴───────┴───────────┴─────────────────┘
Total Parameters: 50 (200 B)
**Note**: rows order in the table does not represent execution order,
instead it aligns with the order of keys in ``variables`` which are sorted
alphabetically.
**Note**: ``vjp_flops`` returns ``0`` if the module is not differentiable.
Args:
rngs: The rngs for the variable collections as passed to ``Module.init``.
*args: The arguments to the forward computation.
depth: controls how many submodule deep the summary can go. By default,
its ``None`` which means no limit. If a submodule is not shown because of
the depth limit, its parameter count and bytes will be added to the row
of its first shown ancestor such that the sum of all rows always adds
up to the total number of parameters of the Module.
show_repeated: If ``True``, repeated calls to the same module will be shown
in the table, otherwise only the first call will be shown. Default is
``False``.
mutable: Can be bool, str, or list. Specifies which collections should be
treated as mutable: ``bool``: all/no collections are mutable. ``str``:
The name of a single mutable collection. ``list``: A list of names of
mutable collections. By default, all collections except 'intermediates'
are mutable.
console_kwargs: An optional dictionary with additional keyword arguments
that are passed to ``rich.console.Console`` when rendering the table.
Default arguments are ``{'force_terminal': True, 'force_jupyter':
False}``.
table_kwargs: An optional dictionary with additional keyword arguments
that are passed to ``rich.table.Table`` constructor.
column_kwargs: An optional dictionary with additional keyword arguments
that are passed to ``rich.table.Table.add_column`` when adding columns to
the table.
compute_flops: whether to include a ``flops`` column in the table listing
the estimated FLOPs cost of each module forward pass. Does incur actual
on-device computation / compilation / memory allocation, but still
introduces overhead for large modules (e.g. extra 20 seconds for a
Stable Diffusion's UNet, whereas otherwise tabulation would finish in 5
seconds).
compute_vjp_flops: whether to include a ``vjp_flops`` column in the table
listing the estimated FLOPs cost of each module backward pass.
Introduces a compute overhead of about 2-3X of ``compute_flops``.
**kwargs: keyword arguments to pass to the forward computation.
Returns:
A string summarizing the Module.
"""
from flax.linen import summary
tabulate_fn = summary.tabulate(
self,
rngs,
depth=depth,
show_repeated=show_repeated,
mutable=mutable,
console_kwargs=console_kwargs,
table_kwargs=table_kwargs,
column_kwargs=column_kwargs,
compute_flops=compute_flops,
compute_vjp_flops=compute_vjp_flops,
)
return tabulate_fn(*args, **kwargs)
  def module_paths(
      self,
      rngs: Union[PRNGKey, RNGSequences],
      *args,
      show_repeated: bool = False,
      mutable: CollectionFilter = DenyList('intermediates'),
      **kwargs,
  ) -> dict[str, 'Module']:
    """Returns a dictionary mapping module paths to module instances.

    This method has the same signature and internally calls ``Module.init``,
    but instead of returning the variables, it returns a dictionary mapping
    module paths to unbounded copies of module instances that were used
    at runtime. ``module_paths`` uses ``jax.eval_shape`` to run the forward
    computation without consuming any FLOPs or allocating memory.

    Example::

      >>> import flax.linen as nn
      >>> import jax, jax.numpy as jnp

      >>> class Foo(nn.Module):
      ...   @nn.compact
      ...   def __call__(self, x):
      ...     h = nn.Dense(4)(x)
      ...     return nn.Dense(2)(h)

      >>> x = jnp.ones((16, 9))
      >>> modules = Foo().module_paths(jax.random.key(0), x)
      >>> print({
      ...     p: type(m).__name__ for p, m in modules.items()
      ... })
      {'': 'Foo', 'Dense_0': 'Dense', 'Dense_1': 'Dense'}

    Args:
      rngs: The rngs for the variable collections as passed to ``Module.init``.
      *args: The arguments to the forward computation.
      show_repeated: If ``True``, repeated calls to the same module will be
        shown in the table, otherwise only the first call will be shown.
        Default is ``False``.
      mutable: Can be bool, str, or list. Specifies which collections should
        be treated as mutable: ``bool``: all/no collections are mutable.
        ``str``: The name of a single mutable collection. ``list``: A list of
        names of mutable collections. By default, all collections except
        'intermediates' are mutable.
      **kwargs: keyword arguments to pass to the forward computation.

    Returns:
      A dictionary mapping module paths to module instances.
    """
    # NOTE(review): imported locally, presumably to avoid a circular import
    # with flax.linen.summary -- confirm before hoisting to module scope.
    from flax.linen import summary

    # Build one table row per module call (see flax.linen.summary); each row
    # carries the module's path tuple and an unbound copy of the module.
    table = summary._get_module_table(
        module=self,
        depth=None,
        show_repeated=show_repeated,
        compute_flops=False,
        compute_vjp_flops=False,
    )(rngs, *args, **kwargs, mutable=mutable)
    # Join the path tuple into a '/'-separated string key; the root module
    # maps to the empty string ''.
    return {'/'.join(row.path): row.module_copy for row in table}
The provided code snippet includes necessary dependencies for implementing the `compact_name_scope` function. Write a Python function `def compact_name_scope(fun: _CallableT) -> _CallableT` to solve the following problem:
Creates compact submodules from a method. This is a decorator that allows you to define compact submodules from a method. It's intention is to make it easier to port code Haiku code to Flax by providing the same functionality. Example:: >>> import flax.linen as nn >>> import jax >>> import jax.numpy as jnp >>> from flax.core import pretty_repr ... >>> class Foo(nn.Module): ... @nn.compact_name_scope ... def up(self, x): ... return nn.Dense(3)(x) ... ... @nn.compact_name_scope ... def down(self, x): ... return nn.Dense(3)(x) ... ... def __call__(self, x): ... return self.up(x) + self.down(x) ... >>> module = Foo() >>> variables = module.init(jax.random.PRNGKey(0), jnp.ones((1, 2))) >>> params = variables['params'] >>> print(pretty_repr(jax.tree_util.tree_map(jnp.shape, params))) { down: { Dense_0: { bias: (3,), kernel: (2, 3), }, }, up: { Dense_0: { bias: (3,), kernel: (2, 3), }, }, } You can also use ``compact_name_scope`` inside ``@compact`` methods or even other ``compact_name_scope`` methods. Methods that are decorated with ``compact_name_scope`` can also be called directly from ``init`` or ``apply`` via the ``method`` argument:: >>> y_down = module.apply({'params': params}, jnp.ones((1, 2)), method='down') >>> y_down.shape (1, 3) Args: fun: The Module method to mark as compact_name_scope. Returns: The given function ``fun`` marked as compact_name_scope.
Here is the function:
def compact_name_scope(fun: _CallableT) -> _CallableT:
  """Creates compact submodules from a method.

  This is a decorator that allows you to define compact submodules from a
  method. Its intention is to make it easier to port Haiku code to Flax
  by providing the same functionality.

  Example::

    >>> import flax.linen as nn
    >>> import jax
    >>> import jax.numpy as jnp
    >>> from flax.core import pretty_repr
    ...
    >>> class Foo(nn.Module):
    ...   @nn.compact_name_scope
    ...   def up(self, x):
    ...     return nn.Dense(3)(x)
    ...
    ...   @nn.compact_name_scope
    ...   def down(self, x):
    ...     return nn.Dense(3)(x)
    ...
    ...   def __call__(self, x):
    ...     return self.up(x) + self.down(x)
    ...
    >>> module = Foo()
    >>> variables = module.init(jax.random.PRNGKey(0), jnp.ones((1, 2)))
    >>> params = variables['params']
    >>> print(pretty_repr(jax.tree_util.tree_map(jnp.shape, params)))
    {
      down: {
        Dense_0: {
          bias: (3,),
          kernel: (2, 3),
        },
      },
      up: {
        Dense_0: {
          bias: (3,),
          kernel: (2, 3),
        },
      },
    }

  You can also use ``compact_name_scope`` inside ``@compact`` methods or even
  other ``compact_name_scope`` methods. Methods that are decorated with
  ``compact_name_scope`` can also be called directly from ``init`` or
  ``apply`` via the ``method`` argument::

    >>> y_down = module.apply({'params': params}, jnp.ones((1, 2)), method='down')
    >>> y_down.shape
    (1, 3)

  Args:
    fun: The Module method to mark as compact_name_scope.

  Returns:
    The given function ``fun`` marked as compact_name_scope.
  """

  @functools.wraps(fun)
  def wrapper(self, *args, **kwargs):
    method_name = fun.__name__
    # `_compact_name_scope_modules` is only populated while the module is
    # bound (during init/apply), so its absence means we are outside of them.
    if not hasattr(self, '_compact_name_scope_modules'):
      raise ValueError(
          f'Cannot call compact_name_scope method {method_name!r} on a Module that has not been '
          f'setup. This is likely because you are calling {method_name!r} '
          'from outside of init or apply.'
      )
    return self._compact_name_scope_modules[method_name](*args, **kwargs)

  # Markers consumed elsewhere in this file: `compact_name_scope` is detected
  # by `_find_compact_name_scope_methods`, and `nowrap` keeps
  # `_wrap_module_attributes` from wrapping this method a second time.
  wrapper.compact_name_scope = True  # type: ignore[attr-defined]
  wrapper.inner_fun = fun  # type: ignore[attr-defined]
  wrapper.nowrap = True  # type: ignore[attr-defined]
  return wrapper  # type: ignore[return-value]
22,658 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
The provided code snippet includes necessary dependencies for implementing the `_get_local_method_names` function. Write a Python function `def _get_local_method_names( cls: Any, exclude: Iterable[str] = () ) -> Tuple[str, ...]` to solve the following problem:
Gets method names of a class, excluding class and static methods. Args: cls: The class to get method names for. exclude: Names to exclude from output. Returns: A list of method names.
Here is the function:
def _get_local_method_names(
cls: Any, exclude: Iterable[str] = ()
) -> Tuple[str, ...]:
"""Gets method names of a class, excluding class and static methods.
Args:
cls: The class to get method names for.
exclude: Names to exclude from output.
Returns:
A list of method names.
"""
true_methods = set()
for m in cls.__dict__:
if callable(cls.__dict__[m]) and not inspect.isclass(
cls.__dict__[m]
): # pytype: disable=not-supported-yet
mtype = type(cls.__dict__[m])
if mtype != staticmethod and mtype != classmethod:
true_methods.add(m)
return tuple(true_methods.difference(set(exclude))) | Gets method names of a class, excluding class and static methods. Args: cls: The class to get method names for. exclude: Names to exclude from output. Returns: A list of method names. |
22,659 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
The provided code snippet includes necessary dependencies for implementing the `_get_local_descriptor_names` function. Write a Python function `def _get_local_descriptor_names( cls: Any, exclude: Iterable[str] = () ) -> Tuple[str, ...]` to solve the following problem:
Gets descriptor names of a class. Args: cls: The class to get property names for. exclude: Names to exclude from output. Returns: A list of property names.
Here is the function:
def _get_local_descriptor_names(
cls: Any, exclude: Iterable[str] = ()
) -> Tuple[str, ...]:
"""Gets descriptor names of a class.
Args:
cls: The class to get property names for.
exclude: Names to exclude from output.
Returns:
A list of property names.
"""
true_properties = set()
for m, attr in cls.__dict__.items():
if not callable(attr) and (
hasattr(attr, '__get__')
or hasattr(attr, '__set__')
or hasattr(attr, '__delete__')
):
mtype = type(attr)
if mtype != staticmethod and mtype != classmethod:
true_properties.add(m)
return tuple(true_properties.difference(set(exclude))) | Gets descriptor names of a class. Args: cls: The class to get property names for. exclude: Names to exclude from output. Returns: A list of property names. |
22,660 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
class DescriptorWrapper:
  """Marker base class for wrapped descriptors.

  ``wrap_descriptor_once`` uses an ``isinstance`` check against this class to
  avoid double-wrapping a descriptor.
  """

  pass
def create_descriptor_wrapper(descriptor: Descriptor):
  """Wraps ``descriptor`` in a ``DescriptorWrapper`` subclass instance.

  The wrapper class defines only the descriptor-protocol methods
  (``__get__``, ``__set__``, ``__delete__``, ``__set_name__``) that the
  wrapped descriptor itself provides, so protocol dispatch behaves the same
  as for the original. ``__get__`` re-raises internal ``AttributeError``s as
  ``errors.DescriptorAttributeError`` to give a clearer message.

  Args:
    descriptor: The descriptor object to wrap.

  Returns:
    A ``_DescriptorWrapper`` instance wrapping ``descriptor``.
  """

  class _DescriptorWrapper(DescriptorWrapper):
    """A descriptor that can wrap any descriptor."""

    # Mirror abstractness so ABC machinery still sees the wrapped attribute
    # as abstract.
    if hasattr(descriptor, '__isabstractmethod__'):
      __isabstractmethod__ = descriptor.__isabstractmethod__

    def __init__(self, wrapped: Descriptor):
      self.wrapped = wrapped

    # Conditionally define descriptor methods: only forward the protocol
    # methods the wrapped descriptor actually implements.
    if hasattr(descriptor, '__get__'):

      def __get__(self, *args, **kwargs):
        # Catch internal AttributeError and re-raise it as a more
        # informative and correct error message.
        try:
          return self.wrapped.__get__(*args, **kwargs)
        except AttributeError as e:
          raise errors.DescriptorAttributeError() from e

    if hasattr(descriptor, '__set__'):

      def __set__(self, *args, **kwargs):
        return self.wrapped.__set__(*args, **kwargs)

    if hasattr(descriptor, '__delete__'):

      def __delete__(self, *args, **kwargs):
        return self.wrapped.__delete__(*args, **kwargs)

    if hasattr(descriptor, '__set_name__'):

      def __set_name__(self, *args, **kwargs):
        self.wrapped.__set_name__(*args, **kwargs)

    def __getattr__(self, name):
      # If 'wrapped' itself is not set yet (e.g. before __init__ has run),
      # raise AttributeError instead of recursing into this __getattr__.
      if 'wrapped' not in vars(self):
        raise AttributeError()
      return getattr(self.wrapped, name)

  return _DescriptorWrapper(descriptor)
The provided code snippet includes necessary dependencies for implementing the `wrap_descriptor_once` function. Write a Python function `def wrap_descriptor_once(descriptor) -> 'DescriptorWrapper'` to solve the following problem:
Wraps a descriptor to give better error messages. Args: descriptor: User-defined Module attribute descriptor. Returns: Wrapped descriptor.
Here is the function:
def wrap_descriptor_once(descriptor) -> 'DescriptorWrapper':
  """Wraps a descriptor to give better error messages.

  Args:
    descriptor: User-defined Module attribute descriptor.

  Returns:
    Wrapped descriptor.
  """
  # Idempotent: an already-wrapped descriptor is returned unchanged.
  already_wrapped = isinstance(descriptor, DescriptorWrapper)
  return descriptor if already_wrapped else create_descriptor_wrapper(descriptor)
22,661 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
The provided code snippet includes necessary dependencies for implementing the `_wrap_hash` function. Write a Python function `def _wrap_hash(hash_fn: Callable[..., Any]) -> Callable[..., Any]` to solve the following problem:
Wraps a hash function with some check for Flax Modules.
Here is the function:
def _wrap_hash(hash_fn: Callable[..., Any]) -> Callable[..., Any]:
"""Wraps a hash function with some check for Flax Modules."""
@functools.wraps(hash_fn)
def wrapped(self):
if self.scope is not None:
raise TypeError("Can't call __hash__ on modules that hold variables.")
try:
hash_value = hash_fn(self)
except TypeError as exc:
raise TypeError(
'Failed to hash Flax Module. '
'The module probably contains unhashable attributes. '
f'Module={self}'
) from exc
return hash_value
return wrapped | Wraps a hash function with some check for Flax Modules. |
22,662 | import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
def _map_over_modules_in_tree(fn, tree_or_leaf):
  """Helper for mapping function over submodules.

  ``fn`` is called as ``fn(name_suffix, leaf)``: with an empty suffix ``''``
  when ``tree_or_leaf`` converts to a leaf (or empty dict), and otherwise
  with ``'_' + '_'.join(path)`` for each flattened entry of the state dict.
  The mapped results are restored into the original pytree structure.
  """
  # Convert to a plain state dict so arbitrary registered pytrees can be
  # traversed uniformly.
  dict_or_leaf = serialization.to_state_dict(tree_or_leaf)
  if not isinstance(dict_or_leaf, dict) or not dict_or_leaf:
    # Leaf (or empty) case: apply fn directly with an empty name suffix.
    return fn('', tree_or_leaf)
  else:
    flat_dict = traverse_util.flatten_dict(dict_or_leaf, keep_empty_nodes=True)
    # Keys are path tuples; join them into a '_'-separated name suffix.
    # _sorted_items (defined elsewhere in this file) presumably yields items
    # in a deterministic order so generated names are stable.
    mapped_flat_dict = {
        k: fn('_' + '_'.join(k), v) for k, v in _sorted_items(flat_dict)
    }
    return serialization.from_state_dict(
        tree_or_leaf, traverse_util.unflatten_dict(mapped_flat_dict)
    )
def _freeze_attr(val: Any) -> Any:
  """Recursively wraps the given attribute ``val`` into an immutable form.

  Dicts (and ``FrozenDict``s) become ``FrozenDict``s and lists become tuples.
  Tuples are rebuilt element-wise; namedtuples and JAX ``PartitionSpec``
  instances keep their own type so they are not downgraded to plain tuples.
  Any other value is returned unchanged.
  """
  if isinstance(val, (dict, FrozenDict)):
    return FrozenDict(
        {key: _freeze_attr(subval) for key, subval in val.items()}
    )
  if isinstance(val, tuple):
    frozen = [_freeze_attr(subval) for subval in val]
    # Reconstruct namedtuples and special JAX tuple structures with their
    # original type instead of downgrading them to plain tuples.
    if hasattr(val, '_fields') or type(val).__name__ == 'PartitionSpec':
      return type(val)(*frozen)
    return tuple(frozen)
  if isinstance(val, list):
    return tuple(_freeze_attr(subval) for subval in val)
  return val
class Module(ModuleBase):
"""Base class for all neural network modules.
Layers and models should subclass this class.
All Flax Modules are Python 3.7
`dataclasses <https://docs.python.org/3/library/dataclasses.html>`_. Since
dataclasses take over ``__init__``, you should instead override :meth:`setup`,
which is automatically called to initialize the module.
Modules can contain submodules, and in this way can be nested in a tree
structure. Submodels can be assigned as regular attributes inside the
:meth:`setup` method.
You can define arbitrary "forward pass" methods on your Module subclass.
While no methods are special-cased, ``__call__`` is a popular choice because
it allows you to use module instances as if they are functions::
>>> from flax import linen as nn
>>> from typing import Tuple
>>> class Module(nn.Module):
... features: Tuple[int, ...] = (16, 4)
... def setup(self):
... self.dense1 = nn.Dense(self.features[0])
... self.dense2 = nn.Dense(self.features[1])
... def __call__(self, x):
... return self.dense2(nn.relu(self.dense1(x)))
Optionally, for more concise module implementations where submodules
definitions are co-located with their usage, you can use the
:meth:`compact` wrapper.
"""
if typing.TYPE_CHECKING:
name: Optional[str] = module_field(kw_only=True, default=None)
parent: Union['Module', _Sentinel, None] = module_field(
kw_only=True, default=None
)
def __init__(self, *args, **kwargs):
# this stub makes sure pytype accepts constructor arguments.
pass
def __call__(self, *args, **kwargs) -> Any:
# this stub allows pytype to accept Modules as Callables.
pass
def __init_subclass__(cls, kw_only: bool = False, **kwargs: Any) -> None:
"""Automatically initializes all subclasses as custom dataclasses."""
super().__init_subclass__(**kwargs)
# All Flax Modules are dataclasses. We force this convention since
# it encourages the stateless behavior needed to clone module instances for
# functional transformation. Instead of using a python metaclass, we
# automatically transform Modules into dataclasses at subclass creation
# time, and we set the last dataclass arguments to `parent` and `name`.
cls._customized_dataclass_transform(kw_only)
# We wrap user-defined methods including setup and __call__ to enforce
# a number of different checks and to provide clear error messages.
cls._verify_single_or_no_compact()
cls._find_compact_name_scope_methods()
cls._wrap_module_attributes()
# Set empty class defaults.
cls._state = _uninitialized_module_internal_state # type: ignore[attr-defined]
cls.scope: Optional[Scope] = None # type: ignore
# Handles weak referencing of parent Modules to prevent reference cycles.
cls._parent_ref = None # type: ignore[attr-defined]
cls.parent = ParentDescriptor() # type: ignore[assignment]
def _customized_dataclass_transform(cls, kw_only: bool):
"""Transforms `cls` into a dataclass, with custom additional behavior.
1. Inject `parent` and `name` fields. (If they are already present,
then check that they have the expected types.)
2. Set compare, hash, and repr to False for non-init fields.
3. Generate a hash function (if not provided by cls).
"""
# Check reserved attributes have expected type annotations.
annotations = dict(cls.__dict__.get('__annotations__', {}))
if annotations.get('parent', _ParentType) != _ParentType:
raise errors.ReservedModuleAttributeError(annotations)
if annotations.get('name', str) not in ('str', str, Optional[str]):
raise errors.ReservedModuleAttributeError(annotations)
# any non-init field will only be set in setup
# During __hash__ and __eq__ the field is not set yet
# so it should not be used in compare, hash or repr.
for field in annotations:
field_meta = getattr(cls, field, None)
if isinstance(field_meta, dataclasses.Field) and not field_meta.init:
field_meta.compare = False
field_meta.hash = False
field_meta.repr = False
extra_fields = [
(
'parent',
_ParentType,
kw_only_dataclasses.field(
repr=False, default=_unspecified_parent, kw_only=True
),
),
(
'name',
Optional[str],
kw_only_dataclasses.field(default=None, kw_only=True),
),
]
if kw_only:
if tuple(sys.version_info)[:3] >= (3, 10, 0):
for (
name,
annotation, # pytype: disable=invalid-annotation
default,
) in extra_fields:
setattr(cls, name, default)
cls.__annotations__[name] = annotation
dataclasses.dataclass( # type: ignore[call-overload]
unsafe_hash='__hash__' not in cls.__dict__,
repr=False,
kw_only=True,
)(cls)
else:
raise TypeError('`kw_only` is not available before Py 3.10.')
else:
# Now apply dataclass transform (which operates in-place).
# Do generate a hash function only if not provided by the class.
kw_only_dataclasses.dataclass(
cls,
unsafe_hash='__hash__' not in cls.__dict__,
repr=False,
extra_fields=extra_fields,
) # pytype: disable=wrong-keyword-args
cls.__hash__ = _wrap_hash(cls.__hash__) # type: ignore[method-assign]
def _verify_single_or_no_compact(cls):
"""Statically verifies that at most a single method is labelled compact."""
methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)]
n_compact_fns = len(
[
method_name
for method_name in methods
if hasattr(getattr(cls, method_name), 'compact')
]
)
if n_compact_fns > 1:
raise errors.MultipleMethodsCompactError()
def _find_compact_name_scope_methods(cls):
"""Finds all compact_name_scope methods in the class."""
methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)]
compact_name_scope_fns = tuple(
method_name
for method_name in methods
if hasattr(getattr(cls, method_name), 'compact_name_scope')
)
cls._compact_name_scope_methods = compact_name_scope_fns
def _wrap_module_attributes(cls):
"""Wraps user-defined non-inherited methods and descriptors with state
management functions.
"""
# wrap methods
method_exclusions = [f.name for f in dataclasses.fields(cls)] + [
'__eq__',
'__repr__',
'__init__',
'__hash__',
'__post_init__',
]
for key in _get_local_method_names(cls, exclude=method_exclusions):
method = getattr(cls, key)
if hasattr(method, 'nowrap'):
continue
setattr(cls, key, wrap_method_once(method))
# wrap descriptors
descriptor_exclusions = [f.name for f in dataclasses.fields(cls)] + [
'parent',
'__dict__',
]
for key in _get_local_descriptor_names(cls, descriptor_exclusions):
# don't use getattr here, since it will call the descriptor
descriptor = cls.__dict__[key]
if hasattr(descriptor, 'nowrap'):
continue
setattr(cls, key, wrap_descriptor_once(descriptor))
return cls
def _call_wrapped_method(self, fun, args, kwargs):
"""Calls a wrapped method.
This function is responsible for setting up the thread local state
correctly before calling the method and cleaning up afterwards.
This includes storing intermediates, setup of the compact scope,
and making sure setup is called before any other method.
Args:
fun: The wrapped method.
args: Named arguments passed to ``fun``.
kwargs: Keyword arguments passed to ``fun``.
Returns:
The results of calling ``fun``.
"""
is_compact_method = hasattr(fun, 'compact')
fun_name = _get_fn_name(fun)
is_setup_method = fun_name == 'setup'
add_call_info = not is_setup_method and len(_context.call_info_stack) > 0
# We lazily call setup() only when needed.
if is_setup_method:
if self.scope is None:
raise errors.CallSetupUnboundModuleError()
is_recurrent = self._state.in_setup
self._state.in_setup = True
else:
self._try_setup()
if is_compact_method:
if self.scope is None:
raise errors.CallCompactUnboundModuleError()
is_recurrent = self._state.in_compact_method
self._state.in_compact_method = True
_context.module_stack.append(self)
try:
# get call info
if add_call_info:
assert self.scope is not None
call_index = _context.call_info_stack[-1].get_call_index()
if _global_interceptor_stack:
run_fun = functools.partial(run_interceptors, fun)
else:
run_fun = fun
# call method
if _use_named_call:
with jax.named_scope(_derive_profiling_name(self, fun)):
y = run_fun(self, *args, **kwargs)
else:
y = run_fun(self, *args, **kwargs)
if _context.capture_stack:
filter_fn = _context.capture_stack[-1]
if filter_fn and filter_fn(self, fun_name):
self.sow('intermediates', fun_name, y)
if add_call_info:
_args, _kwargs, _y = flax.linen.summary._represent_tree(
(args, kwargs, y)
)
_context.call_info_stack[-1].calls.append(
_CallInfo(
call_index,
self.path,
self.clone(),
self.scope.rngs,
self.scope.mutable,
fun.__name__,
_args,
_kwargs,
_y,
)
)
return y
finally:
_context.module_stack.pop()
if is_compact_method:
object.__setattr__(self, 'scope', self.scope.rewound())
# setup or compact calls can be recurrent for example due to super calls
# resetting the state would cause is compact/setup method
# to be set to False prematurely.
if (is_compact_method or is_setup_method) and not is_recurrent:
self._state.reset()
def __setattr__(self, name: str, val: Any):
"""Sets an attribute on this Module.
We overload setattr solely to support pythonic naming via assignment of
submodules in the special :meth:`setup` function::
self.submodule_name = MyModule(...)
We also support lists and other general pytrees, e.g.::
self.submodules = [MyModule0(..), MyModule1(..), ...]
Args:
name: Attribute to set.
val: Value of the attribute.
"""
fields = self.__dataclass_fields__ # pytype: disable=attribute-error
is_dataclass_attr = name in fields and fields[name].init
if not self._state.in_setup:
if not self._state.is_initialized:
# Setting attributes before end of Module.__post_init__()
object.__setattr__(self, name, val)
return
else:
# We're past all initialization and setup logic:
# Raises a TypeError just like frozen python dataclasses.
raise errors.SetAttributeFrozenModuleError(
self.__class__.__name__, name, val
)
# We're inside the setup() method:
if is_dataclass_attr:
# These names are specified as dataclass fields. They should not be
# initialized within the setup() method, but can be modified freely
# before it.
raise errors.SetAttributeInModuleSetupError()
# Values (that may be variables or submodules) are being defined and
# attached in setup(), we run some extra logic in that case.
self._register_submodules(name, val)
def __getattr__(self, name: str) -> Any:
"""Call setup() before getting any setup-defined attributes."""
# We don't want to return anything for python copy / pickle methods.
if name in _UNDEFINED_COPY_PICKLE_METHODS:
raise AttributeError()
self._try_setup()
if name in self.__dict__:
return self.__dict__[name]
else:
msg = f'"{self.__class__.__name__}" object has no attribute "{name}".'
if self.scope is None:
msg += (
f' If "{name}" is defined in \'.setup()\', remember these fields '
"are only accessible from inside 'init' or 'apply'."
)
raise AttributeError(msg)
def __dir__(self) -> List[str]:
"""Call setup() before listing attributes."""
self._try_setup()
return object.__dir__(self) # type: ignore
def __post_init__(self) -> None:
# DO NOT REMOVE - Marker for internal logging.
# In dataclasses, __init__ is overridden to process dataclass arguments,
# and __post_init__ is called immediately afterwards. Here, depending on the
# type of `parent` passed to initialize the Module, we either defer
# initialization, attach this Module as a submodule of a parent, or bind
# this Module at the top-level to variables and rngs.
object.__setattr__(self, '_id', uuid())
object.__setattr__(self, '_state', _ModuleInternalState())
# Typically we set the parent based on the dynamic module context.
if self.parent is _unspecified_parent: # pytype: disable=attribute-error
object.__setattr__(self, 'parent', _context.module_stack[-1])
# Initialization is deferred for top level Modules or any other "orphan"
# Modules until attachment by __setattr__ i.e. MyModule(..., parent=None)
if self.parent is None:
return
# Register submodule on parent Module.
if isinstance(self.parent, Module):
# When initializing an unnamed Module inside setup()
# initialization is deferred until attachment by __setattr__
# i.e. self.mymodule = MyModule(...)
self.name: Optional[str]
if (
self.parent._state.in_setup and self.name is None
): # pytype: disable=attribute-error
return
if not self.parent._initialization_allowed:
raise errors.AssignSubModuleError(self.__class__.__name__)
# Autonaming of submodules.
if self.name is None: # pytype: disable=attribute-error
prefix = f'{self.__class__.__name__}'
cursor = self.parent._state.autoname_cursor.get(prefix, 0)
self.name = f'{prefix}_{cursor}'
self.parent._state.autoname_cursor[prefix] = cursor + 1
# Allow scope aliasing under transforms for submodules defined in setup.
reuse_scopes = (
self.parent._state.in_setup
and self.parent._state.setup_called == SetupState.TRANSFORMED
)
# Perform name-collision check.
if self.parent._name_taken(self.name, reuse_scopes=reuse_scopes):
parent_class = self.parent.__class__.__name__
raise errors.NameInUseError('submodule', self.name, parent_class)
# Finalize attachment to parent and scope initialization.
self.parent._state.children[self.name] = self
assert self.parent.scope is not None
object.__setattr__(
self, 'scope', self.parent.scope.push(self.name, reuse=reuse_scopes)
)
# Top-level invocation with a functional Scope.
elif isinstance(self.parent, Scope):
object.__setattr__(self, 'scope', self.parent)
else:
raise ValueError('parent must be None, Module or Scope')
# eagerly bind submodules if scope is available
if self.scope is not None:
for field in dataclasses.fields(self):
if field.name not in ('parent', 'name') and field.init:
self._register_submodules(field.name, getattr(self, field.name))
self._state.is_initialized = True
def __repr__(self) -> str:
return _module_repr(self)
def setup(self) -> None:
  """Initializes a Module lazily (similar to a lazy ``__init__``).

  ``setup`` is called once lazily on a module instance when a module
  is bound, immediately before any other methods like ``__call__`` are
  invoked, or before a ``setup``-defined attribute on ``self`` is accessed.

  This can happen in three cases:

  1. Immediately when invoking :meth:`apply`, :meth:`init` or
     :meth:`init_and_output`.

  2. Once the module is given a name by being assigned to an attribute of
     another module inside the other module's ``setup`` method
     (see :meth:`__setattr__`)::

       >>> class MyModule(nn.Module):
       ...   def setup(self):
       ...     submodule = nn.Conv(...)
       ...     # Accessing `submodule` attributes does not yet work here.
       ...     # The following line invokes `self.__setattr__`, which gives
       ...     # `submodule` the name "conv1".
       ...     self.conv1 = submodule
       ...     # Accessing `submodule` attributes or methods is now safe and
       ...     # either causes setup() to be called once.

  3. Once a module is constructed inside a method wrapped with
     :meth:`compact`, immediately before another method is called or
     ``setup`` defined attribute is accessed.
  """
  # Intentionally a no-op: subclasses override this to declare submodules
  # and variables; the base class has nothing to initialize.
  pass
def _register_submodules(self, name, val):
  """Registers a submodule.

  Walks the pytree ``val`` being assigned to attribute ``name`` and adopts
  any Module instances found inside it: orphan modules (``parent is None``)
  are cloned, attached to ``self``, given a name, and re-run through
  ``__post_init__``.

  Args:
    name: The attribute name ``val`` is being assigned to.
    val: An arbitrary pytree that may contain Module instances.
  """
  assert self.scope, 'Trying to register submodules on unbound scope.'
  root = self.scope.root
  # Per-root weak cache so a module shared by reference is cloned only once
  # and the sharing relationship survives adoption.
  cache = _caches.get(root, weakref.WeakValueDictionary())
  _caches[root] = cache
  queue = []
  preserve_adopted_names = config.flax_preserve_adopted_names
  # A class-level `preserve_adopted_names` attribute overrides the global flag.
  if hasattr(type(self), 'preserve_adopted_names'):
    preserve_adopted_names = type(self).preserve_adopted_names

  def adopt_attr_modules(cache, queue, suffix, subvalue):
    # Adopts a single pytree leaf if it is a Module; non-modules pass through.
    if isinstance(subvalue, Module):
      current_name = subvalue.name
      adopted_name = None
      if subvalue.parent is None:
        # Preserve sharing-by-reference relationships during adoption
        # via cache keyed on unique instance ids.
        key = subvalue._id
        # Module was passed from outside. It needs to be cloned.
        # Outside modules are named by attachment, not an outer name,
        # UNLESS we're using the new adopted-name policy, in which case an
        # existing name will be used, as is often supplied by config systems.
        if preserve_adopted_names:
          adopted_name = object.__getattribute__(subvalue, 'name')
        if key in cache:
          subvalue = cache[key]
        else:
          subvalue = subvalue.clone(name=None)
          cache[key] = subvalue
      if subvalue.name is None:
        # object.__setattr__ bypasses the frozen-dataclass write guard.
        object.__setattr__(subvalue, 'parent', self)
        if adopted_name is None:
          adopted_name = (
              f'{name}{suffix}'
              if not isinstance(subvalue, CompactNameScope)
              else current_name
          )
        object.__setattr__(subvalue, 'name', adopted_name)
        queue.append(subvalue)
    return subvalue

  val = _freeze_attr(
      _map_over_modules_in_tree(
          functools.partial(adopt_attr_modules, cache, queue), val
      )
  )
  object.__setattr__(self, name, val)
  # Newly attached modules only now have parent/name set, so finish their
  # initialization.
  for x in queue:
    x.__post_init__()
def _try_setup(self, shallow: bool = False) -> None:
  """Tries to setup module if scope is available and setup has not been called yet.

  Args:
    shallow: If True, only attribute submodules are registered; the user's
      ``setup()`` is not invoked and ``setup_called`` is left unchanged.
  """
  if (
      self.scope
      and not self._state.in_setup
      and self._state.setup_called != SetupState.DONE
  ):
    try:
      self._state.in_setup = True
      # A shallow setup will only register attribute submodules but it does
      # not call the user's setup. This avoids running before a
      # transformation.
      for field in dataclasses.fields(self):
        if field.name not in ('parent', 'name') and field.init:
          self._register_submodules(field.name, getattr(self, field.name))
      if not shallow:
        self.setup()
        # create NonTransparent Modules
        self._compact_name_scope_modules = {
            name: CompactNameScope(
                getattr(type(self), name).inner_fun, lambda: self, name=name
            )
            for name in self._compact_name_scope_methods
        }
      # We run static checks abstractly once for setup before any transforms
      # to detect name collisions and other python errors.
      elif self._state.setup_called == SetupState.NEW:
        self._validate_setup()
    finally:
      # Always clear the re-entrancy flag, even if setup() raised.
      self._state.in_setup = False
      if not shallow:
        self._state.setup_called = SetupState.DONE
def _validate_setup(self) -> None:
  """Evaluates ``setup`` abstractly, purely to surface static errors early.

  Running under ``jax.eval_shape`` means no real computation happens; the
  pass only triggers name-collision checks and other Python errors.
  """

  def _setup_probe(dummy):
    # An identity method, wrapped so that binding the clone triggers setup().
    identity = wrap_method_once(lambda m, v: v)
    with TestScope({}, rngs={}, mutable=True).temporary() as root_scope:
      return identity(self.clone(parent=root_scope), dummy)

  jax.eval_shape(_setup_probe, 0)
def _name_taken(
self,
name: str,
reuse_scopes: bool = False,
collection: Optional[str] = None,
) -> bool:
assert self.scope is not None
if reuse_scopes:
return False
return self.scope.name_reserved(name, collection)
def _initialization_allowed(self):
return (
not self._state.is_initialized # allow eager attachment in post-init
or self._state.in_setup
or self._state.in_compact_method
)
def path(self):
  """Returns this module's path within the module tree.

  Raises:
    ValueError: if the module is unbound (has no scope attached).
  """
  scope = self.scope
  if scope is None:
    raise ValueError("Can't access module paths on unbound modules.")
  return scope.path
def clone(
    self: M,
    *,
    parent: Optional[Union[Scope, 'Module', _Sentinel]] = None,
    _deep_clone: Union[bool, weakref.WeakValueDictionary] = False,
    _reset_names: bool = False,
    **updates,
) -> M:
  """Creates a clone of this Module, with optionally updated arguments.

  NOTE: end users are encouraged to use the ``copy`` method. ``clone`` is
  used primarily for internal routines, and ``copy`` offers simpler
  arguments and better defaults.

  Args:
    parent: The parent of the clone. The clone will have no parent if no
      explicit parent is specified.
    _deep_clone: A boolean or a weak value dictionary to control deep
      cloning of submodules. If True, submodules will be cloned recursively.
      If a weak value dictionary is passed, it will be used to cache cloned
      submodules. This flag is used by init/apply/bind to avoid scope
      leakage.
    _reset_names: If True, ``name=None`` is also passed to submodules when
      cloning. Resetting names in submodules is necessary when calling
      ``.unbind``.
    **updates: Attribute updates.

  Returns:
    A clone of the this Module with the updated attributes and parent.
  """
  # Collect all dataclass-init fields as constructor kwargs for the clone.
  attrs = {
      f.name: getattr(self, f.name) for f in dataclasses.fields(self) if f.init
  }
  attrs.update(parent=parent, **updates)

  # Here we implement deep cloning of submodules, this is necessary to avoid
  # scope leakage from external submodules into init/apply/bind while
  # preserving sharing-by-reference relationships between submodules.
  if _deep_clone != False:
    # We use a weak value dictionary to cache cloned submodules. When a
    # shared submodule is cloned, it is only cloned once, else it is fetched
    # from the cache.
    cache = (
        weakref.WeakValueDictionary()
        if isinstance(_deep_clone, bool)
        else _deep_clone
    )

    def clone_fn(m: Module) -> Module:
      # Clones a single submodule, consulting the shared cache first so
      # aliased submodules stay aliased after cloning.
      if hasattr(m, '_id'):
        key = m._id
        if key in cache:
          return cache[key]
        else:
          if _reset_names:
            clone = m.clone(
                _deep_clone=cache, _reset_names=_reset_names, name=None
            )
          else:
            clone = m.clone(_deep_clone=cache)
          cache[key] = clone
          return clone
      else:
        # If the module doesn't have an _id attribute it could be a mock
        # object so we return it as is.
        return m

    # _map_submodules will map over all submodules inside attrs
    # value here can be any pytree, non-module values are ignored
    for field_name, value in attrs.items():
      if field_name == 'parent':
        continue
      attrs[field_name] = _map_submodules(clone_fn, value)

  module = self.__class__(**attrs)
  return module
def copy(
    self: M,
    *,
    parent: Optional[Union[Scope, 'Module', _Sentinel]] = _unspecified_parent,
    name: Optional[str] = None,
    **updates,
) -> M:
  """Creates a copy of this Module, with optionally updated arguments.

  This is the user-facing counterpart of :meth:`clone`: it always performs a
  deep clone of submodules and keeps their names intact.

  Args:
    parent: The parent of the copy. By default the current module is taken
      as parent if not explicitly specified.
    name: A new name for the copied Module, by default a new automatic name
      will be given.
    **updates: Attribute updates.

  Returns:
    A copy of the this Module with the updated name, parent, and attributes.
  """
  return self.clone(
      parent=parent,
      name=name,
      _deep_clone=True,
      _reset_names=False,
      **updates,
  )
# NOTE(review): the four stub definitions below have `...` bodies and read as
# `typing.overload` stubs; the decorators are not visible in this chunk of the
# file — confirm `@typing.overload` precedes each stub in the full source,
# otherwise each redefinition silently shadows the previous one.
def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
) -> Variable[T]:
  ...

def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: Literal[True],
    **init_kwargs,
) -> Variable[T]:
  ...

def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: Literal[False],
    **init_kwargs,
) -> Variable[meta.AxisMetadata[T]]:
  ...

def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: bool = True,
    **init_kwargs,
) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
  ...

def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: bool = True,
    **init_kwargs,
) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
  """Declares and returns a variable in this Module.

  See :mod:`flax.core.variables` for more information. See also :meth:`param`
  for a shorthand way to define read-only variables in the "params"
  collection.

  Contrary to :meth:`param`, all arguments passing using ``init_fn`` should
  be passed on explicitly::

    >>> class Foo(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     x = nn.Dense(4)(x)
    ...     key = self.make_rng('stats')
    ...     mean = self.variable('stats', 'mean', nn.initializers.lecun_normal(), key, x.shape)
    ...     ...
    ...     return x * mean.value
    >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3)))
    >>> jax.tree_util.tree_map(jnp.shape, variables)
    {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}}, 'stats': {'mean': (2, 4)}}

  In the example above, the function ``lecun_normal`` expects two arguments:
  ``key`` and ``shape``, and both have to be passed on. The PRNG for
  ``stats`` has to be provided explicitly when calling :meth:`init` and
  :meth:`apply`.

  Args:
    col: The variable collection name.
    name: The variable name.
    init_fn: The function that will be called to compute the initial value
      of this variable. This function will only be called the first time
      this variable is used in this module. If None, the variable must
      already be initialized otherwise an error is raised.
    *init_args: The positional arguments to pass to init_fn.
    unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
      value, see ``flax.nn.meta.unbox`` (default: True).
    **init_kwargs: The key-word arguments to pass to init_fn

  Returns:
    A :class:`flax.core.variables.Variable` that can be read or set via
    ".value" attribute. Throws an error if the variable exists already.
  """
  if not self._initialization_allowed:
    raise ValueError(
        'Variables must be initialized in `setup()` or in a method '
        'wrapped in `@compact`'
    )
  if self._name_taken(name, collection=col):
    raise errors.NameInUseError('variable', name, self.__class__.__name__)
  assert self.scope is not None
  # The scope performs the actual creation/lookup of the variable.
  v = self.scope.variable(
      col, name, init_fn, *init_args, unbox=unbox, **init_kwargs
  )
  # Record which collection this child name belongs to.
  self._state.children[name] = col
  return v
# NOTE(review): as with `variable` above, the stub definitions below read as
# `typing.overload` stubs whose decorators are not visible in this chunk —
# confirm `@typing.overload` is present in the full source.
def param(
    self, name: str, init_fn: Callable[..., T], *init_args,
) -> T:
  ...

def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: Literal[True],
    **init_kwargs,
) -> T:
  ...

def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: Literal[False],
    **init_kwargs,
) -> meta.AxisMetadata[T]:
  ...

def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: bool,
    **init_kwargs,
) -> Union[T, meta.AxisMetadata[T]]:
  ...

def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: bool = True,
    **init_kwargs,
) -> Union[T, meta.AxisMetadata[T]]:
  """Declares and returns a parameter in this Module.

  Parameters are read-only variables in the collection named "params". See
  :mod:`flax.core.variables` for more details on variables.

  The first argument of ``init_fn`` is assumed to be a PRNG key, which is
  provided automatically and does not have to be passed using ``init_args``
  or ``init_kwargs``::

    >>> class Foo(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     x = nn.Dense(4)(x)
    ...     mean = self.param('mean', nn.initializers.lecun_normal(), x.shape)
    ...     ...
    ...     return x * mean
    >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3)))
    >>> jax.tree_util.tree_map(jnp.shape, variables)
    {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}, 'mean': (2, 4)}}

  In the example above, the function ``lecun_normal`` expects two arguments:
  ``key`` and ``shape``, but only ``shape`` has to be provided explicitly;
  ``key`` is set automatically using the PRNG for ``params`` that is passed
  when initializing the module using :meth:`init`.

  Args:
    name: The parameter name.
    init_fn: The function that will be called to compute the initial value
      of this variable. This function will only be called the first time
      this parameter is used in this module.
    *init_args: The positional arguments to pass to init_fn.
    unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
      value, see ``flax.nn.meta.unbox`` (default: True).
    **init_kwargs: The key-word arguments to pass to init_fn.

  Returns:
    The value of the initialized parameter. Throws an error if the parameter
    exists already.
  """
  if not self._initialization_allowed:
    raise ValueError(
        'Parameters must be initialized in `setup()` or in a method '
        'wrapped in `@compact`'
    )
  if self._name_taken(name, collection='params'):
    raise errors.NameInUseError('param', name, self.__class__.__name__)
  assert self.scope is not None
  # The scope supplies the 'params' PRNG key to init_fn automatically.
  v = self.scope.param(name, init_fn, *init_args, unbox=unbox, **init_kwargs)
  self._state.children[name] = 'params'
  return v
def has_variable(self, col: str, name: str) -> bool:
  """Checks if a variable of given collection and name exists in this Module.

  See :mod:`flax.core.variables` for more explanation on variables and
  collections.

  Args:
    col: The variable collection name.
    name: The name of the variable.

  Returns:
    True if the variable exists.

  Raises:
    ValueError: if the module is unbound.
  """
  scope = self.scope
  if scope is None:
    raise ValueError("Can't access variables on unbound modules")
  return scope.has_variable(col, name)
def is_mutable_collection(self, col: str) -> bool:
  """Returns true if the collection ``col`` is mutable.

  Raises:
    ValueError: if the module is unbound.
  """
  scope = self.scope
  if scope is None:
    raise ValueError("Can't check mutability on unbound modules")
  return scope.is_mutable_collection(col)
def has_rng(self, name: str) -> bool:
  """Returns true if a PRNGSequence with name ``name`` exists.

  Raises:
    ValueError: if the module is unbound.
  """
  scope = self.scope
  if scope is None:
    raise ValueError("Can't query for RNGs on unbound modules")
  return scope.has_rng(name)
def make_rng(self, name: str = 'params') -> PRNGKey:
  """Returns a new RNG key from a given RNG sequence for this Module.

  The new RNG key is split from the previous one. Thus, every call to
  ``make_rng`` returns a new RNG key, while still guaranteeing full
  reproducibility.

  .. note::
    If an invalid name is passed (i.e. no RNG key was passed by
    the user in ``.init`` or ``.apply`` for this name), then ``name``
    will default to ``'params'``.

  Example::

    >>> import jax
    >>> import flax.linen as nn

    >>> class ParamsModule(nn.Module):
    ...   def __call__(self):
    ...     return self.make_rng('params')
    >>> class OtherModule(nn.Module):
    ...   def __call__(self):
    ...     return self.make_rng('other')

    >>> key = jax.random.key(0)
    >>> params_out, _ = ParamsModule().init_with_output({'params': key})
    >>> # self.make_rng('other') will default to using the 'params' RNG stream
    >>> other_out, _ = OtherModule().init_with_output({'params': key})
    >>> assert params_out == other_out

  Learn more about RNG's by reading the Flax RNG guide:
  https://flax.readthedocs.io/en/latest/guides/flax_fundamentals/rng_guide.html

  Args:
    name: The RNG sequence name.

  Returns:
    The newly generated RNG key.

  Raises:
    ValueError: if the module is unbound.
  """
  scope = self.scope
  if scope is None:
    raise ValueError("Can't use RNGs on unbound modules")
  return scope.make_rng(name)
def is_initializing(self) -> bool:
  """Returns True if running under self.init(...) or nn.init(...)().

  This is a helper method to handle the common case of simple initialization
  where we wish to have setup logic occur when only called under
  ``module.init`` or ``nn.init``. For more complicated multi-phase
  initialization scenarios it is better to test for the mutability of
  particular variable collections or for the presence of particular
  variables that potentially need to be initialized.

  Raises:
    ValueError: if the module is unbound.
  """
  scope = self.scope
  if scope is None:
    raise ValueError("Can't check if running under init() on unbound modules")
  return scope.get_flag('initializing', False)
def _module_checks(self):
  """Run standard runtime checks.

  Verifies that ``self`` really is a Module instance and that any custom
  ``__post_init__`` override called ``super().__post_init__()`` (which is
  what assigns ``_id``).
  """
  if not isinstance(self, Module):
    raise errors.InvalidInstanceModuleError()
  # A custom __post_init__ that forgets to chain to super() leaves the
  # instance without an `_id`, which breaks clone caching.
  has_custom_post_init = self.__post_init__ != Module.__post_init__
  if has_custom_post_init and not hasattr(self, '_id'):
    raise errors.IncorrectPostInitOverrideError()
def bind(
    self: M,
    variables: VariableDict,
    *args,
    rngs: Optional[RNGSequences] = None,
    mutable: CollectionFilter = False,
) -> M:
  """Creates an interactive Module instance by binding variables and RNGs.

  ``bind`` provides an "interactive" instance of a Module directly without
  transforming a function with ``apply``. This is particularly useful for
  debugging and interactive use cases like notebooks where a function would
  limit the ability to split up code into different cells.

  Once the variables (and optionally RNGs) are bound to a ``Module`` it
  becomes a stateful object. Note that idiomatic JAX is functional and
  therefore an interactive instance does not mix well with vanilla JAX APIs.
  ``bind()`` should only be used for interactive experimentation, and in all
  other cases we strongly encourage users to use ``apply()`` instead.

  Example::

    >>> import jax
    >>> import jax.numpy as jnp
    >>> import flax.linen as nn

    >>> class AutoEncoder(nn.Module):
    ...   def setup(self):
    ...     self.encoder = nn.Dense(3)
    ...     self.decoder = nn.Dense(5)
    ...
    ...   def __call__(self, x):
    ...     return self.decoder(self.encoder(x))

    >>> x = jnp.ones((16, 9))
    >>> ae = AutoEncoder()
    >>> variables = ae.init(jax.random.key(0), x)
    >>> model = ae.bind(variables)
    >>> z = model.encoder(x)
    >>> x_reconstructed = model.decoder(z)

  Args:
    variables: A dictionary containing variables keyed by variable
      collections. See :mod:`flax.core.variables` for more details about
      variables.
    *args: Named arguments (not used).
    rngs: a dict of PRNGKeys to initialize the PRNG sequences.
    mutable: Can be bool, str, or list. Specifies which collections should
      be treated as mutable: ``bool``: all/no collections are mutable.
      ``str``: The name of a single mutable collection. ``list``: A list of
      names of mutable collections.

  Returns:
    A copy of this instance with bound variables and RNGs.
  """
  Module._module_checks(self)

  # Positional args are accepted for signature compatibility but unused.
  del args
  scope = core.bind(variables, rngs=rngs, mutable=mutable)
  # Deep-clone so the bound scope does not leak into the original instance.
  return self.clone(parent=scope, _deep_clone=True)
def unbind(self: M) -> Tuple[M, VariableDict]:
  """Returns an unbound copy of a Module and its variables.

  ``unbind`` helps create a stateless version of a bound Module.

  An example of a common use case: to extract a sub-Module defined inside
  ``setup()`` and its corresponding variables: 1) temporarily ``bind`` the
  parent Module; and then 2) ``unbind`` the desired sub-Module. (Recall that
  ``setup()`` is only called when the Module is bound.)::

    >>> class Encoder(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     ...
    ...     return nn.Dense(256)(x)

    >>> class Decoder(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     ...
    ...     return nn.Dense(784)(x)

    >>> class AutoEncoder(nn.Module):
    ...   def setup(self):
    ...     self.encoder = Encoder()
    ...     self.decoder = Decoder()
    ...
    ...   def __call__(self, x):
    ...     return self.decoder(self.encoder(x))

    >>> module = AutoEncoder()
    >>> variables = module.init(jax.random.key(0), jnp.ones((1, 784)))

    >>> # Extract the Encoder sub-Module and its variables
    >>> encoder, encoder_vars = module.bind(variables).encoder.unbind()

  Returns:
    A tuple with an unbound copy of this Module and its variables.

  Raises:
    CallUnbindOnUnboundModuleError: if the module is not currently bound.
  """
  Module._module_checks(self)

  if self.scope is None:
    raise errors.CallUnbindOnUnboundModuleError()

  # Snapshot the variables first, then produce a scope-free deep clone with
  # submodule names reset so it can be re-bound/re-named cleanly.
  variables = self.variables
  module = self.clone(_deep_clone=True, _reset_names=True, name=None)
  return module, variables
def apply(
    self,
    variables: VariableDict,
    *args,
    rngs: Optional[Union[PRNGKey, RNGSequences]] = None,
    method: Union[Callable[..., Any], str, None] = None,
    mutable: CollectionFilter = False,
    capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False,
    **kwargs,
) -> Union[Any, Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]]:
  """Applies a module method to variables and returns output and modified variables.

  Note that ``method`` should be set if one would like to call ``apply`` on
  a different class method than ``__call__``. For instance, suppose a
  Transformer modules has a method called ``encode``, then the following
  calls ``apply`` on that method::

    >>> import flax.linen as nn
    >>> import jax, jax.numpy as jnp
    >>> import numpy as np

    >>> class Transformer(nn.Module):
    ...   def encode(self, x):
    ...     ...

    >>> x = jnp.ones((16, 9))
    >>> model = Transformer()
    >>> variables = model.init(jax.random.key(0), x, method=Transformer.encode)

    >>> encoded = model.apply(variables, x, method=Transformer.encode)

  If a function instance is provided, the unbound function is used. For
  instance, the example below is equivalent to the one above::

    >>> encoded = model.apply(variables, x, method=model.encode)

  You can also pass a string to a callable attribute of the module. For
  example, the previous can be written as::

    >>> encoded = model.apply(variables, x, method='encode')

  Note ``method`` can also be a function that is not defined in
  ``Transformer``. In that case, the function should have at least one
  argument representing an instance of the Module class::

    >>> def other_fn(instance, x):
    ...   # instance.some_module_attr(...)
    ...   instance.encode
    ...   ...

    >>> model.apply(variables, x, method=other_fn)

  If you pass a single ``PRNGKey``, Flax will use it to feed the
  ``'params'`` RNG stream. If you want to use a different RNG stream or need
  to use multiple streams, you can pass a dictionary mapping each RNG stream
  name to its corresponding ``PRNGKey`` to ``apply``. If
  ``self.make_rng(name)`` is called on an RNG stream name that isn't passed
  by the user, it will default to using the ``'params'`` RNG stream.

  Example::

    >>> class Foo(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x, add_noise=False):
    ...     x = nn.Dense(16)(x)
    ...     x = nn.relu(x)
    ...
    ...     if add_noise:
    ...       # Add gaussian noise
    ...       noise_key = self.make_rng('noise')
    ...       x = x + jax.random.normal(noise_key, x.shape)
    ...
    ...     return nn.Dense(1)(x)

    >>> x = jnp.empty((1, 7))
    >>> module = Foo()
    >>> rngs = {'params': jax.random.key(0), 'noise': jax.random.key(1)}
    >>> variables = module.init(rngs, x)
    >>> out0 = module.apply(variables, x, add_noise=True, rngs=rngs)

    >>> rngs['noise'] = jax.random.key(0)
    >>> out1 = module.apply(variables, x, add_noise=True, rngs=rngs)
    >>> # different output (key(1) vs key(0))
    >>> np.testing.assert_raises(AssertionError, np.testing.assert_allclose, out0, out1)

    >>> del rngs['noise']
    >>> # self.make_rng('noise') will default to using the 'params' RNG stream
    >>> out2 = module.apply(variables, x, add_noise=True, rngs=rngs)
    >>> # same output (key(0))
    >>> np.testing.assert_allclose(out1, out2)

    >>> # passing in a single key is equivalent to passing in {'params': key}
    >>> out3 = module.apply(variables, x, add_noise=True, rngs=jax.random.key(0))
    >>> # same output (key(0))
    >>> np.testing.assert_allclose(out2, out3)

  Args:
    variables: A dictionary containing variables keyed by variable
      collections. See :mod:`flax.core.variables` for more details about
      variables.
    *args: Named arguments passed to the specified apply method.
    rngs: a dict of PRNGKeys to initialize the PRNG sequences. The "params"
      PRNG sequence is used to initialize parameters.
    method: A function to call apply on. This is generally a function in the
      module. If provided, applies this method. If not provided, applies the
      ``__call__`` method of the module. A string can also be provided to
      specify a method by name.
    mutable: Can be bool, str, or list. Specifies which collections should
      be treated as mutable: ``bool``: all/no collections are mutable.
      ``str``: The name of a single mutable collection. ``list``: A list of
      names of mutable collections.
    capture_intermediates: If ``True``, captures intermediate return values
      of all Modules inside the "intermediates" collection. By default, only
      the return values of all ``__call__`` methods are stored. A function
      can be passed to change the filter behavior. The filter function takes
      the Module instance and method name and returns a bool indicating
      whether the output of that method invocation should be stored.
    **kwargs: Keyword arguments passed to the specified apply method.

  Returns:
    If ``mutable`` is False, returns output. If any collections are
    mutable, returns ``(output, vars)``, where ``vars`` are is a dict
    of the modified collections.
  """
  Module._module_checks(self)

  # Normalize a bare PRNG key into the canonical {'params': key} dict form.
  if rngs is not None and not isinstance(rngs, dict):
    if not core.scope._is_valid_rng(rngs):
      raise errors.InvalidRngError(
        'RNGs should be of shape (2,) or PRNGKey in Module '
        f'{self.__class__.__name__}, but rngs are: {rngs}'
      )
    rngs = {'params': rngs}

  if isinstance(method, str):
    attribute_name = method
    method = getattr(self, attribute_name)
    if not callable(method):
      class_name = type(self).__name__
      raise TypeError(
          f"'{class_name}.{attribute_name}' must be a callable, got"
          f' {type(method)}.'
      )
    # if the `method` string is a submodule, we create a lambda function
    # that calls the submodule, forwarding all arguments.
    if isinstance(method, Module):
      method = lambda self, *args, **kwargs: getattr(self, attribute_name)(
          *args, **kwargs
      )
  elif method is None:
    method = self.__call__
  # The functional core expects an unbound function taking the module first.
  method = _get_unbound_fn(method)
  return apply(
      method,
      self,
      mutable=mutable,
      capture_intermediates=capture_intermediates,
  )(variables, *args, **kwargs, rngs=rngs)
def init_with_output(
    self,
    rngs: Union[PRNGKey, RNGSequences],
    *args,
    method: Union[Callable[..., Any], str, None] = None,
    mutable: CollectionFilter = DenyList('intermediates'),
    capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False,
    **kwargs,
) -> Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]:
  """Initializes a module method with variables and returns output and modified variables.

  Args:
    rngs: The rngs for the variable collections.
    *args: Named arguments passed to the init function.
    method: An optional method. If provided, applies this method. If not
      provided, applies the ``__call__`` method. A string can also be
      provided to specify a method by name.
    mutable: Can be bool, str, or list. Specifies which collections should
      be treated as mutable: ``bool``: all/no collections are mutable.
      ``str``: The name of a single mutable collection. ``list``: A list of
      names of mutable collections. By default, all collections except
      "intermediates" are mutable.
    capture_intermediates: If ``True``, captures intermediate return values
      of all Modules inside the "intermediates" collection. By default only
      the return values of all ``__call__`` methods are stored. A function
      can be passed to change the filter behavior. The filter function takes
      the Module instance and method name and returns a bool indicating
      whether the output of that method invocation should be stored.
    **kwargs: Keyword arguments passed to the init function.

  Returns:
    ``(output, vars)``, where ``vars`` are is a dict of the modified
    collections.
  """
  Module._module_checks(self)

  # Normalize a bare PRNG key into the canonical {'params': key} dict form.
  if not isinstance(rngs, dict):
    if not core.scope._is_valid_rng(rngs):
      raise errors.InvalidRngError(
        'RNGs should be of shape (2,) or PRNGKey in Module '
        f'{self.__class__.__name__}, but rngs are: {rngs}'
      )
    rngs = {'params': rngs}

  if isinstance(method, str):
    attribute_name = method
    method = getattr(self, attribute_name)
    if not callable(method):
      class_name = type(self).__name__
      raise TypeError(
          f"'{class_name}.{attribute_name}' must be a callable, got"
          f' {type(method)}.'
      )
    # Consistency fix (mirrors `apply`): if the `method` string names a
    # submodule, wrap it in a lambda that forwards all arguments, so that
    # `_get_unbound_fn` below receives a real function rather than a Module
    # instance that would be invoked with the wrong `self`.
    if isinstance(method, Module):
      method = lambda self, *args, **kwargs: getattr(self, attribute_name)(
          *args, **kwargs
      )
  elif method is None:
    method = self.__call__
  # The functional core expects an unbound function taking the module first.
  method = _get_unbound_fn(method)
  return init_with_output(
      method,
      self,
      mutable=mutable,
      capture_intermediates=capture_intermediates,
  )(rngs, *args, **kwargs)
def init(
    self,
    rngs: Union[PRNGKey, RNGSequences],
    *args,
    method: Union[Callable[..., Any], str, None] = None,
    mutable: CollectionFilter = DenyList('intermediates'),
    capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False,
    **kwargs,
) -> Union[FrozenVariableDict, Dict[str, Any]]:
  """Initializes a module method with variables and returns modified variables.

  ``init`` takes as first argument either a single ``PRNGKey``, or a
  dictionary mapping variable collections names to their ``PRNGKeys``, and
  will call ``method`` (which is the module's ``__call__`` function by
  default) passing ``*args`` and ``**kwargs``, and returns
  a dictionary of initialized variables.

  Example::

    >>> import flax.linen as nn
    >>> import jax, jax.numpy as jnp
    >>> import numpy as np

    >>> class Foo(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x, train):
    ...     x = nn.Dense(16)(x)
    ...     x = nn.BatchNorm(use_running_average=not train)(x)
    ...     x = nn.relu(x)
    ...     return nn.Dense(1)(x)

    >>> x = jnp.empty((1, 7))
    >>> module = Foo()
    >>> key = jax.random.key(0)
    >>> variables = module.init(key, x, train=False)

  If you pass a single ``PRNGKey``, Flax will use it to feed the
  ``'params'`` RNG stream. If you want to use a different RNG stream or need
  to use multiple streams, you can pass a dictionary mapping each RNG stream
  name to its corresponding ``PRNGKey`` to ``init``. If
  ``self.make_rng(name)`` is called on an RNG stream name that isn't passed
  by the user, it will default to using the ``'params'`` RNG stream.

  Example::

    >>> class Foo(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     x = nn.Dense(16)(x)
    ...     x = nn.relu(x)
    ...
    ...     other_variable = self.variable(
    ...       'other_collection',
    ...       'other_variable',
    ...       lambda x: jax.random.normal(self.make_rng('other_rng'), x.shape),
    ...       x,
    ...     )
    ...     x = x + other_variable.value
    ...
    ...     return nn.Dense(1)(x)

    >>> module = Foo()
    >>> rngs = {'params': jax.random.key(0), 'other_rng': jax.random.key(1)}
    >>> variables0 = module.init(rngs, x)

    >>> rngs['other_rng'] = jax.random.key(0)
    >>> variables1 = module.init(rngs, x)
    >>> # equivalent params (key(0))
    >>> _ = jax.tree_util.tree_map(
    ...   np.testing.assert_allclose, variables0['params'], variables1['params']
    ... )
    >>> # different other_variable (key(1) vs key(0))
    >>> np.testing.assert_raises(
    ...   AssertionError,
    ...   np.testing.assert_allclose,
    ...   variables0['other_collection']['other_variable'],
    ...   variables1['other_collection']['other_variable'],
    ... )

    >>> del rngs['other_rng']
    >>> # self.make_rng('other_rng') will default to using the 'params' RNG stream
    >>> variables2 = module.init(rngs, x)
    >>> # equivalent params (key(0))
    >>> _ = jax.tree_util.tree_map(
    ...   np.testing.assert_allclose, variables1['params'], variables2['params']
    ... )
    >>> # equivalent other_variable (key(0))
    >>> np.testing.assert_allclose(
    ...   variables1['other_collection']['other_variable'],
    ...   variables2['other_collection']['other_variable'],
    ... )

    >>> # passing in a single key is equivalent to passing in {'params': key}
    >>> variables3 = module.init(jax.random.key(0), x)
    >>> # equivalent params (key(0))
    >>> _ = jax.tree_util.tree_map(
    ...   np.testing.assert_allclose, variables2['params'], variables3['params']
    ... )
    >>> # equivalent other_variable (key(0))
    >>> np.testing.assert_allclose(
    ...   variables2['other_collection']['other_variable'],
    ...   variables3['other_collection']['other_variable'],
    ... )

  Jitting ``init`` initializes a model lazily using only the shapes of the
  provided arguments, and avoids computing the forward pass with actual
  values. Example::

    >>> module = nn.Dense(1)
    >>> init_jit = jax.jit(module.init)
    >>> variables = init_jit(jax.random.key(0), x)

  ``init`` is a light wrapper over ``apply``, so other ``apply`` arguments
  like ``method``, ``mutable``, and ``capture_intermediates`` are also
  available.

  Args:
    rngs: The rngs for the variable collections.
    *args: Named arguments passed to the init function.
    method: An optional method. If provided, applies this method. If not
      provided, applies the ``__call__`` method. A string can also be
      provided to specify a method by name.
    mutable: Can be bool, str, or list. Specifies which collections should
      be treated as mutable: ``bool``: all/no collections are mutable.
      ``str``: The name of a single mutable collection. ``list``: A list of
      names of mutable collections. By default all collections except
      "intermediates" are mutable.
    capture_intermediates: If ``True``, captures intermediate return values
      of all Modules inside the "intermediates" collection. By default only
      the return values of all ``__call__`` methods are stored. A function
      can be passed to change the filter behavior. The filter function takes
      the Module instance and method name and returns a bool indicating
      whether the output of that method invocation should be stored.
    **kwargs: Keyword arguments passed to the init function.

  Returns:
    The initialized variable dict.
  """
  Module._module_checks(self)

  # Thin wrapper: run init_with_output and discard the method's output.
  _, v_out = self.init_with_output(
      rngs,
      *args,
      method=method,
      mutable=mutable,
      capture_intermediates=capture_intermediates,
      **kwargs,
  )
  return v_out
def lazy_init(
    self,
    rngs: Union[PRNGKey, RNGSequences],
    *args,
    method: Optional[Callable[..., Any]] = None,
    mutable: CollectionFilter = DenyList('intermediates'),
    **kwargs,
) -> FrozenVariableDict:
  """Initializes variables without running real computation on real data.

  Inputs should be given as ``jax.ShapeDtypeStruct`` placeholders that carry
  only shape and dtype. Concrete values (jax arrays, scalars, bools) may be
  mixed in, and are required for any argument whose value determines *which*
  variables get created (e.g. a flag that enables a submodule) — otherwise
  ``lazy_init`` cannot decide what to initialize.

  Example::

    >>> model = nn.Dense(features=256)
    >>> variables = model.lazy_init(
    ...   jax.random.key(0), jax.ShapeDtypeStruct((1, 128), jnp.float32))

  Args:
    rngs: The rngs for the variable collections.
    *args: Arguments passed to the init function.
    method: An optional method to apply; defaults to ``__call__``.
    mutable: Which collections are treated as mutable: ``bool`` for all/none,
      ``str`` for one collection, ``list`` for several. By default every
      collection except "intermediates" is mutable.
    **kwargs: Keyword arguments passed to the init function.

  Returns:
    The initialized variable dict.
  """
  Module._module_checks(self)

  def init_fn(rngs, *args, **kwargs):
    # Delegate to the regular init; partial evaluation below strips out any
    # compute that does not affect variable shapes.
    return self.init(rngs, *args, method=method, mutable=mutable, **kwargs)

  return partial_eval.lazy_init(init_fn)(rngs, *args, **kwargs)
def variables(self) -> VariableDict:
  """Returns all variable collections bound to this module."""
  scope = self.scope
  if scope is None:
    raise ValueError("Can't access variables on unbound modules")
  return scope.variables()
def get_variable(self, col: str, name: str, default: Optional[T] = None) -> T:
  """Looks up a variable's value, falling back to ``default``.

  Args:
    col: the variable collection to search.
    name: the variable's name within that collection.
    default: value returned when no such variable exists in this scope.

  Returns:
    The stored value, or ``default`` when the variable is absent.

  Raises:
    ValueError: if the module is unbound (has no scope).
  """
  scope = self.scope
  if scope is None:
    raise ValueError("Can't access variables on unbound modules")
  return scope.get_variable(col, name, default)
def put_variable(self, col: str, name: str, value: Any):
  """Writes ``value`` into a variable (errors if the collection is immutable).

  Args:
    col: the variable collection.
    name: the variable's name within that collection.
    value: the new value to store.

  Raises:
    ValueError: if the module is unbound (has no scope).
  """
  scope = self.scope
  if scope is None:
    raise ValueError("Can't access variables on unbound modules")
  scope.put_variable(col, name, value)
# NOTE(review): these two bare signatures look like typing ``@overload`` stubs
# for ``sow`` whose decorators were lost when this file was extracted; as
# written they are plain defs that are immediately shadowed by the
# implementation below — confirm against the original source.
def sow(self, col: str, name: str, value: Any) -> bool:
  ...

def sow(
    self,
    col: str,
    name: str,
    value: T,
    reduce_fn: Callable[[K, T], K] = tuple_reduce,
    init_fn: Callable[[], K] = tuple_init,  # type: ignore
) -> bool:
  ...
def sow(
    self,
    col: str,
    name: str,
    value: T,
    reduce_fn: Callable[[K, T], K] = tuple_reduce,
    init_fn: Callable[[], K] = tuple_init,  # type: ignore
) -> bool:
  """Stores ``value`` in collection ``col`` under ``name``.

  Collections let you gather intermediate values without explicitly
  threading a container through every Module call. When the target
  collection is not mutable, ``sow`` is a no-op and returns ``False``.

  Repeated calls are folded together with ``reduce_fn``: by default each
  stored value is appended to a tuple (starting from ``init_fn()``'s empty
  tuple), so intermediates from multiple invocations of the same module are
  all retained. Pass a custom ``init_fn``/``reduce_fn`` pair (e.g. ``0`` and
  ``operator.add``) to accumulate instead.

  Args:
    col: The name of the variable collection.
    name: The name of the variable.
    value: The value to fold into the collection.
    reduce_fn: Combines the previously stored value with the new ``value``.
    init_fn: Produces the initial accumulator handed to ``reduce_fn`` on the
      first store.

  Returns:
    ``True`` if the value was stored, ``False`` if ``col`` is immutable.

  Raises:
    ValueError: if the module is unbound (has no scope).
  """
  if self.scope is None:
    raise ValueError("Can't store variables on unbound modules")
  if not self.scope.is_mutable_collection(col):
    # Immutable target: silently skip, mirroring a disabled instrumentation.
    return False
  if self.scope.has_variable(col, name):
    accumulated = self.scope.get_variable(col, name)
  else:
    # First store under this name: reserve it and start from the initial
    # accumulator.
    self.scope.reserve(name, col)
    self._state.children[name] = col
    accumulated = init_fn()
  self.scope.put_variable(col, name, reduce_fn(accumulated, value))
  return True
def perturb(
    self, name: str, value: T, collection: str = 'perturbations'
) -> T:
  """Adds a zero-valued 'perturbation' variable to an intermediate value.

  Because the perturbation is added to ``value``, the gradient with respect
  to the perturbation variable equals the gradient of ``value`` itself:
  define your loss over both params and perturbations, then run ``jax.grad``
  on the perturbation argument to read out intermediate gradients. If the
  perturbation collection is not present in the variables passed to
  ``apply``, this behaves as a no-op.

  .. note::
    Experimental API. It creates extra dummy variables that occupy memory;
    use it only to debug gradients during training.

  Args:
    name: name of the perturbation variable.
    value: the intermediate value to attach the perturbation to.
    collection: the collection the perturbation variable lives in.

  Returns:
    ``value``, with the stored perturbation added when one is present.

  Raises:
    ValueError: if the module is unbound, or if the collection exists but
      is missing the expected perturbation variable.
  """
  if self.scope is None:
    raise ValueError("Can't store variables on unbound modules")

  if self.is_mutable_collection(collection):
    # Initialization path: materialize a zeros variable shaped like value.
    if not self.scope.has_variable(collection, name):
      self.scope.reserve(name, collection)
      self._state.children[name] = collection
      self.scope.put_variable(collection, name, jnp.zeros_like(value))  # type: ignore

  if collection in self.scope.root._variables:
    if not self.scope.has_variable(collection, name):
      raise ValueError(f"Perturbation collection {collection} present, but "
                       f"missing perturbation variable {name}")
    # Adding the (zero) perturbation ties value's gradient to the variable.
    value += self.scope.get_variable(collection, name)  # type: ignore
  return value
def tabulate(
    self,
    rngs: Union[PRNGKey, RNGSequences],
    *args,
    depth: Optional[int] = None,
    show_repeated: bool = False,
    mutable: CollectionFilter = DenyList('intermediates'),
    console_kwargs: Optional[Mapping[str, Any]] = None,
    table_kwargs: Mapping[str, Any] = MappingProxyType({}),
    column_kwargs: Mapping[str, Any] = MappingProxyType({}),
    compute_flops: bool = False,
    compute_vjp_flops: bool = False,
    **kwargs,
) -> str:
  """Renders a table summarizing this Module and returns it as a string.

  Shares its signature with ``Module.init`` and runs the forward pass under
  ``jax.eval_shape``, so no FLOPs are consumed and no device memory is
  allocated. Row order follows the alphabetical ordering of keys in
  ``variables``, not execution order; ``vjp_flops`` is reported as ``0`` when
  the module is not differentiable.

  Extra rendering options go through ``console_kwargs`` (e.g.
  ``{'width': 120}``); see the ``rich.console.Console`` reference for the
  full list.

  Args:
    rngs: The rngs for the variable collections, as for ``Module.init``.
    *args: Positional arguments for the forward computation.
    depth: Maximum submodule depth to display (``None`` = unlimited). Rows
      hidden by the limit have their parameter counts and bytes folded into
      their first shown ancestor so totals stay correct.
    show_repeated: If ``True``, show every call to a repeated module instead
      of just the first.
    mutable: Which collections are treated as mutable: ``bool`` for
      all/none, ``str`` for one collection, ``list`` for several. Default is
      everything except 'intermediates'.
    console_kwargs: Extra keyword arguments for ``rich.console.Console``
      (defaults: ``{'force_terminal': True, 'force_jupyter': False}``).
    table_kwargs: Extra keyword arguments for the ``rich.table.Table``
      constructor.
    column_kwargs: Extra keyword arguments for
      ``rich.table.Table.add_column``.
    compute_flops: Add a ``flops`` column with the estimated cost of each
      module's forward pass. Incurs real compilation/allocation overhead
      for large modules.
    compute_vjp_flops: Add a ``vjp_flops`` column with the estimated cost of
      each module's backward pass (roughly 2-3x the overhead of
      ``compute_flops``).
    **kwargs: Keyword arguments for the forward computation.

  Returns:
    The rendered summary table as a string.
  """
  from flax.linen import summary

  render_fn = summary.tabulate(
      self,
      rngs,
      depth=depth,
      show_repeated=show_repeated,
      mutable=mutable,
      console_kwargs=console_kwargs,
      table_kwargs=table_kwargs,
      column_kwargs=column_kwargs,
      compute_flops=compute_flops,
      compute_vjp_flops=compute_vjp_flops,
  )
  return render_fn(*args, **kwargs)
def module_paths(
    self,
    rngs: Union[PRNGKey, RNGSequences],
    *args,
    show_repeated: bool = False,
    mutable: CollectionFilter = DenyList('intermediates'),
    **kwargs,
) -> dict[str, 'Module']:
  """Maps each submodule path to an unbound copy of the module used there.

  Shares its signature with ``Module.init``, but instead of variables it
  returns a dictionary from path strings to copies of the module instances
  encountered at runtime. Runs under ``jax.eval_shape``, so no FLOPs are
  consumed and no device memory is allocated.

  Example::

    >>> import flax.linen as nn
    >>> import jax, jax.numpy as jnp

    >>> class Foo(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     h = nn.Dense(4)(x)
    ...     return nn.Dense(2)(h)

    >>> x = jnp.ones((16, 9))
    >>> modules = Foo().module_paths(jax.random.key(0), x)
    >>> print({
    ...   p: type(m).__name__ for p, m in modules.items()
    ... })
    {'': 'Foo', 'Dense_0': 'Dense', 'Dense_1': 'Dense'}

  Args:
    rngs: The rngs for the variable collections, as for ``Module.init``.
    *args: Positional arguments for the forward computation.
    show_repeated: If ``True``, include repeated calls to the same module;
      otherwise only the first call is recorded.
    mutable: Which collections are treated as mutable: ``bool`` for
      all/none, ``str`` for one collection, ``list`` for several. Default is
      everything except 'intermediates'.
    **kwargs: Keyword arguments for the forward computation.

  Returns:
    A dictionary mapping module paths to module instances.
  """
  from flax.linen import summary

  rows = summary._get_module_table(
      module=self,
      depth=None,
      show_repeated=show_repeated,
      compute_flops=False,
      compute_vjp_flops=False,
  )(rngs, *args, **kwargs, mutable=mutable)
  return {'/'.join(r.path): r.module_copy for r in rows}
The provided code snippet includes necessary dependencies for implementing the `_map_submodules` function. Write a Python function `def _map_submodules(fn: Callable[['Module'], Any], tree)` to solve the following problem:
Map a function over all submodules in a tree.
Here is the function:
def _map_submodules(fn: Callable[['Module'], Any], tree):
  """Map a function over all submodules in a tree.

  Args:
    fn: applied to every ``Module`` instance encountered in ``tree``;
      non-module leaves are passed through unchanged.
    tree: an arbitrary pytree that may contain ``Module`` instances.

  Returns:
    The mapped tree, with attributes frozen via ``_freeze_attr``.
  """
  # Note: a stray dataset artifact ("| Map a function ...") fused onto the
  # return line has been removed; the logic is unchanged.
  g = lambda _, x: fn(x) if isinstance(x, Module) else x
  return _freeze_attr(_map_over_modules_in_tree(g, tree))
import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
def module_field(*, kw_only: bool = False, default: Optional[Any] = ...) -> Any:
  """Declares a dataclass-style Module field (typing stub).

  NOTE(review): only the signature is visible here — the body is an
  ellipsis stub (a non-Python "| null" artifact fused onto it has been
  removed). The sentinel default ``...`` presumably means "no default";
  confirm against the full implementation.

  Args:
    kw_only: whether the field is keyword-only in the generated ``__init__``.
    default: the field's default value; ``...`` when unset.
  """
  ...
import contextlib
import dataclasses
import enum
import functools
import inspect
import sys
import threading
import typing
import weakref
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import jax
import jax.numpy as jnp
import typing_extensions as tpe
import flax
import flax.linen as nn
from flax import (
config,
core,
errors,
serialization,
traceback_util,
traverse_util,
)
from flax.core import Scope, meta, partial_eval
from flax.core.frozen_dict import FrozenDict
from flax.core.scope import (
CollectionFilter,
DenyList,
Variable,
union_filters,
)
from flax.ids import FlaxId, uuid
from flax.linen import kw_only_dataclasses
from flax.typing import (
RNGSequences,
PRNGKey,
FrozenVariableDict,
VariableDict,
)
T = TypeVar('T')
The provided code snippet includes necessary dependencies for implementing the `merge_param` function. Write a Python function `def merge_param(name: str, a: Optional[T], b: Optional[T]) -> T` to solve the following problem:
Merges construction- and call-time argument. This is a utility for supporting a pattern where a Module hyperparameter can be passed either to ``__init__`` or ``__call__``, and the value that is not ``None`` will be used. Example:: >>> import flax.linen as nn >>> from typing import Optional >>> class Foo(nn.Module): ... train: Optional[bool] = None ... def __call__(self, train: Optional[bool] = None): ... train = nn.merge_param('train', self.train, train) An error is thrown when both arguments are ``None`` or both values are not ``None``. Args: name: the name of the parameter. Used for error messages. a: option a b: option b Returns: a or b whichever is not ``None``.
Here is the function:
def merge_param(name: str, a: Optional[T], b: Optional[T]) -> T:
  """Merges construction- and call-time argument.

  Supports the pattern where a Module hyperparameter may be supplied either
  to ``__init__`` or to ``__call__``: exactly one of the two must be set,
  and that value is returned.

  Example::

    >>> import flax.linen as nn
    >>> from typing import Optional

    >>> class Foo(nn.Module):
    ...   train: Optional[bool] = None

    ...   def __call__(self, train: Optional[bool] = None):
    ...     train = nn.merge_param('train', self.train, train)

  Args:
    name: the name of the parameter. Used for error messages.
    a: option a
    b: option b

  Returns:
    Whichever of ``a`` and ``b`` is not ``None``.

  Raises:
    ValueError: if both arguments are ``None``, or both are set.
  """
  if a is not None:
    if b is not None:
      raise ValueError(
          f'Parameter "{name}" was passed to the constructor and at call time.'
          ' Should be passed just once.'
      )
    return a
  if b is None:
    raise ValueError(
        f'Parameter "{name}" must be passed to the constructor or at call time.'
    )
  return b
import functools
from collections.abc import Iterable
from typing import Any, Callable, Union
import jax
import jax.numpy as jnp
import numpy as np
from jax import lax, random
from flax import struct
from flax.core import Scope
from flax.linen import initializers
from .linear import default_kernel_init, dense_general
def dot_product_attention(
    scope,
    query,
    key,
    value,
    dtype=jnp.float32,
    bias=None,
    axis=None,
    broadcast_dropout=True,
    dropout_rng=None,
    dropout_rate=0.0,
    deterministic=False,
    precision=None,
):
  """Computes dot-product attention given query, key, and value.

  This is the core function for applying attention based on
  https://arxiv.org/abs/1706.03762. It calculates the attention weights given
  query and key and combines the values using the attention weights. This
  function supports multi-dimensional inputs.

  Args:
    scope: the scope used to draw a 'dropout' rng when one is needed.
    query: queries for calculating attention with shape of `[batch_size, dim1,
      dim2, ..., dimN, num_heads, mem_channels]`.
    key: keys for calculating attention with shape of `[batch_size, dim1, dim2,
      ..., dimN, num_heads, mem_channels]`.
    value: values to be used in attention with shape of `[batch_size, dim1,
      dim2,..., dimN, num_heads, value_channels]`.
    dtype: the dtype of the computation (default: float32)
    bias: bias for the attention weights. This can be used for incorporating
      autoregressive mask, padding mask, proximity bias.
    axis: axes over which the attention is applied; defaults to all axes
      between the batch axis and the trailing head/channel axes.
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    dropout_rng: JAX PRNGKey: to be used for dropout
    dropout_rate: dropout rate
    deterministic: bool, deterministic or not (to apply dropout)
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.

  Returns:
    Output of shape `[bs, dim1, dim2, ..., dimN,, num_heads, value_channels]`.

  Raises:
    ValueError: if an attention axis is not strictly between the batch axis
      and the last two axes.
  """
  assert key.shape[:-1] == value.shape[:-1]
  assert query.shape[0:1] == key.shape[0:1] and query.shape[-1] == key.shape[-1]

  if axis is None:
    axis = tuple(range(1, key.ndim - 2))
  if not isinstance(axis, Iterable):
    axis = (axis,)
  assert key.ndim == query.ndim
  assert key.ndim == value.ndim
  for ax in axis:
    if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2):
      raise ValueError(
          'Attention axis must be between the batch axis and the last-two axes.'
      )
  depth = query.shape[-1]
  n = key.ndim
  # batch_dims is <bs, <non-attention dims>, num_heads>
  batch_dims = tuple(np.delete(range(n), axis + (n - 1,)))
  # q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
  qk_perm = batch_dims + axis + (n - 1,)
  key = key.transpose(qk_perm)
  query = query.transpose(qk_perm)
  # v -> (bs, <non-attention dims>, num_heads, channels, <attention dims>)
  v_perm = batch_dims + (n - 1,) + axis
  value = value.transpose(v_perm)

  # Scale queries by 1/sqrt(depth) before the dot product (scaled dot-product
  # attention).
  query = query / jnp.sqrt(depth).astype(dtype)
  batch_dims_t = tuple(range(len(batch_dims)))
  # Contract query and key over the channel axis, batching over batch dims.
  attn_weights = lax.dot_general(
      query,
      key,
      (((n - 1,), (n - 1,)), (batch_dims_t, batch_dims_t)),
      precision=precision,
  )

  # apply attention bias: masking, dropout, proximity bias, etc.
  if bias is not None:
    attn_weights = attn_weights + bias

  # Normalize the attention weights with a numerically stable softmax over
  # the key's attention dimensions (the trailing len(axis) dims).
  norm_dims = tuple(range(attn_weights.ndim - len(axis), attn_weights.ndim))
  attn_weights = lax.exp(
      attn_weights
      - jax.scipy.special.logsumexp(attn_weights, axis=norm_dims, keepdims=True)
  )
  attn_weights = attn_weights.astype(dtype)

  # apply dropout
  if not deterministic and dropout_rate > 0.0:
    if dropout_rng is None:
      dropout_rng = scope.make_rng('dropout')
    keep_prob = 1.0 - dropout_rate
    if broadcast_dropout:
      # dropout is broadcast across the batch+head+non-attention dimension
      dropout_dims = attn_weights.shape[-(2 * len(axis)) :]
      dropout_shape = tuple([1] * len(batch_dims_t)) + dropout_dims
      keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape)
    else:
      keep = random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
    # Inverted dropout: rescale the kept weights by 1/keep_prob.
    multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(
        keep_prob, dtype=dtype
    )
    attn_weights = attn_weights * multiplier

  # compute the new values given the attention weights
  wv_contracting_dims = (norm_dims, range(value.ndim - len(axis), value.ndim))
  y = lax.dot_general(
      attn_weights,
      value,
      (wv_contracting_dims, (batch_dims_t, batch_dims_t)),
      precision=precision,
  )

  # back to (bs, dim1, dim2, ..., dimN, num_heads, channels)
  perm_inv = _invert_perm(qk_perm)
  y = y.transpose(perm_inv)
  return y
class CacheEntry(struct.PyTreeNode):
  """One attention layer's autoregressive decoding cache state.

  NOTE(review): fields are presumably the accumulated key/value projections
  plus the current decode index ``i`` — confirm against the cache logic that
  consumes this entry (not visible here).
  """

  key: np.ndarray
  value: np.ndarray
  i: np.ndarray
def make_padding_mask(
    padding_mask_query,
    padding_mask_key,
    query_shape,
    key_shape,
    attention_axis=None,
    segmentation_mask=False,
):
  """Builds the attention-weight mask from query/key padding masks.

  For 1d inputs (`[bs, len, features]`) the attention weights are
  `[bs, len, len]`, and the returned mask is the outer product of the two
  padding masks over the attention axes (or their pairwise equality when
  building a segmentation mask).

  Args:
    padding_mask_query: padding mask of query <bs, qdim1,.., qdimn>
    padding_mask_key: padding mask of query <bs, key1,.., keyn>
    query_shape: shape of the query
    key_shape: shape of the key, which is equal to the shape of value.
    attention_axis: axis over which attention is applied.
    segmentation_mask: bool: if true use equality on cartesian product rather
      than outer product for constructing segmentation masks.

  Returns:
    The float32 padding mask for attention weights.
  """
  assert query_shape[0] == key_shape[0]
  assert len(query_shape) == len(key_shape)
  ndim = len(key_shape)

  if attention_axis is None:
    attention_axis = tuple(range(1, ndim - 2))
  assert isinstance(attention_axis, tuple)
  for ax in attention_axis:
    if not (ndim >= 3 and 1 <= ax < ndim - 2):
      raise ValueError(
          'Attention axis must be between the batch axis and the last-two axes.'
      )

  # Target layout: (bs, 1 head dim, <query attn dims>, <key attn dims>).
  out_shape = (query_shape[0], 1)
  out_shape += tuple(query_shape[ax] for ax in attention_axis)
  out_shape += tuple(key_shape[ax] for ax in attention_axis)

  q_mask = padding_mask_query[..., None]
  k_mask = padding_mask_key[..., None]
  # Reverse every axis but the batch axis so the key mask becomes the second
  # operand of an outer (or equality) product.
  flip_perm = (0,) + tuple(np.flip(np.arange(k_mask.ndim)))[:-1]
  combine = jnp.equal if segmentation_mask else jnp.multiply
  mask = combine(q_mask, k_mask.transpose(flip_perm))
  mask = mask.reshape(out_shape)
  return jax.lax.convert_element_type(mask, jnp.float32)
def _make_causal_mask(key, attention_axis=None, self_mask=False):
"""Makes a causal mask, to be used for masking out the future for attention.
In case of 1d inputs (i.e., `[bs, len, features]`, the attention weights will
be `[bs, len, len]` and this function makes a square matrix [len, len] with
zeros in upper triangle and ones in lower triangle.
Args:
key: shape of the key, which is equal to the shape of value and is
assumed to be equal to the shape of the query (since this is used in
self-attention when decoding).
attention_axis: axis over which attention is applied.
self_mask: if mask out the diagonal or not.
Returns:
A causal mask to be used to mask out future positions.
"""
if attention_axis is None:
attention_axis = tuple(range(1, key.ndim - 2))
assert isinstance(attention_axis, tuple)
for ax in attention_axis:
if not (key.ndim >= 3 and 1 <= ax < key.ndim - 2):
raise ValueError(
'Attention axis must be between the batch axis and the last-two axes.'
)
mask_shape = tuple([1] * (key.ndim - len(attention_axis) - 1))
mask_shape_final = mask_shape
for _ in range(2):
flatten_dim = 1
for ax in attention_axis:
mask_shape_final += (key.shape[ax],)
flatten_dim *= key.shape[ax]
mask_shape += (flatten_dim,)
def tri(n, m, k=0):
# Tie in the key to avoid the mask becoming a constant.
# This way XLA can construct the mask during computation and fuse it
# with the attention ops.
x = jnp.arange(n, dtype=jnp.int32)
y = jnp.arange(m, dtype=jnp.int32)
mask = lax.ge(
(lax.broadcast_in_dim(x, shape=(n, m), broadcast_dimensions=(0,))) + k,
lax.broadcast(y, [n]),
)
return mask
k = -1 if self_mask else 0
mask = tri(*mask_shape[-2:], k=k).reshape(mask_shape_final)
return mask
# Default kernel initializer for dense_general: LeCun normal (variance scaled
# by fan-in).
default_kernel_init = initializers.lecun_normal()
def dense_general(
    scope,
    inputs,
    features,
    axis=-1,
    batch_dims=(),
    bias=True,
    dtype=jnp.float32,
    kernel_init=default_kernel_init,
    bias_init=initializers.zeros_init(),
    precision=None,
):
  """Applies a linear transformation to the inputs along multiple dimensions.

  Args:
    scope: the scope used to create the 'kernel' (and optional 'bias') params.
    inputs: The nd-array to be transformed.
    features: tuple with numbers of output features.
    axis: tuple with axes to apply the transformation on.
    batch_dims: tuple with batch axes.
    bias: whether to add a bias to the output (default: True).
    dtype: the dtype of the computation (default: float32).
    kernel_init: initializer function for the weight matrix.
    bias_init: initializer function for the bias.
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.

  Returns:
    The transformed input.

  Raises:
    ValueError: if ``batch_dims`` are not consecutive leading dimensions
      starting from 0.
  """
  inputs = jnp.asarray(inputs, dtype)

  # Normalize scalar arguments to tuples so the rest of the code can treat
  # features/axis/batch_dims uniformly.
  if not isinstance(features, Iterable):
    features = (features,)
  if not isinstance(axis, Iterable):
    axis = (axis,)
  if not isinstance(batch_dims, Iterable):
    batch_dims = (batch_dims,)
  features, axis, batch_dims = tuple(features), tuple(axis), tuple(batch_dims)

  if batch_dims:
    max_dim = np.max(batch_dims)
    if set(batch_dims) != set(range(max_dim + 1)):
      raise ValueError(
          'batch_dims %s must be consecutive leading '
          'dimensions starting from 0.' % str(batch_dims)
      )

  ndim = inputs.ndim
  n_batch_dims = len(batch_dims)
  # _normalize_axes presumably resolves negative axes to positive indices —
  # it is defined elsewhere in this module (not visible here); confirm.
  axis = _normalize_axes(axis, ndim)
  batch_dims = _normalize_axes(batch_dims, ndim)
  n_axis, n_features = len(axis), len(features)

  def kernel_init_wrap(rng, shape, dtype=jnp.float32):
    # Initialize one flattened (in x out) 2D kernel per batch element, then
    # reshape back to the full batched kernel shape.
    size_batch_dims = np.prod(shape[:n_batch_dims], dtype=np.int32)
    flat_shape = (
        np.prod(shape[n_batch_dims : n_axis + n_batch_dims]),
        np.prod(shape[-n_features:]),
    )
    kernel = jnp.concatenate(
        [kernel_init(rng, flat_shape, dtype) for _ in range(size_batch_dims)],
        axis=0,
    )
    return jnp.reshape(kernel, shape)

  batch_shape = tuple(inputs.shape[ax] for ax in batch_dims)
  kernel_shape = tuple(inputs.shape[ax] for ax in axis) + features
  kernel = scope.param('kernel', kernel_init_wrap, batch_shape + kernel_shape)
  kernel = jnp.asarray(kernel, dtype)

  # Contract inputs and kernel over the transformation axes, batching over
  # the (leading) batch dims.
  batch_ind = tuple(range(n_batch_dims))
  contract_ind = tuple(range(n_batch_dims, n_axis + n_batch_dims))
  out = lax.dot_general(
      inputs,
      kernel,
      ((axis, contract_ind), (batch_dims, batch_ind)),
      precision=precision,
  )
  if bias:

    def bias_init_wrap(rng, shape, dtype=jnp.float32):
      # Same per-batch-element initialization trick as the kernel.
      size_batch_dims = np.prod(shape[:n_batch_dims], dtype=np.int32)
      flat_shape = (np.prod(shape[-n_features:]),)
      bias = jnp.concatenate(
          [bias_init(rng, flat_shape, dtype) for _ in range(size_batch_dims)],
          axis=0,
      )
      return jnp.reshape(bias, shape)

    # NOTE(review): from here on the local ``bias`` (an array) shadows the
    # ``bias`` (bool) parameter.
    bias = scope.param('bias', bias_init_wrap, batch_shape + features)

    # Reshape bias for broadcast.
    expand_dims = sorted(set(range(inputs.ndim)) - set(axis) - set(batch_dims))
    for ax in expand_dims:
      bias = jnp.expand_dims(bias, ax)
    bias = jnp.asarray(bias, dtype)
    out = out + bias
  return out
The provided code snippet includes necessary dependencies for implementing the `multi_head_dot_product_attention` function. Write a Python function `def multi_head_dot_product_attention( scope: Scope, inputs_q, inputs_kv, num_heads, dtype=jnp.float32, qkv_features=None, out_features=None, attention_axis=None, causal_mask=False, padding_mask=None, key_padding_mask=None, segmentation=None, key_segmentation=None, cache=False, broadcast_dropout=True, dropout_rng=None, dropout_rate=0.0, deterministic=False, precision=None, kernel_init=default_kernel_init, bias_init=initializers.zeros_init(), bias=True, attention_fn=dot_product_attention, )` to solve the following problem:
Applies multi-head dot product attention on the input data. Projects the inputs into multi-headed query, key, and value vectors, applies dot-product attention and project the results to an output vector. This can be used for encoder-decoder attention by specifying both `inputs_q` and `inputs_kv` orfor self-attention by only specifying `inputs_q` and setting `inputs_kv` to None. Args: inputs_q: input queries of shape `[bs, dim1, dim2, ..., dimN, features]`. inputs_kv: key/values of shape `[bs, dim1, dim2, ..., dimN, features]` or None for self-attention, inn which case key/values will be derived from inputs_q. num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1]) should be divisible by the number of heads. dtype: the dtype of the computation (default: float32) qkv_features: dimension of the key, query, and value. out_features: dimension of the last projection attention_axis: axes over which the attention is applied ( 'None' means attention over all axes, but batch, heads, and features). causal_mask: boolean specifying whether to apply a causal mask on the attention weights. If True, the output at timestep `t` will not depend on inputs at timesteps strictly greater than `t`. padding_mask: boolean specifying query tokens that are pad token. key_padding_mask: boolean specifying key-value tokens that are pad token. segmentation: segment indices for packed inputs_q data. key_segmentation: segment indices for packed inputs_kv data. cache: an instance of `flax.deprecated.nn.attention.Cache` used for efficient autoregressive decoding. broadcast_dropout: bool: use a broadcasted dropout along batch dims. dropout_rng: JAX PRNGKey: to be used for dropout dropout_rate: dropout rate deterministic: bool, deterministic or not (to apply dropout) precision: numerical precision of the computation see `jax.lax.Precision` for details. kernel_init: initializer for the kernel of the Dense layers. bias_init: initializer for the bias of the Dense layers. 
bias: bool: whether pointwise QKVO dense transforms use bias. attention_fn: dot_product_attention or compatible function. Accepts query, key, value, and returns output of shape `[bs, dim1, dim2, ..., dimN, num_heads, value_channels]`. Returns: output of shape `[bs, dim1, dim2, ..., dimN, features]`.
Here is the function:
def multi_head_dot_product_attention(
    scope: Scope,
    inputs_q,
    inputs_kv,
    num_heads,
    dtype=jnp.float32,
    qkv_features=None,
    out_features=None,
    attention_axis=None,
    causal_mask=False,
    padding_mask=None,
    key_padding_mask=None,
    segmentation=None,
    key_segmentation=None,
    cache=False,
    broadcast_dropout=True,
    dropout_rng=None,
    dropout_rate=0.0,
    deterministic=False,
    precision=None,
    kernel_init=default_kernel_init,
    bias_init=initializers.zeros_init(),
    bias=True,
    attention_fn=dot_product_attention,
):
  """Applies multi-head dot product attention on the input data.

  Projects the inputs into multi-headed query, key, and value vectors,
  applies dot-product attention and project the results to an output vector.

  This can be used for encoder-decoder attention by specifying both `inputs_q`
  and `inputs_kv` or for self-attention by only specifying `inputs_q` and
  setting `inputs_kv` to None.

  Args:
    inputs_q: input queries of shape `[bs, dim1, dim2, ..., dimN, features]`.
    inputs_kv: key/values of shape `[bs, dim1, dim2, ..., dimN, features]`
      or None for self-attention, in which case key/values will be derived
      from inputs_q.
    num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
      should be divisible by the number of heads.
    dtype: the dtype of the computation (default: float32)
    qkv_features: dimension of the key, query, and value.
    out_features: dimension of the last projection
    attention_axis: axes over which the attention is applied ('None' means
      attention over all axes, but batch, heads, and features).
    causal_mask: boolean specifying whether to apply a causal mask on the
      attention weights. If True, the output at timestep `t` will not depend
      on inputs at timesteps strictly greater than `t`.
    padding_mask: boolean specifying query tokens that are pad token.
    key_padding_mask: boolean specifying key-value tokens that are pad token.
    segmentation: segment indices for packed inputs_q data.
    key_segmentation: segment indices for packed inputs_kv data.
    cache: an instance of `flax.deprecated.nn.attention.Cache` used for
      efficient autoregressive decoding.
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    dropout_rng: JAX PRNGKey: to be used for dropout
    dropout_rate: dropout rate
    deterministic: bool, deterministic or not (to apply dropout)
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    kernel_init: initializer for the kernel of the Dense layers.
    bias_init: initializer for the bias of the Dense layers.
    bias: bool: whether pointwise QKVO dense transforms use bias.
    attention_fn: dot_product_attention or compatible function. Accepts
      query, key, value, and returns output of shape
      `[bs, dim1, dim2, ..., dimN, num_heads, value_channels]`
  Returns:
    output of shape `[bs, dim1, dim2, ..., dimN, features]`.
  """
  # Caching only makes sense for single-step causal decoding.
  assert (
      causal_mask or not cache
  ), 'Caching is only support for causal attention.'

  if inputs_kv is None:
    inputs_kv = inputs_q
  if attention_axis is None:
    # Default: attend over every axis except batch (0) and features (-1).
    attention_axis = tuple(range(1, inputs_q.ndim - 1))

  features = out_features or inputs_q.shape[-1]
  qkv_features = qkv_features or inputs_q.shape[-1]

  assert (
      qkv_features % num_heads == 0
  ), 'Memory dimension must be divisible by number of heads.'
  head_dim = qkv_features // num_heads

  # Shared projection configuration reused for the Q, K and V heads.
  dense = functools.partial(
      dense_general,
      axis=-1,
      dtype=dtype,
      features=(num_heads, head_dim),
      kernel_init=kernel_init,
      bias_init=bias_init,
      bias=bias,
      precision=precision,
  )
  # project inputs_q to multi-headed q/k/v
  # dimensions are then [bs, dims..., n_heads, n_features_per_head]
  query = scope.child(dense, 'query')(inputs_q)
  key = scope.child(dense, 'key')(inputs_kv)
  value = scope.child(dense, 'value')(inputs_kv)

  if cache:
    # Either an initializer (first trace) or the stored entry (decode step).
    cache_entry: Union[Callable[[Any], CacheEntry], CacheEntry]
    if not scope.has_variable('cache', 'entry'):
      # First call: build an initializer that allocates zeroed key/value
      # buffers plus a uint32 step counter `i`.
      ndim, tail_shape = (key.ndim, key.shape[-2:])

      def init_fn(shape, dtype=jnp.float32):
        full_shape = shape + tail_shape
        if len(full_shape) != ndim:
          raise ValueError(
              'Shape should be a tuple with the shape of the batch'
              'and attention dims.'
          )
        return CacheEntry(
            key=jnp.zeros(full_shape, dtype),
            value=jnp.zeros(full_shape, dtype),
            i=jnp.zeros((), jnp.uint32),
        )

      cache_entry = init_fn
    else:
      cache_entry = scope.get_variable('cache', 'entry')
      if not isinstance(cache_entry, CacheEntry):
        raise ValueError('Cache is not initialized.')

      # During decoding the query must be a single position along every
      # attention axis.
      expected_shape = list(cache_entry.key.shape[:-2])
      for attn_dim in attention_axis:
        expected_shape[attn_dim] = 1
      expected_shape = tuple(expected_shape) + inputs_q.shape[-1:]
      if expected_shape != inputs_q.shape:
        raise ValueError(
            'Invalid shape provided, expected shape %s instead got %s.'
            % (expected_shape, inputs_q.shape)
        )

      # Unflatten the scalar step counter `i` into one index per attention
      # axis (row-major order over the attention axes).
      cshape = cache_entry.key.shape
      indices = [0] * len(cshape)
      i = cache_entry.i
      attn_size = np.prod(np.take(cshape, attention_axis))
      for attn_dim in attention_axis:
        attn_size //= cshape[attn_dim]
        indices[attn_dim] = i // attn_size
        i = i % attn_size

      # Write the new key/value at the current decode position and bump `i`.
      key = lax.dynamic_update_slice(cache_entry.key, key, indices)  # type: ignore
      value = lax.dynamic_update_slice(cache_entry.value, value, indices)  # type: ignore
      one = jnp.array(1, jnp.uint32)
      cache_entry = cache_entry.replace(
          i=cache_entry.i + one, key=key, value=value
      )

      # TODO(levskaya): verify this is still needed in translation decoding.
      key_padding_mask = jnp.broadcast_to(
          (jnp.arange(cshape[1]) < cache_entry.i), cshape[:2]
      )
      key_padding_mask = key_padding_mask.astype(jnp.float32)[..., None]
    scope.put_variable('cache', 'entry', cache_entry)

  # create attention masks
  mask_components = []

  if causal_mask:
    if cache and isinstance(cache_entry, CacheEntry):
      # Decoding path: only positions strictly before the current decode
      # step are attendable.
      bias_pre_shape = (1,) * (key.ndim - 1)
      attn_shape = tuple(np.take(key.shape, attention_axis))
      attn_size = np.prod(attn_shape)
      ii = jnp.arange(attn_size, dtype=jnp.uint32)
      mask = ii < cache_entry.i
      mask_components.append(mask.reshape(bias_pre_shape + attn_shape))
    else:
      mask_components.append(_make_causal_mask(key, attention_axis))

  if padding_mask is not None:
    if key_padding_mask is None:
      key_padding_mask = padding_mask
    padding_mask = make_padding_mask(
        padding_mask_query=padding_mask,
        padding_mask_key=key_padding_mask,
        query_shape=query.shape,
        key_shape=key.shape,
        attention_axis=attention_axis,
    )
    mask_components.append(padding_mask)

  if segmentation is not None:
    if key_segmentation is None:
      key_segmentation = segmentation
    segmentation_mask = make_padding_mask(
        padding_mask_query=segmentation,
        padding_mask_key=key_segmentation,
        query_shape=query.shape,
        key_shape=key.shape,
        attention_axis=attention_axis,
        segmentation_mask=True,
    )
    mask_components.append(segmentation_mask)

  if mask_components:
    # AND all masks together, then convert the boolean mask into an additive
    # bias: 0 where attention is allowed, a large negative value where not.
    attention_mask = mask_components[0]
    for component in mask_components[1:]:
      attention_mask = jnp.logical_and(attention_mask, component)

    # attention mask in the form of attention bias
    attention_bias = lax.select(
        attention_mask > 0,
        jnp.full(attention_mask.shape, 0.0).astype(dtype),
        jnp.full(attention_mask.shape, -1e10).astype(dtype),
    )
  else:
    attention_bias = None

  # apply attention
  x = scope.child(attention_fn)(
      query,
      key,
      value,
      dtype=dtype,
      axis=attention_axis,
      bias=attention_bias,
      precision=precision,
      dropout_rng=dropout_rng,
      dropout_rate=dropout_rate,
      broadcast_dropout=broadcast_dropout,
      deterministic=deterministic,
  )

  # back to the original inputs dimensions
  out = scope.child(dense_general, name='out')(
      x,
      features=features,
      axis=(-2, -1),
      kernel_init=kernel_init,
      bias_init=bias_init,
      bias=bias,
      dtype=dtype,
      precision=precision,
  )
return out | Applies multi-head dot product attention on the input data. Projects the inputs into multi-headed query, key, and value vectors, applies dot-product attention and project the results to an output vector. This can be used for encoder-decoder attention by specifying both `inputs_q` and `inputs_kv` orfor self-attention by only specifying `inputs_q` and setting `inputs_kv` to None. Args: inputs_q: input queries of shape `[bs, dim1, dim2, ..., dimN, features]`. inputs_kv: key/values of shape `[bs, dim1, dim2, ..., dimN, features]` or None for self-attention, inn which case key/values will be derived from inputs_q. num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1]) should be divisible by the number of heads. dtype: the dtype of the computation (default: float32) qkv_features: dimension of the key, query, and value. out_features: dimension of the last projection attention_axis: axes over which the attention is applied ( 'None' means attention over all axes, but batch, heads, and features). causal_mask: boolean specifying whether to apply a causal mask on the attention weights. If True, the output at timestep `t` will not depend on inputs at timesteps strictly greater than `t`. padding_mask: boolean specifying query tokens that are pad token. key_padding_mask: boolean specifying key-value tokens that are pad token. segmentation: segment indices for packed inputs_q data. key_segmentation: segment indices for packed inputs_kv data. cache: an instance of `flax.deprecated.nn.attention.Cache` used for efficient autoregressive decoding. broadcast_dropout: bool: use a broadcasted dropout along batch dims. dropout_rng: JAX PRNGKey: to be used for dropout dropout_rate: dropout rate deterministic: bool, deterministic or not (to apply dropout) precision: numerical precision of the computation see `jax.lax.Precision` for details. kernel_init: initializer for the kernel of the Dense layers. bias_init: initializer for the bias of the Dense layers. 
bias: bool: whether pointwise QKVO dense transforms use bias. attention_fn: dot_product_attention or compatible function. Accepts query, key, value, and returns output of shape `[bs, dim1, dim2, ..., dimN,, num_heads, value_channels]`` Returns: output of shape `[bs, dim1, dim2, ..., dimN, features]`. |
22,666 | import jax.numpy as jnp
from jax import lax
from flax.core import Scope
from flax.linen import initializers
def _absolute_dims(ndim, dims):
def batch_norm(
    scope: Scope,
    x,
    use_running_average=False,
    axis=-1,
    momentum=0.99,
    epsilon=1e-5,
    dtype=jnp.float32,
    bias=True,
    scale=True,
    bias_init=initializers.zeros_init(),
    scale_init=initializers.ones_init(),
    axis_name=None,
    axis_index_groups=None,
    kind='batch_stats',
):
  """Normalizes ``x`` over all axes except the feature axis/axes ``axis``.

  Batch statistics are computed in float32. When ``use_running_average`` is
  True the stored running statistics (from the ``kind`` variable collection)
  are used instead; otherwise batch statistics are computed and, after
  initialization, the running averages are updated in place with momentum
  ``momentum``. If ``axis_name`` is given, statistics are averaged across
  devices with ``lax.pmean``.

  NOTE(review): the final cast of the result to ``dtype`` happens in the
  ``return`` statement that follows this block.
  """
  # Always accumulate statistics in float32 for numerical stability.
  x = jnp.asarray(x, jnp.float32)
  axis = axis if isinstance(axis, tuple) else (axis,)
  axis = _absolute_dims(x.ndim, axis)
  # Reduce over every axis that is not a feature axis.
  redux = tuple(i for i in range(x.ndim) if i not in axis)

  def pmean(x):
    # Mean over the reduction axes, optionally synchronized across devices.
    m = jnp.mean(x, redux, keepdims=True)
    if axis_name is not None:
      m = lax.pmean(m, axis_name=axis_name, axis_index_groups=axis_index_groups)
    return m

  mean = pmean(x)
  squeeze_shape = jnp.squeeze(mean).shape
  mean2 = pmean(jnp.square(x))
  # Var = E[x^2] - E[x]^2.
  var = mean2 - jnp.square(mean)

  is_init = not scope.has_variable(kind, 'mean')
  ra_mean = scope.variable(kind, 'mean', jnp.zeros, squeeze_shape)
  ra_var = scope.variable(kind, 'var', jnp.ones, squeeze_shape)

  if use_running_average:
    # if ra_mean is not None:
    #   raise ValueError('batch_stats should be provided if use_running_averages=True')
    mean = jnp.reshape(ra_mean.value, mean.shape)
    var = jnp.reshape(ra_var.value, var.shape)
  else:
    if not is_init:
      # Exponential moving average update of the running statistics.
      beta = 1.0 - momentum
      ra_mean.value += beta * (jnp.squeeze(mean) - ra_mean.value)
      ra_var.value += beta * (jnp.squeeze(var) - ra_var.value)

  y = x - mean
  mul = lax.rsqrt(var + epsilon)
  if scale:
    # Learned per-feature scale (gamma), broadcast back to the mean's shape.
    mul = mul * scope.param('scale', scale_init, squeeze_shape).reshape(
        mean.shape
    )
  y = y * mul
  if bias:
    # Learned per-feature bias (beta).
    y = y + scope.param('bias', bias_init, squeeze_shape).reshape(mean.shape)
return jnp.asarray(y, dtype) | null |
22,667 | import jax.numpy as jnp
from jax import lax
from flax.core import Scope
from flax.linen import initializers
The provided code snippet includes necessary dependencies for implementing the `layer_norm` function. Write a Python function `def layer_norm( scope: Scope, x, epsilon=1e-6, dtype=jnp.float32, bias=True, scale=True, bias_init=initializers.zeros_init(), scale_init=initializers.ones_init(), )` to solve the following problem:
Applies layer normalization on the input. It normalizes the activations of the layer for each given example in a batch independently, rather than across a batch like Batch Normalization. i.e. applies a transformation that maintains the mean activation within each example close to 0 and the activation standard deviation close to 1. Args: x: the inputs epsilon: A small float added to variance to avoid dividing by zero. dtype: the dtype of the computation (default: float32). bias: If True, bias (beta) is added. scale: If True, multiply by scale (gamma). When the next layer is linear (also e.g. nn.relu), this can be disabled since the scaling will be done by the next layer. bias_init: Initializer for bias, by default, zero. scale_init: Initializer for scale, by default, one. Returns: Normalized inputs (the same shape as inputs).
Here is the function:
def layer_norm(
    scope: Scope,
    x,
    epsilon=1e-6,
    dtype=jnp.float32,
    bias=True,
    scale=True,
    bias_init=initializers.zeros_init(),
    scale_init=initializers.ones_init(),
):
  """Applies layer normalization on the input.

  It normalizes the activations of the layer for each given example in a
  batch independently, rather than across a batch like Batch Normalization.
  i.e. applies a transformation that maintains the mean activation within
  each example close to 0 and the activation standard deviation close to 1.

  Args:
    x: the inputs
    epsilon: A small float added to variance to avoid dividing by zero.
    dtype: the dtype of the computation (default: float32).
    bias: If True, bias (beta) is added.
    scale: If True, multiply by scale (gamma). When the next layer is linear
      (also e.g. nn.relu), this can be disabled since the scaling will be done
      by the next layer.
    bias_init: Initializer for bias, by default, zero.
    scale_init: Initializer for scale, by default, one.
  Returns:
    Normalized inputs (the same shape as inputs).
  """
  features = x.shape[-1]
  # Per-example statistics over the feature (last) axis only.
  mean = jnp.mean(x, axis=-1, keepdims=True)
  mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True)
  # Var = E[x^2] - E[x]^2.
  var = mean2 - lax.square(mean)
  mul = lax.rsqrt(var + epsilon)
  if scale:
    # Learned per-feature scale (gamma), cast to the computation dtype.
    mul = mul * jnp.asarray(
        scope.param('scale', scale_init, (features,)), dtype
    )
  y = (x - mean) * mul
  if bias:
    # Learned per-feature bias (beta).
    y = y + jnp.asarray(scope.param('bias', bias_init, (features,)), dtype)
return y | Applies layer normalization on the input. It normalizes the activations of the layer for each given example in a batch independently, rather than across a batch like Batch Normalization. i.e. applies a transformation that maintains the mean activation within each example close to 0 and the activation standard deviation close to 1. Args: x: the inputs epsilon: A small float added to variance to avoid dividing by zero. dtype: the dtype of the computation (default: float32). bias: If True, bias (beta) is added. scale: If True, multiply by scale (gamma). When the next layer is linear (also e.g. nn.relu), this can be disabled since the scaling will be done by the next layer. bias_init: Initializer for bias, by default, zero. scale_init: Initializer for scale, by default, one. Returns: Normalized inputs (the same shape as inputs). |
22,668 | import jax.numpy as jnp
from jax import lax
from flax.core import Scope
from flax.linen import initializers
The provided code snippet includes necessary dependencies for implementing the `group_norm` function. Write a Python function `def group_norm( scope, x, num_groups=32, group_size=None, epsilon=1e-6, dtype=jnp.float32, bias=True, scale=True, bias_init=initializers.zeros_init(), scale_init=initializers.ones_init(), )` to solve the following problem:
Applies group normalization to the input (arxiv.org/abs/1803.08494). This op is similar to batch normalization, but statistics are shared across equally-sized groups of channels and not shared across batch dimension. Thus, group normalization does not depend on the batch composition and does not require maintaining internal state for storing statistics. The user should either specify the total number of channel groups or the number of channels per group. Args: x: the input of shape N...C, where N is a batch dimension and C is a channels dimensions. `...` represents an arbitrary number of extra dimensions that are used to accumulate statistics over. num_groups: the total number of channel groups. The default value of 32 is proposed by the original group normalization paper. group_size: the number of channels in a group. epsilon: A small float added to variance to avoid dividing by zero. dtype: the dtype of the computation (default: float32). bias: If True, bias (beta) is added. scale: If True, multiply by scale (gamma). When the next layer is linear (also e.g. nn.relu), this can be disabled since the scaling will be done by the next layer. bias_init: Initializer for bias, by default, zero. scale_init: Initializer for scale, by default, one. Returns: Normalized inputs (the same shape as inputs).
Here is the function:
def group_norm(
    scope,
    x,
    num_groups=32,
    group_size=None,
    epsilon=1e-6,
    dtype=jnp.float32,
    bias=True,
    scale=True,
    bias_init=initializers.zeros_init(),
    scale_init=initializers.ones_init(),
):
  """Applies group normalization to the input (arxiv.org/abs/1803.08494).

  This op is similar to batch normalization, but statistics are shared across
  equally-sized groups of channels and not shared across batch dimension.
  Thus, group normalization does not depend on the batch composition and does
  not require maintaining internal state for storing statistics.

  The user should either specify the total number of channel groups or the
  number of channels per group (note that ``num_groups`` defaults to 32, so
  pass ``num_groups=None`` explicitly when using ``group_size``).

  Args:
    x: the input of shape N...C, where N is a batch dimension and C is a
      channels dimensions. `...` represents an arbitrary number of extra
      dimensions that are used to accumulate statistics over.
    num_groups: the total number of channel groups. The default value of 32 is
      proposed by the original group normalization paper.
    group_size: the number of channels in a group.
    epsilon: A small float added to variance to avoid dividing by zero.
    dtype: the dtype of the computation (default: float32).
    bias: If True, bias (beta) is added.
    scale: If True, multiply by scale (gamma). When the next layer is linear
      (also e.g. nn.relu), this can be disabled since the scaling will be done
      by the next layer.
    bias_init: Initializer for bias, by default, zero.
    scale_init: Initializer for scale, by default, one.
  Returns:
    Normalized inputs (the same shape as inputs).
  """
  # Statistics are computed in float32 regardless of the input dtype.
  x = jnp.asarray(x, jnp.float32)
  if (num_groups is None and group_size is None) or (
      num_groups is not None and group_size is not None
  ):
    raise ValueError(
        'Either `num_groups` or `group_size` should be '
        'specified, but not both of them.'
    )

  if group_size is not None:
    channels = x.shape[-1]
    if channels % group_size != 0:
      raise ValueError(
          'Number of channels ({}) is not multiple of the '
          'group size ({}).'.format(channels, group_size)
      )
    num_groups = channels // group_size

  input_shape = x.shape
  # Split the channel axis into (groups, channels_per_group).
  group_shape = x.shape[:-1] + (num_groups, x.shape[-1] // num_groups)
  x = x.reshape(group_shape)

  # Reduce over all spatial axes and the channels-per-group axis, keeping
  # the batch (0) and group (-2) axes separate.
  reduction_axis = list(range(1, x.ndim - 2)) + [x.ndim - 1]
  mean = jnp.mean(x, axis=reduction_axis, keepdims=True)
  mean_of_squares = jnp.mean(jnp.square(x), axis=reduction_axis, keepdims=True)
  # Var = E[x^2] - E[x]^2.
  var = mean_of_squares - jnp.square(mean)
  x = (x - mean) * lax.rsqrt(var + epsilon)
  x = x.reshape(input_shape)

  # Per-channel affine parameters, broadcast over all leading axes.
  feature_shape = tuple([1 for d in input_shape[:-1]] + [input_shape[-1]])
  if scale:
    x = x * scope.param('scale', scale_init, feature_shape)
  if bias:
    x = x + scope.param('bias', bias_init, feature_shape)
return x.astype(dtype) | Applies group normalization to the input (arxiv.org/abs/1803.08494). This op is similar to batch normalization, but statistics are shared across equally-sized groups of channels and not shared across batch dimension. Thus, group normalization does not depend on the batch composition and does not require maintaining internal state for storing statistics. The user should either specify the total number of channel groups or the number of channels per group. Args: x: the input of shape N...C, where N is a batch dimension and C is a channels dimensions. `...` represents an arbitrary number of extra dimensions that are used to accumulate statistics over. num_groups: the total number of channel groups. The default value of 32 is proposed by the original group normalization paper. group_size: the number of channels in a group. epsilon: A small float added to variance to avoid dividing by zero. dtype: the dtype of the computation (default: float32). bias: If True, bias (beta) is added. scale: If True, multiply by scale (gamma). When the next layer is linear (also e.g. nn.relu), this can be disabled since the scaling will be done by the next layer. bias_init: Initializer for bias, by default, zero. scale_init: Initializer for scale, by default, one. Returns: Normalized inputs (the same shape as inputs). |
22,669 | import jax.numpy as jnp
from jax import lax, random
The provided code snippet includes necessary dependencies for implementing the `dropout` function. Write a Python function `def dropout(scope, inputs, rate, deterministic=False, rng=None)` to solve the following problem:
Applies a random dropout mask to the input. Args: inputs: the inputs that should be randomly masked. rate: the probability of masking out a value. deterministic: if false the inputs are scaled by `1 / (1 - rate)` and masked, whereas if true, no mask is applied and the inputs are returned as is. rng: an optional `jax.random.PRNGKey`. By default `nn.make_rng()` will be used. Returns: The masked inputs.
Here is the function:
def dropout(scope, inputs, rate, deterministic=False, rng=None):
  """Applies a random dropout mask to the input.

  Args:
    inputs: the inputs that should be randomly masked.
    rate: the probability of masking out a value.
    deterministic: if false the inputs are scaled by `1 / (1 - rate)` and
      masked, whereas if true, no mask is applied and the inputs are returned
      as is.
    rng: an optional `jax.random.PRNGKey`. By default `nn.make_rng()` will
      be used.
  Returns:
    The masked inputs.
  """
  if rate == 0.0:
    return inputs
  keep_prob = 1.0 - rate
  if deterministic:
    return inputs
  else:
    if rng is None:
      rng = scope.make_rng('dropout')
    # Bernoulli keep-mask; kept values are rescaled by 1/keep_prob in the
    # return below so the expected activation is unchanged (inverted dropout).
    mask = random.bernoulli(rng, p=keep_prob, shape=inputs.shape)
return lax.select(mask, inputs / keep_prob, jnp.zeros_like(inputs)) | Applies a random dropout mask to the input. Args: inputs: the inputs that should be randomly masked. rate: the probablity of masking out a value. deterministic: if false the inputs are scaled by `1 / (1 - rate)` and masked, whereas if true, no mask is applied and the inputs are returned as is. rng: an optional `jax.random.PRNGKey`. By default `nn.make_rng()` will be used. Returns: The masked inputs. |
22,670 | from collections.abc import Iterable
import jax.numpy as jnp
import numpy as np
from jax import lax
from flax import struct
from flax.core import Scope
from flax.linen import initializers
default_kernel_init = initializers.lecun_normal()
def _conv_dimension_numbers(input_shape):
"""Computes the dimension numbers based on the input shape."""
ndim = len(input_shape)
lhs_spec = (0, ndim - 1) + tuple(range(1, ndim - 1))
rhs_spec = (ndim - 1, ndim - 2) + tuple(range(0, ndim - 2))
out_spec = lhs_spec
return lax.ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
The provided code snippet includes necessary dependencies for implementing the `conv` function. Write a Python function `def conv( scope, inputs, features, kernel_size, strides=None, padding='SAME', input_dilation=None, kernel_dilation=None, feature_group_count=1, bias=True, dtype=jnp.float32, precision=None, kernel_init=default_kernel_init, bias_init=initializers.zeros_init(), )` to solve the following problem:
Applies a convolution to the inputs. Args: inputs: input data with dimensions (batch, spatial_dims..., features). features: number of convolution filters. kernel_size: shape of the convolutional kernel. strides: a sequence of `n` integers, representing the inter-window strides. padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of `n` `(low, high)` integer pairs that give the padding to apply before and after each spatial dimension. input_dilation: `None`, or a sequence of `n` integers, giving the dilation factor to apply in each spatial dimension of `inputs`. Convolution with input dilation `d` is equivalent to transposed convolution with stride `d`. kernel_dilation: `None`, or a sequence of `n` integers, giving the dilation factor to apply in each spatial dimension of the convolution kernel. Convolution with kernel dilation is also known as 'atrous convolution'. feature_group_count: integer, default 1. If specified divides the input features into groups. bias: whether to add a bias to the output (default: True). dtype: the dtype of the computation (default: float32). precision: numerical precision of the computation see `jax.lax.Precision` for details. kernel_init: initializer for the convolutional kernel. bias_init: initializer for the bias. Returns: The convolved data.
Here is the function:
def conv(
    scope,
    inputs,
    features,
    kernel_size,
    strides=None,
    padding='SAME',
    input_dilation=None,
    kernel_dilation=None,
    feature_group_count=1,
    bias=True,
    dtype=jnp.float32,
    precision=None,
    kernel_init=default_kernel_init,
    bias_init=initializers.zeros_init(),
):
  """Applies a convolution to the inputs.

  Args:
    inputs: input data with dimensions (batch, spatial_dims..., features).
    features: number of convolution filters.
    kernel_size: shape of the convolutional kernel.
    strides: a sequence of `n` integers, representing the inter-window
      strides.
    padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
      of `n` `(low, high)` integer pairs that give the padding to apply before
      and after each spatial dimension.
    input_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `inputs`.
      Convolution with input dilation `d` is equivalent to transposed
      convolution with stride `d`.
    kernel_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of the convolution
      kernel. Convolution with kernel dilation is also known as 'atrous
      convolution'.
    feature_group_count: integer, default 1. If specified divides the input
      features into groups.
    bias: whether to add a bias to the output (default: True).
    dtype: the dtype of the computation (default: float32).
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    kernel_init: initializer for the convolutional kernel.
    bias_init: initializer for the bias.
  Returns:
    The convolved data.
  """
  inputs = jnp.asarray(inputs, dtype)
  if strides is None:
    # Default to unit stride along every spatial dimension.
    strides = (1,) * (inputs.ndim - 2)

  in_features = inputs.shape[-1]
  # Grouped convolution requires the input channels to split evenly.
  assert in_features % feature_group_count == 0
  # Kernel layout: (spatial..., in_features // groups, out_features).
  kernel_shape = kernel_size + (in_features // feature_group_count, features)
  kernel = scope.param('kernel', kernel_init, kernel_shape)
  kernel = jnp.asarray(kernel, dtype)

  dimension_numbers = _conv_dimension_numbers(inputs.shape)
  y = lax.conv_general_dilated(
      inputs,
      kernel,
      strides,
      padding,
      lhs_dilation=input_dilation,
      rhs_dilation=kernel_dilation,
      dimension_numbers=dimension_numbers,
      feature_group_count=feature_group_count,
      precision=precision,
  )

  if bias:
    bias = scope.param('bias', bias_init, (features,))
    bias = jnp.asarray(bias, dtype)
    # Broadcast the per-channel bias over batch and spatial dimensions.
    y += jnp.reshape(bias, (1,) * (y.ndim - 1) + (-1,))
return y | Applies a convolution to the inputs. Args: inputs: input data with dimensions (batch, spatial_dims..., features). features: number of convolution filters. kernel_size: shape of the convolutional kernel. strides: a sequence of `n` integers, representing the inter-window strides. padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of `n` `(low, high)` integer pairs that give the padding to apply before and after each spatial dimension. input_dilation: `None`, or a sequence of `n` integers, giving the dilation factor to apply in each spatial dimension of `inputs`. Convolution with input dilation `d` is equivalent to transposed convolution with stride `d`. kernel_dilation: `None`, or a sequence of `n` integers, giving the dilation factor to apply in each spatial dimension of the convolution kernel. Convolution with kernel dilation is also known as 'atrous convolution'. feature_group_count: integer, default 1. If specified divides the input features into groups. bias: whether to add a bias to the output (default: True). dtype: the dtype of the computation (default: float32). precision: numerical precision of the computation see `jax.lax.Precision` for details. kernel_init: initializer for the convolutional kernel. bias_init: initializer for the bias. Returns: The convolved data. |
22,671 | from collections.abc import Iterable
import jax.numpy as jnp
import numpy as np
from jax import lax
from flax import struct
from flax.core import Scope
from flax.linen import initializers
default_kernel_init = initializers.lecun_normal()
The provided code snippet includes necessary dependencies for implementing the `conv_transpose` function. Write a Python function `def conv_transpose( scope, inputs, features, kernel_size, strides=None, padding='SAME', kernel_dilation=None, bias=True, dtype=jnp.float32, precision=None, kernel_init=default_kernel_init, bias_init=initializers.zeros_init(), )` to solve the following problem:
Applies a transposed convolution to the inputs. Behaviour mirrors that of `jax.lax.conv_transpose`. Args: scope: functional scope. inputs: input data with dimensions (batch, spatial_dims..., features). features: number of convolution filters. kernel_size: shape of the convolutional kernel. strides: a sequence of `n` integers, representing the inter-window strides. padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of `n` `(low, high)` integer pairs that give the padding to apply before and after each spatial dimension. kernel_dilation: `None`, or a sequence of `n` integers, giving the dilation factor to apply in each spatial dimension of the convolution kernel. Convolution with kernel dilation is also known as 'atrous convolution'. bias: whether to add a bias to the output (default: True). dtype: the dtype of the computation (default: float32). precision: numerical precision of the computation see `jax.lax.Precision` for details. kernel_init: initializer for the convolutional kernel. bias_init: initializer for the bias. Returns: The convolved data.
Here is the function:
def conv_transpose(
    scope,
    inputs,
    features,
    kernel_size,
    strides=None,
    padding='SAME',
    kernel_dilation=None,
    bias=True,
    dtype=jnp.float32,
    precision=None,
    kernel_init=default_kernel_init,
    bias_init=initializers.zeros_init(),
):
  """Applies a transposed convolution to the inputs. Behaviour mirrors that of
  `jax.lax.conv_transpose`.

  Args:
    scope: functional scope.
    inputs: input data with dimensions (batch, spatial_dims..., features).
    features: number of convolution filters.
    kernel_size: shape of the convolutional kernel.
    strides: a sequence of `n` integers, representing the inter-window
      strides.
    padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
      of `n` `(low, high)` integer pairs that give the padding to apply before
      and after each spatial dimension.
    kernel_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of the convolution
      kernel. Convolution with kernel dilation is also known as 'atrous
      convolution'.
    bias: whether to add a bias to the output (default: True).
    dtype: the dtype of the computation (default: float32).
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    kernel_init: initializer for the convolutional kernel.
    bias_init: initializer for the bias.
  Returns:
    The convolved data.
  """
  inputs = jnp.asarray(inputs, dtype)
  # Default to unit stride along every spatial dimension.
  strides = strides or (1,) * (inputs.ndim - 2)

  in_features = inputs.shape[-1]
  # Kernel layout: (spatial..., in_features, out_features).
  kernel_shape = kernel_size + (in_features, features)
  kernel = scope.param('kernel', kernel_init, kernel_shape)
  kernel = jnp.asarray(kernel, dtype)

  y = lax.conv_transpose(
      inputs,
      kernel,
      strides,
      padding,
      rhs_dilation=kernel_dilation,
      precision=precision,
  )

  if bias:
    bias = scope.param('bias', bias_init, (features,))
    bias = jnp.asarray(bias, dtype)
    # Broadcast the per-channel bias over batch and spatial dimensions.
    y += jnp.reshape(bias, (1,) * (y.ndim - 1) + (-1,))
return y | Applies a transposed convolution to the inputs. Behaviour mirrors that of `jax.lax.conv_transpose`. Args: scope: functional scope. inputs: input data with dimensions (batch, spatial_dims..., features). features: number of convolution filters. kernel_size: shape of the convolutional kernel. strides: a sequence of `n` integers, representing the inter-window strides. padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of `n` `(low, high)` integer pairs that give the padding to apply before and after each spatial dimension. kernel_dilation: `None`, or a sequence of `n` integers, giving the dilation factor to apply in each spatial dimension of the convolution kernel. Convolution with kernel dilation is also known as 'atrous convolution'. bias: whether to add a bias to the output (default: True). dtype: the dtype of the computation (default: float32). precision: numerical precision of the computation see `jax.lax.Precision` for details. kernel_init: initializer for the convolutional kernel. bias_init: initializer for the bias. Returns: The convolved data. |
22,672 | from collections.abc import Iterable
import jax.numpy as jnp
import numpy as np
from jax import lax
from flax import struct
from flax.core import Scope
from flax.linen import initializers
default_embed_init = initializers.variance_scaling(
1.0, 'fan_in', 'normal', out_axis=0
)
class Embedding:
  """An embedding table with lookup and attention helpers.

  Attributes:
    table: the embedding matrix of shape ``(num_embeddings, features)``.
  """

  table: np.ndarray

  def __init__(self, table: np.ndarray):
    """Wraps an existing embedding table.

    Args:
      table: embedding matrix of shape ``(num_embeddings, features)``.
    """
    # BUG FIX: the class declared the ``table`` field but defined no
    # constructor, so ``Embedding(table)`` (as used by the ``embedding``
    # factory in this file) raised a TypeError. NOTE(review): upstream this
    # class is likely decorated (e.g. ``@struct.dataclass``) to be a pytree;
    # confirm whether pytree registration is required by callers.
    self.table = table

  def lookup(self, indices):
    """Embeds the inputs along the last dimension.

    Args:
      indices: input data, all dimensions are considered batch dimensions.

    Returns:
      Output which is embedded input data. The output shape follows the input,
      with an additional `features` dimension appended.

    Raises:
      ValueError: if ``indices`` does not have an integer dtype.
    """
    if indices.dtype not in [jnp.int32, jnp.int64, jnp.uint32, jnp.uint64]:
      raise ValueError('Input type must be an integer or unsigned integer.')
    # Fancy indexing gathers one embedding row per index.
    return self.table[indices]

  def attend(self, query):
    """Attend over the embedding using a query array.

    Args:
      query: array with last dimension equal the feature depth `features` of the
        embedding.

    Returns:
      An array with final dim `num_embeddings` corresponding to the batched
      inner-product of the array of query vectors against each embedding.
      Commonly used for weight-sharing between embeddings and logit transform
      in NLP models.
    """
    return jnp.dot(query, self.table.T)
The provided code snippet includes necessary dependencies for implementing the `embedding` function. Write a Python function `def embedding( scope: Scope, num_embeddings: int, features: int, init_fn=default_embed_init ) -> Embedding` to solve the following problem:
Creates embedding dataclass. Args: num_embeddings: number of embeddings. features: Number of feature dimensions for each embedding. embedding_init: embedding initializer. Returns: Embedding dataclass with lookup and attend methods.
Here is the function:
def embedding(
    scope: Scope, num_embeddings: int, features: int, init_fn=default_embed_init
) -> Embedding:
  """Builds an :class:`Embedding` whose table lives in ``scope``.

  Args:
    scope: functional scope holding the parameter.
    num_embeddings: number of embeddings.
    features: Number of feature dimensions for each embedding.
    init_fn: initializer for the embedding table.

  Returns:
    Embedding dataclass with lookup and attend methods.
  """
  table_shape = (num_embeddings, features)
  table = scope.param('table', init_fn, table_shape)
  return Embedding(table)  # type: ignore
import collections
import functools
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import warnings
from flax import traceback_util
from flax.typing import (
In,
Out,
InOutAxis,
InOutScanAxis,
)
import jax
from jax import random
from . import axes_scan, meta
from .frozen_dict import freeze, unfreeze
from .scope import (
CollectionFilter,
DenyList, # pylint: disable=g-multiple-import
Filter,
PRNGSequenceFilter,
Scope,
group_collections,
in_filter,
intersect_filters,
is_filter_empty,
subtract_filters,
union_filters,
)
def map_variables(
    fn: Callable[..., Any],
    mapped_collections: CollectionFilter,
    map_in_fn: Callable[..., Any] = id_fn,
    map_out_fn: Callable[..., Any] = id_fn,
    init: bool = False,
    mutable: bool = False,
    rngs: PRNGSequenceFilter = True,
    variables: CollectionFilter = True,
) -> Callable[..., Any]:
  """Map Variables inside a scope.

  Args:
    fn: the function to be transformed.
    mapped_collections: the collection(s) to be transformed.
    map_in_fn: creates a view of the target variables.
    map_out_fn: transforms the updated variables in the view after mutation.
    init: If True, variables are initialized before transformation.
    mutable: If True, the mapped variable collections will be mutable.
    rngs: PRNGSequences added to the transformed scope (default: all).
    variables: Additional Variable collections added to the transformed scope.
      Besides those specified by `target` (default: all).

  Returns:
    A callable expecting a scope as the first argument.
  """
  # When init or mutable is set, the mapped collections flow back out of the
  # transform; otherwise they are read-only views.
  is_target_out = mutable or init

  def wrapper(scope_fn, repack, variable_groups, rng_groups, *args, **kwargs):
    # `target` are the collections being mapped; `variables` is everything
    # else that is passed through untouched.
    target, variables = variable_groups
    if init:
      # Run `fn` once to initialize the mapped collections before applying
      # the out-mapping, but only if some collection is actually mutable.
      scopes = scope_fn((target, variables), rng_groups)
      has_mutable_cols = any(
          not is_filter_empty(scope.mutable)
          for scope in jax.tree_util.tree_leaves(scopes)
      )
      if has_mutable_cols:
        fn(scopes, *args, **kwargs)
        target, _ = repack(scopes)
        target = tuple(map_out_fn(x) for x in target)
    # Build the mapped (viewed) variables that `fn` will observe.
    target = tuple(map_in_fn(unfreeze(x)) for x in target)
    mfilter = True
    if not is_target_out:
      # mapped collections should not be mutable
      # unless the mapping supports it (by init=True or mutable=True)
      mfilter = subtract_filters(mfilter, mapped_collections)
    scopes = scope_fn((target, variables), rng_groups, mutable_filter=mfilter)
    y = fn(scopes, *args, **kwargs)
    out_target, out_vars = repack(scopes)
    if is_target_out:
      # Undo the view so the stored variables keep their canonical form.
      out_target = tuple(map_out_fn(x) for x in out_target)
    return y, (out_target, out_vars)

  in_vars = (mapped_collections, variables)
  out_vars = (
      in_vars
      if is_target_out
      else (False, subtract_filters(variables, mapped_collections))
  )
  return pack(
      wrapper,
      in_vars,
      out_vars,
      (rngs,),
      enable_kwargs=True,
      name='map_variables',
  )
The provided code snippet includes necessary dependencies for implementing the `swap_collection` function. Write a Python function `def swap_collection(fn: Callable[..., Any], col_a: str, col_b: str)` to solve the following problem:
Swap two collections.
Here is the function:
def swap_collection(fn: Callable[..., Any], col_a: str, col_b: str):
  """Returns a version of ``fn`` with collections ``col_a`` and ``col_b`` swapped."""

  def _exchange(target):
    # A collection missing from `target` is treated as empty.
    contents_a = target.get(col_a, {})
    contents_b = target.get(col_b, {})
    target[col_b], target[col_a] = contents_a, contents_b
    return target

  # The same exchange is applied on the way in and on the way out, so the
  # mapping is its own inverse.
  return map_variables(fn, (col_a, col_b), _exchange, _exchange, mutable=True)
import collections
import contextlib
import functools
import warnings
from typing import (
    Any,
    Callable,
    Dict,
    Iterable,
    List,
    Literal,
    Mapping,
    Optional,
    Sequence,
    Set,
    Tuple,
    TypeVar,
    Union,
    cast,
    overload,
)

import jax
import jax.numpy as jnp
from jax import random

from flax import traceback_util
from flax.typing import (
    In,
    Out,
    InOutAxis,
    InOutScanAxis,
)

from . import axes_scan, meta
from .frozen_dict import freeze, unfreeze
from .scope import (
    CollectionFilter,
    DenyList,  # pylint: disable=g-multiple-import
    Filter,
    PRNGSequenceFilter,
    Scope,
    group_collections,
    in_filter,
    intersect_filters,
    is_filter_empty,
    subtract_filters,
    union_filters,
)
def pack(
    fn: Callable[..., Any],
    in_variable_filters: Sequence[CollectionFilter],
    out_variable_filters: Sequence[CollectionFilter],
    rng_filters: Sequence[PRNGSequenceFilter],
    name=None,
    enable_kwargs=False,
) -> Callable[..., Any]:
  """Packs variables and rngs for functional transformations.

  ``pack`` is the building block on top of which all other lifted
  transformations are implemented.

  Args:
    fn: The function to pack. `fn` has the signature
      `(scope_fn, repack_fn, variable_groups, rng_groups, *args) ->
      (output, packed_variables)`.
    in_variable_filters: Input variable filters.
    out_variable_filters: Output variable filters.
    rng_filters: RNG filters.
    name: The name of the packed scope.
    enable_kwargs: Whether to enable kwargs or not.

  Returns:
    A callable which expects a scope as the first argument.
  """

  def _invoke(scope_tree: Scope, *args, **kwargs):
    if kwargs and not enable_kwargs:
      msg = 'kwargs are not supported in {}, so "{}" is(are) ignored'
      warnings.warn(msg.format(name, ', '.join(kwargs.keys())), RuntimeWarning)
    packed = _partial_pack(
        scope_tree,
        in_variable_filters,
        out_variable_filters,
        rng_filters,
        name,
    )
    (
        scope_fn,
        repack_fn,
        variable_groups_xs_t,
        rng_groups_xs_t,
        publish_results_fn,
        invalidate_scopes_fn,
    ) = packed
    # Only forward kwargs when the transform opted in; otherwise they were
    # already reported as ignored above.
    forwarded_kwargs = kwargs if enable_kwargs else {}
    try:
      y, out_variable_groups_xs_t = fn(
          scope_fn,
          repack_fn,
          variable_groups_xs_t,
          rng_groups_xs_t,
          *args,
          **forwarded_kwargs,
      )
    finally:
      # Scopes handed to `fn` must never leak outside the transform.
      invalidate_scopes_fn()
    publish_results_fn(out_variable_groups_xs_t)
    return y

  return _invoke
# Aliases: collection filters and PRNG-sequence filters share the generic
# ``Filter`` representation; the distinct names document intent at use sites.
CollectionFilter = Filter
PRNGSequenceFilter = Filter
class Scope:
  """A Scope allows easy access to variables and manages RNGS of a neural network layer.

  Scopes are purely functional and encapsulated in
  :class:`flax.linen.module.Module`, so users writing neural network code
  usually generally do not interact with ``Scopes`` directly.

  See `core design tests
  <https://github.com/google/flax/tree/main/tests/core/design>`_
  for a number of examples using ``Scopes``.
  """

  # Maps reserved child/variable names to the collections they were reserved
  # in; ``None`` marks a reservation for a child scope (no collection).
  reservations: Dict[str, Set[Optional[str]]]

  def __init__(
      self,
      variables: MutableVariableDict,
      rngs: Optional[Union[RNGSequences, Dict[str, LazyRng]]] = None,
      name: Optional[str] = None,
      mutable: CollectionFilter = False,
      parent: Optional['Scope'] = None,
      path: Iterable[str] = (),
      debug_path: Iterable[str] = (),
      flags: Optional[Mapping] = None,
  ):
    """Initializes a Scope.

    Args:
      variables: VariableDict to initialize the Scope with.
      rngs: RNGs used in this scope or one of the child scopes.
      name: name of this scope.
      mutable: A CollectionFilter determining which variables are mutable.
      parent: The parent scope.
      path: The path in the variable tree from the root scope to this scope.
        It exactly matches the module path.
      debug_path: Similar to path but could contain transformation decorators.
      flags: internal flags.
    """
    rngs = {k: LazyRng.create(v) for k, v in rngs.items()} if rngs else {}
    self._variables = variables
    self.parent = parent
    self.name = name
    self.path = tuple(path)
    self.debug_path = tuple(debug_path) or self.path
    self.rngs = rngs
    self.mutable = mutable
    self.flags = freeze({} if flags is None else flags)
    self._root = parent.root if parent else None
    self.trace_level = tracers.trace_level(tracers.current_trace())
    self.rng_counters = {key: 0 for key in self.rngs}
    self.reservations = collections.defaultdict(set)
    self._invalid = False

  def __eq__(self, other: Any) -> bool:
    # If the root variable dict and path are the same, then two scopes behave
    # identically. Effectively, a scope is nothing more than a cursor into a
    # variable dict and an rng counter dict.
    if not isinstance(other, Scope):
      return False
    if self is other:
      return True
    return (
        self.root._variables is other.root._variables
        and self.path == other.path
        and self.rng_counters is other.rng_counters
    )

  def __hash__(self) -> int:
    # see __eq__
    return hash((id(self.root._variables), self.path, id(self.rng_counters)))

  # BUG FIX: ``root``, ``path_text`` and ``invalid`` are accessed as
  # attributes throughout this class (e.g. ``parent.root``,
  # ``self.root._variables``, ``self.path_text`` passed to error
  # constructors), so they must be properties; the decorators were missing.
  @property
  def root(self) -> 'Scope':
    return self._root or self

  @property
  def path_text(self) -> str:
    """Returns the debug path as a human readable string."""
    return '/' + '/'.join(self.debug_path)

  @property
  def invalid(self) -> bool:
    """Returns true if this scope is invalidated as a result of `Scope.temporary`."""
    return self._invalid

  def _check_valid(self):
    if self._invalid:
      raise errors.InvalidScopeError(self.name)

  # BUG FIX: ``temporary`` yields inside try/finally and is documented as
  # returning a context manager, so it needs @contextlib.contextmanager.
  @contextlib.contextmanager
  def temporary(self):
    """Returns a context manager that will invalidate this Scope when leaving the context."""
    try:
      yield self
    finally:
      self.invalidate()

  def invalidate(self):
    """Invalidates the Scope."""
    self._invalid = True

  def mutable_variables(self) -> Union[VariableDict, Dict[str, Any]]:
    """Returns an immutable copy of the mutable variables belonging to this Scope."""
    self._populate_collections()
    xs = {
        k: v for k, v in self._variables.items() if in_filter(self.mutable, k)
    }
    if config.flax_return_frozendict:
      return freeze(xs)
    return xs

  def variables(self) -> Union[VariableDict, Dict[str, Any]]:
    """Returns an immutable copy of the variables belonging to this Scope."""
    self._populate_collections()
    if config.flax_return_frozendict:
      return freeze(self._variables)
    return self._variables

  def _validate_trace_level(self):
    tracers.check_trace_level(self.trace_level)

  def rewound(self, rewind_rngs: bool = False) -> 'Scope':
    """Returns a rewound version of this Scope.

    Args:
      rewind_rngs: if true, reset the RNG counter of this scope.

    Returns:
      A rewound version of this scope, which means reservations are
      emptied, and the rng counter is optionally rewound.
    """
    self._check_valid()
    scope = Scope(
        self._variables,
        self.rngs,
        self.name,
        self.mutable,
        self.parent,
        path=self.path,
        debug_path=self.debug_path,
        flags=self.flags,
    )
    if not rewind_rngs:
      scope.rng_counters = self.rng_counters
    return scope

  def name_reserved(self, name: str, col: Optional[str] = None) -> bool:
    """Checks whether a name for a child Scope or Variable is taken.

    Args:
      name: the name to check for collision.
      col: if a variable, the collection used.
    """
    if name in self.reservations:
      # allow the same name for two variables in
      # different collections, otherwise raise error.
      if (
          None in self.reservations[name]
          or col is None
          or col in self.reservations[name]
      ):
        return True
    return False

  def reserve(self, name: str, col: Optional[str] = None):
    """Reserves a name for a child Scope or Variable.

    Throws an error if the name exists already.

    Args:
      name: the name to reserve.
      col: if a variable, the collection used.
    """
    if not isinstance(name, str):
      raise TypeError(
          'The type of scope "{name}" should be string but ' f'it is {type(name)}'
      )
    if self.name_reserved(name, col):
      raise ValueError(f'Duplicate use of scope name: "{name}"')
    self.reservations[name].add(col)

  def default_name(self, prefix: str) -> str:
    """Generates an unreserved name with the given prefix.

    Args:
      prefix: prefix to use for generating an unreserved name.

    Returns:
      The generated name.
    """
    i = 0
    while True:
      name = f'{prefix}{i}'
      if name not in self.reservations:
        return name
      i += 1

  def push(
      self, name: Optional[str] = None, prefix: str = '', reuse=False
  ) -> 'Scope':
    """Creates a child Scope.

    Args:
      name: optional name of the child.
      prefix: prefix used for generating the name if `name` is `None`.
      reuse: if True will return a pre-existing child scope with the given name
        instead of throwing an error.

    Returns:
      The child scope.
    """
    self._check_valid()
    self._validate_trace_level()
    if name is None:
      name = self.default_name(prefix)
    if not reuse or name not in self.reservations:
      self.reserve(name)
    rngs = {key: LazyRng.create(rng, name) for key, rng in self.rngs.items()}
    rng_key = (child_rng_token, name)
    if rng_key in self.rng_counters:
      rng_counters = self.rng_counters.get(rng_key)  # type: ignore
    else:
      rng_counters = {key: 0 for key in rngs}
      self.rng_counters[rng_key] = rng_counters  # type: ignore
    scope = Scope(
        {},
        name=name,
        rngs=rngs,
        parent=self,
        mutable=self.mutable,
        path=self.path + (name,),
        debug_path=self.debug_path + (name,),
        flags=self.flags,
    )
    scope.rng_counters = rng_counters
    return scope

  def child(
      self,
      fn: Callable[..., Any],
      name: Optional[str] = None,
      prefix: Optional[str] = None,
      named_call: bool = True,
      **partial_kwargs,
  ) -> Callable[..., Any]:
    """Partially applies a child scope to fn.

    When calling the returned function multiple times variables will be reused.

    Args:
      fn: the function to partially apply the child Scope to.
      name: optional name of the child.
      prefix: prefix used for generating name if it is `None`.
      named_call: if true, `fn` will be run under `jax.named_scope`. The XLA
        profiler will use this to name tag the computation.
      **partial_kwargs: additional kwargs partially applied to `fn`.

    Returns:
      The function with a partially applied scope.
    """
    if name is None:
      if prefix is None:
        prefix = fn.__name__ + '_' if hasattr(fn, '__name__') else ''
      name = self.default_name(prefix)
    scope = self.push(name)

    def wrapper(*args, **kwargs):
      kwargs = dict(partial_kwargs, **kwargs)
      if named_call:
        with jax.named_scope(name):
          res = fn(scope.rewound(), *args, **kwargs)
      else:
        res = fn(scope.rewound(), *args, **kwargs)
      return res

    return wrapper

  def is_mutable_collection(self, col: str) -> bool:
    """Returns true if the collection `col` is mutable."""
    return in_filter(self.mutable, col)

  def is_collection_empty(self, col: str) -> bool:
    """Returns true if the collection is empty."""
    if col in self.root._variables:  # pylint: disable=protected-access
      return not self.root._variables[col]  # pylint: disable=protected-access
    return True

  def _mutable_collection(self, col: str) -> MutableCollection:
    """Returns the collection `col` as a mutable object."""
    assert self.is_mutable_collection(col), f'Collection {col} is not mutable'
    # The actual variable dict is stored in the root scope only, and subscopes
    # hold references to subtrees relevant to them. This function ensures that
    # the collections are created in the top-level Scope and we return the
    # correct reference.
    if col not in self._variables:
      if not self.parent:
        # If this is the top-level Scope, just add an empty collection.
        self._variables[col] = {}
      else:
        assert self.name is not None  # Only top-level Scope have name None.
        # Populate the parent collections recursively and obtain a reference to
        # the direct parent (which, by transitivity, is be a reference to a
        # dict in the root Scope).
        parent_col = self.parent._mutable_collection(col)  # pylint: disable=protected-access
        if self.name not in parent_col:
          # If this Scope's name does not occur in the parent collection, add it
          # to the parent scope (updating the parent's variable dict).
          parent_col[self.name] = {}
        # Store a reference to the parent's scope collection for in this scope's
        # variable dict.
        self._variables[col] = parent_col[self.name]
    return self._variables[col]

  def _collection(self, col: str) -> Collection:
    """Returns a collection of variables of collection `col`."""
    if col not in self._variables:
      if self.parent:
        assert self.name is not None
        parent_col = self.parent._collection(col)  # pylint: disable=protected-access
        if self.name not in parent_col:
          return FrozenDict()
        self._variables[col] = parent_col[self.name]
      else:
        return FrozenDict()
    return self._variables[col]

  def has_rng(self, name: str) -> bool:
    """Returns true if a PRNGSequence with name `name` exists."""
    return name in self.rngs

  def make_rng(self, name: str = 'params') -> PRNGKey:
    """Generates A PRNGKey from a PRNGSequence with name `name`."""
    if not self.has_rng(name):
      # Fall back to the 'params' stream when the requested one is absent.
      if self.has_rng('params'):
        name = 'params'
      else:
        raise errors.InvalidRngError(f'{self.name} needs PRNG for "{name}"')
    self._check_valid()
    self._validate_trace_level()
    self.rng_counters[name] += 1
    return LazyRng.create(self.rngs[name], self.rng_counters[name]).as_jax_rng()

  def get_variable(self, col: str, name: str, default: Any = None) -> Any:
    """Retrieves the value of a Variable.

    Args:
      col: the variable collection.
      name: the name of the variable.
      default: the default value to return if the variable does not exist in
        this scope.

    Returns:
      The value of the input variable, of the default value if the variable
      doesn't exist in this scope.
    """
    variables = self._collection(col)
    if name in variables:
      return variables[name]
    else:
      return default

  def has_variable(self, col: str, name: str) -> bool:
    """Returns true if the given variable exists in this scope.

    Args:
      col: the collection of the variable.
      name: the name of the variable.
    """
    variables = self._collection(col)
    return name in variables

  def put_variable(self, col: str, name: str, value: Any):
    """Updates the value of the given variable if it is mutable, or an error otherwise.

    Args:
      col: the collection of the variable.
      name: the name of the variable.
      value: the new value of the given variable.
    """
    self._check_valid()
    self._validate_trace_level()
    if not self.is_mutable_collection(col):
      raise errors.ModifyScopeVariableError(col, name, self.path_text)
    variables = self._mutable_collection(col)

    # Make sure reference sharing of child variable dictionaries isn't broken.
    # See https://github.com/google/flax/issues/2022 for more details.
    def put(target, key, val):
      if (
          key in target
          and isinstance(target[key], dict)
          and isinstance(val, Mapping)
      ):
        for k, v in val.items():
          put(target[key], k, v)
      else:
        target[key] = val

    put(variables, name, value)

  # BUG FIX: the ``...``-bodied signatures below are typing overloads; without
  # @overload each def silently overwrote the previous one.
  @overload
  def variable(
      self,
      col: str,
      name: str,
      init_fn: Optional[Callable[..., T]] = None,
      *init_args,
  ) -> Variable[T]:
    ...

  @overload
  def variable(
      self,
      col: str,
      name: str,
      init_fn: Optional[Callable[..., T]] = None,
      *init_args,
      unbox: Literal[True],
      **init_kwargs,
  ) -> Variable[T]:
    ...

  @overload
  def variable(
      self,
      col: str,
      name: str,
      init_fn: Optional[Callable[..., T]] = None,
      *init_args,
      unbox: Literal[False],
      **init_kwargs,
  ) -> Variable[meta.AxisMetadata[T]]:
    ...

  @overload
  def variable(
      self,
      col: str,
      name: str,
      init_fn: Optional[Callable[..., T]] = None,
      *init_args,
      unbox: bool = True,
      **init_kwargs,
  ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
    ...

  def variable(
      self,
      col: str,
      name: str,  # pylint: disable=keyword-arg-before-vararg
      init_fn: Optional[Callable[..., T]] = None,
      *init_args,
      unbox: bool = True,
      **init_kwargs,
  ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
    """Creates a variable if it doesn't exist yet in this scope and returns it.

    Args:
      col: the collection of the variable.
      name: the name of the variable.
      init_fn: a function taking a PRNGKey plus any other number of positional
        arguments. If None, the variable must already be initialized otherwise
        an error is raised.
      *init_args: the positional arguments to evaluate init_fn on lazily.
      unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
        value, see ``flax.nn.meta.unbox`` (default: True).
      **init_kwargs: the key-word arguments to evaluate init_fn on lazily.

    Returns:
      The variable. Throws an error if the variable exists already.
    """
    self.reserve(name, col)
    if not self.has_variable(col, name):
      if not self.is_mutable_collection(col) or init_fn is None:
        if self.is_collection_empty(col):
          raise errors.ScopeCollectionNotFound(col, name, self.path_text)
        raise errors.ScopeVariableNotFoundError(name, col, self.path_text)
      init_value = init_fn(*init_args, **init_kwargs)
      self.put_variable(col, name, init_value)
    # cast to make static analyzers happy
    return cast(
        Union[Variable[T], Variable[meta.AxisMetadata[T]]],
        Variable(self, col, name, unbox=unbox),
    )

  @overload
  def param(
      self, name: str, init_fn: Callable[..., T], *init_args,
  ) -> T:
    ...

  @overload
  def param(
      self,
      name: str,
      init_fn: Callable[..., T],
      *init_args,
      unbox: Literal[True],
      **init_kwargs,
  ) -> T:
    ...

  @overload
  def param(
      self,
      name: str,
      init_fn: Callable[..., T],
      *init_args,
      unbox: Literal[False],
      **init_kwargs,
  ) -> meta.AxisMetadata[T]:
    ...

  @overload
  def param(
      self,
      name: str,
      init_fn: Callable[..., T],
      *init_args,
      unbox: bool,
      **init_kwargs,
  ) -> Union[T, meta.AxisMetadata[T]]:
    ...

  def param(
      self,
      name: str,
      init_fn: Callable[..., T],
      *init_args,
      unbox: bool = True,
      **init_kwargs,
  ) -> Union[T, meta.AxisMetadata[T]]:
    """Creates a parameter if it doesn't exist yet in this scope and returns it.

    If the parameter exists already, the existing value is simply returned.

    Args:
      name: the name of the parameter.
      init_fn: a function taking a PRNGKey plus any other number of positional
        arguments.
      *init_args: the positional arguments to evaluate init_fn on lazily.
      unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
        value, see ``flax.nn.meta.unbox`` (default: True).
      **init_kwargs: the key-word arguments to evaluate init_fn on lazily.

    Returns:
      The parameters. Throws an error if the params exist already.
    """
    self.reserve(name, 'params')
    if self.has_variable('params', name):
      value = self.get_variable('params', name)
      # Validate that the shape of the init_fn output is the same as the shape
      # of the existing parameter. This is to make sure that the hparams set up
      # in a Flax Module match the shapes coming in during apply, and if not,
      # catch it with an error message.
      # NOTE: We could consider moving this to `self.`
      abs_value = jax.eval_shape(
          lambda: init_fn(random.key(0), *init_args, **init_kwargs)
      )
      abs_value_flat = jax.tree_util.tree_leaves(abs_value)
      value_flat = jax.tree_util.tree_leaves(value)
      for val, abs_val in zip(value_flat, abs_value_flat):
        # NOTE: We could check dtype consistency here as well but it's
        # usefuleness is less obvious. We might intentionally change the dtype
        # for inference to a half float type for example.
        if jnp.shape(val) != jnp.shape(abs_val):
          raise errors.ScopeParamShapeError(
              name, self.path_text, jnp.shape(abs_val), jnp.shape(val)
          )
    else:
      if not self.is_mutable_collection('params'):
        if self.is_collection_empty('params'):
          raise errors.ScopeCollectionNotFound('params', name, self.path_text)
        raise errors.ScopeParamNotFoundError(name, self.path_text)
      value = init_fn(self.make_rng('params'), *init_args, **init_kwargs)
      self.put_variable('params', name, value)
    if unbox:
      value = meta.unbox(value)
    return value

  def _populate_collections(self):
    # Materialize every collection reachable from the root into this scope so
    # that `variables()` sees the complete tree.
    collections = self.root._variables.keys()  # pylint: disable=protected-access
    for col in collections:
      self._collection(col)

  def has_flag(self, key) -> bool:
    """Returns true if the internal flag `key` is set on this scope."""
    return key in self.flags

  def get_flag(self, key, default=no_flag) -> Any:
    """Returns the value of flag `key`, or `default` if it is not set.

    Raises:
      ValueError: if the flag is absent and no default was provided.
    """
    if key not in self.flags and default is no_flag:
      # BUG FIX: previously this *returned* the ValueError instance instead of
      # raising it, so callers silently received an exception object.
      raise ValueError(f'Flag {key} not present on scope.')
    return self.flags.get(key, default)
The provided code snippet includes necessary dependencies for implementing the `jvp` function. Write a Python function `def jvp( fn: Callable[..., Any], scope: Scope, primals, tangents, variable_tangents, variables: CollectionFilter = True, rngs: PRNGSequenceFilter = True, ) -> Tuple[Any, Any]` to solve the following problem:
A lifted version of ``jax.jvp``. See ``jax.jvp`` for the unlifted Jacobian-vector product (forward gradient). Note that no tangents are returned for variables. When variable tangents are required their value should be returned explicitly by `fn` using `scope.variables()`. Example:: def learn_scale(scope, x): p = scope.param('scale', nn.initializers.zeros_init(), ()) return p * x def f(scope, x): vars_t = jax.tree_util.tree_map(jnp.ones_like, scope.variables().get('params', {})) x, out_t = lift.jvp( learn_scale, scope, (x,), (jnp.zeros_like(x),), variable_tangents={'params': vars_t}) return out_t Args: fn: The function to be transformed. scope: The scope(s) which should be lifted into the transform. primals: The primal values at which the Jacobian of ``fun`` should be evaluated. Should be either a tuple or a list of arguments, and its length should be equal to the number of positional parameters of ``fun``. tangents: The tangent vector for which the Jacobian-vector product should be evaluated. Should be either a tuple or a list of tangents, with the same tree structure and array shapes as ``primals``. variable_tangents: A dict or PyTree fo dicts with the same structure as scopes. Each entry in the dict specifies the tangents for a variable collection. Not specifying a collection in variable_tangents is equivalent to passing a zero vector as the tangent. variables: other variables collections that are available inside `fn` but do not receive a tangent. rngs: the prngs that are available inside `fn`. Returns: A ``(primals_out, tangents_out)`` pair, where ``primals_out`` is ``fun(*primals)``, and ``tangents_out`` is the Jacobian-vector product of ``function`` evaluated at ``primals`` with ``tangents``. The ``tangents_out`` value has the same Python tree structure and shapes as ``primals_out``.
Here is the function:
def jvp(
    fn: Callable[..., Any],
    scope: Scope,
    primals,
    tangents,
    variable_tangents,
    variables: CollectionFilter = True,
    rngs: PRNGSequenceFilter = True,
) -> Tuple[Any, Any]:
  """A lifted version of ``jax.jvp``.

  See ``jax.jvp`` for the unlifted Jacobian-vector product (forward gradient).

  Note that no tangents are returned for variables. When variable tangents
  are required their value should be returned explicitly by `fn`
  using `scope.variables()`.

  Example::

    def learn_scale(scope, x):
      p = scope.param('scale', nn.initializers.zeros_init(), ())
      return p * x

    def f(scope, x):
      vars_t = jax.tree_util.tree_map(jnp.ones_like,
                                      scope.variables().get('params', {}))
      x, out_t = lift.jvp(
          learn_scale, scope, (x,), (jnp.zeros_like(x),),
          variable_tangents={'params': vars_t})
      return out_t

  Args:
    fn: The function to be transformed.
    scope: The scope(s) which should be lifted into the transform.
    primals: The primal values at which the Jacobian of ``fun`` should be
      evaluated. Should be either a tuple or a list of arguments,
      and its length should be equal to the number of positional parameters of
      ``fun``.
    tangents: The tangent vector for which the Jacobian-vector product should be
      evaluated. Should be either a tuple or a list of tangents, with the same
      tree structure and array shapes as ``primals``.
    variable_tangents: A dict or PyTree fo dicts with the same structure as
      scopes. Each entry in the dict specifies the tangents for a variable
      collection. Not specifying a collection in variable_tangents is
      equivalent to passing a zero vector as the tangent.
    variables: other variables collections that are available inside `fn` but
      do not receive a tangent.
    rngs: the prngs that are available inside `fn`.

  Returns:
    A ``(primals_out, tangents_out)`` pair, where ``primals_out`` is
    ``fun(*primals)``, and ``tangents_out`` is the Jacobian-vector product of
    ``function`` evaluated at ``primals`` with ``tangents``. The
    ``tangents_out`` value has the same Python tree structure and shapes as
    ``primals_out``.
  """

  def inner(scope_fn, repack_fn, variable_groups, rng_groups, *args):
    # Split the packed variables into those that carry tangents and the rest.
    jvp_vars, other_vars = variable_groups

    @functools.wraps(fn)
    def wrapper(vars_primals, args):
      # Rebuild scopes from the differentiated variables so jax.jvp can trace
      # through variable reads/writes as part of the primal computation.
      variable_groups = (vars_primals, other_vars)
      scope = scope_fn(variable_groups, rng_groups)
      y = fn(scope, *args)
      return y, repack_fn(scope)

    (y, out_vars), out_tangents = jax.jvp(
        wrapper, (jvp_vars, args), (variable_tangents, tangents)
    )
    # Only the output tangents are returned; variable tangents are discarded
    # (see the docstring for how to obtain them explicitly).
    return (y, out_tangents[0]), out_vars

  # filter out empty tangent collections because JAX will error on non-equal
  # tree structure for example: {"params": {}} != {}.
  treedef = jax.tree_util.tree_structure(scope)
  variable_tangents = tuple(
      {k: v for k, v in vt.items() if v}  # pylint: disable=g-complex-comprehension
      for vt in treedef.flatten_up_to(variable_tangents)
  )
  # The collections named in the (first) tangent dict are the ones lifted as
  # differentiable inputs.
  target = tuple(variable_tangents[0].keys())
  return pack(
      inner,
      (target, variables),
      (variables,),
      (rngs,),
      name='jvp',
      enable_kwargs=False,
  )(scope, *primals)
import collections
import functools
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import warnings
from flax import traceback_util
from flax.typing import (
In,
Out,
InOutAxis,
InOutScanAxis,
)
import jax
from jax import random
from . import axes_scan, meta
from .frozen_dict import freeze, unfreeze
from .scope import (
CollectionFilter,
DenyList, # pylint: disable=g-multiple-import
Filter,
PRNGSequenceFilter,
Scope,
group_collections,
in_filter,
intersect_filters,
is_filter_empty,
subtract_filters,
union_filters,
)
def tree_map_rngs(fn, tree):
  """Maps `fn` over every PRNG-key leaf of `tree`.

  Typed PRNG key arrays are treated as leaves (descent stops at them) so
  that JAX `random.*` functions can be applied to the keys directly.
  """
  def _is_prng_key(leaf):
    return hasattr(leaf, 'dtype') and jax.dtypes.issubdtype(
      leaf.dtype, jax.dtypes.prng_key
    )

  return jax.tree_util.tree_map(fn, tree, is_leaf=_is_prng_key)
def pack(
  fn: Callable[..., Any],
  in_variable_filters: Sequence[CollectionFilter],
  out_variable_filters: Sequence[CollectionFilter],
  rng_filters: Sequence[PRNGSequenceFilter],
  name=None,
  enable_kwargs=False,
) -> Callable[..., Any]:
  """Pack variables and rngs for functional transformations.
  The pack function is the building block for all other lifted transformations.
  Args:
    fn: The function to pack. `fn` has the signature
      `(scope_fn, repack_fn, variable_groups, rng_groups, *args) ->
      (output, packed_variables)`.
    in_variable_filters: Input variable filters.
    out_variable_filters: Output variable filters.
    rng_filters: RNG filters.
    name: The name of the packed scope.
    enable_kwargs: Whether to enable kwargs or not.
  Returns:
    A callable which expects a scope as the first argument.
  """
  def wrapper(scope_tree: Scope, *args, **kwargs):
    # Silently dropping kwargs would be surprising, so warn when they are
    # ignored because kwargs support was not requested.
    if not enable_kwargs and kwargs:
      msg = 'kwargs are not supported in {}, so "{}" is(are) ignored'
      warnings.warn(msg.format(name, ', '.join(kwargs.keys())), RuntimeWarning)
    # NOTE(review): `_partial_pack` is defined elsewhere in this module; it
    # appears to split the scope tree into filtered variable/rng groups and
    # return callbacks for rebuilding scopes and publishing results — confirm.
    (
      scope_fn,
      repack_fn,
      variable_groups_xs_t,
      rng_groups_xs_t,
      publish_results_fn,
      invalidate_scopes_fn,
    ) = _partial_pack(scope_tree, in_variable_filters, out_variable_filters, rng_filters, name)
    try:
      if enable_kwargs:
        y, out_variable_groups_xs_t = fn(
          scope_fn,
          repack_fn,
          variable_groups_xs_t,
          rng_groups_xs_t,
          *args,
          **kwargs,
        )
      else:
        y, out_variable_groups_xs_t = fn(
          scope_fn, repack_fn, variable_groups_xs_t, rng_groups_xs_t, *args
        )
    finally:
      # Temporary scopes must not leak out of the transform, even on error.
      invalidate_scopes_fn()
    # Write the output variable groups back into the original scope tree.
    publish_results_fn(out_variable_groups_xs_t)
    return y
  return wrapper
def _split_in_out_axes(xs: Mapping[CollectionFilter, Any]):
  """Splits a filter->axis mapping into separate input and output mappings.

  `In(axis)` entries appear only in the input mapping, `Out(axis)` entries
  only in the output mapping, and plain values appear in both.
  """
  def _axis_of(v):
    return v.axis if isinstance(v, (In, Out)) else v

  in_axes = {}
  out_axes = {}
  for key, val in xs.items():
    if not isinstance(val, Out):
      in_axes[key] = _axis_of(val)
    if not isinstance(val, In):
      out_axes[key] = _axis_of(val)
  return in_axes, out_axes
def _unzip2(xs):
ys = tuple(zip(*xs))
return ys if ys else ((), ())
# NOTE(review): `Axis`, `In`, and `Out` are expected to come from
# `flax.typing`; `Axis` is not imported in this extract — confirm upstream.
# An axis spec, optionally restricted to the input (`In`) or output (`Out`)
# side of a lifted transform.
InOutAxis = Union[Axis, In[Axis], Out[Axis]]
# Filters select variable collections / PRNG sequences by name.
CollectionFilter = Filter
PRNGSequenceFilter = Filter
The provided code snippet includes necessary dependencies for implementing the `vmap` function. Write a Python function `def vmap( fn: Callable[..., Any], variable_axes: Mapping[CollectionFilter, InOutAxis], split_rngs: Mapping[PRNGSequenceFilter, bool], in_axes=0, out_axes=0, axis_size: Optional[int] = None, axis_name: Optional[str] = None, spmd_axis_name: Optional[str] = None, metadata_params: Dict[Any, Any] = {}, ) -> Callable[..., Any]` to solve the following problem:
A lifted version of ``jax.vmap``. See ``jax.vmap`` for the unlifted batch transform in Jax. ``vmap`` can be used to add a batch axis to a scope function. For example we could create a version of ``dense`` with a batch axis that does not share parameters:: batch_dense = lift.vmap( nn.dense, in_axes=(0, None), variable_axes={'params': 0}, split_rngs={'params': True}) By using ``variable_axes={'params': 0}``, we indicate that the parameters themselves are mapped over and therefore not shared along the mapped axis. Consequently, we also split the 'params' RNG, otherwise the parameters would be initialized identically along the mapped axis. Similarly, ``vmap`` could be used to add a batch axis with parameter sharing:: batch_foo = lift.vmap( foo, in_axes=0, out_axes=0, variable_axes={'params': None}, split_rngs={'params': False}) Here we use ``variable_axes={'params': None}`` to indicate the parameter variables are shared along the mapped axis. Consequently, the 'params' RNG must also be shared. Args: fn: the function to be transformed. variable_axes: the variable collections that are lifted into the batching transformation. Use `None` to indicate a broadcasted collection or an integer to map over an axis. split_rngs: Split PRNG sequences will be different for each index of the batch dimension. Unsplit PRNGs will be broadcasted. in_axes: Specifies the mapping of the input arguments (see ``jax.vmap``). out_axes: Specifies the mapping of the return value (see ``jax.vmap``). axis_size: Specifies the size of the batch axis. This only needs to be specified if it cannot be derived from the input arguments. axis_name: Specifies a name for the batch axis. Can be used together with parallel reduction primitives (e.g. `jax.lax.pmean`, `jax.lax.ppermute`, etc.). Note, this is only used for pmap and shmap. For SPMD jit, you do not need to manually synchronize. Just make sure that the axes are correctly annotated and XLA:SPMD will insert the necessary collectives.
spmd_axis_name: Axis name added to any pjit sharding constraints appearing in `fn`. See also https://github.com/google/flax/blob/main/flax/linen/partitioning.py. metadata_params: arguments dict passed to AxisMetadata instances in the variable tree. Returns: A vectorized version of the input scope function.
Here is the function:
def vmap(
  fn: Callable[..., Any],
  variable_axes: Mapping[CollectionFilter, InOutAxis],
  split_rngs: Mapping[PRNGSequenceFilter, bool],
  in_axes=0,
  out_axes=0,
  axis_size: Optional[int] = None,
  axis_name: Optional[str] = None,
  spmd_axis_name: Optional[str] = None,
  metadata_params: Dict[Any, Any] = {},
) -> Callable[..., Any]:
  """A lifted version of ``jax.vmap``.
  See ``jax.vmap`` for the unlifted batch transform in Jax.
  ``vmap`` can be used to add a batch axis to a scope function.
  For example we could create a version of ``dense`` with
  a batch axis that does not share parameters::
    batch_dense = lift.vmap(
        nn.dense,
        in_axes=(0, None),
        variable_axes={'params': 0},
        split_rngs={'params': True})
  By using ``variable_axes={'params': 0}``, we indicate that the
  parameters themselves are mapped over and therefore not shared along
  the mapped axis. Consequently, we also split the 'params' RNG,
  otherwise the parameters would be initialized identically along
  the mapped axis.
  Similarly, ``vmap`` could be used to add a batch axis with parameter
  sharing::
    batch_foo = lift.vmap(
        foo,
        in_axes=0, out_axes=0,
        variable_axes={'params': None},
        split_rngs={'params': False})
  Here we use ``variable_axes={'params': None}`` to indicate the parameter
  variables are shared along the mapped axis. Consequently, the 'params'
  RNG must also be shared.
  Args:
    fn: the function to be transformed.
    variable_axes: the variable collections that are lifted into the batching
      transformation. Use `None` to indicate a broadcasted collection or an
      integer to map over an axis.
    split_rngs: Split PRNG sequences will be different for each index of the
      batch dimension. Unsplit PRNGs will be broadcasted.
    in_axes: Specifies the mapping of the input arguments (see ``jax.vmap``).
    out_axes: Specifies the mapping of the return value (see ``jax.vmap``).
    axis_size: Specifies the size of the batch axis. This only needs to be
      specified if it cannot be derived from the input arguments.
    axis_name: Specifies a name for the batch axis. Can be used together with
      parallel reduction primitives (e.g. `jax.lax.pmean`, `jax.lax.ppermute`,
      etc.). Note, this is only used for pmap and shmap. For SPMD jit, you do
      not need to manually synchronize. Just make sure that the axes are
      correctly annotated and XLA:SPMD will insert the necessary collectives.
    spmd_axis_name: Axis name added to any pjit sharding constraints appearing
      in `fn`. See also
      https://github.com/google/flax/blob/main/flax/linen/partitioning.py.
    metadata_params: arguments dict passed to AxisMetadata instances in the
      variable tree.
  Returns:
    A vectorized version of the input scope function.
  """
  # Split the per-collection axis specs into input/output mappings;
  # `In`/`Out` wrappers restrict a collection to one side only.
  variable_in_axes, variable_out_axes = _split_in_out_axes(variable_axes)
  variable_in_groups, variable_in_axes = _unzip2(variable_in_axes.items())
  variable_out_groups, variable_out_axes = _unzip2(variable_out_axes.items())
  rng_groups, rng_splits = _unzip2(split_rngs.items())
  # Split RNG sequences get mapped axis 0; unsplit ones are broadcast.
  rng_axes = tuple(0 if rng_split else None for rng_split in rng_splits)
  def inner(scope_fn, repack_fn, variable_groups, rng_groups, *args):
    def find_axis_size(axis, x):
      # Returns the length of the mapped axis of `x`, or `()` (an empty
      # pytree, contributing no leaves) when `x` is not mapped.
      if axis is not None:
        leaves = jax.tree_util.tree_leaves(x)
        if leaves:
          return leaves[0].shape[axis]
      return ()
    # split rngs
    # Derive the batch size from mapped variables/arguments; all mapped
    # entries must agree, and a manual axis_size wins when provided.
    axis_sizes = jax.tree_util.tree_map(
      find_axis_size, (variable_in_axes, in_axes), (variable_groups, args)
    )
    axis_sizes = set(jax.tree_util.tree_leaves(axis_sizes))
    if axis_size is None and len(axis_sizes) == 1:
      (d_axis_size,) = axis_sizes
    elif len(axis_sizes) > 1:
      raise ValueError(f'Inconsistent batch axis sizes: {axis_sizes}')
    elif axis_size is None:
      raise ValueError('axis_size should be specified manually.')
    else:
      d_axis_size = axis_size
    # random.clone is only available on Jax versions 0.4.26 or newer
    # see: https://jax.readthedocs.io/en/latest/jax.experimental.key_reuse.html
    if hasattr(random, 'clone'):
      split_fn = lambda rng: random.split(random.clone(rng), d_axis_size)
    else:
      split_fn = lambda rng: random.split(rng, d_axis_size)
    rng_groups = tuple(
      tree_map_rngs(split_fn, rng_group) if split else rng_group
      for rng_group, split in zip(rng_groups, rng_splits)
    )
    # Strip per-axis metadata (e.g. partitioning info) from mapped
    # collections before vmapping; it is re-added to the outputs below.
    new_variable_groups = []
    for var_group, axis in zip(variable_groups, variable_in_axes):
      if axis is not None:
        new_variable_groups.append(
          meta.remove_axis(var_group, axis, metadata_params)
        )
      else:
        new_variable_groups.append(var_group)
    variable_groups = tuple(new_variable_groups)
    @functools.partial(
      jax.vmap,
      in_axes=(variable_in_axes, rng_axes, in_axes),
      out_axes=(out_axes, variable_out_axes),
      axis_name=axis_name,
      axis_size=axis_size,
      spmd_axis_name=spmd_axis_name,
    )
    @functools.wraps(fn)
    def mapped(variable_groups, rng_groups, args):
      # Rebuild a scope inside the vmapped trace and collect the mutated
      # variables via repack_fn so they flow out through vmap's outputs.
      scope = scope_fn(variable_groups, rng_groups)
      y = fn(scope, *args)
      return y, repack_fn(scope)
    y, vars_out = mapped(variable_groups, rng_groups, args)
    new_vars_out = []
    for var_group, axis in zip(vars_out, variable_out_axes):
      if axis is not None:
        new_vars_out.append(meta.add_axis(var_group, axis, metadata_params))
      else:
        new_vars_out.append(var_group)
    vars_out = tuple(new_vars_out)
    return y, vars_out
  return pack(
    inner, variable_in_groups, variable_out_groups, rng_groups, name='vmap'
  )
spmd_axis_name: Axis name added to any pjit sharding constraints appearing in `fn`. See also https://github.com/google/flax/blob/main/flax/linen/partitioning.py. metadata_params: arguments dict passed to AxisMetadata instances in the variable tree. Returns: A vectorized version of the input scope function. |
import collections
import contextlib
import functools
import warnings
from typing import (
  Any,
  Callable,
  Dict,
  Iterable,
  List,
  Literal,
  Mapping,
  Optional,
  Sequence,
  Set,
  Tuple,
  TypeVar,
  Union,
  cast,
  overload,
)

import jax
import jax.numpy as jnp
from jax import random

from flax import traceback_util
from flax.typing import (
  In,
  Out,
  InOutAxis,
  InOutScanAxis,
)

from . import axes_scan, meta
from .frozen_dict import FrozenDict, freeze, unfreeze
from .scope import (
  CollectionFilter,
  DenyList,  # pylint: disable=g-multiple-import
  Filter,
  PRNGSequenceFilter,
  Scope,
  group_collections,
  in_filter,
  intersect_filters,
  is_filter_empty,
  subtract_filters,
  union_filters,
)
def tree_map_rngs(fn, tree):
  """Applies `fn` to each leaf of `tree`, stopping descent at PRNG key arrays."""
  prng_leaf = lambda x: hasattr(x, 'dtype') and jax.dtypes.issubdtype(
    x.dtype, jax.dtypes.prng_key
  )
  return jax.tree_util.tree_map(fn, tree, is_leaf=prng_leaf)
def pack(
  fn: Callable[..., Any],
  in_variable_filters: Sequence[CollectionFilter],
  out_variable_filters: Sequence[CollectionFilter],
  rng_filters: Sequence[PRNGSequenceFilter],
  name=None,
  enable_kwargs=False,
) -> Callable[..., Any]:
  """Pack variables and rngs for functional transformations.
  The pack function is the building block for all other lifted transformations.
  Args:
    fn: The function to pack. `fn` has the signature
      `(scope_fn, repack_fn, variable_groups, rng_groups, *args) ->
      (output, packed_variables)`.
    in_variable_filters: Input variable filters.
    out_variable_filters: Output variable filters.
    rng_filters: RNG filters.
    name: The name of the packed scope.
    enable_kwargs: Whether to enable kwargs or not.
  Returns:
    A callable which expects a scope as the first argument.
  """
  def wrapper(scope_tree: Scope, *args, **kwargs):
    # Warn (rather than silently drop) when kwargs are passed but
    # `enable_kwargs` is off.
    if not enable_kwargs and kwargs:
      msg = 'kwargs are not supported in {}, so "{}" is(are) ignored'
      warnings.warn(msg.format(name, ', '.join(kwargs.keys())), RuntimeWarning)
    # NOTE(review): `_partial_pack` is not visible in this extract; it appears
    # to produce filtered variable/rng groups plus callbacks for rebuilding
    # scopes, publishing results, and invalidating temporaries — confirm.
    (
      scope_fn,
      repack_fn,
      variable_groups_xs_t,
      rng_groups_xs_t,
      publish_results_fn,
      invalidate_scopes_fn,
    ) = _partial_pack(scope_tree, in_variable_filters, out_variable_filters, rng_filters, name)
    try:
      if enable_kwargs:
        y, out_variable_groups_xs_t = fn(
          scope_fn,
          repack_fn,
          variable_groups_xs_t,
          rng_groups_xs_t,
          *args,
          **kwargs,
        )
      else:
        y, out_variable_groups_xs_t = fn(
          scope_fn, repack_fn, variable_groups_xs_t, rng_groups_xs_t, *args
        )
    finally:
      # Invalidate temporary scopes even if `fn` raised.
      invalidate_scopes_fn()
    publish_results_fn(out_variable_groups_xs_t)
    return y
  return wrapper
C = TypeVar('C')
def _unzip2(xs):
ys = tuple(zip(*xs))
return ys if ys else ((), ())
# Aliases: both collection filters and PRNG-sequence filters share the
# generic `Filter` representation from `.scope`.
CollectionFilter = Filter
PRNGSequenceFilter = Filter
class Scope:
  """A Scope allows easy access to variables and manages RNGS of a neural network layer.

  Scopes are purely functional and encapsulated in
  :class:`flax.linen.module.Module`, so users writing neural network code
  usually generally do not interact with ``Scopes`` directly.

  See `core design tests
  <https://github.com/google/flax/tree/main/tests/core/design>`_
  for a number of examples using ``Scopes``.
  """

  # Maps a reserved child/variable name to the set of collections it is
  # reserved in; `None` marks a reservation spanning all collections.
  reservations: Dict[str, Set[Optional[str]]]

  def __init__(
    self,
    variables: MutableVariableDict,
    rngs: Optional[Union[RNGSequences, Dict[str, LazyRng]]] = None,
    name: Optional[str] = None,
    mutable: CollectionFilter = False,
    parent: Optional['Scope'] = None,
    path: Iterable[str] = (),
    debug_path: Iterable[str] = (),
    flags: Optional[Mapping] = None,
  ):
    """Initializes a Scope.

    Args:
      variables: VariableDict to initialize the Scope with.
      rngs: RNGs used in this scope or one of the child scopes.
      name: name of this scope.
      mutable: A CollectionFilter determining which variables are mutable.
      parent: The parent scope.
      path: The path in the variable tree from the root scope to this scope. It
        exactly matches the module path.
      debug_path: Similar to path but could contain transformation decorators.
      flags: internal flags.
    """
    # NOTE(review): `LazyRng`, `tracers`, `freeze`, and several annotation
    # types are not imported in this extract — confirm they resolve upstream.
    rngs = {k: LazyRng.create(v) for k, v in rngs.items()} if rngs else {}
    self._variables = variables
    self.parent = parent
    self.name = name
    self.path = tuple(path)
    self.debug_path = tuple(debug_path) or self.path
    self.rngs = rngs
    self.mutable = mutable
    self.flags = freeze({} if flags is None else flags)
    self._root = parent.root if parent else None
    self.trace_level = tracers.trace_level(tracers.current_trace())

    self.rng_counters = {key: 0 for key in self.rngs}
    self.reservations = collections.defaultdict(set)

    self._invalid = False

  def __eq__(self, other: Any) -> bool:
    # If the root variable dict and path are the same, then two scopes behave
    # identically. Effectively, a scope is nothing more than a cursor into a
    # variable dict and an rng counter dict.
    if not isinstance(other, Scope):
      return False
    if self is other:
      return True
    return (
      self.root._variables is other.root._variables
      and self.path == other.path
      and self.rng_counters is other.rng_counters
    )

  def __hash__(self) -> int:
    # see __eq__
    return hash((id(self.root._variables), self.path, id(self.rng_counters)))

  @property
  def root(self) -> 'Scope':
    # Fixed: `root` is accessed as an attribute throughout this class
    # (e.g. `self.root._variables`, `parent.root`), so it must be a property;
    # the bare method would have returned a bound method object instead.
    return self._root or self

  @property
  def path_text(self) -> str:
    """Returns the debug path as a human readable string."""
    # Fixed: also used as an attribute (`self.path_text` is passed directly
    # into error constructors below), hence a property.
    return '/' + '/'.join(self.debug_path)

  @property
  def invalid(self) -> bool:
    """Returns true if this scope is invalidated as a result of `Scope.temporary`."""
    return self._invalid

  def _check_valid(self):
    if self._invalid:
      raise errors.InvalidScopeError(self.name)

  @contextlib.contextmanager
  def temporary(self):
    """Returns a context manager that will invalidate this Scope when leaving the context."""
    # Fixed: the generator needs `@contextlib.contextmanager` to actually be
    # usable as a context manager, as the docstring promises.
    try:
      yield self
    finally:
      self.invalidate()

  def invalidate(self):
    """Invalidates the Scope."""
    self._invalid = True

  def mutable_variables(self) -> Union[VariableDict, Dict[str, Any]]:
    """Returns an immutable copy of the mutable variables belonging to this Scope."""
    self._populate_collections()
    xs = {
      k: v for k, v in self._variables.items() if in_filter(self.mutable, k)
    }
    if config.flax_return_frozendict:
      return freeze(xs)
    return xs

  def variables(self) -> Union[VariableDict, Dict[str, Any]]:
    """Returns an immutable copy of the variables belonging to this Scope."""
    self._populate_collections()
    if config.flax_return_frozendict:
      return freeze(self._variables)
    return self._variables

  def _validate_trace_level(self):
    tracers.check_trace_level(self.trace_level)

  def rewound(self, rewind_rngs: bool = False) -> 'Scope':
    """Returns a rewound version of this Scope.

    Args:
      rewind_rngs: if true, reset the RNG counter of this scope.

    Returns:
      A rewound version of this scope, which means reservations are
      emptied, and the rng counter is optionally rewound.
    """
    self._check_valid()
    scope = Scope(
      self._variables,
      self.rngs,
      self.name,
      self.mutable,
      self.parent,
      path=self.path,
      debug_path=self.debug_path,
      flags=self.flags,
    )
    if not rewind_rngs:
      scope.rng_counters = self.rng_counters
    return scope

  def name_reserved(self, name: str, col: Optional[str] = None) -> bool:
    """Checks whether a name for a child Scope or Variable is taken.

    Args:
      name: the name to check for collision.
      col: if a variable, the collection used.
    """
    if name in self.reservations:
      # allow the same name for two variables in
      # different collections, otherwise raise error.
      if (
        None in self.reservations[name]
        or col is None
        or col in self.reservations[name]
      ):
        return True
    return False

  def reserve(self, name: str, col: Optional[str] = None):
    """Reserves a name for a child Scope or Variable.

    Throws an error if the name exists already.

    Args:
      name: the name to reserve.
      col: if a variable, the collection used.
    """
    if not isinstance(name, str):
      # Fixed: the first fragment was missing the f-prefix, so the literal
      # text "{name}" appeared in the error message.
      raise TypeError(
        f'The type of scope "{name}" should be string but '
        f'it is {type(name)}'
      )
    if self.name_reserved(name, col):
      raise ValueError(f'Duplicate use of scope name: "{name}"')
    self.reservations[name].add(col)

  def default_name(self, prefix: str) -> str:
    """Generates an unreserved name with the given prefix.

    Args:
      prefix: prefix to use for generating an unreserved name.

    Returns:
      The generated name.
    """
    i = 0
    while True:
      name = f'{prefix}{i}'
      if name not in self.reservations:
        return name
      i += 1

  def push(
    self, name: Optional[str] = None, prefix: str = '', reuse=False
  ) -> 'Scope':
    """Creates a child Scope.

    Args:
      name: optional name of the child.
      prefix: prefix used for generating the name if `name` is `None`.
      reuse: if True will return a pre-existing child scope with the given name
        instead of throwing an error.

    Returns:
      The child scope.
    """
    self._check_valid()
    self._validate_trace_level()
    if name is None:
      name = self.default_name(prefix)
    if not reuse or name not in self.reservations:
      self.reserve(name)
    rngs = {key: LazyRng.create(rng, name) for key, rng in self.rngs.items()}
    # NOTE(review): `child_rng_token` is a module-level sentinel not visible
    # in this extract — confirm it is defined upstream.
    rng_key = (child_rng_token, name)
    if rng_key in self.rng_counters:
      rng_counters = self.rng_counters.get(rng_key)  # type: ignore
    else:
      rng_counters = {key: 0 for key in rngs}
      self.rng_counters[rng_key] = rng_counters  # type: ignore
    scope = Scope(
      {},
      name=name,
      rngs=rngs,
      parent=self,
      mutable=self.mutable,
      path=self.path + (name,),
      debug_path=self.debug_path + (name,),
      flags=self.flags,
    )
    scope.rng_counters = rng_counters
    return scope

  def child(
    self,
    fn: Callable[..., Any],
    name: Optional[str] = None,
    prefix: Optional[str] = None,
    named_call: bool = True,
    **partial_kwargs,
  ) -> Callable[..., Any]:
    """Partially applies a child scope to fn.

    When calling the returned function multiple times variables will be
    reused.

    Args:
      fn: the function to partially apply the child Scope to.
      name: optional name of the child.
      prefix: prefix used for generating name if it is `None`.
      named_call: if true, `fn` will be run under `jax.named_scope`. The XLA
        profiler will use this to name tag the computation.
      **partial_kwargs: additional kwargs partially applied to `fn`.

    Returns:
      The function with a partially applied scope.
    """
    if name is None:
      if prefix is None:
        prefix = fn.__name__ + '_' if hasattr(fn, '__name__') else ''
      name = self.default_name(prefix)
    scope = self.push(name)

    def wrapper(*args, **kwargs):
      kwargs = dict(partial_kwargs, **kwargs)
      if named_call:
        with jax.named_scope(name):
          res = fn(scope.rewound(), *args, **kwargs)
      else:
        res = fn(scope.rewound(), *args, **kwargs)
      return res

    return wrapper

  def is_mutable_collection(self, col: str) -> bool:
    """Returns true if the collection `col` is mutable."""
    return in_filter(self.mutable, col)

  def is_collection_empty(self, col: str) -> bool:
    """Returns true if the collection is empty."""
    if col in self.root._variables:  # pylint: disable=protected-access
      return not self.root._variables[col]  # pylint: disable=protected-access
    return True

  def _mutable_collection(self, col: str) -> MutableCollection:
    """Returns the collection `col` as a mutable object."""
    assert self.is_mutable_collection(col), f'Collection {col} is not mutable'
    # The actual variable dict is stored in the root scope only, and subscopes
    # hold references to subtrees relevant to them. This function ensures that
    # the collections are created in the top-level Scope and we return the
    # correct reference.
    if col not in self._variables:
      if not self.parent:
        # If this is the top-level Scope, just add an empty collection.
        self._variables[col] = {}
      else:
        assert self.name is not None  # Only top-level Scope have name None.
        # Populate the parent collections recursively and obtain a reference to
        # the direct parent (which, by transitivity, is be a reference to a
        # dict in the root Scope).
        parent_col = self.parent._mutable_collection(col)  # pylint: disable=protected-access
        if self.name not in parent_col:
          # If this Scope's name does not occur in the parent collection, add it
          # to the parent scope (updating the parent's variable dict).
          parent_col[self.name] = {}
        # Store a reference to the parent's scope collection for in this scope's
        # variable dict.
        self._variables[col] = parent_col[self.name]
    return self._variables[col]

  def _collection(self, col: str) -> Collection:
    """Returns a collection of variables of collection `col`."""
    if col not in self._variables:
      if self.parent:
        assert self.name is not None
        parent_col = self.parent._collection(col)  # pylint: disable=protected-access
        if self.name not in parent_col:
          return FrozenDict()
        self._variables[col] = parent_col[self.name]
      else:
        return FrozenDict()
    return self._variables[col]

  def has_rng(self, name: str) -> bool:
    """Returns true if a PRNGSequence with name `name` exists."""
    return name in self.rngs

  def make_rng(self, name: str = 'params') -> PRNGKey:
    """Generates A PRNGKey from a PRNGSequence with name `name`."""
    # Fall back to the 'params' stream when the requested one is absent.
    if not self.has_rng(name):
      if self.has_rng('params'):
        name = 'params'
      else:
        raise errors.InvalidRngError(f'{self.name} needs PRNG for "{name}"')
    self._check_valid()
    self._validate_trace_level()
    self.rng_counters[name] += 1
    return LazyRng.create(self.rngs[name], self.rng_counters[name]).as_jax_rng()

  def get_variable(self, col: str, name: str, default: Any = None) -> Any:
    """Retrieves the value of a Variable.

    Args:
      col: the variable collection.
      name: the name of the variable.
      default: the default value to return if the variable does not exist in
        this scope.

    Returns:
      The value of the input variable, of the default value if the variable
      doesn't exist in this scope.
    """
    variables = self._collection(col)
    if name in variables:
      return variables[name]
    else:
      return default

  def has_variable(self, col: str, name: str) -> bool:
    """Returns true if the given variable exists in this scope.

    Args:
      col: the collection of the variable.
      name: the name of the variable.
    """
    variables = self._collection(col)
    return name in variables

  def put_variable(self, col: str, name: str, value: Any):
    """Updates the value of the given variable if it is mutable, or an error otherwise.

    Args:
      col: the collection of the variable.
      name: the name of the variable.
      value: the new value of the given variable.
    """
    self._check_valid()
    self._validate_trace_level()
    if not self.is_mutable_collection(col):
      raise errors.ModifyScopeVariableError(col, name, self.path_text)
    variables = self._mutable_collection(col)

    # Make sure reference sharing of child variable dictionaries isn't broken.
    # See https://github.com/google/flax/issues/2022 for more details.
    def put(target, key, val):
      if (
        key in target
        and isinstance(target[key], dict)
        and isinstance(val, Mapping)
      ):
        for k, v in val.items():
          put(target[key], k, v)
      else:
        target[key] = val

    put(variables, name, value)

  # Fixed: the following signature stubs were missing `@overload`; without it
  # each plain `def` silently replaced the previous one instead of declaring
  # typing overloads for the implementation below.
  @overload
  def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
  ) -> Variable[T]:
    ...

  @overload
  def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: Literal[True],
    **init_kwargs,
  ) -> Variable[T]:
    ...

  @overload
  def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: Literal[False],
    **init_kwargs,
  ) -> Variable[meta.AxisMetadata[T]]:
    ...

  @overload
  def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: bool = True,
    **init_kwargs,
  ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
    ...

  def variable(
    self,
    col: str,
    name: str,  # pylint: disable=keyword-arg-before-vararg
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: bool = True,
    **init_kwargs,
  ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
    """Creates a variable if it doesn't exist yet in this scope and returns it.

    Args:
      col: the collection of the variable.
      name: the name of the variable.
      init_fn: a function taking a PRNGKey plus any other number of positional
        arguments. If None, the variable must already be initialized otherwise
        an error is raised.
      *init_args: the positional arguments to evaluate init_fn on lazily.
      unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
        value, see ``flax.nn.meta.unbox`` (default: True).
      **init_kwargs: the key-word arguments to evaluate init_fn on lazily.

    Returns:
      The variable. Throws an error if the variable exists already.
    """
    self.reserve(name, col)
    if not self.has_variable(col, name):
      if not self.is_mutable_collection(col) or init_fn is None:
        if self.is_collection_empty(col):
          raise errors.ScopeCollectionNotFound(col, name, self.path_text)
        raise errors.ScopeVariableNotFoundError(name, col, self.path_text)
      init_value = init_fn(*init_args, **init_kwargs)
      self.put_variable(col, name, init_value)
    # cast to make static analyzers happy
    return cast(
      Union[Variable[T], Variable[meta.AxisMetadata[T]]],
      Variable(self, col, name, unbox=unbox),
    )

  # Fixed: `@overload` restored on the `param` signature stubs as well.
  @overload
  def param(
    self, name: str, init_fn: Callable[..., T], *init_args,
  ) -> T:
    ...

  @overload
  def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: Literal[True],
    **init_kwargs,
  ) -> T:
    ...

  @overload
  def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: Literal[False],
    **init_kwargs,
  ) -> meta.AxisMetadata[T]:
    ...

  @overload
  def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: bool,
    **init_kwargs,
  ) -> Union[T, meta.AxisMetadata[T]]:
    ...

  def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: bool = True,
    **init_kwargs,
  ) -> Union[T, meta.AxisMetadata[T]]:
    """Creates a parameter if it doesn't exist yet in this scope and returns it.

    If the parameter exists already, the existing value is simply returned.

    Args:
      name: the name of the parameter.
      init_fn: a function taking a PRNGKey plus any other number of positional
        arguments.
      *init_args: the positional arguments to evaluate init_fn on lazily.
      unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
        value, see ``flax.nn.meta.unbox`` (default: True).
      **init_kwargs: the key-word arguments to evaluate init_fn on lazily.

    Returns:
      The parameters. Throws an error if the params exist already.
    """
    self.reserve(name, 'params')
    if self.has_variable('params', name):
      value = self.get_variable('params', name)
      # Validate that the shape of the init_fn output is the same as the shape
      # of the existing parameter. This is to make sure that the hparams set up
      # in a Flax Module match the shapes coming in during apply, and if not,
      # catch it with an error message.
      # NOTE: We could consider moving this to `self.`
      abs_value = jax.eval_shape(
        lambda: init_fn(random.key(0), *init_args, **init_kwargs)
      )
      abs_value_flat = jax.tree_util.tree_leaves(abs_value)
      value_flat = jax.tree_util.tree_leaves(value)
      for val, abs_val in zip(value_flat, abs_value_flat):
        # NOTE: We could check dtype consistency here as well but it's
        # usefuleness is less obvious. We might intentionally change the dtype
        # for inference to a half float type for example.
        if jnp.shape(val) != jnp.shape(abs_val):
          raise errors.ScopeParamShapeError(
            name, self.path_text, jnp.shape(abs_val), jnp.shape(val)
          )
    else:
      if not self.is_mutable_collection('params'):
        if self.is_collection_empty('params'):
          raise errors.ScopeCollectionNotFound('params', name, self.path_text)
        raise errors.ScopeParamNotFoundError(name, self.path_text)
      value = init_fn(self.make_rng('params'), *init_args, **init_kwargs)
      self.put_variable('params', name, value)
    if unbox:
      value = meta.unbox(value)
    return value

  def _populate_collections(self):
    collections = self.root._variables.keys()  # pylint: disable=protected-access
    for col in collections:
      self._collection(col)

  def has_flag(self, key) -> bool:
    return key in self.flags

  def get_flag(self, key, default=no_flag) -> Any:
    # Fixed: the original `return ValueError(...)` handed callers an exception
    # object instead of raising it.
    if key not in self.flags and default is no_flag:
      raise ValueError(f'Flag {key} not present on scope.')
    return self.flags.get(key, default)
The provided code snippet includes necessary dependencies for implementing the `while_loop` function. Write a Python function `def while_loop( cond_fn: Callable[[Scope, C], bool], body_fn: Callable[[Scope, C], C], scope: Scope, init: C, carry_variables: CollectionFilter = False, broadcast_variables: CollectionFilter = True, split_rngs: Mapping[PRNGSequenceFilter, bool] = {}, ) -> C` to solve the following problem:
Lifted version of jax.lax.while_loop. The lifted scope is passed to `cond_fn` and `body_fn`. Broadcasted variables are immutable. The carry variable are mutable but cannot change shape and dtype. This also means you cannot initialize variables inside the body. Consider calling `body_fn` once manually before calling `while_loop` if variable initialization is required. Example:: def f(scope, x): def cond_fn(scope, c): return scope.get_variable('state', 'acc') < 10 def body_fn(scope, c): acc = scope.variable('state', 'acc') acc += 1 y = scope.child(nn.dense)(c, c.shape[-1]) return y c = x c = body_fn(scope, c) return lift.while_loop(cond_fn, body_fn, scope, (), carry_variables='state') Args: cond_fn: Should return True as long as the loop should continue. body_fn: The body of the while loop. scope: The scope(s) which should be lifted into the loop. init: The initial state passed to the loop carry_variables: collections that are carried through the loop and are therefore mutable (default: none). broadcast_variables: collections that are closed over and are therefore read-only (default: all collections) split_rngs: Split PRNG sequences will be different for each loop iterations. If split is False the PRNGs will be the same across iterations. Returns: The final state after executing the while loop.
Here is the function:
def while_loop(
  cond_fn: Callable[[Scope, C], bool],
  body_fn: Callable[[Scope, C], C],
  scope: Scope,
  init: C,
  carry_variables: CollectionFilter = False,
  broadcast_variables: CollectionFilter = True,
  split_rngs: Mapping[PRNGSequenceFilter, bool] = {},
) -> C:
  """Lifted version of jax.lax.while_loop.

  The lifted scope is passed to `cond_fn` and `body_fn`.
  Broadcasted variables are immutable. The carry variables are
  mutable but cannot change shape and dtype.
  This also means you cannot initialize variables inside
  the body. Consider calling `body_fn` once manually before
  calling `while_loop` if variable initialization is required.

  Example::

    def f(scope, x):
      def cond_fn(scope, c):
        return scope.get_variable('state', 'acc') < 10
      def body_fn(scope, c):
        acc = scope.variable('state', 'acc')
        acc += 1
        y = scope.child(nn.dense)(c, c.shape[-1])
        return y
      c = x
      c = body_fn(scope, c)
      return lift.while_loop(cond_fn, body_fn, scope, (),
                             carry_variables='state')

  Args:
    cond_fn: Should return True as long as the loop should continue.
    body_fn: The body of the while loop.
    scope: The scope(s) which should be lifted into the loop.
    init: The initial state passed to the loop
    carry_variables: collections that are carried through the loop
      and are therefore mutable (default: none).
    broadcast_variables: collections that are closed over and are
      therefore read-only (default: all collections)
    split_rngs: Split PRNG sequences will be different for each loop
      iterations. If split is False the PRNGs will be the same across
      iterations.

  Returns:
    The final state after executing the while loop.
  """
  rng_groups, rng_splits = _unzip2(split_rngs.items())

  def inner(scope_fn, repack_fn, variable_groups, rng_groups):
    # variable_groups mirrors the filters passed to `pack` below:
    # (carry, broadcast).
    carry_variables, broadcast_variables = variable_groups

    def make_loop_rngs(i):
      # Fold the iteration counter into every rng group that was marked
      # for splitting, so those sequences differ per iteration; unsplit
      # groups are reused as-is.
      local_rng_groups = []
      for rng_group, rng_split in zip(rng_groups, rng_splits):
        if rng_split:
          rng_group = tree_map_rngs(
            lambda rng: random.fold_in(rng, i), rng_group
          )
        local_rng_groups.append(rng_group)
      return local_rng_groups

    def cond_wrapper(c):
      # The loop carry is (iteration index, carry variables, user carry).
      i, carry_variables, carry = c
      # NOTE(review): the condition folds in `-i` while the body folds in
      # `i` — presumably so cond-side rng draws never collide with the
      # body's; confirm against upstream intent. The cond scope is
      # read-only (mutable_filter=False).
      scope = scope_fn(
        (carry_variables, broadcast_variables),
        make_loop_rngs(-i),
        mutable_filter=False,
      )
      return cond_fn(scope, carry)

    def body_wrapper(c):
      i, carry_variables, carry = c
      scope = scope_fn(
        (carry_variables, broadcast_variables), make_loop_rngs(i)
      )
      carry = body_fn(scope, carry)
      # Collect mutations made by body_fn so they flow into the next
      # iteration's carry.
      (carry_variables,) = repack_fn(scope)
      return (i + 1, carry_variables, carry)

    c = (0, carry_variables, init)
    _, carry_variables, carry = jax.lax.while_loop(
      cond_wrapper, body_wrapper, c
    )
    return carry, (carry_variables,)

  # Immediately apply the packed transform to `scope`: while_loop returns
  # the final carry, not a wrapped function.
  return pack(
    inner,
    (carry_variables, broadcast_variables),
    (carry_variables,),
    rng_groups,
    name='while_loop',
  )(scope)
import collections
import functools
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import warnings
from flax import traceback_util
from flax.typing import (
In,
Out,
InOutAxis,
InOutScanAxis,
)
import jax
from jax import random
from . import axes_scan, meta
from .frozen_dict import freeze, unfreeze
from .scope import (
CollectionFilter,
DenyList, # pylint: disable=g-multiple-import
Filter,
PRNGSequenceFilter,
Scope,
group_collections,
in_filter,
intersect_filters,
is_filter_empty,
subtract_filters,
union_filters,
)
def pack(
  fn: Callable[..., Any],
  in_variable_filters: Sequence[CollectionFilter],
  out_variable_filters: Sequence[CollectionFilter],
  rng_filters: Sequence[PRNGSequenceFilter],
  name=None,
  enable_kwargs=False,
) -> Callable[..., Any]:
  """Pack variables and rngs for functional transformations.

  ``pack`` is the building block on which every other lifted
  transformation is implemented.

  Args:
    fn: the function to pack, with signature
      ``(scope_fn, repack_fn, variable_groups, rng_groups, *args) ->
      (output, packed_variables)``.
    in_variable_filters: filters selecting the input variable groups.
    out_variable_filters: filters selecting the output variable groups.
    rng_filters: filters selecting the PRNG sequence groups.
    name: the name given to the packed scope.
    enable_kwargs: whether keyword arguments are forwarded to ``fn``.

  Returns:
    A callable which expects a scope as the first argument.
  """

  def wrapper(scope_tree: Scope, *args, **kwargs):
    # Warn about (and drop) keyword arguments when the transform does not
    # support forwarding them.
    if kwargs and not enable_kwargs:
      msg = 'kwargs are not supported in {}, so "{}" is(are) ignored'
      warnings.warn(msg.format(name, ', '.join(kwargs.keys())), RuntimeWarning)
    packed = _partial_pack(
      scope_tree, in_variable_filters, out_variable_filters, rng_filters, name
    )
    (
      scope_fn,
      repack_fn,
      variable_groups_xs_t,
      rng_groups_xs_t,
      publish_results_fn,
      invalidate_scopes_fn,
    ) = packed
    call_args = (scope_fn, repack_fn, variable_groups_xs_t, rng_groups_xs_t)
    try:
      if enable_kwargs:
        y, out_variable_groups_xs_t = fn(*call_args, *args, **kwargs)
      else:
        y, out_variable_groups_xs_t = fn(*call_args, *args)
    finally:
      # Scopes created inside the transform must never leak out of it.
      invalidate_scopes_fn()
    publish_results_fn(out_variable_groups_xs_t)
    return y

  return wrapper
# ``remat`` is the conventional JAX-style alias for ``checkpoint``.
remat = checkpoint

# A filter selects variable collections / PRNG sequences for lifted
# transforms (e.g. ``True`` selects everything).
CollectionFilter = Filter
PRNGSequenceFilter = Filter
The provided code snippet includes necessary dependencies for implementing the `checkpoint` function. Write a Python function `def checkpoint( fn: Callable[..., Any], variables: CollectionFilter = True, rngs: PRNGSequenceFilter = True, concrete: bool = False, prevent_cse: bool = True, static_argnums: Union[int, Tuple[int, ...]] = (), policy: Optional[Callable[..., bool]] = None, ) -> Callable[..., Any]` to solve the following problem:
Lifted version of ``jax.checkpoint``. This function is aliased to ``lift.remat`` just like ``jax.remat``. Args: fn: scope function for which intermediate computations should be re-computed when computing gradients. variables: The variable collections that are lifted. By default all collections are lifted. rngs: The PRNG sequences that are lifted. By default all PRNG sequences are lifted. concrete: Optional, boolean indicating whether ``fun`` may involve value-dependent Python control flow (default ``False``). Support for such control flow is optional, and disabled by default, because in some edge-case compositions with :func:`jax.jit` it can lead to some extra computation. prevent_cse: Optional, boolean indicating whether to prevent common subexpression elimination (CSE) optimizations in the HLO generated from differentiation. This CSE prevention has costs because it can foil other optimizations, and because it can incur high overheads on some backends, especially GPU. The default is True because otherwise, under a ``jit`` or ``pmap``, CSE can defeat the purpose of this decorator. But in some settings, like when used inside a ``scan``, this CSE prevention mechanism is unnecessary, in which case ``prevent_cse`` can be set to False. static_argnums: Optional, int or sequence of ints, indicates which argument values on which to specialize for tracing and caching purposes. Specifying arguments as static can avoid ConcretizationTypeErrors when tracing, but at the cost of more retracing overheads. policy: Experimental checkpoint policy, see ``jax.checkpoint``. Returns: A wrapped version of ``fn``. When computing gradients intermediate computations will be re-computed when computing gradients.
Here is the function:
def checkpoint(
  fn: Callable[..., Any],
  variables: CollectionFilter = True,
  rngs: PRNGSequenceFilter = True,
  concrete: bool = False,
  prevent_cse: bool = True,
  static_argnums: Union[int, Tuple[int, ...]] = (),
  policy: Optional[Callable[..., bool]] = None,
) -> Callable[..., Any]:
  """Lifted version of ``jax.checkpoint`` (also exposed as ``lift.remat``).

  Wraps a scope function so that its intermediate values are recomputed
  during the backward pass instead of being stored.

  Args:
    fn: scope function whose intermediate computations should be
      re-computed when computing gradients.
    variables: the variable collections that are lifted (default: all).
    rngs: the PRNG sequences that are lifted (default: all).
    concrete: whether ``fn`` may involve value-dependent Python control
      flow (default ``False``); support for such control flow is optional
      because in some edge-case compositions with :func:`jax.jit` it can
      lead to extra computation.
    prevent_cse: whether to prevent common subexpression elimination (CSE)
      in the HLO generated from differentiation. True by default because
      under a ``jit`` or ``pmap`` CSE can defeat the purpose of this
      decorator; inside a ``scan`` it is unnecessary and may be set False.
    static_argnums: argument indices (into ``fn``'s user arguments) to
      specialize on for tracing and caching; can avoid
      ConcretizationTypeErrors at the cost of more retracing.
    policy: experimental checkpoint policy, see ``jax.checkpoint``.

  Returns:
    A wrapped version of ``fn`` whose intermediate computations are
    re-computed when computing gradients.
  """

  def inner(scope_fn, repack_fn, variable_groups, rng_groups, *args, **kwargs):
    # The rematted function receives (variable_groups, rng_groups) before
    # the user's arguments, so every static_argnums index shifts by 2.
    shifted_argnums = jax.tree_util.tree_map(lambda i: i + 2, static_argnums)

    def call_in_scope(variable_groups, rng_groups, *args, **kwargs):
      scope = scope_fn(variable_groups, rng_groups)
      y = fn(scope, *args, **kwargs)
      return y, repack_fn(scope)

    rematted = jax.remat(
      functools.wraps(fn)(call_in_scope),
      concrete=concrete,
      static_argnums=shifted_argnums,
      prevent_cse=prevent_cse,
      policy=policy,
    )
    return rematted(variable_groups, rng_groups, *args, **kwargs)

  return pack(
    inner,
    (variables,),
    (variables,),
    (rngs,),
    name='remat',
    enable_kwargs=True,
  )
import collections
import functools
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import warnings
from flax import traceback_util
from flax.typing import (
In,
Out,
InOutAxis,
InOutScanAxis,
)
import jax
from jax import random
from . import axes_scan, meta
from .frozen_dict import freeze, unfreeze
from .scope import (
CollectionFilter,
DenyList, # pylint: disable=g-multiple-import
Filter,
PRNGSequenceFilter,
Scope,
group_collections,
in_filter,
intersect_filters,
is_filter_empty,
subtract_filters,
union_filters,
)
def scan(
  fn: Callable[..., Any],
  variable_axes: Mapping[CollectionFilter, InOutScanAxis] = {},
  variable_broadcast: CollectionFilter = False,
  variable_carry: CollectionFilter = False,
  split_rngs: Mapping[PRNGSequenceFilter, bool] = {},
  in_axes=0,
  out_axes=0,
  length: Optional[int] = None,
  reverse: bool = False,
  unroll: int = 1,
  data_transform: Optional[Callable[..., Any]] = None,
  metadata_params: Dict[Any, Any] = {},
) -> Callable[..., Any]:
  """A lifted version of ``jax.lax.scan``.

  See ``jax.lax.scan`` for the unlifted scan in Jax.

  To improve consistency with ``vmap``, this version of scan
  uses ``in_axes`` and ``out_axes`` to determine which arguments
  are scanned over and along which axis.

  ``scan`` distinguishes between 3 different types of values inside the loop:

  1. **scan**: a value that is iterated over in a loop. All scan values must
     have the same size in the axis they are scanned over. Scanned outputs
     will be stacked along the scan axis.
  2. **carry**: A carried value is updated at each loop iteration. It must
     have the same shape and dtype throughout the loop.
  3. **broadcast**: a value that is closed over by the loop. When a variable
     is broadcasted they are typically initialized inside the loop body but
     independent of the loop variables.

  The loop body should have the signature
  ``(scope, body, carry, *xs) -> (carry, ys)``, where ``xs`` and ``ys``
  are the scan values that go in and out of the loop.

  Example::

    scope.variable('counter', 'i', jnp.zeros, ())
    def body_fn(scope, c, x):
      counter = scope.variable('counter', 'i', jnp.zeros, ())
      counter.value += 1
      x = scope.child(nn.dense)(x, 1)
      return c, x

    _, ys = lift.scan(
        body_fn,
        variable_carry='counter',
        variable_broadcast='params',
        split_rngs={'params': False})(scope, (), xs)

  Args:
    fn: the function to be transformed.
    variable_axes: the variable collections that are scanned over.
    variable_broadcast: Specifies the broadcasted variable collections.
      A broadcasted variable should not depend on any computation that
      cannot be lifted out of the loop. This is typically used to define
      shared parameters inside the fn.
    variable_carry: Specifies the variable collections that are carried
      through the loop. Mutations to these variables are carried to the next
      iteration and will be preserved when the scan finishes.
    split_rngs: Split PRNG sequences will be different for each loop
      iterations. If split is False the PRNGs will be the same across
      iterations.
    in_axes: Specifies the axis to scan over for the arguments. Should be a
      prefix tree of the arguments. Use `flax.core.broadcast` to feed an
      entire input to each iteration of the scan body.
    out_axes: Specifies the axis to scan over for the return value. Should
      be a prefix tree of the return value.
    length: Specifies the number of loop iterations. This only needs
      to be specified if it cannot be derived from the scan arguments.
    reverse: If true, scan from end to start in reverse order.
    unroll: how many scan iterations to unroll within a single
      iteration of a loop (default: 1).
    data_transform: optional function to transform raw variable and rng
      groups, intended for inline SPMD annotations.
    metadata_params: arguments dict passed to AxisMetadata instances in the
      variable tree.

  Returns:
    The scan function with the signature
    ``(scope, carry, *xxs) -> (carry, yys)``, where ``xxs`` and ``yys`` are
    the scan values that go in and out of the loop.
  """
  variable_in_axes, variable_out_axes = _split_in_out_axes(variable_axes)
  variable_in_groups, variable_in_axes = _unzip2(variable_in_axes.items())
  variable_out_groups, variable_out_axes = _unzip2(variable_out_axes.items())
  assert all(isinstance(ax, int) for ax in variable_in_axes)
  assert all(isinstance(ax, int) for ax in variable_out_axes)
  rng_groups, rng_splits = _unzip2(split_rngs.items())
  # Split rngs get a fresh key per iteration (axis 0); unsplit rngs are
  # broadcast unchanged into every iteration.
  rng_axes = tuple(
    0 if rng_split else axes_scan.broadcast for rng_split in rng_splits
  )

  def inner(scope_fn, repack_fn, variable_groups, rng_groups, init, *args):
    def find_length(axis, x):
      # Infer the scan length from the size of a scanned input along `axis`.
      if axis is not axes_scan.broadcast:
        leaves = jax.tree_util.tree_leaves(x)
        if leaves:
          return leaves[0].shape[axis]
      # `()` is an empty pytree, so broadcast/empty entries vanish when the
      # result is flattened with tree_leaves below.
      return ()

    # split rngs
    lengths = jax.tree_util.tree_map(find_length, in_axes, args)
    lengths = set(jax.tree_util.tree_leaves(lengths))
    if length is None and len(lengths) == 1:
      (d_length,) = lengths
    elif len(lengths) > 1:
      raise ValueError(f'Inconsistent scan lengths: {lengths}')
    elif length is None:
      raise ValueError('length should be specified manually.')
    else:
      d_length = length
    # random.clone is only available on Jax versions 0.4.26 or newer
    # see: https://jax.readthedocs.io/en/latest/jax.experimental.key_reuse.html
    if hasattr(random, 'clone'):
      split_fn = lambda rng: random.split(random.clone(rng), d_length)
    else:
      split_fn = lambda rng: random.split(rng, d_length)
    rng_groups = tuple(
      tree_map_rngs(split_fn, rng_group) if split else rng_group
      for rng_group, split in zip(rng_groups, rng_splits)
    )

    # Bug fix: restore the `@functools.partial(axes_scan.scan, ...)`
    # decorator whose opening line had been lost, leaving its keyword
    # arguments dangling as a syntax error. Without it, `scanned` is never
    # wrapped by the axis-aware scan.
    @functools.partial(
      axes_scan.scan,
      in_axes=(variable_in_axes, rng_axes, in_axes),
      out_axes=(out_axes, variable_out_axes),
      length=length,
      reverse=reverse,
      unroll=unroll,
    )
    def scanned(broadcast_vars, carry, scan_variable_groups, rng_groups, args):
      carry_vars, c = carry
      variable_groups = (broadcast_vars, carry_vars) + scan_variable_groups
      if data_transform is not None:
        variable_groups, rng_groups = data_transform(
          variable_groups, rng_groups
        )
      scope = scope_fn(variable_groups, rng_groups)
      c, y = fn(scope, c, *args)
      out_vars = repack_fn(scope)
      broadcast_vars_out = out_vars[0]
      carry_vars = out_vars[1]
      scan_vars = out_vars[2:]
      # add immutable broadcast vars back to broadcast output
      # otherwise they won't be fed to the actual scan body
      for in_group, out_group in zip(broadcast_vars, broadcast_vars_out):
        for col in in_group:
          if col not in out_group:
            out_group[col] = in_group[col]
      return broadcast_vars_out, (carry_vars, c), (y, scan_vars)

    broadcast_vars = variable_groups[0]
    carry_vars = variable_groups[1]
    scan_vars = variable_groups[2:]
    # Strip the scan axis from per-iteration variable metadata before the
    # loop, and re-attach it to the outputs afterwards.
    new_scan_vars = []
    for scan_group, axis in zip(scan_vars, variable_in_axes):
      new_scan_vars.append(meta.remove_axis(scan_group, axis, metadata_params))
    broadcast_vars, (carry_vars, c), (ys, scan_vars) = scanned(
      broadcast_vars,
      (carry_vars, init),
      tuple(new_scan_vars),
      rng_groups,
      args,
    )
    new_scan_vars = []
    for scan_group, axis in zip(scan_vars, variable_out_axes):
      new_scan_vars.append(meta.add_axis(scan_group, axis, metadata_params))
    scan_vars = tuple(new_scan_vars)
    out_vars = (
      broadcast_vars,
      carry_vars,
    ) + scan_vars
    return (c, ys), out_vars

  return pack(
    inner,
    (variable_broadcast, variable_carry) + variable_in_groups,
    (variable_broadcast, variable_carry) + variable_out_groups,
    rng_groups,
    name='scan',
  )
# ``remat`` is the conventional JAX-style alias for ``checkpoint``.
remat = checkpoint

# A scan axis spec that may be wrapped in ``In``/``Out`` to apply only to
# inputs or only to outputs.
InOutScanAxis = Union[ScanAxis, In[ScanAxis], Out[ScanAxis]]

# A filter selects variable collections / PRNG sequences for lifted
# transforms.
CollectionFilter = Filter
PRNGSequenceFilter = Filter
The provided code snippet includes necessary dependencies for implementing the `remat_scan` function. Write a Python function `def remat_scan( body_fn: Callable[..., Any], lengths: Sequence[int], policy: Optional[Callable[..., bool]] = None, variable_broadcast: CollectionFilter = False, variable_carry: CollectionFilter = False, variable_axes: Mapping[CollectionFilter, InOutScanAxis] = {True: 0}, split_rngs: Mapping[PRNGSequenceFilter, bool] = {True: True}, ) -> Callable[..., Any]` to solve the following problem:
Combines `lift.remat` and `lift.scan` for memory efficiency and constant time compilation. ``remat_scan`` allows for constant compile times and sublinear memory usage with respect to model depth. At a small constant penalty. This is typically beneficial for very deep models. Example:: def body_fn(scope, x): return nn.dense(scope, x, features=x.shape[-1]) # 100x dense with O(sqrt(N)) memory for gradient computation y = lift.remat_scan(body_fn, lengths=(10, 10))(scope, x) Args: body_fn: Scope function to be repeated using a (nested scan) lengths: number of loop iterations at the given level. The total number of iterations `n = prod(lengths)`. each loop is rematerialized. This way the memory consumption is proportional to `n^(1 / d)` where `d = len(lengths)`. Minimal memory consumptions requires tuning the lengths such that the same amount of memory is consumed at each level of the nested loop. policy: Experimental checkpoint policy, see ``jax.checkpoint``. variable_broadcast: Specifies the broadcasted variable collections. A broadcasted variable should not depend on any computation that cannot be lifted out of the loop. This is typically used to define shared parameters inside the fn. variable_carry: Specifies the variable collections that are carried through the loop. Mutations to these variables are carried to the next iteration and will be preserved when the scan finishes. variable_axes: the variable collections that are scanned over. split_rngs: Split PRNG sequences will be different for each loop iterations. If split is False the PRNGs will be the same across iterations. Returns: A wrapped version of ``body_fn`` that repeats itself prod(lengths) times.
Here is the function:
def remat_scan(
  body_fn: Callable[..., Any],
  lengths: Sequence[int],
  policy: Optional[Callable[..., bool]] = None,
  variable_broadcast: CollectionFilter = False,
  variable_carry: CollectionFilter = False,
  variable_axes: Mapping[CollectionFilter, InOutScanAxis] = {True: 0},
  split_rngs: Mapping[PRNGSequenceFilter, bool] = {True: True},
) -> Callable[..., Any]:
  """Combines `lift.remat` and `lift.scan` for memory-efficient deep loops.

  Repeats ``body_fn`` ``prod(lengths)`` times as a nest of rematerialized
  scans, giving constant compile time and sublinear gradient memory with
  respect to depth, at a small constant runtime penalty. With levels
  ``d = len(lengths)`` the memory use is proportional to ``n^(1 / d)``;
  minimal memory requires tuning the lengths so each nesting level
  consumes about the same amount.

  Example::

    def body_fn(scope, x):
      return nn.dense(scope, x, features=x.shape[-1])
    # 100x dense with O(sqrt(N)) memory for gradient computation
    y = lift.remat_scan(body_fn, lengths=(10, 10))(scope, x)

  Args:
    body_fn: Scope function to be repeated using a (nested) scan.
    lengths: number of loop iterations at each nesting level; the total
      iteration count is ``prod(lengths)`` and every loop is
      rematerialized.
    policy: Experimental checkpoint policy, see ``jax.checkpoint``.
    variable_broadcast: Specifies the broadcasted variable collections;
      these must not depend on computation that cannot be lifted out of
      the loop (typically shared parameters defined inside ``body_fn``).
    variable_carry: Variable collections carried through the loop;
      mutations propagate to the next iteration and survive the scan.
    variable_axes: the variable collections that are scanned over.
    split_rngs: Split PRNG sequences differ per iteration; unsplit ones
      are identical across iterations.

  Returns:
    A wrapped version of ``body_fn`` that repeats itself prod(lengths)
    times.
  """
  # TODO(jheek) should remat scan have scan inputs/outputs?
  scan_fn = functools.partial(
    scan,
    variable_broadcast=variable_broadcast,
    variable_carry=variable_carry,
    variable_axes=variable_axes,
    split_rngs=split_rngs,
  )
  if len(lengths) == 1:
    # Innermost level: run body_fn once per iteration of a plain scan,
    # producing no per-step outputs.
    def single_step(scope, carry):
      return body_fn(scope, carry), ()

    def fn(scope, carry):
      return scan_fn(single_step, length=lengths[0])(scope, carry)[0]
  else:
    # Outer level: each iteration is itself a rematerialized nested
    # remat_scan over the remaining lengths.
    @functools.partial(remat, policy=policy, prevent_cse=False)
    def inner_loop(scope, carry):
      nested = remat_scan(
        body_fn,
        lengths[1:],
        policy=policy,
        variable_broadcast=variable_broadcast,
        variable_carry=variable_carry,
        variable_axes=variable_axes,
        split_rngs=split_rngs,
      )
      return nested(scope, carry), ()

    def fn(scope, carry):
      return scan_fn(inner_loop, length=lengths[0])(scope, carry)[0]
  return fn
import collections
import contextlib
import dataclasses
import functools
import hashlib
import typing
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
Literal,
Mapping,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
cast,
overload,
)
import jax
import numpy as np
from jax import numpy as jnp
from jax import random, tree_util
from flax import config as config
from flax import configurations as legacy_config
from flax import errors, struct, traceback_util
from flax.ids import uuid
from flax.typing import (
PRNGKey,
Array,
RNGSequences,
Collection,
MutableCollection,
VariableDict,
FrozenVariableDict as FrozenVariableDict,
MutableVariableDict,
PRNGFoldable,
)
from . import meta, partial_eval, tracers
from .frozen_dict import FrozenDict, freeze, unfreeze
PRNGKey = jax.Array
The provided code snippet includes necessary dependencies for implementing the `_legacy_rng_fold_in` function. Write a Python function `def _legacy_rng_fold_in(rng: PRNGKey, data: Iterable[PRNGFoldable]) -> PRNGKey` to solve the following problem:
Legacy RNG folding.
Here is the function:
def _legacy_rng_fold_in(rng: PRNGKey, data: Iterable[PRNGFoldable]) -> PRNGKey:
  """Fold a sequence of strings and ints into ``rng`` (legacy scheme).

  Each element is folded in individually: strings are SHA-1 hashed and the
  first 4 digest bytes are folded in as a uint32; ints are folded in
  directly.
  """
  for x in data:
    if isinstance(x, str):
      # Hash the string, then fold in the leading 4 bytes of the digest.
      digest = hashlib.sha1(x.encode('utf-8')).digest()
      hash_int = int.from_bytes(digest[:4], byteorder='big')
      rng = random.fold_in(rng, jnp.uint32(hash_int))  # type: ignore
    elif isinstance(x, int):
      rng = random.fold_in(rng, x)
    else:
      raise ValueError(f'Expected int or string, got: {x}')
  return rng
import collections
import contextlib
import dataclasses
import functools
import hashlib
import typing
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
Literal,
Mapping,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
cast,
overload,
)
import jax
import numpy as np
from jax import numpy as jnp
from jax import random, tree_util
from flax import config as config
from flax import configurations as legacy_config
from flax import errors, struct, traceback_util
from flax.ids import uuid
from flax.typing import (
PRNGKey,
Array,
RNGSequences,
Collection,
MutableCollection,
VariableDict,
FrozenVariableDict as FrozenVariableDict,
MutableVariableDict,
PRNGFoldable,
)
from . import meta, partial_eval, tracers
from .frozen_dict import FrozenDict, freeze, unfreeze
# Re-export of the global Flax config object.
# NOTE(review): the imports above bind the module as `legacy_config`, not
# `configurations` — confirm `configurations` is actually in scope here.
config: configurations.Config = configurations.config

# Type alias: PRNG keys are represented as JAX arrays.
PRNGKey = jax.Array

# A variable collection: a read-only mapping from names to values.
Collection = Mapping[str, Any]
The provided code snippet includes necessary dependencies for implementing the `_fold_in_static` function. Write a Python function `def _fold_in_static( rng: PRNGKey, data: typing.Collection[PRNGFoldable] ) -> PRNGKey` to solve the following problem:
Folds static data (strings & ints) into a jax.random.PRNGKey using its SHA-1 hash. This is faster than splitting an PRNGKey because it allows generating new PRNG keys in parallel that are independent of each other. Args: rng: the rng to fold the string into. data: the string to be folded in. Returns: The newly generated PRNG key.
Here is the function:
def _fold_in_static(
  rng: PRNGKey, data: typing.Collection[PRNGFoldable]
) -> PRNGKey:
  """Fold static data (strings & ints) into ``rng`` via one SHA-1 hash.

  All elements are accumulated into a single SHA-1 digest whose leading
  4 bytes are folded in once. This is faster than splitting a PRNGKey and
  yields keys that are independent of each other.

  Args:
    rng: the rng to fold the data into.
    data: the strings/ints to be folded in.

  Returns:
    The newly generated PRNG key (or ``rng`` unchanged when ``data`` is
    empty).
  """
  if not data:
    return rng
  hasher = hashlib.sha1()
  for x in data:
    if config.flax_fix_rng_separator:
      # Separator byte prevents collisions such as ("ab", "c") vs ("a", "bc").
      hasher.update(b'\00')
    if isinstance(x, str):
      hasher.update(x.encode('utf-8'))
    elif isinstance(x, int):
      hasher.update(x.to_bytes((x.bit_length() + 7) // 8, byteorder='big'))
    else:
      raise ValueError(f'Expected int or string, got: {x}')
  hash_int = int.from_bytes(hasher.digest()[:4], byteorder='big')
  return random.fold_in(rng, jnp.uint32(hash_int))  # type: ignore
import collections
import contextlib
import dataclasses
import functools
import hashlib
import typing
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
Literal,
Mapping,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
cast,
overload,
)
import jax
import numpy as np
from jax import numpy as jnp
from jax import random, tree_util
from flax import config as config
from flax import configurations as legacy_config
from flax import errors, struct, traceback_util
from flax.ids import uuid
from flax.typing import (
PRNGKey,
Array,
RNGSequences,
Collection,
MutableCollection,
VariableDict,
FrozenVariableDict as FrozenVariableDict,
MutableVariableDict,
PRNGFoldable,
)
from . import meta, partial_eval, tracers
from .frozen_dict import FrozenDict, freeze, unfreeze
CollectionFilter = Filter
def init(
    fn: Callable[..., Any],
    mutable: CollectionFilter = True,
    flags: Optional[Mapping] = None,
) -> Callable[..., Any]:
  """Functionalize a `Scope` function for initialization.

  Args:
    fn: a function taking a `Scope` as its first argument.
    mutable: the filter determining which variable collections are mutable.
    flags: internal flags.

  Returns:
    `fn` with the scope partially applied.
  """

  def init_wrapper(rngs, *args, **kwargs) -> Tuple[Any, VariableDict]:
    # Accept either a single PRNG key or a {name: key} mapping.
    if not (_is_valid_rng(rngs) or _is_valid_rngs(rngs)):
      raise ValueError(
          'First argument passed to an init function should be a '
          '``jax.PRNGKey`` or a dictionary mapping strings to '
          '``jax.PRNGKey``.'
      )
    if not isinstance(rngs, (dict, FrozenDict)):
      rngs = {'params': rngs}
    # Run apply with no initial variables and the initializing flag set.
    merged_flags = dict(flags) if flags is not None else {}
    merged_flags['initializing'] = True
    apply_fn = apply(fn, mutable=mutable, flags=merged_flags)
    return apply_fn({}, *args, rngs=rngs, **kwargs)

  return init_wrapper
The provided code snippet includes necessary dependencies for implementing the `lazy_init` function. Write a Python function `def lazy_init( fn: Callable[..., Any], mutable: CollectionFilter = True, flags: Optional[Mapping] = None, ) -> Callable[..., Any]` to solve the following problem:
Functionalizes a `Scope` function for lazy initialization. Similar to ``init`` except that the init function now accepts ``jax.ShapeDtypeStruct`` instances for arguments that do not affect the variable initialization (typically this is all the input data). Example:: def f(scope, x): # the kernel init only uses the shape of x so we don't actually # need a value for x and can pass it as a ShapeDtypeStruct in lazy_init. k = scope.param("kernel", nn.initializers.lecun_normal(), (x.shape[-1], x.shape[-1])) return x @ k init_fn = lazy_init(f) variables = init_fn(random.key(0), jax.ShapeDtypeStruct((1, 128), jnp.float32)) Args: fn: a function taking a `Scope` as its first argument. mutable: the filter determining which variable collections are mutable. flags: internal flags. Returns: `fn` with the scope partially applied. Unlike ``init`` which returns a tuple of function output and variables, the lazy init function only returns the variables.
Here is the function:
def lazy_init(
    fn: Callable[..., Any],
    mutable: CollectionFilter = True,
    flags: Optional[Mapping] = None,
) -> Callable[..., Any]:
  """Functionalizes a `Scope` function for lazy initialization.

  Similar to ``init``, except that the returned function also accepts
  ``jax.ShapeDtypeStruct`` instances for arguments whose values do not
  affect variable initialization (typically all of the input data), and
  that it returns only the variables rather than an
  ``(output, variables)`` tuple.

  Example::

    def f(scope, x):
      # The kernel init only uses the shape of x, so a ShapeDtypeStruct
      # can be passed instead of a concrete value.
      k = scope.param("kernel", nn.initializers.lecun_normal(),
                      (x.shape[-1], x.shape[-1]))
      return x @ k

    init_fn = lazy_init(f)
    variables = init_fn(random.key(0),
                        jax.ShapeDtypeStruct((1, 128), jnp.float32))

  Args:
    fn: a function taking a `Scope` as its first argument.
    mutable: the filter determining which variable collections are mutable.
    flags: internal flags.

  Returns:
    ``fn`` with the scope partially applied; unlike ``init``, calling it
    returns only the variables.
  """

  def variables_only(*args, **kwargs):
    # Drop fn's output; lazy_init callers only want the variables.
    return init(fn, mutable, flags)(*args, **kwargs)[1]

  return partial_eval.lazy_init(variables_only)
22,682 | import jax
from .. import errors
def current_trace():
def trace_level(main):
def check_trace_level(base_level):
  # Raise if the current JAX trace level differs from the level captured
  # earlier, i.e. a JAX transform boundary was crossed in between.
  if trace_level(current_trace()) != base_level:
    raise errors.JaxTransformError()
22,683 | import collections
from types import MappingProxyType
from typing import Any, Dict, Hashable, Mapping, Tuple, TypeVar, Union
import jax
from flax import serialization
class FrozenDict(Mapping[K, V]):
"""An immutable variant of the Python dict."""
__slots__ = ('_dict', '_hash')
def __init__(self, *args, __unsafe_skip_copy__=False, **kwargs): # pylint: disable=invalid-name
# normalize args/kwargs into a plain dict before (optionally) deep-copying it
xs = dict(*args, **kwargs)
if __unsafe_skip_copy__:
self._dict = xs
else:
self._dict = _prepare_freeze(xs)
self._hash = None
def __getitem__(self, key):
v = self._dict[key]
if isinstance(v, dict):
return FrozenDict(v)
return v
def __setitem__(self, key, value):
raise ValueError('FrozenDict is immutable.')
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return self.pretty_repr()
def __reduce__(self):
return FrozenDict, (self.unfreeze(),)
def pretty_repr(self, num_spaces=4):
"""Returns an indented representation of the nested dictionary."""
def pretty_dict(x):
if not isinstance(x, dict):
return repr(x)
rep = ''
for key, val in x.items():
rep += f'{key}: {pretty_dict(val)},\n'
if rep:
return '{\n' + _indent(rep, num_spaces) + '}'
else:
return '{}'
return f'FrozenDict({pretty_dict(self._dict)})'
def __hash__(self):
if self._hash is None:
h = 0
for key, value in self.items():
h ^= hash((key, value))
self._hash = h
return self._hash
def copy(
self, add_or_replace: Mapping[K, V] = MappingProxyType({})
) -> 'FrozenDict[K, V]':
"""Create a new FrozenDict with additional or replaced entries."""
return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]
def keys(self):
return FrozenKeysView(self)
def values(self):
return FrozenValuesView(self)
def items(self):
for key in self._dict:
yield (key, self[key])
def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:
"""Create a new FrozenDict where one entry is removed.
Example::
>>> from flax.core import FrozenDict
>>> variables = FrozenDict({'params': {...}, 'batch_stats': {...}})
>>> new_variables, params = variables.pop('params')
Args:
key: the key to remove from the dict
Returns:
A pair with the new FrozenDict and the removed value.
"""
value = self[key]
new_dict = dict(self._dict)
new_dict.pop(key)
new_self = type(self)(new_dict)
return new_self, value
def unfreeze(self) -> Dict[K, V]:
"""Unfreeze this FrozenDict.
Returns:
An unfrozen version of this FrozenDict instance.
"""
return unfreeze(self)
def tree_flatten_with_keys(self) -> Tuple[Tuple[Any, ...], Hashable]:
"""Flattens this FrozenDict.
Returns:
A flattened version of this FrozenDict instance.
"""
sorted_keys = sorted(self._dict)
return tuple(
[(jax.tree_util.DictKey(k), self._dict[k]) for k in sorted_keys]
), tuple(sorted_keys)
def tree_unflatten(cls, keys, values):
# data is already deep copied due to tree map mechanism
# we can skip the deep copy in the constructor
return cls({k: v for k, v in zip(keys, values)}, __unsafe_skip_copy__=True)
The provided code snippet includes necessary dependencies for implementing the `_prepare_freeze` function. Write a Python function `def _prepare_freeze(xs: Any) -> Any` to solve the following problem:
Deep copy unfrozen dicts to make the dictionary FrozenDict safe.
Here is the function:
def _prepare_freeze(xs: Any) -> Any:
  """Deep copy unfrozen dicts to make the dictionary FrozenDict safe."""
  if isinstance(xs, FrozenDict):
    # A FrozenDict is immutable, so sharing its internal dict is safe.
    return xs._dict  # pylint: disable=protected-access
  if isinstance(xs, dict):
    # Copy mutable dicts recursively so no references are shared.
    return {key: _prepare_freeze(value) for key, value in xs.items()}
  # Any non-dict value is a leaf and is returned as-is.
  return xs
22,684 | import collections
from types import MappingProxyType
from typing import Any, Dict, Hashable, Mapping, Tuple, TypeVar, Union
import jax
from flax import serialization
class FrozenDict(Mapping[K, V]):
"""An immutable variant of the Python dict."""
__slots__ = ('_dict', '_hash')
def __init__(self, *args, __unsafe_skip_copy__=False, **kwargs): # pylint: disable=invalid-name
# make sure the dict is as
xs = dict(*args, **kwargs)
if __unsafe_skip_copy__:
self._dict = xs
else:
self._dict = _prepare_freeze(xs)
self._hash = None
def __getitem__(self, key):
v = self._dict[key]
if isinstance(v, dict):
return FrozenDict(v)
return v
def __setitem__(self, key, value):
raise ValueError('FrozenDict is immutable.')
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return self.pretty_repr()
def __reduce__(self):
return FrozenDict, (self.unfreeze(),)
def pretty_repr(self, num_spaces=4):
"""Returns an indented representation of the nested dictionary."""
def pretty_dict(x):
if not isinstance(x, dict):
return repr(x)
rep = ''
for key, val in x.items():
rep += f'{key}: {pretty_dict(val)},\n'
if rep:
return '{\n' + _indent(rep, num_spaces) + '}'
else:
return '{}'
return f'FrozenDict({pretty_dict(self._dict)})'
def __hash__(self):
if self._hash is None:
h = 0
for key, value in self.items():
h ^= hash((key, value))
self._hash = h
return self._hash
def copy(
self, add_or_replace: Mapping[K, V] = MappingProxyType({})
) -> 'FrozenDict[K, V]':
"""Create a new FrozenDict with additional or replaced entries."""
return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]
def keys(self):
return FrozenKeysView(self)
def values(self):
return FrozenValuesView(self)
def items(self):
for key in self._dict:
yield (key, self[key])
def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:
"""Create a new FrozenDict where one entry is removed.
Example::
>>> from flax.core import FrozenDict
>>> variables = FrozenDict({'params': {...}, 'batch_stats': {...}})
>>> new_variables, params = variables.pop('params')
Args:
key: the key to remove from the dict
Returns:
A pair with the new FrozenDict and the removed value.
"""
value = self[key]
new_dict = dict(self._dict)
new_dict.pop(key)
new_self = type(self)(new_dict)
return new_self, value
def unfreeze(self) -> Dict[K, V]:
"""Unfreeze this FrozenDict.
Returns:
An unfrozen version of this FrozenDict instance.
"""
return unfreeze(self)
def tree_flatten_with_keys(self) -> Tuple[Tuple[Any, ...], Hashable]:
"""Flattens this FrozenDict.
Returns:
A flattened version of this FrozenDict instance.
"""
sorted_keys = sorted(self._dict)
return tuple(
[(jax.tree_util.DictKey(k), self._dict[k]) for k in sorted_keys]
), tuple(sorted_keys)
def tree_unflatten(cls, keys, values):
# data is already deep copied due to tree map mechanism
# we can skip the deep copy in the constructor
return cls({k: v for k, v in zip(keys, values)}, __unsafe_skip_copy__=True)
The provided code snippet includes necessary dependencies for implementing the `copy` function. Write a Python function `def copy( x: Union[FrozenDict, Dict[str, Any]], add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict({}), ) -> Union[FrozenDict, Dict[str, Any]]` to solve the following problem:
Create a new dict with additional and/or replaced entries. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of ``FrozenDict.copy``. Example:: >>> from flax.core import FrozenDict, copy >>> variables = FrozenDict({'params': {...}, 'batch_stats': {...}}) >>> new_variables = copy(variables, {'additional_entries': 1}) Args: x: the dictionary to be copied and updated add_or_replace: dictionary of key-value pairs to add or replace in the dict x Returns: A new dict with the additional and/or replaced entries.
Here is the function:
def copy(
    x: Union[FrozenDict, Dict[str, Any]],
    add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict({}),
) -> Union[FrozenDict, Dict[str, Any]]:
  """Create a new dict with additional and/or replaced entries.

  This is a utility function that can act on either a FrozenDict or a
  regular dict and mimics the behavior of ``FrozenDict.copy``.

  Example::

    >>> from flax.core import FrozenDict, copy
    >>> variables = FrozenDict({'params': {...}, 'batch_stats': {...}})
    >>> new_variables = copy(variables, {'additional_entries': 1})

  Args:
    x: the dictionary to be copied and updated
    add_or_replace: dictionary of key-value pairs to add or replace in the
      dict x

  Returns:
    A new dict with the additional and/or replaced entries.
  """
  if isinstance(x, FrozenDict):
    return x.copy(add_or_replace)
  if isinstance(x, dict):
    # Mapping the identity over the tree produces a deep copy of x.
    new_dict = jax.tree_util.tree_map(lambda leaf: leaf, x)
    new_dict.update(add_or_replace)
    return new_dict
  raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')
22,685 | import collections
from types import MappingProxyType
from typing import Any, Dict, Hashable, Mapping, Tuple, TypeVar, Union
import jax
from flax import serialization
def _indent(x, num_spaces):
indent_str = ' ' * num_spaces
lines = x.split('\n')
assert not lines[-1]
# skip the final line because it's empty and should not be indented.
return '\n'.join(indent_str + line for line in lines[:-1]) + '\n'
class FrozenDict(Mapping[K, V]):
"""An immutable variant of the Python dict."""
__slots__ = ('_dict', '_hash')
def __init__(self, *args, __unsafe_skip_copy__=False, **kwargs): # pylint: disable=invalid-name
# make sure the dict is as
xs = dict(*args, **kwargs)
if __unsafe_skip_copy__:
self._dict = xs
else:
self._dict = _prepare_freeze(xs)
self._hash = None
def __getitem__(self, key):
v = self._dict[key]
if isinstance(v, dict):
return FrozenDict(v)
return v
def __setitem__(self, key, value):
raise ValueError('FrozenDict is immutable.')
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return self.pretty_repr()
def __reduce__(self):
return FrozenDict, (self.unfreeze(),)
def pretty_repr(self, num_spaces=4):
"""Returns an indented representation of the nested dictionary."""
def pretty_dict(x):
if not isinstance(x, dict):
return repr(x)
rep = ''
for key, val in x.items():
rep += f'{key}: {pretty_dict(val)},\n'
if rep:
return '{\n' + _indent(rep, num_spaces) + '}'
else:
return '{}'
return f'FrozenDict({pretty_dict(self._dict)})'
def __hash__(self):
if self._hash is None:
h = 0
for key, value in self.items():
h ^= hash((key, value))
self._hash = h
return self._hash
def copy(
self, add_or_replace: Mapping[K, V] = MappingProxyType({})
) -> 'FrozenDict[K, V]':
"""Create a new FrozenDict with additional or replaced entries."""
return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]
def keys(self):
return FrozenKeysView(self)
def values(self):
return FrozenValuesView(self)
def items(self):
for key in self._dict:
yield (key, self[key])
def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:
"""Create a new FrozenDict where one entry is removed.
Example::
>>> from flax.core import FrozenDict
>>> variables = FrozenDict({'params': {...}, 'batch_stats': {...}})
>>> new_variables, params = variables.pop('params')
Args:
key: the key to remove from the dict
Returns:
A pair with the new FrozenDict and the removed value.
"""
value = self[key]
new_dict = dict(self._dict)
new_dict.pop(key)
new_self = type(self)(new_dict)
return new_self, value
def unfreeze(self) -> Dict[K, V]:
"""Unfreeze this FrozenDict.
Returns:
An unfrozen version of this FrozenDict instance.
"""
return unfreeze(self)
def tree_flatten_with_keys(self) -> Tuple[Tuple[Any, ...], Hashable]:
"""Flattens this FrozenDict.
Returns:
A flattened version of this FrozenDict instance.
"""
sorted_keys = sorted(self._dict)
return tuple(
[(jax.tree_util.DictKey(k), self._dict[k]) for k in sorted_keys]
), tuple(sorted_keys)
def tree_unflatten(cls, keys, values):
# data is already deep copied due to tree map mechanism
# we can skip the deep copy in the constructor
return cls({k: v for k, v in zip(keys, values)}, __unsafe_skip_copy__=True)
The provided code snippet includes necessary dependencies for implementing the `pretty_repr` function. Write a Python function `def pretty_repr(x: Any, num_spaces: int = 4) -> str` to solve the following problem:
Returns an indented representation of the nested dictionary. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of ``FrozenDict.pretty_repr``. If x is any other dtype, this function will return ``repr(x)``. Args: x: the dictionary to be represented num_spaces: the number of space characters in each indentation level Returns: An indented string representation of the nested dictionary.
Here is the function:
def pretty_repr(x: Any, num_spaces: int = 4) -> str:
  """Returns an indented representation of the nested dictionary.

  This is a utility function that can act on either a FrozenDict or a
  regular dict and mimics the behavior of ``FrozenDict.pretty_repr``.
  If x is any other dtype, this function will return ``repr(x)``.

  Args:
    x: the dictionary to be represented
    num_spaces: the number of space characters in each indentation level

  Returns:
    An indented string representation of the nested dictionary.
  """
  if isinstance(x, FrozenDict):
    return x.pretty_repr()

  def render(node):
    # Non-dict leaves render via repr; dicts recurse with indentation.
    if not isinstance(node, dict):
      return repr(node)
    body = ''.join(f'{key}: {render(val)},\n' for key, val in node.items())
    if not body:
      return '{}'
    return '{\n' + _indent(body, num_spaces) + '}'

  return render(x)
22,686 | import collections
from types import MappingProxyType
from typing import Any, Dict, Hashable, Mapping, Tuple, TypeVar, Union
import jax
from flax import serialization
serialization.register_serialization_state(
FrozenDict, _frozen_dict_state_dict, _restore_frozen_dict
)
def _frozen_dict_state_dict(xs):
  # Convert each entry to its state-dict form; keys are preserved as-is.
  state = {}
  for key, value in xs.items():
    state[key] = serialization.to_state_dict(value)
  return state
22,687 | import collections
from types import MappingProxyType
from typing import Any, Dict, Hashable, Mapping, Tuple, TypeVar, Union
import jax
from flax import serialization
class FrozenDict(Mapping[K, V]):
"""An immutable variant of the Python dict."""
__slots__ = ('_dict', '_hash')
def __init__(self, *args, __unsafe_skip_copy__=False, **kwargs): # pylint: disable=invalid-name
# make sure the dict is as
xs = dict(*args, **kwargs)
if __unsafe_skip_copy__:
self._dict = xs
else:
self._dict = _prepare_freeze(xs)
self._hash = None
def __getitem__(self, key):
v = self._dict[key]
if isinstance(v, dict):
return FrozenDict(v)
return v
def __setitem__(self, key, value):
raise ValueError('FrozenDict is immutable.')
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return self.pretty_repr()
def __reduce__(self):
return FrozenDict, (self.unfreeze(),)
def pretty_repr(self, num_spaces=4):
"""Returns an indented representation of the nested dictionary."""
def pretty_dict(x):
if not isinstance(x, dict):
return repr(x)
rep = ''
for key, val in x.items():
rep += f'{key}: {pretty_dict(val)},\n'
if rep:
return '{\n' + _indent(rep, num_spaces) + '}'
else:
return '{}'
return f'FrozenDict({pretty_dict(self._dict)})'
def __hash__(self):
if self._hash is None:
h = 0
for key, value in self.items():
h ^= hash((key, value))
self._hash = h
return self._hash
def copy(
self, add_or_replace: Mapping[K, V] = MappingProxyType({})
) -> 'FrozenDict[K, V]':
"""Create a new FrozenDict with additional or replaced entries."""
return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]
def keys(self):
return FrozenKeysView(self)
def values(self):
return FrozenValuesView(self)
def items(self):
for key in self._dict:
yield (key, self[key])
def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:
"""Create a new FrozenDict where one entry is removed.
Example::
>>> from flax.core import FrozenDict
>>> variables = FrozenDict({'params': {...}, 'batch_stats': {...}})
>>> new_variables, params = variables.pop('params')
Args:
key: the key to remove from the dict
Returns:
A pair with the new FrozenDict and the removed value.
"""
value = self[key]
new_dict = dict(self._dict)
new_dict.pop(key)
new_self = type(self)(new_dict)
return new_self, value
def unfreeze(self) -> Dict[K, V]:
"""Unfreeze this FrozenDict.
Returns:
An unfrozen version of this FrozenDict instance.
"""
return unfreeze(self)
def tree_flatten_with_keys(self) -> Tuple[Tuple[Any, ...], Hashable]:
"""Flattens this FrozenDict.
Returns:
A flattened version of this FrozenDict instance.
"""
sorted_keys = sorted(self._dict)
return tuple(
[(jax.tree_util.DictKey(k), self._dict[k]) for k in sorted_keys]
), tuple(sorted_keys)
def tree_unflatten(cls, keys, values):
# data is already deep copied due to tree map mechanism
# we can skip the deep copy in the constructor
return cls({k: v for k, v in zip(keys, values)}, __unsafe_skip_copy__=True)
serialization.register_serialization_state(
FrozenDict, _frozen_dict_state_dict, _restore_frozen_dict
)
def _restore_frozen_dict(xs, states):
  # Every key of the restore target must be present in the state dict.
  target_keys = set(map(str, xs.keys()))
  state_keys = set(map(str, states.keys()))
  diff = target_keys.difference(state_keys)
  if diff:
    raise ValueError(
        'The target dict keys and state dict keys do not match, target dict'
        f' contains keys {diff} which are not present in state dict at path'
        f' {serialization.current_path()}'
    )
  restored = {
      key: serialization.from_state_dict(value, states[key], name=key)
      for key, value in xs.items()
  }
  return FrozenDict(restored)
22,688 | import abc
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
from flax import errors, struct
from flax.typing import LogicalNames
import jax
from jax.experimental import maps
class AxisMetadata(Generic[A], metaclass=abc.ABCMeta):
"""Abstract base class for boxed Metadata.
``AxisMetadata`` enables arbitrary, per axis metadata for variables.
By using ``unbox`` the metadata is stripped away to obtain the original
variables. By using unboxing, most code handling variables does not need
to handle ``AxisMetadata`` specifically, but can directly operate on the JAX
arrays that they wrap.
Additionally, ``AxisMetadata`` supports updating metadata whenever an axis
is added or removed by a functional transformation
(e.g.: ``nn.scan`` or ``nn.vmap``) using the ``add_axis`` and ``remove_axis``
methods.
By extending ``AxisMetadata``, custom metadata can be stored. See
``Partitioned`` for a specific implementation.
"""
def unbox(self) -> A:
"""Returns the content of the AxisMetadata box.
Note that unlike ``meta.unbox`` the unbox call should recursively unbox
metadata. It should simply return value that it wraps directly even
if that value itself is an instance of AxisMetadata.
In practise, AxisMetadata subclasses should be registered as PyTree nodes to
support passing instances to JAX and Flax APIs. The leaves returned for this
note should correspond to the value returned by unbox.
Returns:
The unboxed value.
"""
pass
def replace_boxed(self, val: B) -> 'AxisMetadata[B]':
"""Replaces the boxed value with the provided value.
Args:
val: The new value to be boxed by this AxisMetadata wrapper
Returns:
A new instance of the same type as self with `val` as the new ``unbox``
content
"""
pass
def add_axis(
self: TAxisMetadata, index: int, params: Dict[Any, Any]
) -> TAxisMetadata:
"""Adds a new axis to the axis metadata.
Note that add_axis and remove_axis should act as each other's inverse
(meaning: ``x.add_axis(i, p).remove_axis(i, p) == x``)
Args:
index: The position at which the new axis will be inserted
params: An arbitrary dictionary of parameters passed by the transformation
that introduces the new axis (e.g.: ``nn.scan`` or ``nn.vmap``). The
user passes this dictionary as the `metadata_param` argument to the
transformation.
Returns:
A new instance of the same type as self and with the same ``unbox``
content with updated axis metadata.
"""
pass
def remove_axis(
self: TAxisMetadata, index: int, params: Dict[Any, Any]
) -> TAxisMetadata:
"""Removes an axis from the axis metadata.
Note that add_axis and remove_axis should act as each other's inverse
(meaning: ``x.remove_axis(i, p).add_axis(i, p) == x``)
Args:
index: The position of the axis that is to be removed
params: An arbitrary dictionary of parameters passed by the transformation
that introduced the axis (e.g.: ``nn.scan`` or ``nn.vmap``). The user
passes this dictionary as the `metadata_param` argument to the
transformation.
Returns:
A new instance of the same type as self and with the same ``unbox``
content with updated axis metadata.
"""
pass
def is_axis_metadata(val: Any) -> bool:
  """Returns whether the argument is an instance of AxisMetadata."""
  return isinstance(val, AxisMetadata)


def unbox(tree: Any) -> Any:
  """Strips all AxisMetadata boxes from a PyTree."""
  # Recurse on the unboxed content: a boxed value may itself contain boxes.
  return map_axis_meta(lambda box: unbox(box.unbox()), tree)
The provided code snippet includes necessary dependencies for implementing the `replace_boxed` function. Write a Python function `def replace_boxed(tree: Any, updates: Any) -> Any` to solve the following problem:
Updates all AxisMetadata boxes with the values in updates.
Here is the function:
def replace_boxed(tree: Any, updates: Any) -> Any:
  """Updates all AxisMetadata boxes with the values in updates."""

  def swap(node, new_value):
    # AxisMetadata nodes keep their box but take the (recursively
    # updated) new content; plain leaves are replaced outright.
    if isinstance(node, AxisMetadata):
      return node.replace_boxed(replace_boxed(node.unbox(), new_value))
    return new_value

  return jax.tree_util.tree_map(
      swap, tree, updates, is_leaf=is_axis_metadata
  )
22,689 | import abc
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
from flax import errors, struct
from flax.typing import LogicalNames
import jax
from jax.experimental import maps
class Partitioned(struct.PyTreeNode, AxisMetadata[A]):
"""Wrapper for partitioning metadata.
``Partitioned`` is used to extend variables with partitioning information
required for ``jax.experimental.pjit``.
The easiest way to define Partitioned variables is by using the
``with_partitioning`` wrapper around the variable initializer.
Example::
class MLP(nn.Module):
hidden_size: int
def __call__(self, x):
ki = nn.linear.default_kernel_init
h = nn.Dense(
self.hidden_size,
kernel_init=nn.with_partitioning(ki, ('data', 'model')))(x)
h = nn.relu(h)
return nn.Dense(
x.shape[-1],
kernel_init=nn.with_partitioning(ki, ('model', 'data')))(h)
mlp = MLP(4096)
x = jnp.ones((8 * 1024, 1024))
# use eval_shape to get the Partitioned instances for the variables.
# this way we can determine the PartitionSpecs for the init variables
# before we call the init fn.
var_spec = nn.get_partition_spec(
jax.eval_shape(mlp.init, random.key(0), x))
init_fn = mesh(pjit(mlp.init,
(None, PartitionSpec("data", "model")), var_spec))
variables = init_fn(random.key(0), x)
apply_fn = mesh(pjit(
mlp.apply,
(var_spec, PartitionSpec("data", "model")),
PartitionSpec("data", "model")))
apply_fn(variables, x)
``Partitioned`` values can gain additional axes when using transformations
like ``nn.vmap`` and ``nn.scan``. In this case you can specify the name of
the new axis with the `metadata_params` args in vmap/scan::
class Model(nn.Module):
def __call__(self, x):
def body(mdl, c):
c = MLP(4096)(c)
return c, ()
c, _ = nn.scan(
body, variable_axes={"params": 0}, split_rngs={"params": 0}, length=8,
metadata_params={nn.meta.PARTITION_NAME: "layers"})(self, x)
return c
"""
value: Any
names: LogicalNames = struct.field(pytree_node=False)
mesh: Optional[jax.sharding.Mesh] = struct.field(
default=None, pytree_node=False
)
def unbox(self, apply_constraint=True) -> A:
"""Returns the wrapped value with the partitioning applied as a sharding constraint."""
if apply_constraint and (_global_mesh_defined() or self.mesh is not None):
axis_resource = self.get_partition_spec()
if self.mesh is not None:
sharding = jax.sharding.NamedSharding(self.mesh, axis_resource)
return jax.lax.with_sharding_constraint(self.value, sharding)
return jax.lax.with_sharding_constraint(self.value, axis_resource)
else:
return self.value
def replace_boxed(self, val: B) -> 'Partitioned[B]':
return self.replace(value=val) # type: ignore
def _get_partition_name(self, params: Dict[Any, Any]) -> str:
if PARTITION_NAME not in params:
raise errors.PartitioningUnspecifiedError(self)
return params[PARTITION_NAME]
def add_axis(self, index: int, params: Dict[Any, Any]) -> 'Partitioned[A]':
axis_name = self._get_partition_name(params)
names = list(self.names)
while len(names) < index:
names.append(None) # type: ignore
names.insert(index, axis_name) # type: ignore
return self.replace(names=tuple(names))
def remove_axis(self, index: int, params: Dict[Any, Any]) -> 'Partitioned[A]':
axis_name = self._get_partition_name(params)
names = list(self.names)
assert names.pop(index) == axis_name
return self.replace(names=tuple(names))
def get_partition_spec(self) -> jax.sharding.PartitionSpec:
"""Returns the ``Partitionspec`` for this partitioned value."""
return jax.sharding.PartitionSpec(*self.names)
def get_sharding(self, mesh: jax.sharding.Mesh) -> jax.sharding.Sharding:
"""Returns the ``NamedSharding`` for this partitioned value."""
return jax.sharding.NamedSharding(mesh, self.get_partition_spec())
LogicalNames = Tuple[Union[str, None], ...]
The provided code snippet includes necessary dependencies for implementing the `with_partitioning` function. Write a Python function `def with_partitioning( fn: Callable[..., Any], names: LogicalNames, mesh: Optional[jax.sharding.Mesh] = None, ) -> Callable[..., Partitioned[Any]]` to solve the following problem:
Wraps a function's return value with Partitioned. Example:: >>> import flax.linen as nn >>> kernel_init = nn.with_partitioning( ... nn.initializers.lecun_normal(), (None, "data")) >>> partitioned_dense = nn.Dense(features=3, kernel_init=kernel_init) Args: fn: The function to be wrapped. Typically this is an initializer. names: The logical axis passed to ``Partitioned``. mesh: The mesh to use for the partitioning. If None, the global mesh resource is used if available. Returns: A function wrapping ``fn`` that will return an instance of ``Partitioned``.
Here is the function:
def with_partitioning(
    fn: Callable[..., Any],
    names: LogicalNames,
    mesh: Optional[jax.sharding.Mesh] = None,
) -> Callable[..., Partitioned[Any]]:
  """Wraps a function's return value with Partitioned.

  Example::

    >>> import flax.linen as nn
    >>> kernel_init = nn.with_partitioning(
    ...   nn.initializers.lecun_normal(), (None, "data"))
    >>> partitioned_dense = nn.Dense(features=3, kernel_init=kernel_init)

  Args:
    fn: The function to be wrapped. Typically this is an initializer.
    names: The logical axis passed to ``Partitioned``.
    mesh: The mesh to use for the partitioning. If None, the global mesh
      resource is used if available.

  Returns:
    A function wrapping ``fn`` that will return an instance of
    ``Partitioned``.
  """

  @functools.wraps(fn)
  def boxed_fn(*args, **kwargs):
    unboxed = fn(*args, **kwargs)
    return Partitioned(unboxed, names, mesh=mesh)

  return boxed_fn
22,690 | import abc
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
from flax import errors, struct
from flax.typing import LogicalNames
import jax
from jax.experimental import maps
def get_partition_spec(tree: Any) -> Any:
  """Extracts a PartitionSpec tree from a PyTree containing ``Partitioned`` values."""

  def leaf_spec(leaf):
    if isinstance(leaf, Partitioned):
      return leaf.get_partition_spec()
    if hasattr(leaf, 'shape'):
      # Unboxed arrays are treated as replicated across all devices.
      return jax.sharding.PartitionSpec()
    return None

  return jax.tree_util.tree_map(
      leaf_spec, tree, is_leaf=lambda leaf: isinstance(leaf, Partitioned)
  )
The provided code snippet includes necessary dependencies for implementing the `get_sharding` function. Write a Python function `def get_sharding(tree: Any, mesh: jax.sharding.Mesh) -> Any` to solve the following problem:
Extracts a jax.sharding tree from a PyTree containing ``Partitioned`` values and a mesh.
Here is the function:
def get_sharding(tree: Any, mesh: jax.sharding.Mesh) -> Any:
  """Extracts a jax.sharding tree from a PyTree containing ``Partitioned`` values and a mesh."""
  spec_tree = get_partition_spec(tree)

  def to_sharding(spec):
    return jax.sharding.NamedSharding(mesh, spec)

  return jax.tree_util.tree_map(to_sharding, spec_tree)
22,691 | import functools
from typing import Any
import jax
from jax import core
from jax.extend import linear_util as lu
from jax.interpreters import partial_eval as pe
from flax import errors
def _maybe_unknown(x: Any) -> pe.PartialVal:
if isinstance(x, jax.ShapeDtypeStruct):
return pe.PartialVal.unknown(core.ShapedArray(x.shape, x.dtype))
else:
return pe.PartialVal.known(x)
The provided code snippet includes necessary dependencies for implementing the `lazy_init` function. Write a Python function `def lazy_init(fn)` to solve the following problem:
Lazily evaluates a function by using the shapes of the inputs. The returned function accepts a combination of JAX values and ``jax.ShapeDtypeStruct`` instances for the inputs for which we don't need concrete values (only the shape and dtype). This API is used by ``core.lazy_init`` or ``Module.lazy_init`` to initialize variables without doing any actual computation on the inputs. Args: fn: the function to be lazily evaluated. Returns: A new function that accepts a mix of concrete values and ``jax.ShapeDtypeStruct`` instances.
Here is the function:
def lazy_init(fn):
  """Lazily evaluates a function by using the shapes of the inputs.

  The returned function accepts a combination of JAX values and
  ``jax.ShapeDtypeStruct`` instances for the inputs for which we
  don't need concrete values (only the shape and dtype).

  This API is used by ``core.lazy_init`` or ``Module.lazy_init``
  to initialize variables without doing any actual computation on the
  inputs.

  Args:
    fn: the function to be lazily evaluated.
  Returns:
    A new function that accepts a mix of concrete values and
    ``jax.ShapeDtypeStruct`` instances.
  """

  @functools.wraps(fn)
  def wrapper(*args, **kwargs):
    # TODO(mattjj,jheek): use a public JAX API
    # flatten fn and prepare for internal JAX transform
    inputs_flat, in_tree = jax.tree_util.tree_flatten((args, kwargs))
    f_flat, out_tree = jax.api_util.flatten_fun(lu.wrap_init(fn), in_tree)
    # map inputs to PartialVal known/unknown
    # only the computations depending on knowns will be executed
    in_pvals = [_maybe_unknown(x) for x in inputs_flat]
    # Partial evaluation eagerly runs everything that depends only on known
    # inputs; computations touching unknowns are traced but not executed.
    _, out_pvals, _ = pe.trace_to_jaxpr_nounits(f_flat, in_pvals)
    # all outputs should be knowns. If this fails
    # the user is creating variables that depend on a
    # argument that was passed as a ShapeDtypeStruct.
    out_flat = []
    for pv, const in out_pvals:
      if pv is None:
        # const is the actual value of the known output
        out_flat.append(const)
      else:
        # An output depended on a shape-only input: surface a clear error.
        raise errors.LazyInitError(pv)
    return jax.tree_util.tree_unflatten(out_tree(), out_flat)

  return wrapper
22,692 | import collections
import itertools
import warnings
from collections.abc import Iterable
import jax
import jax.numpy as jnp
import numpy as np
from jax import core, lax
from jax.extend import linear_util as lu
from jax.interpreters import partial_eval as pe
def _parse_spec(spec):
"""Parse an input spec of the form (shape, dtype) or shape into a jax.ShapeDtypeStruct."""
spec = tuple(spec)
if len(spec) == 2 and isinstance(spec[0], Iterable):
return jax.ShapeDtypeStruct(tuple(spec[0]), spec[1])
else:
return jax.ShapeDtypeStruct(spec, jnp.float32)
The provided code snippet includes necessary dependencies for implementing the `partial_eval_by_shape` function. Write a Python function `def partial_eval_by_shape(fn, input_spec, *args, **kwargs)` to solve the following problem:
Lazily evaluate a function by using the shapes of the inputs. This function is similar to ``jax.eval_shape`` with the key difference that function outputs that can be computed without a concrete value of the inputs are returned as is instead of only the shape. See for example ``module.init_by_shape`` where this functionality is used to initialize a model without using input data or computation. Args: fn: the function to be lazily evaluated. input_spec: an iterable of shapes or (shape, dtype) tuples specifying the shape and type of the inputs. If unspecified the dtype is float32. *args: other arguments passed to the module's apply function **kwargs: keyword arguments passed to the module's apply function Returns: A pair consisting of the model output and an instance of Model
Here is the function:
def partial_eval_by_shape(fn, input_spec, *args, **kwargs):
  """Lazily evaluate a function by using the shapes of the inputs.

  This function is similar to ``jax.eval_shape`` with the key difference that
  function outputs that can be computed without a concrete value of the
  inputs are returned as is instead of only the shape. See for example
  ``module.init_by_shape`` where this functionality is used to initialize a
  model without using input data or computation.

  Args:
    fn: the function to be lazily evaluated.
    input_spec: an iterable of shapes or (shape, dtype) tuples specifying the
      shape and type of the inputs. If unspecified the dtype is float32.
    *args: other arguments passed to the module's apply function
    **kwargs: keyword arguments passed to the module's apply function
  Returns:
    The outputs of ``fn``: outputs computable without concrete inputs are
    returned as concrete values; the remaining outputs are reduced to
    ``jax.ShapeDtypeStruct`` placeholders.
  """
  # output cannot be returned in lazy_create because jax.eval_shape will only
  # return the shape and dtype.
  # TODO(mattjj,jheek): use a public JAX API
  f = lambda *inputs: fn(*inputs, *args, **kwargs)
  input_structs = [_parse_spec(spec) for spec in input_spec]
  inputs_flat, in_tree = jax.tree_util.tree_flatten(input_structs)
  f_flat, out_tree = jax.api_util.flatten_fun_nokwargs(lu.wrap_init(f), in_tree)
  # Every input is marked unknown: only its shape/dtype participates in the
  # partial evaluation below.
  in_pvals = [
      pe.PartialVal.unknown(core.ShapedArray(x.shape, x.dtype))
      for x in inputs_flat
  ]
  _, out_pvals, _ = pe.trace_to_jaxpr_nounits(f_flat, in_pvals)
  # pv is None for known outputs (const holds the value); unknown outputs
  # keep only their abstract shape/dtype.
  out_flat = [
      const if pv is None else jax.ShapeDtypeStruct(pv.shape, pv.dtype)
      for pv, const in out_pvals
  ]
  return jax.tree_util.tree_unflatten(out_tree(), out_flat)
22,693 | import collections
import itertools
import warnings
from collections.abc import Iterable
import jax
import jax.numpy as jnp
import numpy as np
from jax import core, lax
from jax.extend import linear_util as lu
from jax.interpreters import partial_eval as pe
def _scan_nd(body_fn, init, xs, n=1, unroll=(1,)):
"""Utility for performing an n-dimensional `lax.scan`.
The n-d scan is simply recursive call of 1-d scan.
Args:
body_fn: the body of the loop of type (c, x) -> (c, y).
init: initial value for the carry.
xs: a pytree of tensors to scan over.
n: number of dimensions to scan over (default: 1)
Returns:
A tuple of the final carry and the values returned by the body.
"""
if n == 1:
return lax.scan(body_fn, init, xs, unroll=unroll[0])
else:
def scan_body(c, x):
return _scan_nd(body_fn, c, x, n=n - 1, unroll=unroll[1:])
return lax.scan(scan_body, init, xs, unroll=unroll[0])
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
The provided code snippet includes necessary dependencies for implementing the `scan_in_dim` function. Write a Python function `def scan_in_dim(body_fn, init, xs, axis=(0,), unroll=(1,), keepdims=False)` to solve the following problem:
Utility for doing a scan along arbitrary dimensions. See `lax.scan` for details on how the scan operation works. Note on `unroll`: This argument gets left padded with ones to match the size of `axis`. Doing so allows unrolls to be performed from the innermost loop first. For example, `scan_in_dim(..., axis=(1, 2, 3), unroll=5)` is equivalent to `scan_in_dim(..., axis=(1, 2, 3), unroll=(1, 1, 5))`. Args: body_fn: the body of the loop of type (c, x) -> (c, y). init: initial value for the carry. xs: a pytree of tensors to scan over. axis: the axis to scan over. keepdims: keep the dimensions that are scanned over. unroll: an optional positive integer, or tuple of positive integers showing how many iterations of the loop to be unrolled into a single iteration for each axis. Returns: A tuple of the final carry and the values returned by the body.
Here is the function:
def scan_in_dim(body_fn, init, xs, axis=(0,), unroll=(1,), keepdims=False):
  """Utility for doing a scan along arbitrary dimensions.

  See `lax.scan` for details on how the scan operation works.

  Note on `unroll`: This argument gets left padded with ones to match the size
  of `axis`. Doing so allows unrolls to be performed from the innermost loop
  first. For example, `scan_in_dim(..., axis=(1, 2, 3), unroll=5)` is
  equivalent to `scan_in_dim(..., axis=(1, 2, 3), unroll=(1, 1, 5))`.

  Args:
    body_fn: the body of the loop of type (c, x) -> (c, y).
    init: initial value for the carry.
    xs: a pytree of tensors to scan over.
    axis: the axis to scan over.
    keepdims: keep the dimensions that are scanned over.
    unroll: an optional positive integer, or tuple of positive integers
      showing how many iterations of the loop to be unrolled into a single
      iteration for each axis.
  Returns:
    A tuple of the final carry and the values returned by the body.
  """
  if not isinstance(axis, Iterable):
    axis = (axis,)
  if not isinstance(unroll, Iterable):
    unroll = (unroll,)

  # Pad unroll with ones so we start unrolling from the innermost loop
  len_diff = len(axis) - len(unroll)
  unroll = (1,) * len_diff + unroll

  def transpose_in(x):
    # Move the scanned axes to the front, in the order given by `axis`.
    perm = axis + tuple(np.delete(np.arange(x.ndim), axis))
    return x.transpose(perm)

  def transpose_out(x):
    # Inverse of transpose_in: restore the caller's original axis order.
    perm = axis + tuple(np.delete(np.arange(x.ndim), axis))
    return x.transpose(_invert_perm(perm))

  def body_wrapper(c, xs):
    if keepdims:
      # Re-insert singleton dims for the scanned axes and restore the
      # original axis order, so body_fn sees inputs with the full rank.
      xs = jax.tree_util.tree_map(
          lambda x: x.reshape((1,) * len(axis) + x.shape), xs
      )
      xs = jax.tree_util.tree_map(transpose_out, xs)
    c, ys = body_fn(c, xs)
    if keepdims:
      # Undo the reshuffling on the outputs before they get stacked.
      ys = jax.tree_util.tree_map(transpose_in, ys)
      ys = jax.tree_util.tree_map(lambda x: x.reshape(x.shape[len(axis) :]), ys)
    return c, ys

  xs = jax.tree_util.tree_map(transpose_in, xs)
  c, ys = _scan_nd(body_wrapper, init, xs, n=len(axis), unroll=unroll)
  ys = jax.tree_util.tree_map(transpose_out, ys)
  return c, ys
22,694 | import collections
import itertools
import warnings
from collections.abc import Iterable
import jax
import jax.numpy as jnp
import numpy as np
from jax import core, lax
from jax.extend import linear_util as lu
from jax.interpreters import partial_eval as pe
The provided code snippet includes necessary dependencies for implementing the `pad_shard_unpad` function. Write a Python function `def pad_shard_unpad( wrapped, static_argnums=(0,), static_argnames=(), static_return=False )` to solve the following problem:
Wraps a function with code that pads, shards, then un-shards, un-pads. Args: wrapped: the function to be wrapped. Signature is ``params, *args, *kwargs``. static_argnums: indices of arguments to ``wrapped`` that should _not_ be padded and sharded, but instead be forwarded as-is. The default is (0,) because by far the most common use-case is to pass ``params`` first. static_argnames: names of kwargs to ``wrapped`` that should _not_ be padded and sharded, but instead be forwarded as-is. static_return: whether not to un-shard, and un-pad the return value; static return values are typically used with eval steps that compute metrics Returns: A new function that pads and shards its arguments before passing them to the wrapped function, and un-shards and un-pads the returned pytree. This is useful for calling a pmap'ed function with inputs that aren't divisible by the number of devices. A typical use is: @pad_shard_unpad @jax.pmap def forward(params, x): ... Notes: The padding is done in host-memory before being passed to the function, and the values returned by the function are transferred back to host memory. The returned function is augmented with a new keyword-only argument ``min_device_batch`` that, if specified, forces padding inputs to at least this size per device. This can be useful to avoid recompiles for the last batch and reduce memory fragmentation. For more information refer to https://flax.readthedocs.io/en/latest/guides/data_preprocessing/full_eval.html
Here is the function:
def pad_shard_unpad(
    wrapped, static_argnums=(0,), static_argnames=(), static_return=False
):
  """Wraps a function with code that pads, shards, then un-shards, un-pads.

  Args:
    wrapped: the function to be wrapped. Signature is ``params, *args, *kwargs``.
    static_argnums: indices of arguments to ``wrapped`` that should _not_ be
      padded and sharded, but instead be forwarded as-is. The default is (0,)
      because by far the most common use-case is to pass ``params`` first.
    static_argnames: names of kwargs to ``wrapped`` that should _not_ be padded
      and sharded, but instead be forwarded as-is.
    static_return: whether not to un-shard, and un-pad the return value; static
      return values are typically used with eval steps that compute metrics

  Returns:
    A new function that pads and shards its arguments before passing them to
    the wrapped function, and un-shards and un-pads the returned pytree.

    This is useful for calling a pmap'ed function with inputs that aren't
    divisible by the number of devices. A typical use is:
      @pad_shard_unpad
      @jax.pmap
      def forward(params, x): ...

  Notes:
    The padding is done in host-memory before being passed to the function, and
    the values returned by the function are transferred back to host memory.

    The returned function is augmented with a new keyword-only argument
    ``min_device_batch`` that, if specified, forces padding inputs to at least
    this size per device. This can be useful to avoid recompiles for the last
    batch and reduce memory fragmentation.

    For more information refer to https://flax.readthedocs.io/en/latest/guides/data_preprocessing/full_eval.html
  """

  def pad_shard_unpad_wrapper(*args, min_device_batch=None, **kw):
    d = jax.local_device_count()  # d = devices, b = batch
    # Collect the leading-dim sizes of all non-static leaves; they must agree.
    batch_sizes = set()
    for i, a in enumerate(args):
      if i not in static_argnums:
        batch_sizes |= {t.shape[0] for t in jax.tree_util.tree_leaves(a)}
    for k, v in kw.items():
      if k not in static_argnames:
        batch_sizes |= {t.shape[0] for t in jax.tree_util.tree_leaves(v)}
    assert len(batch_sizes) == 1, f'Inconsistent batch-sizes: {batch_sizes}'
    b = batch_sizes.pop()

    def pad(x):
      # Zero-pad the batch dim so it divides evenly across devices, then
      # reshape to (devices, per-device batch, ...).
      _, *shape = x.shape
      db, rest = divmod(b, d)
      if rest:
        x = np.concatenate([x, np.zeros((d - rest, *shape), x.dtype)], axis=0)
        db += 1
      if min_device_batch and db < min_device_batch:
        # Pad further so every device sees at least min_device_batch rows.
        x = np.concatenate(
            [x, np.zeros((d * (min_device_batch - db), *shape), x.dtype)]
        )
        db = min_device_batch
      return x.reshape(d, db, *shape)

    def maybe_pad(tree, actually_pad=True):
      if not actually_pad:
        return tree  # For call-site convenience below.
      return jax.tree_util.tree_map(pad, tree)

    args = [maybe_pad(a, i not in static_argnums) for i, a in enumerate(args)]
    kw = {k: maybe_pad(v, k not in static_argnames) for k, v in kw.items()}
    out = wrapped(*args, **kw)

    def unpad(x):
      # Transfer back before cutting, to reduce on-device shape diversity.
      return jax.device_get(x).reshape([np.prod(x.shape[:2]), *x.shape[2:]])[:b]

    return out if static_return else jax.tree_util.tree_map(unpad, out)

  return pad_shard_unpad_wrapper
22,695 | import jax
import jax.numpy as jnp
import numpy as np
from jax import lax
The provided code snippet includes necessary dependencies for implementing the `shard_prng_key` function. Write a Python function `def shard_prng_key(prng_key)` to solve the following problem:
Helper to shard (aka split) a PRNGKey for use with pmap'd functions. PRNG keys can be used at train time to drive stochastic modules e.g. Dropout. We would like a different PRNG key for each local device so that we end up with different random numbers on each one, hence we split our PRNG key. Args: prng_key: JAX PRNGKey Returns: A new array of PRNGKeys with leading dimension equal to local device count.
Here is the function:
def shard_prng_key(prng_key):
  """Splits a PRNGKey into one key per local device, for use with ``pmap``.

  Stochastic modules (e.g. Dropout) should see different random numbers on
  each local device, so the key is split ``jax.local_device_count()`` ways.

  Args:
    prng_key: JAX PRNGKey
  Returns:
    A new array of PRNGKeys with leading dimension equal to local device count.
  """
  n_local = jax.local_device_count()
  return jax.random.split(prng_key, num=n_local)
22,696 | import functools
import os
import pathlib
import re
import time
import warnings
from concurrent.futures import thread
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import jax
import orbax.checkpoint as ocp
from absl import logging
from jax import monitoring, process_index
from jax import tree_util as jtu
from jax.experimental.array_serialization.serialization import (
GlobalAsyncCheckpointManager,
get_tensorstore_spec,
)
from jax.experimental.multihost_utils import sync_global_devices
from flax import config, core, errors, io, serialization, traverse_util
from flax.training import orbax_utils
def _all_checkpoints(
    ckpt_dir: Union[str, os.PathLike], prefix: str = 'checkpoint_'
) -> List[str]:
  """Retrieve all checkpoint paths in directory.

  Args:
    ckpt_dir: str: directory of checkpoints to restore from.
    prefix: str: name prefix of checkpoint files.

  Returns:
    Naturally-sorted list of checkpoint paths, or an empty list if no
    checkpoints were found.
  """
  ckpt_dir = os.fspath(ckpt_dir)  # Pathlib -> str
  entries: List[Any] = [
      pathlib.PurePath(c) for c in _allowempty_listdir(ckpt_dir)
  ]
  # Keep only real checkpoints: entries matching the prefix, excluding
  # in-progress temp files, multiprocess array shards and orbax temp dirs.
  checkpoint_files = [
      os.path.join(ckpt_dir, c)
      for c in entries
      if c.match(f'{prefix}*')
      and not c.match(f'{prefix}tmp')
      and not c.match(f'*{MP_ARRAY_POSTFIX}')
      and not c.match(f'*{ocp.utils.TMP_DIR_SUFFIX}*')
  ]
  # natural_sort already returns a (possibly empty) list, so the previous
  # `if checkpoint_files: ... else: return []` branch was redundant.
  return natural_sort(checkpoint_files)
The provided code snippet includes necessary dependencies for implementing the `available_steps` function. Write a Python function `def available_steps( ckpt_dir: Union[str, os.PathLike], prefix: str = 'checkpoint_', step_type: Type = int, ) -> List[Union[int, float]]` to solve the following problem:
Return step numbers of available checkpoints in a directory. Args: ckpt_dir: str: directory of checkpoints to restore from. prefix: str: name prefix of checkpoint files. step_type: type: type for steps, int (default) or float. Returns: Sorted list of available steps or empty list if no checkpoints were found.
Here is the function:
def available_steps(
    ckpt_dir: Union[str, os.PathLike],
    prefix: str = 'checkpoint_',
    step_type: Type = int,
) -> List[Union[int, float]]:
  """Return step numbers of available checkpoints in a directory.

  Args:
    ckpt_dir: str: directory of checkpoints to restore from.
    prefix: str: name prefix of checkpoint files.
    step_type: type: type for steps, int (default) or float.

  Returns:
    Sorted list of available steps or empty list if no checkpoints were found.
  """
  checkpoint_files = _all_checkpoints(ckpt_dir, prefix)
  # The step is whatever follows the last occurrence of `prefix` in the
  # path; convert it with the caller-supplied step_type.
  return [
      step_type(file[file.rfind(prefix) + len(prefix) :])
      for file in checkpoint_files
  ]
22,697 | import functools
import os
import pathlib
import re
import time
import warnings
from concurrent.futures import thread
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import jax
import orbax.checkpoint as ocp
from absl import logging
from jax import monitoring, process_index
from jax import tree_util as jtu
from jax.experimental.array_serialization.serialization import (
GlobalAsyncCheckpointManager,
get_tensorstore_spec,
)
from jax.experimental.multihost_utils import sync_global_devices
from flax import config, core, errors, io, serialization, traverse_util
from flax.training import orbax_utils
# Matches submodule names carrying a trailing numeric suffix, e.g. 'Dense_3';
# group(1) is the module class name without the suffix.
MODULE_NUM_RE = re.compile(r'(.*)_\d+$')
# Alias for an arbitrary JAX pytree of parameters/state.
PyTree = Any
def natural_sort(file_list: Iterable[str], signed: bool = True) -> List[str]:
  """Sorts filenames so that numeric substrings compare numerically.

  Args:
    file_list: list of paths to sort containing numerical substrings.
    signed: bool: if leading '-' (or '+') signs should be included in numerical
      substrings as a sign or treated as a separator.

  Returns:
    List of filenames sorted 'naturally', not lexicographically: any
    integer substrings are used to subsort numerically. e.g.
      file_1, file_10, file_2 --> file_1, file_2, file_10
      file_0.1, file_-0.2, file_2.0 --> file_-0.2, file_0.1, file_2.0
  """
  num_re = SIGNED_FLOAT_RE if signed else UNSIGNED_FLOAT_RE

  def as_number(token):
    # Numeric tokens become floats so they compare by value, not text.
    return float(token) if num_re.match(token) else token

  def sort_key(name):
    return [as_number(token) for token in num_re.split(name)]

  return sorted(file_list, key=sort_key)
The provided code snippet includes necessary dependencies for implementing the `convert_pre_linen` function. Write a Python function `def convert_pre_linen(params: PyTree) -> PyTree` to solve the following problem:
Converts a pre-Linen parameter pytree. In pre-Linen API submodules were numbered incrementally, independent of the submodule class. With Linen this behavior has changed to keep separate submodule counts per module class. Consider the following module:: class Model(nn.Module): @nn.compact def __call__(self, x): x = nn.Conv(1, 1)(x) x = nn.Dense(1)(x) return x In pre-Linen the resulting params would have had the structure: ``{'Conv_0': { ... }, 'Dense_1': { ... } }`` With Linen the resulting params would instead have had the structure: ``{'Conv_0': { ... }, 'Dense_0': { ... } }`` To convert from pre-Linen format to Linen simply call:: params = convert_pre_linen(pre_linen_params) Note that you can also use this utility to convert pre-Linen collections because they're following the same module naming. Note though that collections were "flat" in pre-Linen and first need to be unflattened before they can be used with this function:: batch_stats = convert_pre_linen(flax.traverse_util.unflatten_dict({ tuple(k.split('/')[1:]): v for k, v in pre_linen_model_state.as_dict().items() })) Then Linen variables can be defined from these converted collections:: variables = {'params': params, 'batch_stats': batch_stats} Args: params: Parameter pytree in pre-Linen format. If the pytree is already in Linen format, then the returned pytree is unchanged (i.e. this function can safely be called on any loaded checkpoint for use with Linen). Returns: Parameter pytree with Linen submodule naming.
Here is the function:
def convert_pre_linen(params: PyTree) -> PyTree:
  """Converts a pre-Linen parameter pytree to Linen submodule naming.

  Pre-Linen numbered submodules incrementally regardless of class
  (``{'Conv_0': ..., 'Dense_1': ...}``); Linen keeps a separate counter per
  module class (``{'Conv_0': ..., 'Dense_0': ...}``). This function renames
  keys accordingly, recursing into nested dicts. Pytrees already in Linen
  format are returned unchanged, so it is safe to call on any checkpoint.

  Collections can also be converted, but "flat" pre-Linen collections must be
  unflattened first (see ``flax.traverse_util.unflatten_dict``).

  Args:
    params: Parameter pytree in pre-Linen format. If the pytree is already in
      Linen format, then the returned pytree is unchanged.

  Returns:
    Parameter pytree with Linen submodule naming.
  """
  if not isinstance(params, (dict, core.FrozenDict)):
    # Leaf value: nothing to rename.
    return params
  per_class_counts: Dict[Any, Any] = {}
  renamed = {}
  # Natural sort guarantees numeric suffixes are visited in increasing order,
  # so per-class indices are assigned consistently.
  for name in natural_sort(params.keys()):
    child = params[name]
    match = MODULE_NUM_RE.match(name)
    if match:
      module_class = match.group(1)
      index = per_class_counts.get(module_class, 0)
      per_class_counts[module_class] = index + 1
      name = f'{module_class}_{index}'
    renamed[name] = convert_pre_linen(child)
  if isinstance(params, core.FrozenDict):
    return core.freeze(renamed)  # type: ignore
  return renamed
22,698 | import jax.numpy as jnp
import numpy as np
from absl import logging
The provided code snippet includes necessary dependencies for implementing the `create_constant_learning_rate_schedule` function. Write a Python function `def create_constant_learning_rate_schedule( base_learning_rate, steps_per_epoch, warmup_length=0.0 )` to solve the following problem:
Create a constant learning rate schedule with optional warmup. Note that with `FLIP #1009`_ learning rate schedules in ``flax.training`` are **effectively deprecated** in favor of Optax_ schedules. Please refer to `Optimizer Schedules`_ for more information. .. _FLIP #1009: https://github.com/google/flax/blob/main/docs/flip/1009-optimizer-api.md .. _Optax: https://github.com/deepmind/optax .. _Optimizer Schedules: https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules Holds the learning rate constant. This function also offers a learning rate warmup as per https://arxiv.org/abs/1706.02677, for the purpose of training with large mini-batches. Args: base_learning_rate: the base learning rate steps_per_epoch: the number of iterations per epoch warmup_length: if > 0, the learning rate will be modulated by a warmup factor that will linearly ramp-up from 0 to 1 over the first ``warmup_length`` epochs Returns: Function ``f(step) -> lr`` that computes the learning rate for a given step.
Here is the function:
def create_constant_learning_rate_schedule(
    base_learning_rate, steps_per_epoch, warmup_length=0.0
):
  """Create a constant learning rate schedule with optional warmup.

  Note that with `FLIP #1009`_ learning rate schedules in ``flax.training`` are
  **effectively deprecated** in favor of Optax_ schedules. Please refer to
  `Optimizer Schedules`_ for more information.

  .. _FLIP #1009: https://github.com/google/flax/blob/main/docs/flip/1009-optimizer-api.md
  .. _Optax: https://github.com/deepmind/optax
  .. _Optimizer Schedules: https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules

  Holds the learning rate constant. This function also offers a learning rate
  warmup as per https://arxiv.org/abs/1706.02677, for the purpose of training
  with large mini-batches.

  Args:
    base_learning_rate: the base learning rate
    steps_per_epoch: the number of iterations per epoch
    warmup_length: if > 0, the learning rate will be modulated by a warmup
      factor that will linearly ramp-up from 0 to 1 over the first
      ``warmup_length`` epochs

  Returns:
    Function ``f(step) -> lr`` that computes the learning rate for a given step.
  """
  logging.warning(
      'Learning rate schedules in ``flax.training`` are effectively deprecated '
      'in favor of Optax schedules. Please refer to '
      'https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules'
      ' for alternatives.'
  )

  def learning_rate_fn(step):
    lr = base_learning_rate
    if warmup_length > 0.0:
      # Linear ramp from 0 to 1 over the first `warmup_length` epochs.
      lr = lr * jnp.minimum(1.0, step / float(warmup_length) / steps_per_epoch)
    return lr

  return learning_rate_fn
22,699 | import jax.numpy as jnp
import numpy as np
from absl import logging
def _piecewise_constant(boundaries, values, t):
index = jnp.sum(boundaries < t)
return jnp.take(values, index)
The provided code snippet includes necessary dependencies for implementing the `create_stepped_learning_rate_schedule` function. Write a Python function `def create_stepped_learning_rate_schedule( base_learning_rate, steps_per_epoch, lr_sched_steps, warmup_length=0.0 )` to solve the following problem:
Create a stepped learning rate schedule with optional warmup. Note that with `FLIP #1009`_ learning rate schedules in ``flax.training`` are **effectively deprecated** in favor of Optax_ schedules. Please refer to `Optimizer Schedules`_ for more information. .. _FLIP #1009: https://github.com/google/flax/blob/main/docs/flip/1009-optimizer-api.md .. _Optax: https://github.com/deepmind/optax .. _Optimizer Schedules: https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules A stepped learning rate schedule decreases the learning rate by specified amounts at specified epochs. The steps are given as the ``lr_sched_steps`` parameter. A common ImageNet schedule decays the learning rate by a factor of 0.1 at epochs 30, 60 and 80. This would be specified as:: [ [30, 0.1], [60, 0.01], [80, 0.001] ] This function also offers a learning rate warmup as per https://arxiv.org/abs/1706.02677, for the purpose of training with large mini-batches. Args: base_learning_rate: the base learning rate steps_per_epoch: the number of iterations per epoch lr_sched_steps: the schedule as a list of steps, each of which is a ``[epoch, lr_factor]`` pair; the step occurs at epoch ``epoch`` and sets the learning rate to ``base_learning_rate * lr_factor`` warmup_length: if > 0, the learning rate will be modulated by a warmup factor that will linearly ramp-up from 0 to 1 over the first ``warmup_length`` epochs Returns: Function ``f(step) -> lr`` that computes the learning rate for a given step.
Here is the function:
def create_stepped_learning_rate_schedule(
    base_learning_rate, steps_per_epoch, lr_sched_steps, warmup_length=0.0
):
  """Create a stepped learning rate schedule with optional warmup.

  Note that with `FLIP #1009`_ learning rate schedules in ``flax.training`` are
  **effectively deprecated** in favor of Optax_ schedules. Please refer to
  `Optimizer Schedules`_ for more information.

  .. _FLIP #1009: https://github.com/google/flax/blob/main/docs/flip/1009-optimizer-api.md
  .. _Optax: https://github.com/deepmind/optax
  .. _Optimizer Schedules: https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules

  A stepped learning rate schedule decreases the learning rate
  by specified amounts at specified epochs. The steps are given as
  the ``lr_sched_steps`` parameter. A common ImageNet schedule decays the
  learning rate by a factor of 0.1 at epochs 30, 60 and 80. This would be
  specified as::

    [
      [30, 0.1],
      [60, 0.01],
      [80, 0.001]
    ]

  This function also offers a learning rate warmup as per
  https://arxiv.org/abs/1706.02677, for the purpose of training with large
  mini-batches.

  Args:
    base_learning_rate: the base learning rate
    steps_per_epoch: the number of iterations per epoch
    lr_sched_steps: the schedule as a list of steps, each of which is
      a ``[epoch, lr_factor]`` pair; the step occurs at epoch ``epoch`` and
      sets the learning rate to ``base_learning_rate * lr_factor``
    warmup_length: if > 0, the learning rate will be modulated by a warmup
      factor that will linearly ramp-up from 0 to 1 over the first
      ``warmup_length`` epochs

  Returns:
    Function ``f(step) -> lr`` that computes the learning rate for a given step.
  """
  logging.warning(
      'Learning rate schedules in ``flax.training`` are effectively deprecated '
      'in favor of Optax schedules. Please refer to '
      'https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules'
      ' for alternatives.'
  )
  boundaries = [step[0] for step in lr_sched_steps]
  decays = [step[1] for step in lr_sched_steps]
  # Convert epoch boundaries into integer step counts.
  boundaries = np.array(boundaries) * steps_per_epoch
  boundaries = np.round(boundaries).astype(int)
  # Before the first boundary the schedule uses the unscaled base rate
  # (factor 1.0), hence the extra leading entry.
  values = np.array([1.0] + decays) * base_learning_rate

  def learning_rate_fn(step):
    lr = _piecewise_constant(boundaries, values, step)
    if warmup_length > 0.0:
      # Linear ramp from 0 to 1 over the first `warmup_length` epochs.
      lr = lr * jnp.minimum(1.0, step / float(warmup_length) / steps_per_epoch)
    return lr

  return learning_rate_fn
22,700 | import jax.numpy as jnp
import numpy as np
from absl import logging
The provided code snippet includes necessary dependencies for implementing the `create_cosine_learning_rate_schedule` function. Write a Python function `def create_cosine_learning_rate_schedule( base_learning_rate, steps_per_epoch, halfcos_epochs, warmup_length=0.0 )` to solve the following problem:
Create a cosine learning rate schedule with optional warmup. Note that with `FLIP #1009`_ learning rate schedules in ``flax.training`` are **effectively deprecated** in favor of Optax_ schedules. Please refer to `Optimizer Schedules`_ for more information. .. _FLIP #1009: https://github.com/google/flax/blob/main/docs/flip/1009-optimizer-api.md .. _Optax: https://github.com/deepmind/optax .. _Optimizer Schedules: https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules A cosine learning rate schedule modulates the learning rate with half a cosine wave, gradually scaling it to 0 at the end of training. This function also offers a learning rate warmup as per https://arxiv.org/abs/1706.02677, for the purpose of training with large mini-batches. Args: base_learning_rate: the base learning rate steps_per_epoch: the number of iterations per epoch halfcos_epochs: the number of epochs to complete half a cosine wave; normally the number of epochs used for training warmup_length: if > 0, the learning rate will be modulated by a warmup factor that will linearly ramp-up from 0 to 1 over the first ``warmup_length`` epochs Returns: Function ``f(step) -> lr`` that computes the learning rate for a given step.
Here is the function:
def create_cosine_learning_rate_schedule(
    base_learning_rate, steps_per_epoch, halfcos_epochs, warmup_length=0.0
):
  """Create a cosine learning rate schedule with optional warmup.

  Note that with `FLIP #1009`_ learning rate schedules in ``flax.training`` are
  **effectively deprecated** in favor of Optax_ schedules. Please refer to
  `Optimizer Schedules`_ for more information.

  .. _FLIP #1009: https://github.com/google/flax/blob/main/docs/flip/1009-optimizer-api.md
  .. _Optax: https://github.com/deepmind/optax
  .. _Optimizer Schedules: https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules

  A cosine learning rate schedule modulates the learning rate with
  half a cosine wave, gradually scaling it to 0 at the end of training.

  This function also offers a learning rate warmup as per
  https://arxiv.org/abs/1706.02677, for the purpose of training with large
  mini-batches.

  Args:
    base_learning_rate: the base learning rate
    steps_per_epoch: the number of iterations per epoch
    halfcos_epochs: the number of epochs to complete half a cosine wave;
      normally the number of epochs used for training
    warmup_length: if > 0, the learning rate will be modulated by a warmup
      factor that will linearly ramp-up from 0 to 1 over the first
      ``warmup_length`` epochs

  Returns:
    Function ``f(step) -> lr`` that computes the learning rate for a given step.
  """
  logging.warning(
      'Learning rate schedules in ``flax.training`` are effectively deprecated '
      'in favor of Optax schedules. Please refer to '
      'https://optax.readthedocs.io/en/latest/api.html#optimizer-schedules'
      ' for alternatives.'
  )
  halfwavelength_steps = halfcos_epochs * steps_per_epoch

  def learning_rate_fn(step):
    # cos maps [0, halfwavelength_steps] onto a factor falling from 1 to 0.
    scale_factor = jnp.cos(step * jnp.pi / halfwavelength_steps) * 0.5 + 0.5
    lr = base_learning_rate * scale_factor
    if warmup_length > 0.0:
      # Linear ramp from 0 to 1 over the first `warmup_length` epochs.
      lr = lr * jnp.minimum(1.0, step / float(warmup_length) / steps_per_epoch)
    return lr

  return learning_rate_fn
22,701 | import dataclasses
import enum
from typing import (
Any,
Callable,
Dict,
Generator,
Generic,
Mapping,
Optional,
Protocol,
TypeVar,
runtime_checkable,
)
from flax.core import FrozenDict
from flax.errors import CursorFindError, TraverseTreeError
class AccessType(enum.Enum):
  """How a child value is reached from its parent during pytree traversal."""

  ITEM = enum.auto()  # subscript access, e.g. ``obj[key]``
  ATTR = enum.auto()  # attribute access, e.g. ``getattr(obj, name)``
def is_named_tuple(obj):
  """Return True if ``obj`` looks like a namedtuple instance.

  Namedtuples are ordinary tuples that additionally expose ``_fields``,
  ``_asdict`` and ``_replace``, so we duck-type on those attributes.
  """
  if not isinstance(obj, tuple):
    return False
  return all(
      hasattr(obj, marker) for marker in ('_fields', '_asdict', '_replace')
  )
class TraverseTreeError(FlaxError):
  """Error raised by ``Cursor._traverse_tree()``.

  ``_traverse_tree`` runs in exactly one of two modes:

  - with an ``update_fn``, it yields (path, new value) pairs for every
    location where the ``update_fn`` returned a modified value;
  - with a ``cond_fn``, it yields the paths that satisfied the condition.

  This error signals that the caller supplied either both callbacks or
  neither, instead of exactly one.
  """

  def __init__(self, update_fn, cond_fn):
    if update_fn is None and cond_fn is None:
      message = (
          'Both update_fn and cond_fn are None. Exactly one of them must be'
          ' None.'
      )
    else:
      message = (
          'Both update_fn and cond_fn are not None. Exactly one of them must'
          ' be not None.'
      )
    super().__init__(message)
The provided code snippet includes necessary dependencies for implementing the `_traverse_tree` function. Write a Python function `def _traverse_tree(path, obj, *, update_fn=None, cond_fn=None)` to solve the following problem:
Helper function for ``Cursor.apply_update`` and ``Cursor.find_all``. Exactly one of ``update_fn`` and ``cond_fn`` must not be None. - If ``update_fn`` is not None, then ``Cursor.apply_update`` is calling this function and ``_traverse_tree`` will return a generator where each generated element is of type Tuple[Tuple[Union[str, int], AccessType], Any]. The first element is a tuple of the key path and access type where the change was applied from the ``update_fn``, and the second element is the newly modified value. If the generator is non-empty, then the tuple key path will always be non-empty as well. - If ``cond_fn`` is not None, then ``Cursor.find_all`` is calling this function and ``_traverse_tree`` will return a generator where each generated element is of type Tuple[Union[str, int], AccessType]. The tuple contains the key path and access type where the object was found that fulfilled the conditions of the ``cond_fn``.
Here is the function:
def _traverse_tree(path, obj, *, update_fn=None, cond_fn=None):
  """Recursively walk ``obj``, applying ``update_fn`` or testing ``cond_fn``.

  Helper for ``Cursor.apply_update`` and ``Cursor.find_all``; exactly one of
  ``update_fn`` and ``cond_fn`` must be provided.

  - With ``update_fn`` (``Cursor.apply_update``): yields ``(path, new_value)``
    tuples, where ``path`` is a non-empty tuple of ``(key, AccessType)`` pairs
    locating the spot where ``update_fn`` returned a modified value.
  - With ``cond_fn`` (``Cursor.find_all``): yields the ``(key, AccessType)``
    path tuples at which ``cond_fn`` evaluated to True.

  Once a node is modified / matched, traversal does not descend any further
  into that branch.
  """
  # Exactly one of the two callbacks must be supplied.
  if bool(update_fn) == bool(cond_fn):
    raise TraverseTreeError(update_fn, cond_fn)

  if path:  # the root node itself is never updated or tested
    str_path = '/'.join(str(key) for key, _ in path)
    if update_fn:
      updated = update_fn(str_path, obj)
      if updated is not obj:
        # Modified: report it and prune this branch.
        yield path, updated
        return
    elif cond_fn(str_path, obj):  # type: ignore
      yield path
      return

  # Work out how to enumerate the children of `obj`, if any.
  if isinstance(obj, (FrozenDict, dict)):
    children = obj.items()
    access_type = AccessType.ITEM
  elif is_named_tuple(obj):
    # Checked before the generic tuple case, since namedtuples are tuples.
    children = ((name, getattr(obj, name)) for name in obj._fields)  # type: ignore
    access_type = AccessType.ATTR
  elif isinstance(obj, (list, tuple)):
    children = enumerate(obj)
    access_type = AccessType.ITEM
  elif dataclasses.is_dataclass(obj):
    # Only init-able fields participate, matching dataclasses.replace().
    children = (
        (f.name, getattr(obj, f.name))
        for f in dataclasses.fields(obj)
        if f.init
    )
    access_type = AccessType.ATTR
  else:
    return  # leaf node: nothing to recurse into

  callback = {'update_fn': update_fn} if update_fn else {'cond_fn': cond_fn}
  for key, value in children:
    yield from _traverse_tree(path + ((key, access_type),), value, **callback)
22,702 | import dataclasses
import enum
from typing import (
Any,
Callable,
Dict,
Generator,
Generic,
Mapping,
Optional,
Protocol,
TypeVar,
runtime_checkable,
)
from flax.core import FrozenDict
from flax.errors import CursorFindError, TraverseTreeError
A = TypeVar('A')
class Cursor(Generic[A]):
_obj: A
_parent_key: Optional[ParentKey[A]]
_changes: Dict[Any, 'Cursor[A]']
def __init__(self, obj: A, parent_key: Optional[ParentKey[A]]):
# NOTE: we use `vars` here to avoid calling `__setattr__`
# vars(self) = self.__dict__
vars(self)['_obj'] = obj
vars(self)['_parent_key'] = parent_key
vars(self)['_changes'] = {}
def _root(self) -> 'Cursor[A]':
if self._parent_key is None:
return self
else:
return self._parent_key.parent._root # type: ignore
def _path(self) -> str:
if self._parent_key is None:
return ''
if self._parent_key.access_type == AccessType.ITEM: # type: ignore
if isinstance(self._parent_key.key, str): # type: ignore
key = "'" + self._parent_key.key + "'" # type: ignore
else:
key = str(self._parent_key.key) # type: ignore
return self._parent_key.parent._path + '[' + key + ']' # type: ignore
# self.parent_key.access_type == AccessType.ATTR:
return self._parent_key.parent._path + '.' + self._parent_key.key # type: ignore
def __getitem__(self, key) -> 'Cursor[A]':
if key in self._changes:
return self._changes[key]
if not isinstance(self._obj, Indexable):
raise TypeError(f'Cannot index into {self._obj}')
if isinstance(self._obj, Mapping) and key not in self._obj:
raise KeyError(f'Key {key} not found in {self._obj}')
if is_named_tuple(self._obj):
return getattr(self, self._obj._fields[key]) # type: ignore
child = Cursor(self._obj[key], ParentKey(self, key, AccessType.ITEM))
self._changes[key] = child
return child
def __getattr__(self, name) -> 'Cursor[A]':
if name in self._changes:
return self._changes[name]
if not hasattr(self._obj, name):
raise AttributeError(f'Attribute {name} not found in {self._obj}')
child = Cursor(
getattr(self._obj, name), ParentKey(self, name, AccessType.ATTR)
)
self._changes[name] = child
return child
def __setitem__(self, key, value):
if is_named_tuple(self._obj):
return setattr(self, self._obj._fields[key], value) # type: ignore
self._changes[key] = Cursor(value, ParentKey(self, key, AccessType.ITEM))
def __setattr__(self, name, value):
self._changes[name] = Cursor(value, ParentKey(self, name, AccessType.ATTR))
def set(self, value) -> A:
"""Set a new value for an attribute, property, element or entry
in the Cursor object and return a copy of the original object,
containing the new set value.
Example::
>>> from flax.cursor import cursor
>>> from flax.training import train_state
>>> import optax
>>> dict_obj = {'a': 1, 'b': (2, 3), 'c': [4, 5]}
>>> modified_dict_obj = cursor(dict_obj)['b'][0].set(10)
>>> assert modified_dict_obj == {'a': 1, 'b': (10, 3), 'c': [4, 5]}
>>> state = train_state.TrainState.create(
... apply_fn=lambda x: x,
... params=dict_obj,
... tx=optax.adam(1e-3),
... )
>>> modified_state = cursor(state).params['b'][1].set(10)
>>> assert modified_state.params == {'a': 1, 'b': (2, 10), 'c': [4, 5]}
Args:
value: the value used to set an attribute, property, element or entry in the Cursor object
Returns:
A copy of the original object with the new set value.
"""
if self._parent_key is None:
return value
parent, key = self._parent_key.parent, self._parent_key.key # type: ignore
parent._changes[key] = Cursor(value, self._parent_key)
return parent._root.build()
def build(self) -> A:
"""Create and return a copy of the original object with accumulated changes.
This method is to be called after making changes to the Cursor object.
.. note::
The new object is built bottom-up, the changes will be first applied
to the leaf nodes, and then its parent, all the way up to the root.
Example::
>>> from flax.cursor import cursor
>>> from flax.training import train_state
>>> import optax
>>> dict_obj = {'a': 1, 'b': (2, 3), 'c': [4, 5]}
>>> c = cursor(dict_obj)
>>> c['b'][0] = 10
>>> c['a'] = (100, 200)
>>> modified_dict_obj = c.build()
>>> assert modified_dict_obj == {'a': (100, 200), 'b': (10, 3), 'c': [4, 5]}
>>> state = train_state.TrainState.create(
... apply_fn=lambda x: x,
... params=dict_obj,
... tx=optax.adam(1e-3),
... )
>>> new_fn = lambda x: x + 1
>>> c = cursor(state)
>>> c.params['b'][1] = 10
>>> c.apply_fn = new_fn
>>> modified_state = c.build()
>>> assert modified_state.params == {'a': 1, 'b': (2, 10), 'c': [4, 5]}
>>> assert modified_state.apply_fn == new_fn
Returns:
A copy of the original object with the accumulated changes.
"""
changes = {
key: child.build() if isinstance(child, Cursor) else child
for key, child in self._changes.items()
}
if isinstance(self._obj, FrozenDict):
obj = self._obj.copy(changes) # type: ignore
elif isinstance(self._obj, (dict, list)):
obj = self._obj.copy() # type: ignore
for key, value in changes.items():
obj[key] = value
elif is_named_tuple(self._obj):
obj = self._obj._replace(**changes) # type: ignore
elif isinstance(self._obj, tuple):
obj = list(self._obj) # type: ignore
for key, value in changes.items():
obj[key] = value
obj = tuple(obj) # type: ignore
elif dataclasses.is_dataclass(self._obj):
obj = dataclasses.replace(self._obj, **changes) # type: ignore
else:
obj = self._obj # type: ignore
return obj # type: ignore
def apply_update(
self,
update_fn: Callable[[str, Any], Any],
) -> 'Cursor[A]':
"""Traverse the Cursor object and record conditional changes recursively via an ``update_fn``.
The changes are recorded in the Cursor object's ``._changes`` dictionary. To generate a copy
of the original object with the accumulated changes, call the ``.build`` method after calling
``.apply_update``.
The ``update_fn`` has a function signature of ``(str, Any) -> Any``:
- The input arguments are the current key path (in the form of a string delimited
by ``'/'``) and value at that current key path
- The output is the new value (either modified by the ``update_fn`` or same as the
input value if the condition wasn't fulfilled)
.. note::
- If the ``update_fn`` returns a modified value, this method will not recurse any further
down that branch to record changes. For example, if we intend to replace an attribute that points
to a dictionary with an int, we don't need to look for further changes inside the dictionary,
since the dictionary will be replaced anyways.
- The ``is`` operator is used to determine whether the return value is modified (by comparing it
to the input value). Therefore if the ``update_fn`` modifies a mutable container (e.g. lists,
dicts, etc.) and returns the same container, ``.apply_update`` will treat the returned value as
unmodified as it contains the same ``id``. To avoid this, return a copy of the modified value.
- ``.apply_update`` WILL NOT call the ``update_fn`` to the value at the top-most level of
the pytree (i.e. the root node). The ``update_fn`` will first be called on the root node's
children, and then the pytree traversal will continue recursively from there.
Example::
>>> import flax.linen as nn
>>> from flax.cursor import cursor
>>> import jax, jax.numpy as jnp
>>> class Model(nn.Module):
... @nn.compact
... def __call__(self, x):
... x = nn.Dense(3)(x)
... x = nn.relu(x)
... x = nn.Dense(3)(x)
... x = nn.relu(x)
... x = nn.Dense(3)(x)
... x = nn.relu(x)
... return x
>>> params = Model().init(jax.random.key(0), jnp.empty((1, 2)))['params']
>>> def update_fn(path, value):
... '''Multiply all dense kernel params by 2 and add 1.
... Subtract the Dense_1 bias param by 1.'''
... if 'kernel' in path:
... return value * 2 + 1
... elif 'Dense_1' in path and 'bias' in path:
... return value - 1
... return value
>>> c = cursor(params)
>>> new_params = c.apply_update(update_fn).build()
>>> for layer in ('Dense_0', 'Dense_1', 'Dense_2'):
... assert (new_params[layer]['kernel'] == 2 * params[layer]['kernel'] + 1).all()
... if layer == 'Dense_1':
... assert (new_params[layer]['bias'] == params[layer]['bias'] - 1).all()
... else:
... assert (new_params[layer]['bias'] == params[layer]['bias']).all()
>>> assert jax.tree_util.tree_all(
... jax.tree_util.tree_map(
... lambda x, y: (x == y).all(),
... params,
... Model().init(jax.random.key(0), jnp.empty((1, 2)))[
... 'params'
... ],
... )
... ) # make sure original params are unchanged
Args:
update_fn: the function that will conditionally record changes to the Cursor object
Returns:
The current Cursor object with the recorded conditional changes specified by the
``update_fn``. To generate a copy of the original object with the accumulated
changes, call the ``.build`` method after calling ``.apply_update``.
"""
for path, value in _traverse_tree((), self._obj, update_fn=update_fn):
child = self
for key, access_type in path[:-1]:
if access_type is AccessType.ITEM:
child = child[key]
else: # access_type is AccessType.ATTR
child = getattr(child, key)
key, access_type = path[-1]
if access_type is AccessType.ITEM:
child[key] = value
else: # access_type is AccessType.ATTR
setattr(child, key, value)
return self
def find(self, cond_fn: Callable[[str, Any], bool]) -> 'Cursor[A]':
"""Traverse the Cursor object and return a child Cursor object that fulfill the
conditions in the ``cond_fn``. The ``cond_fn`` has a function signature of ``(str, Any) -> bool``:
- The input arguments are the current key path (in the form of a string delimited
by ``'/'``) and value at that current key path
- The output is a boolean, denoting whether to return the child Cursor object at this path
Raises a :meth:`CursorFindError <flax.errors.CursorFindError>` if no object or more
than one object is found that fulfills the condition of the ``cond_fn``. We raise an
error because the user should always expect this method to return the only object whose
corresponding key path and value fulfill the condition of the ``cond_fn``.
.. note::
- If the ``cond_fn`` evaluates to True at a particular key path, this method will not recurse
any further down that branch; i.e. this method will find and return the "earliest" child node
that fulfills the condition in ``cond_fn`` in a particular key path
- ``.find`` WILL NOT search the the value at the top-most level of the pytree (i.e. the root
node). The ``cond_fn`` will be evaluated recursively, starting at the root node's children.
Example::
>>> import flax.linen as nn
>>> from flax.cursor import cursor
>>> import jax, jax.numpy as jnp
>>> class Model(nn.Module):
... @nn.compact
... def __call__(self, x):
... x = nn.Dense(3)(x)
... x = nn.relu(x)
... x = nn.Dense(3)(x)
... x = nn.relu(x)
... x = nn.Dense(3)(x)
... x = nn.relu(x)
... return x
>>> params = Model().init(jax.random.PRNGKey(0), jnp.empty((1, 2)))['params']
>>> def cond_fn(path, value):
... '''Find the second dense layer params.'''
... return 'Dense_1' in path
>>> new_params = cursor(params).find(cond_fn)['bias'].set(params['Dense_1']['bias'] + 1)
>>> for layer in ('Dense_0', 'Dense_1', 'Dense_2'):
... if layer == 'Dense_1':
... assert (new_params[layer]['bias'] == params[layer]['bias'] + 1).all()
... else:
... assert (new_params[layer]['bias'] == params[layer]['bias']).all()
>>> c = cursor(params)
>>> c2 = c.find(cond_fn)
>>> c2['kernel'] += 2
>>> c2['bias'] += 2
>>> new_params = c.build()
>>> for layer in ('Dense_0', 'Dense_1', 'Dense_2'):
... if layer == 'Dense_1':
... assert (new_params[layer]['kernel'] == params[layer]['kernel'] + 2).all()
... assert (new_params[layer]['bias'] == params[layer]['bias'] + 2).all()
... else:
... assert (new_params[layer]['kernel'] == params[layer]['kernel']).all()
... assert (new_params[layer]['bias'] == params[layer]['bias']).all()
>>> assert jax.tree_util.tree_all(
... jax.tree_util.tree_map(
... lambda x, y: (x == y).all(),
... params,
... Model().init(jax.random.PRNGKey(0), jnp.empty((1, 2)))[
... 'params'
... ],
... )
... ) # make sure original params are unchanged
Args:
cond_fn: the function that will conditionally find child Cursor objects
Returns:
A child Cursor object that fulfills the condition in the ``cond_fn``.
"""
generator = self.find_all(cond_fn)
try:
cursor = next(generator)
except StopIteration:
raise CursorFindError()
try:
cursor2 = next(generator)
raise CursorFindError(cursor, cursor2)
except StopIteration:
return cursor
def find_all(
    self, cond_fn: Callable[[str, Any], bool]
) -> Generator['Cursor[A]', None, None]:
  """Traverse the Cursor object and return a generator of child Cursor objects that fulfill the
  conditions in the ``cond_fn``. The ``cond_fn`` has a function signature of ``(str, Any) -> bool``:

  - The input arguments are the current key path (in the form of a string delimited
    by ``'/'``) and value at that current key path
  - The output is a boolean, denoting whether to return the child Cursor object at this path

  .. note::
    - If the ``cond_fn`` evaluates to True at a particular key path, this method will not recurse
      any further down that branch; i.e. this method will find and return the "earliest" child nodes
      that fulfill the condition in ``cond_fn`` in a particular key path
    - ``.find_all`` WILL NOT search the value at the top-most level of the pytree (i.e. the root
      node). The ``cond_fn`` will be evaluated recursively, starting at the root node's children.

  Example::

    >>> import flax.linen as nn
    >>> from flax.cursor import cursor
    >>> import jax, jax.numpy as jnp

    >>> class Model(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     x = nn.Dense(3)(x)
    ...     x = nn.relu(x)
    ...     x = nn.Dense(3)(x)
    ...     x = nn.relu(x)
    ...     x = nn.Dense(3)(x)
    ...     x = nn.relu(x)
    ...     return x

    >>> params = Model().init(jax.random.PRNGKey(0), jnp.empty((1, 2)))['params']

    >>> def cond_fn(path, value):
    ...   '''Find all dense layer params.'''
    ...   return 'Dense' in path

    >>> c = cursor(params)
    >>> for dense_params in c.find_all(cond_fn):
    ...   dense_params['bias'] += 1
    >>> new_params = c.build()
    >>> for layer in ('Dense_0', 'Dense_1', 'Dense_2'):
    ...   assert (new_params[layer]['bias'] == params[layer]['bias'] + 1).all()
    >>> assert jax.tree_util.tree_all(
    ...   jax.tree_util.tree_map(
    ...     lambda x, y: (x == y).all(),
    ...     params,
    ...     Model().init(jax.random.PRNGKey(0), jnp.empty((1, 2)))[
    ...       'params'
    ...     ],
    ...   )
    ... )  # make sure original params are unchanged

  Args:
    cond_fn: the function that will conditionally find child Cursor objects
  Returns:
    A generator of child Cursor objects that fulfill the condition in the ``cond_fn``.
  """
  # _traverse_tree yields, for each match, the key path from the root as a
  # sequence of (key, AccessType) steps.
  for path in _traverse_tree((), self._obj, cond_fn=cond_fn):
    # Re-walk the path starting at this cursor (rather than at the raw
    # object) so the yielded child participates in this cursor's
    # change-tracking and a later ``.build()`` sees its edits.
    child = self
    for key, access_type in path:
      if access_type is AccessType.ITEM:
        child = child[key]
      else:  # access_type is AccessType.ATTR
        child = getattr(child, key)
    yield child
def __str__(self):
  # Plain string form delegates to the wrapped object; pending changes are
  # not shown (use repr() for those).
  return str(self._obj)

def __repr__(self):
  return self._pretty_repr()

def _pretty_repr(self, indent=2, _prefix_indent=0):
  """Render ``Cursor(_obj=..., _changes={...})`` with nested indentation.

  Args:
    indent: number of spaces added per nesting level.
    _prefix_indent: internal — column offset of the current line, used so
      recursive child reprs line up under their dict key.
  """
  s = 'Cursor(\n'
  # Indent every line of the wrapped object's repr to the current level.
  obj_str = repr(self._obj).replace(
      '\n', '\n' + ' ' * (_prefix_indent + indent)
  )
  s += ' ' * (_prefix_indent + indent) + f'_obj={obj_str},\n'
  s += ' ' * (_prefix_indent + indent) + '_changes={'
  if self._changes:
    s += '\n'
    for key in self._changes:
      str_key = repr(key)
      # The child repr starts right after "key: ", so its continuation
      # lines must be indented by the full prefix width.
      prefix = ' ' * (_prefix_indent + 2 * indent) + str_key + ': '
      s += (
          prefix
          + self._changes[key]._pretty_repr(
              indent=indent, _prefix_indent=len(prefix)
          )
          + ',\n'
      )
    s = s[
        :-2
    ]  # remove comma and newline character for last element in self._changes
    s += '\n' + ' ' * (_prefix_indent + indent) + '}\n'
  else:
    s += '}\n'
  s += ' ' * _prefix_indent + ')'
  return s
def __len__(self):
  # Length of the wrapped container.
  return len(self._obj)

def __iter__(self):
  # Iteration yields child *Cursor* objects (via __getitem__) so that
  # in-place edits made while iterating are tracked for a later .build().
  if isinstance(self._obj, (tuple, list)):
    return (self[i] for i in range(len(self._obj)))
  else:
    raise NotImplementedError(
        '__iter__ method only implemented for tuples and lists, not type'
        f' {type(self._obj)}'
    )

def __reversed__(self):
  # Same as __iter__, but walks indices from the end.
  if isinstance(self._obj, (tuple, list)):
    return (self[i] for i in range(len(self._obj) - 1, -1, -1))
  else:
    raise NotImplementedError(
        '__reversed__ method only implemented for tuples and lists, not type'
        f' {type(self._obj)}'
    )
# Operator overloads delegate to the wrapped object so a Cursor can appear
# directly in expressions (e.g. ``c['a'] + 1``). Note they return plain
# values computed from ``self._obj``, NOT new Cursor objects.

def __add__(self, other):
  return self._obj + other

def __sub__(self, other):
  return self._obj - other

def __mul__(self, other):
  return self._obj * other

def __matmul__(self, other):
  return self._obj @ other

def __truediv__(self, other):
  return self._obj / other

def __floordiv__(self, other):
  return self._obj // other

def __mod__(self, other):
  return self._obj % other

def __divmod__(self, other):
  return divmod(self._obj, other)

def __pow__(self, other):
  return pow(self._obj, other)

def __lshift__(self, other):
  return self._obj << other

def __rshift__(self, other):
  return self._obj >> other

def __and__(self, other):
  return self._obj & other

def __xor__(self, other):
  return self._obj ^ other

def __or__(self, other):
  return self._obj | other

# Reflected variants, used when the Cursor is the right-hand operand.

def __radd__(self, other):
  return other + self._obj

def __rsub__(self, other):
  return other - self._obj

def __rmul__(self, other):
  return other * self._obj

def __rmatmul__(self, other):
  return other @ self._obj

def __rtruediv__(self, other):
  return other / self._obj

def __rfloordiv__(self, other):
  return other // self._obj

def __rmod__(self, other):
  return other % self._obj

def __rdivmod__(self, other):
  return divmod(other, self._obj)

def __rpow__(self, other):
  return pow(other, self._obj)

def __rlshift__(self, other):
  return other << self._obj

def __rrshift__(self, other):
  return other >> self._obj

def __rand__(self, other):
  return other & self._obj

def __rxor__(self, other):
  return other ^ self._obj

def __ror__(self, other):
  return other | self._obj

# Unary operators.

def __neg__(self):
  return -self._obj

def __pos__(self):
  return +self._obj

def __abs__(self):
  return abs(self._obj)

def __invert__(self):
  return ~self._obj

def __round__(self, ndigits=None):
  return round(self._obj, ndigits)

# Comparisons also delegate to the wrapped object.
# NOTE(review): defining __eq__ without __hash__ makes instances unhashable
# (Python sets __hash__ to None) — presumably intended; confirm.

def __lt__(self, other):
  return self._obj < other

def __le__(self, other):
  return self._obj <= other

def __eq__(self, other):
  return self._obj == other

def __ne__(self, other):
  return self._obj != other

def __gt__(self, other):
  return self._obj > other

def __ge__(self, other):
  return self._obj >= other
The provided code snippet includes necessary dependencies for implementing the `cursor` function. Write a Python function `def cursor(obj: A) -> Cursor[A]` to solve the following problem:
Wrap :class:`Cursor <flax.cursor.Cursor>` over ``obj`` and return it. Changes can then be applied to the Cursor object in the following ways: - single-line change via the ``.set`` method - multiple changes, and then calling the ``.build`` method - multiple changes conditioned on the pytree path and node value via the ``.apply_update`` method, and then calling the ``.build`` method ``.set`` example:: >>> from flax.cursor import cursor >>> dict_obj = {'a': 1, 'b': (2, 3), 'c': [4, 5]} >>> modified_dict_obj = cursor(dict_obj)['b'][0].set(10) >>> assert modified_dict_obj == {'a': 1, 'b': (10, 3), 'c': [4, 5]} ``.build`` example:: >>> from flax.cursor import cursor >>> dict_obj = {'a': 1, 'b': (2, 3), 'c': [4, 5]} >>> c = cursor(dict_obj) >>> c['b'][0] = 10 >>> c['a'] = (100, 200) >>> modified_dict_obj = c.build() >>> assert modified_dict_obj == {'a': (100, 200), 'b': (10, 3), 'c': [4, 5]} ``.apply_update`` example:: >>> from flax.cursor import cursor >>> from flax.training import train_state >>> import optax >>> def update_fn(path, value): ... '''Replace params with empty dictionary.''' ... if 'params' in path: ... return {} ... return value >>> state = train_state.TrainState.create( ... apply_fn=lambda x: x, ... params={'a': 1, 'b': 2}, ... tx=optax.adam(1e-3), ... ) >>> c = cursor(state) >>> state2 = c.apply_update(update_fn).build() >>> assert state2.params == {} >>> assert state.params == {'a': 1, 'b': 2} # make sure original params are unchanged If the underlying ``obj`` is a ``list`` or ``tuple``, iterating over the Cursor object to get the child Cursors is also possible:: >>> from flax.cursor import cursor >>> c = cursor(((1, 2), (3, 4))) >>> for child_c in c: ... child_c[1] *= -1 >>> assert c.build() == ((1, -2), (3, -4)) View the docstrings for each method to see more examples of their usage. Args: obj: the object you want to wrap the Cursor in Returns: A Cursor object wrapped around obj.
Here is the function:
def cursor(obj: A) -> Cursor[A]:
  """Wrap :class:`Cursor <flax.cursor.Cursor>` over ``obj`` and return it.

  Changes can then be applied to the Cursor object in the following ways:

  - single-line change via the ``.set`` method
  - multiple changes, and then calling the ``.build`` method
  - multiple changes conditioned on the pytree path and node value via the
    ``.apply_update`` method, and then calling the ``.build`` method

  ``.set`` example::

    >>> from flax.cursor import cursor
    >>> dict_obj = {'a': 1, 'b': (2, 3), 'c': [4, 5]}
    >>> modified_dict_obj = cursor(dict_obj)['b'][0].set(10)
    >>> assert modified_dict_obj == {'a': 1, 'b': (10, 3), 'c': [4, 5]}

  ``.build`` example::

    >>> from flax.cursor import cursor
    >>> dict_obj = {'a': 1, 'b': (2, 3), 'c': [4, 5]}
    >>> c = cursor(dict_obj)
    >>> c['b'][0] = 10
    >>> c['a'] = (100, 200)
    >>> modified_dict_obj = c.build()
    >>> assert modified_dict_obj == {'a': (100, 200), 'b': (10, 3), 'c': [4, 5]}

  ``.apply_update`` example::

    >>> from flax.cursor import cursor
    >>> from flax.training import train_state
    >>> import optax

    >>> def update_fn(path, value):
    ...   '''Replace params with empty dictionary.'''
    ...   if 'params' in path:
    ...     return {}
    ...   return value

    >>> state = train_state.TrainState.create(
    ...   apply_fn=lambda x: x,
    ...   params={'a': 1, 'b': 2},
    ...   tx=optax.adam(1e-3),
    ... )
    >>> c = cursor(state)
    >>> state2 = c.apply_update(update_fn).build()
    >>> assert state2.params == {}
    >>> assert state.params == {'a': 1, 'b': 2}  # make sure original params are unchanged

  If the underlying ``obj`` is a ``list`` or ``tuple``, iterating over the Cursor object
  to get the child Cursors is also possible::

    >>> from flax.cursor import cursor
    >>> c = cursor(((1, 2), (3, 4)))
    >>> for child_c in c:
    ...   child_c[1] *= -1
    >>> assert c.build() == ((1, -2), (3, -4))

  View the docstrings for each method to see more examples of their usage.

  Args:
    obj: the object you want to wrap the Cursor in
  Returns:
    A Cursor object wrapped around obj.
  """
  # None parent marks this cursor as the root of the (lazy) edit tree.
  return Cursor(obj, None)
22,703 | import os
from contextlib import contextmanager
from typing import Any, Generic, NoReturn, TypeVar, overload
class Config:
  """Process-global registry of Flax configuration flags.

  Flag values live in a private name -> value dict; ``bool_flag`` additionally
  installs read-only properties on this class so flags can be read as
  ``config.flax_xyz``.
  """

  # See https://google.github.io/pytype/faq.html.
  _HAS_DYNAMIC_ATTRIBUTES = True

  def __init__(self):
    # Maps flag name -> current value.
    self._values = {}

  def _add_option(self, name, default):
    """Register a new flag; raises RuntimeError if ``name`` already exists."""
    if name in self._values:
      raise RuntimeError(f'Config option {name} already defined')
    self._values[name] = default

  def _read(self, name):
    """Return the current value of flag ``name``.

    Raises:
      LookupError: if the flag was never registered.
    """
    try:
      return self._values[name]
    except KeyError:
      raise LookupError(f'Unrecognized config option: {name}')

  # Typing-only overloads (erased at runtime): update() accepts either a
  # flag name or a FlagHolder. Annotations are quoted because FlagHolder
  # and _T are defined later in the module.
  @overload
  def update(self, name: str, value: Any, /) -> None:
    ...

  @overload
  def update(self, holder: 'FlagHolder[_T]', value: '_T', /) -> None:
    ...

  def update(self, name_or_holder, value, /):
    """Modify the value of a given flag.

    Args:
      name_or_holder: the name of the flag to modify or the corresponding
        flag holder object.
      value: new value to set.

    Raises:
      LookupError: if the flag was never registered.
    """
    name = name_or_holder
    if isinstance(name_or_holder, FlagHolder):
      name = name_or_holder.name
    if name not in self._values:
      raise LookupError(f'Unrecognized config option: {name}')
    self._values[name] = value
config = Config()
class FlagHolder(Generic[_T]):
  """Handle for a single registered flag; reads its live value via ``.value``."""

  def __init__(self, name, help):
    self.name = name
    # Drop the leading 'flax' for the exported name.
    # NOTE(review): name[4:] keeps the underscore ('flax_foo' -> '_foo');
    # confirm this is intended rather than name[5:] / removeprefix('flax_').
    self.__name__ = name[4:] if name.startswith('flax_') else name
    self.__doc__ = f'Flag holder for `{name}`.\n\n{help}'

  def __bool__(self) -> NoReturn:
    # Guard against `if flag:` — the holder itself is not the flag value.
    raise TypeError(
        "bool() not supported for instances of type '{0}' "
        "(did you mean to use '{0}.value' instead?)".format(type(self).__name__)
    )

  @property
  def value(self) -> _T:
    # Property (not a method): callers such as bool_flag's
    # `lambda _: fh.value` read it without calling.
    return config._read(self.name)
def static_bool_env(varname: str, default: bool) -> bool:
  """Read an environment variable and interpret it as a boolean.

  This is deprecated. Please use bool_flag() unless your flag
  will be used in a static method and does not require runtime updates.

  True values are (case insensitive): 'y', 'yes', 't', 'true', 'on', and '1';
  false values are 'n', 'no', 'f', 'false', 'off', and '0'.

  Args:
    varname: the name of the variable
    default: the default boolean value

  Returns:
    boolean return value derived from defaults and environment.

  Raises: ValueError if the environment variable is anything else.
  """
  truthy = frozenset(('y', 'yes', 't', 'true', 'on', '1'))
  falsy = frozenset(('n', 'no', 'f', 'false', 'off', '0'))
  # str(default) yields 'True'/'False', which lowercase into the sets above.
  raw = os.getenv(varname, str(default)).lower()
  if raw in truthy:
    return True
  if raw in falsy:
    return False
  raise ValueError(
      'invalid truth value {!r} for environment {!r}'.format(raw, varname)
  )
The provided code snippet includes necessary dependencies for implementing the `bool_flag` function. Write a Python function `def bool_flag(name: str, *, default: bool, help: str) -> FlagHolder[bool]` to solve the following problem:
Set up a boolean flag. Example:: enable_foo = bool_flag( name='flax_enable_foo', default=False, help='Enable foo.', ) Now the ``FLAX_ENABLE_FOO`` shell environment variable can be used to control the process-level value of the flag, in addition to using e.g. ``config.update("flax_enable_foo", True)`` directly. Args: name: converted to lowercase to define the name of the flag. It is converted to uppercase to define the corresponding shell environment variable. default: a default value for the flag. help: used to populate the docstring of the returned flag holder object. Returns: A flag holder object for accessing the value of the flag.
Here is the function:
def bool_flag(name: str, *, default: bool, help: str) -> FlagHolder[bool]:
  """Set up a boolean flag.

  Example::

    enable_foo = bool_flag(
        name='flax_enable_foo',
        default=False,
        help='Enable foo.',
    )

  Now the ``FLAX_ENABLE_FOO`` shell environment variable can be used to
  control the process-level value of the flag, in addition to using e.g.
  ``config.update("flax_enable_foo", True)`` directly.

  Args:
    name: converted to lowercase to define the name of the flag. It is
      converted to uppercase to define the corresponding shell environment
      variable.
    default: a default value for the flag.
    help: used to populate the docstring of the returned flag holder object.

  Returns:
    A flag holder object for accessing the value of the flag.
  """
  name = name.lower()
  # The environment variable (uppercased name) overrides `default` at
  # registration time only; later changes go through config.update().
  config._add_option(name, static_bool_env(name.upper(), default))
  fh = FlagHolder[bool](name, help)
  # Expose the flag as a read-only property on Config, e.g. config.flax_foo.
  # NOTE(review): `fh.value` here is an attribute access, which assumes
  # FlagHolder.value is a property — confirm.
  setattr(Config, name, property(lambda _: fh.value, doc=help))
  return fh
22,704 | import os
from contextlib import contextmanager
from typing import Any, Generic, NoReturn, TypeVar, overload
config = Config()
The provided code snippet includes necessary dependencies for implementing the `temp_flip_flag` function. Write a Python function `def temp_flip_flag(var_name: str, var_value: bool)` to solve the following problem:
Context manager to temporarily flip feature flags for test functions. Args: var_name: the config variable name (without the 'flax_' prefix) var_value: the boolean value to set var_name to temporarily
Here is the function:
@contextmanager
def temp_flip_flag(var_name: str, var_value: bool):
  """Context manager to temporarily flip feature flags for test functions.

  Without the ``@contextmanager`` decorator this generator cannot be used in a
  ``with`` statement, which is the documented usage.

  Args:
    var_name: the config variable name (without the 'flax_' prefix)
    var_value: the boolean value to set var_name to temporarily
  """
  old_value = getattr(config, f'flax_{var_name}')
  try:
    config.update(f'flax_{var_name}', var_value)
    yield
  finally:
    # Always restore the previous value, even if the body raised.
    config.update(f'flax_{var_name}', old_value)
22,705 | import dataclasses
from typing import TypeVar
import jax
from typing_extensions import (
dataclass_transform, # pytype: disable=not-supported-yet
)
from . import serialization
_T = TypeVar('_T')
The provided code snippet includes necessary dependencies for implementing the `dataclass` function. Write a Python function `def dataclass(clz: _T, **kwargs) -> _T` to solve the following problem:
Create a class which can be passed to functional transformations. .. note:: Inherit from ``PyTreeNode`` instead to avoid type checking issues when using PyType. Jax transformations such as ``jax.jit`` and ``jax.grad`` require objects that are immutable and can be mapped over using the ``jax.tree_util`` methods. The ``dataclass`` decorator makes it easy to define custom classes that can be passed safely to Jax. For example:: >>> from flax import struct >>> import jax >>> from typing import Any, Callable >>> @struct.dataclass ... class Model: ... params: Any ... # use pytree_node=False to indicate an attribute should not be touched ... # by Jax transformations. ... apply_fn: Callable = struct.field(pytree_node=False) ... def __apply__(self, *args): ... return self.apply_fn(*args) >>> params = {} >>> params_b = {} >>> apply_fn = lambda v, x: x >>> model = Model(params, apply_fn) >>> # model.params = params_b # Model is immutable. This will raise an error. >>> model_b = model.replace(params=params_b) # Use the replace method instead. >>> # This class can now be used safely in Jax to compute gradients w.r.t. the >>> # parameters. >>> model = Model(params, apply_fn) >>> loss_fn = lambda model: 3. >>> model_grad = jax.grad(loss_fn)(model) Note that dataclasses have an auto-generated ``__init__`` where the arguments of the constructor and the attributes of the created instance match 1:1. This correspondence is what makes these objects valid containers that work with JAX transformations and more generally the ``jax.tree_util`` library. Sometimes a "smart constructor" is desired, for example because some of the attributes can be (optionally) derived from others. The way to do this with Flax dataclasses is to make a static or class method that provides the smart constructor. This way the simple constructor used by ``jax.tree_util`` is preserved. Consider the following example:: >>> @struct.dataclass ... class DirectionAndScaleKernel: ... direction: jax.Array ... 
scale: jax.Array ... @classmethod ... def create(cls, kernel): ... scale = jax.numpy.linalg.norm(kernel, axis=0, keepdims=True) ... direction = direction / scale ... return cls(direction, scale) Args: clz: the class that will be transformed by the decorator. Returns: The new class.
Here is the function:
def dataclass(clz: _T, **kwargs) -> _T:
  """Create a class which can be passed to functional transformations.

  .. note::
    Inherit from ``PyTreeNode`` instead to avoid type checking issues when
    using PyType.

  Jax transformations such as ``jax.jit`` and ``jax.grad`` require objects that are
  immutable and can be mapped over using the ``jax.tree_util`` methods.
  The ``dataclass`` decorator makes it easy to define custom classes that can be
  passed safely to Jax. For example::

    >>> from flax import struct
    >>> import jax
    >>> from typing import Any, Callable

    >>> @struct.dataclass
    ... class Model:
    ...   params: Any
    ...   # use pytree_node=False to indicate an attribute should not be touched
    ...   # by Jax transformations.
    ...   apply_fn: Callable = struct.field(pytree_node=False)
    ...   def __apply__(self, *args):
    ...     return self.apply_fn(*args)

    >>> params = {}
    >>> params_b = {}
    >>> apply_fn = lambda v, x: x
    >>> model = Model(params, apply_fn)
    >>> # model.params = params_b  # Model is immutable. This will raise an error.
    >>> model_b = model.replace(params=params_b)  # Use the replace method instead.

    >>> # This class can now be used safely in Jax to compute gradients w.r.t. the
    >>> # parameters.
    >>> model = Model(params, apply_fn)
    >>> loss_fn = lambda model: 3.
    >>> model_grad = jax.grad(loss_fn)(model)

  Note that dataclasses have an auto-generated ``__init__`` where
  the arguments of the constructor and the attributes of the created
  instance match 1:1. This correspondence is what makes these objects
  valid containers that work with JAX transformations and
  more generally the ``jax.tree_util`` library.

  Sometimes a "smart constructor" is desired, for example because
  some of the attributes can be (optionally) derived from others.
  The way to do this with Flax dataclasses is to make a static or
  class method that provides the smart constructor.
  This way the simple constructor used by ``jax.tree_util`` is
  preserved. Consider the following example::

    >>> @struct.dataclass
    ... class DirectionAndScaleKernel:
    ...   direction: jax.Array
    ...   scale: jax.Array
    ...   @classmethod
    ...   def create(cls, kernel):
    ...     scale = jax.numpy.linalg.norm(kernel, axis=0, keepdims=True)
    ...     direction = kernel / scale
    ...     return cls(direction, scale)

  Args:
    clz: the class that will be transformed by the decorator.
  Returns:
    The new class.
  """
  # check if already a flax dataclass (idempotent decoration)
  if '_flax_dataclass' in clz.__dict__:
    return clz
  # Frozen by default: pytree leaves must be immutable for safe use in JAX.
  if 'frozen' not in kwargs.keys():
    kwargs['frozen'] = True
  data_clz = dataclasses.dataclass(**kwargs)(clz)  # type: ignore
  # Split fields into pytree data (traced/transformed by JAX) and static
  # metadata (hashable aux data), based on the field's pytree_node flag.
  meta_fields = []
  data_fields = []
  for field_info in dataclasses.fields(data_clz):
    is_pytree_node = field_info.metadata.get('pytree_node', True)
    if is_pytree_node:
      data_fields.append(field_info.name)
    else:
      meta_fields.append(field_info.name)

  def replace(self, **updates):
    """Returns a new object replacing the specified fields with new values."""
    return dataclasses.replace(self, **updates)

  data_clz.replace = replace

  # Flatten: children are the data fields, aux data is the meta fields.
  def iterate_clz(x):
    meta = tuple(getattr(x, name) for name in meta_fields)
    data = tuple(getattr(x, name) for name in data_fields)
    return data, meta

  # Keyed flatten variant used by jax.tree_util key-path APIs.
  def iterate_clz_with_keys(x):
    meta = tuple(getattr(x, name) for name in meta_fields)
    data = tuple(
        (jax.tree_util.GetAttrKey(name), getattr(x, name)) for name in data_fields
    )
    return data, meta

  # Unflatten: rebuild via the auto-generated constructor.
  def clz_from_iterable(meta, data):
    meta_args = tuple(zip(meta_fields, meta))
    data_args = tuple(zip(data_fields, data))
    kwargs = dict(meta_args + data_args)
    return data_clz(**kwargs)

  jax.tree_util.register_pytree_with_keys(
      data_clz, iterate_clz_with_keys, clz_from_iterable, iterate_clz,
  )

  # Serialization: only data fields participate in the state dict.
  def to_state_dict(x):
    state_dict = {
        name: serialization.to_state_dict(getattr(x, name))
        for name in data_fields
    }
    return state_dict

  def from_state_dict(x, state):
    """Restore the state of a data class."""
    state = state.copy()  # copy the state so we can pop the restored fields.
    updates = {}
    for name in data_fields:
      if name not in state:
        raise ValueError(
            f'Missing field {name} in state dict while restoring'
            f' an instance of {clz.__name__},'
            f' at path {serialization.current_path()}'
        )
      value = getattr(x, name)
      value_state = state.pop(name)
      updates[name] = serialization.from_state_dict(
          value, value_state, name=name
      )
    # Any leftover keys mean the state dict does not match the class.
    if state:
      names = ','.join(state.keys())
      raise ValueError(
          f'Unknown field(s) "{names}" in state dict while'
          f' restoring an instance of {clz.__name__}'
          f' at path {serialization.current_path()}'
      )
    return x.replace(**updates)

  serialization.register_serialization_state(
      data_clz, to_state_dict, from_state_dict
  )

  # add a _flax_dataclass flag to distinguish from regular dataclasses
  data_clz._flax_dataclass = True  # type: ignore[attr-defined]

  return data_clz  # type: ignore
direction: jax.Array ... scale: jax.Array ... @classmethod ... def create(cls, kernel): ... scale = jax.numpy.linalg.norm(kernel, axis=0, keepdims=True) ... direction = direction / scale ... return cls(direction, scale) Args: clz: the class that will be transformed by the decorator. Returns: The new class. |
22,706 | import contextlib
import glob as glob_module
import importlib
import os
import shutil
from enum import Enum
from absl import logging
from . import errors
class BackendMode(Enum):
  """Selects which IO backend flax.io delegates to."""

  DEFAULT = 0  # Python stdlib (os/shutil/glob)
  TF = 1  # tensorflow gfile


# Active backend mode. None here; presumably assigned a BackendMode at import
# time in the full module — TODO confirm.
io_mode = None

# Export a single NotFoundError type so callers can catch one exception
# regardless of the active backend.
if io_mode == BackendMode.TF:
  from tensorflow import errors as tf_errors  # type: ignore

  NotFoundError = tf_errors.NotFoundError
else:
  NotFoundError = FileNotFoundError
The provided code snippet includes necessary dependencies for implementing the `override_mode` function. Write a Python function `def override_mode(override: BackendMode)` to solve the following problem:
Returns a context manager that changes backend IO mode. Args: override: BackendMode enum value to set IO mode inside context.
Here is the function:
@contextlib.contextmanager
def override_mode(override: BackendMode):
  # pylint: disable=g-doc-return-or-yield
  """Returns a context manager that changes backend IO mode.

  Without ``@contextlib.contextmanager`` the generator returned by this
  function cannot be used in a ``with`` statement, which is the documented
  contract.

  Args:
    override: BackendMode enum value to set IO mode inside context.
  """
  # pylint: enable=g-doc-return-or-yield
  global io_mode
  io_mode_prev = io_mode
  io_mode = override
  try:
    yield
  finally:
    # Restore the previous mode even if the body raised.
    io_mode = io_mode_prev
22,707 | import contextlib
import glob as glob_module
import importlib
import os
import shutil
from enum import Enum
from absl import logging
from . import errors
class BackendMode(Enum):
  """Selects which IO backend flax.io delegates to."""

  DEFAULT = 0  # Python stdlib (os/shutil/glob)
  TF = 1  # tensorflow gfile


# Active backend mode. None here; presumably assigned a BackendMode at import
# time in the full module — TODO confirm.
io_mode = None

# Export a single NotFoundError type so callers can catch one exception
# regardless of the active backend.
if io_mode == BackendMode.TF:
  from tensorflow import errors as tf_errors  # type: ignore

  NotFoundError = tf_errors.NotFoundError
else:
  NotFoundError = FileNotFoundError
The provided code snippet includes necessary dependencies for implementing the `set_mode` function. Write a Python function `def set_mode(override: BackendMode)` to solve the following problem:
Sets global io mode. Args: override: BackendMode enum value to set for IO mode.
Here is the function:
def set_mode(override: BackendMode):
  """Sets global io mode.

  Unlike ``override_mode`` this change is permanent for the process (until
  set again).

  Args:
    override: BackendMode enum value to set for IO mode.
  """
  global io_mode
  io_mode = override
22,708 | import contextlib
import glob as glob_module
import importlib
import os
import shutil
from enum import Enum
from absl import logging
from . import errors
class BackendMode(Enum):
  """Selects which IO backend flax.io delegates to."""

  DEFAULT = 0  # Python stdlib (os/shutil/glob)
  TF = 1  # tensorflow gfile


# Active backend mode and (for TF mode) the gfile module. Both None here;
# presumably assigned at import time in the full module — TODO confirm.
io_mode = None
gfile = None

# Export a single NotFoundError type so callers can catch one exception
# regardless of the active backend.
if io_mode == BackendMode.TF:
  from tensorflow import errors as tf_errors  # type: ignore

  NotFoundError = tf_errors.NotFoundError
else:
  NotFoundError = FileNotFoundError
def exists(path):
  """Return whether ``path`` exists, using the active IO backend.

  Raises:
    ValueError: if the global ``io_mode`` is not a known BackendMode.
  """
  if io_mode == BackendMode.DEFAULT:
    return os.path.exists(path)
  elif io_mode == BackendMode.TF:
    return gfile.exists(path)
  else:
    raise ValueError('Unknown IO Backend Mode.')
def copy(src, dst, overwrite=False):
  """Copy ``src`` to ``dst`` using the active IO backend.

  Args:
    src: source path.
    dst: destination path.
    overwrite: if False, refuse to clobber an existing ``dst``.

  Raises:
    errors.AlreadyExistsError: (DEFAULT backend) if ``dst`` exists and
      ``overwrite`` is False. The TF backend delegates this check to gfile.
    ValueError: if the global ``io_mode`` is not a known BackendMode.
  """
  if io_mode == BackendMode.DEFAULT:
    if os.path.exists(dst) and not overwrite:
      raise errors.AlreadyExistsError(dst)
    shutil.copy(src, dst)
    return
  elif io_mode == BackendMode.TF:
    return gfile.copy(src, dst, overwrite=overwrite)
  else:
    raise ValueError('Unknown IO Backend Mode.')
22,709 | import contextlib
import glob as glob_module
import importlib
import os
import shutil
from enum import Enum
from absl import logging
from . import errors
class BackendMode(Enum):
  """Selects which IO backend flax.io delegates to."""

  DEFAULT = 0  # Python stdlib (os/shutil/glob)
  TF = 1  # tensorflow gfile


# Active backend mode and (for TF mode) the gfile module. Both None here;
# presumably assigned at import time in the full module — TODO confirm.
io_mode = None
gfile = None

# Export a single NotFoundError type so callers can catch one exception
# regardless of the active backend.
if io_mode == BackendMode.TF:
  from tensorflow import errors as tf_errors  # type: ignore

  NotFoundError = tf_errors.NotFoundError
else:
  NotFoundError = FileNotFoundError
def glob(pattern):
  """Glob ``pattern`` using the active IO backend.

  With the DEFAULT backend, recursive (``**``) matching is disabled and any
  trailing slash on directory matches is stripped — presumably to mirror
  gfile's output format; confirm.

  Raises:
    ValueError: if the global ``io_mode`` is not a known BackendMode.
  """
  if io_mode == BackendMode.DEFAULT:
    return [
        path.rstrip('/') for path in glob_module.glob(pattern, recursive=False)
    ]
  elif io_mode == BackendMode.TF:
    return gfile.glob(pattern)
  else:
    raise ValueError('Unknown IO Backend Mode.')
22,710 | from __future__ import annotations
import functools
from typing import Any, Callable, Optional, overload
import jax
import jax.numpy as jnp
from jax import lax, random
from flax.experimental import nnx
from flax.experimental.nnx.nnx import rnglib
from flax.experimental.nnx.nnx.module import Module, first_from
from flax.experimental.nnx.nnx.nn import initializers
from flax.experimental.nnx.nnx.nn.dtypes import promote_dtype
from flax.experimental.nnx.nnx.nn.linear import (
LinearGeneral,
default_kernel_init,
)
from flax.experimental.nnx.nnx.nn.normalization import LayerNorm
from flax.typing import (
Array,
Dtype,
Shape,
Initializer,
PrecisionLike,
DotGeneralT,
)
def dot_product_attention_weights(
  query: Array,
  key: Array,
  bias: Optional[Array] = None,
  mask: Optional[Array] = None,
  broadcast_dropout: bool = True,
  dropout_rng: Optional[Array] = None,
  dropout_rate: float = 0.0,
  deterministic: bool = False,
  dtype: Optional[Dtype] = None,
  precision: PrecisionLike = None,
  module: Optional[Module] = None,
):
  """Computes dot-product attention weights given query and key.

  Used by :func:`dot_product_attention`, which is what you'll most likely use.
  But if you want access to the attention weights for introspection, then
  you can directly call this function and call einsum yourself.

  Args:
    query: queries for calculating attention with shape of `[batch...,
      q_length, num_heads, qk_depth_per_head]`.
    key: keys for calculating attention with shape of `[batch..., kv_length,
      num_heads, qk_depth_per_head]`.
    bias: bias for the attention weights. This should be broadcastable to the
      shape `[batch..., num_heads, q_length, kv_length]`. This can be used for
      incorporating causal masks, padding masks, proximity bias, etc.
    mask: mask for the attention weights. This should be broadcastable to the
      shape `[batch..., num_heads, q_length, kv_length]`. This can be used for
      incorporating causal masks. Attention weights are masked out if their
      corresponding mask value is `False`.
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    dropout_rng: JAX PRNGKey: to be used for dropout
    dropout_rate: dropout rate
    deterministic: bool, deterministic or not (to apply dropout)
    dtype: the dtype of the computation (default: infer from inputs and params)
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    module: the Module that will sow the attention weights into the
      ``nnx.Intermediate`` collection. If ``module`` is None, the attention
      weights will not be sowed.

  Returns:
    Output of shape `[batch..., num_heads, q_length, kv_length]`.
  """
  query, key = promote_dtype(query, key, dtype=dtype)
  dtype = query.dtype

  assert query.ndim == key.ndim, 'q, k must have same rank.'
  assert query.shape[:-3] == key.shape[:-3], 'q, k batch dims must match.'
  assert query.shape[-2] == key.shape[-2], 'q, k num_heads must match.'
  assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'

  # calculate attention matrix
  depth = query.shape[-1]
  # scale queries by 1/sqrt(depth): standard scaled dot-product attention
  query = query / jnp.sqrt(depth).astype(dtype)
  # attn weight shape is (batch..., num_heads, q_length, kv_length)
  attn_weights = jnp.einsum(
    '...qhd,...khd->...hqk', query, key, precision=precision
  )

  # apply attention bias: masking, dropout, proximity bias, etc.
  if bias is not None:
    attn_weights = attn_weights + bias
  # apply attention mask (masked positions get the most-negative finite value
  # so softmax sends them to ~0)
  if mask is not None:
    big_neg = jnp.finfo(dtype).min
    attn_weights = jnp.where(mask, attn_weights, big_neg)

  # normalize the attention weights
  attn_weights = jax.nn.softmax(attn_weights).astype(dtype)

  # weights are sowed BEFORE dropout so introspection sees the clean softmax
  if module:
    module.sow(nnx.Intermediate, 'attention_weights', attn_weights)

  # apply attention dropout
  if not deterministic and dropout_rate > 0.0:
    keep_prob = 1.0 - dropout_rate
    if broadcast_dropout:
      # dropout is broadcast across the batch + head dimensions
      dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
      keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape)  # type: ignore
    else:
      keep = random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)  # type: ignore
    # rescale kept weights by 1/keep_prob (inverted dropout)
    multiplier = keep.astype(dtype) / jnp.asarray(keep_prob, dtype=dtype)
    attn_weights = attn_weights * multiplier

  return attn_weights
class Module(reprlib.Representable, metaclass=ModuleMeta):
if tp.TYPE_CHECKING:
_module__state: ModuleState
if not tp.TYPE_CHECKING:
def __setattr__(self, name: str, value: Any) -> None:
self._setattr(name, value)
def _setattr(self, name: str, value: tp.Any) -> None:
if not self._module__state.trace_state.is_valid():
raise errors.TraceContextError(
'Cannot mutate Module from different trace level'
)
if isinstance(value, (jax.Array, np.ndarray, State)):
raise ValueError(
f"Trying to assign a '{type(value).__name__}' to the Module"
f" attribute '{name}'. This is not supported. Non-hashable "
'objects are not valid static state in JAX. Please wrap '
'the value in a Variable type instead.'
)
object.__setattr__(self, name, value)
def __deepcopy__(self: M, memo=None) -> M:
state, graphdef = self.split()
graphdef = deepcopy(graphdef)
state = deepcopy(state)
return graphdef.merge(state)
def __hash__(self) -> int:
return hash(self._module__state.id)
def __nnx_repr__(self):
global SEEN_MODULES_REPR
if SEEN_MODULES_REPR is None:
SEEN_MODULES_REPR = set()
clear_seen = True
else:
clear_seen = False
if self._module__state.id in SEEN_MODULES_REPR:
yield reprlib.Object(type=type(self), empty_repr='...')
return
yield reprlib.Object(type=type(self))
SEEN_MODULES_REPR.add(self._module__state.id)
try:
for name, value in vars(self).items():
if isinstance(value, Module) or (
not isinstance(value, Variable) and not name.startswith('_')
):
yield reprlib.Attr(name, repr(value))
finally:
if clear_seen:
SEEN_MODULES_REPR = None
def init(cls: type[M], *args, **kwargs) -> tuple[State, GraphDef[M]]:
return cls(*args, **kwargs).split()
def create_abstract(cls: type[M]) -> type[M]:
def lift_rngs(kwargs: dict[str, tp.Any]):
if 'rngs' in kwargs and isinstance(rngs := kwargs['rngs'], tp.Mapping):
kwargs['rngs'] = Rngs(rngs)
return kwargs
def _create_abstract(accessor: DelayedAccessor, *args, **kwargs):
constructor = accessor(cls)
if 'rngs' in kwargs and isinstance(rngs := kwargs['rngs'], Rngs):
kwargs['rngs'] = rngs.fork()
state, graphdef = jax.eval_shape(
lambda: constructor(*args, **lift_rngs(kwargs)).split()
)
return graphdef.merge(state)
return CallableProxy(_create_abstract) # type: ignore
def partial_init(cls: type[M], state: State, *states: State) -> type[M]:
"""Creates a constuctor that initializes the Module with the given state.
``partial_init`` takes one or more States and returns a constructor that uses
``jax.jit`` to initialize the Module and update its state with the given
States. Its semantically equivalent to::
module = MyModule(*args, **kwargs)
module.update(state, *states)
However, thanks to dead code elimination the resulting constructor will only
initialize the subset of ``Variable``'s that were part of the given state(s).
Example::
>>> import jax.numpy as jnp
>>> import jax
>>> from flax.experimental import nnx
...
>>> bias = jax.random.normal(jax.random.key(0), (4,))
>>> state = nnx.State({'bias': bias}) # in reality load it from a checkpoint
>>> linear = nnx.Linear.partial_init(state)(2, 4, rngs=nnx.Rngs(1))
>>> y = linear(jnp.ones((1, 2)))
...
>>> assert jnp.allclose(linear.bias, bias)
>>> assert y.shape == (1, 4)
Args:
state: The State to initialize the Module with.
*states: Additional States to initialize the Module with.
Returns:
A constructor that initializes the Module with the given States.
"""
states = (state, *states)
def lift_rngs(kwargs: dict[str, tp.Any]):
if 'rngs' in kwargs and isinstance(rngs := kwargs['rngs'], tp.Mapping):
kwargs['rngs'] = Rngs(rngs)
return kwargs
def _partial_init(accessor: DelayedAccessor, *args, **kwargs):
constructor: tp.Callable[[], M] = accessor(cls)
if 'rngs' in kwargs and isinstance(rngs := kwargs['rngs'], Rngs):
kwargs['rngs'] = rngs.fork()
def _partial_init_constructor():
module = constructor(*args, **lift_rngs(kwargs))
module.update(*states)
return module.split()
graphdef: GraphDef[M]
state: State
state, graphdef = jax.jit(_partial_init_constructor)()
module = graphdef.merge(state)
return module
return CallableProxy(_partial_init) # type: ignore
def clone(self: M) -> M:
return merge(self.split())
def split(self: M) -> tuple[State, GraphDef[M]]:
...
def split(self: M, first: filterlib.Filter, /) -> tuple[State, GraphDef[M]]:
...
def split(
self: M,
first: filterlib.Filter,
second: filterlib.Filter,
/,
*filters: filterlib.Filter,
) -> tuple[State, tpe.Unpack[tuple[State, ...]], GraphDef[M]]:
...
def split(
self: M, *filters: filterlib.Filter
) -> tuple[State, tpe.Unpack[tuple[State, ...]], GraphDef[M]]:
state, graphdef, _ = graph_utils.graph_flatten(self)
if len(filters) == 0:
states = (state,)
elif len(filters) == 1:
states = (state.split(filters[0]),)
else:
states = state.split(filters[0], filters[1], *filters[2:])
return *states, graphdef
def get_state(self) -> State:
state, _ = self.split()
return state
def get_graphdef(self: M) -> GraphDef[M]:
_, graphdef = self.split()
return graphdef
def extract(self, first: filterlib.Filter, /) -> State:
...
def extract(
self,
first: filterlib.Filter,
second: filterlib.Filter,
/,
*filters: filterlib.Filter,
) -> tuple[State, ...]:
...
def extract(
self,
first: filterlib.Filter,
/,
*filters: filterlib.Filter,
) -> tp.Union[State, tuple[State, ...]]:
state = self.get_state()
if len(filters) == 0:
states = state.extract(first)
else:
states = state.extract(first, filters[0], *filters[1:])
return states
def pop(
self,
filter: filterlib.Filter,
/,
) -> State:
...
def pop(
self,
filter: filterlib.Filter,
filter2: filterlib.Filter,
/,
*filters: filterlib.Filter,
) -> tuple[State, ...]:
...
def pop(
self, *filters: filterlib.Filter
) -> tp.Union[State, tuple[State, ...]]:
if len(filters) == 0:
raise ValueError('Expected at least one filter')
states = graph_utils.graph_pop(self, filters)
if len(states) == 1:
return states[0]
else:
return states
def apply(self: M) -> ApplyCaller[M]:
def _apply(accessor: DelayedAccessor, *args, **kwargs) -> tuple[tp.Any, M]:
module = self.clone()
fn = accessor(module)
out = fn(*args, **kwargs)
return out, module
return CallableProxy(_apply) # type: ignore
def update(self: M, update: Updates[M], /, *updates: Updates[M]) -> None:
updates = (update, *updates)
def _states_and_moduledef(
updates,
) -> tuple[list[State], tp.Optional[Module]]:
leaves = jax.tree_util.tree_leaves(
updates, is_leaf=lambda x: isinstance(x, (GraphDef, State))
)
states: list[State] = []
module: tp.Optional[Module] = None
for leaf in leaves:
if isinstance(leaf, (Module, GraphDef)):
if module is not None:
raise ValueError(
'Expected only one GraphDef or Module in the updates'
)
if isinstance(leaf, Module):
module = leaf
states.append(leaf.get_state())
else:
module = leaf.make_empty()
elif isinstance(leaf, State):
states.append(leaf)
else:
raise ValueError(
'Expected a GraphDef, Module or State, got'
f' {type(leaf).__name__}'
)
return states, module
states, module_update = _states_and_moduledef(updates)
if module_update is not None:
graph_utils.graph_update_static(self, module_update)
if states:
graph_utils.graph_update_dynamic(self, states)
def sow(
self,
variable_type: tp.Type[variableslib.Variable[tp.Any]],
name: str,
value: A,
reduce_fn: tp.Callable[[B, A], B] = tuple_reduce,
init_fn: tp.Callable[[], B] = tuple_init, # type: ignore
) -> None:
if hasattr(self, name):
variable = getattr(self, name)
if not isinstance(variable, variableslib.Variable):
raise ValueError(
f"Expected '{name}' to be a Variable, got {type(variable).__name__}"
)
elif type(variable) != variable_type:
raise ValueError(
f"Expected '{name}' to be of type '{variable_type.__name__}', "
f"got '{type(variable).__name__}'"
)
variable.raw_value = reduce_fn(variable.raw_value, value)
else:
reduced_value = reduce_fn(init_fn(), value)
setattr(self, name, variable_type(reduced_value))
def modules(self) -> tp.Iterator[tuple[Path, Module]]:
for path, value in graph_utils.iter_nodes(self):
if isinstance(value, Module):
yield path, value
def set_attributes(
self,
*filters: filterlib.Filter,
raise_if_not_found: bool = True,
**attributes: tp.Any,
) -> None:
"""Sets the attributes of nested Modules including the current Module.
If the attribute is not found in the Module, it is ignored.
Example::
>>> from flax.experimental import nnx
...
>>> class Block(nnx.Module):
... def __init__(self, din, dout, *, rngs: nnx.Rngs):
... self.linear = nnx.Linear(din, dout, rngs=rngs)
... self.dropout = nnx.Dropout(0.5, deterministic=False)
... self.batch_norm = nnx.BatchNorm(10, use_running_average=False, rngs=rngs)
...
>>> block = Block(2, 5, rngs=nnx.Rngs(0))
>>> block.dropout.deterministic, block.batch_norm.use_running_average
(False, False)
>>> block.set_attributes(deterministic=True, use_running_average=True)
>>> block.dropout.deterministic, block.batch_norm.use_running_average
(True, True)
``Filter``'s can be used to set the attributes of specific Modules::
>>> block = Block(2, 5, rngs=nnx.Rngs(0))
>>> block.set_attributes(nnx.Dropout, deterministic=True, use_running_average=True)
>>> # Only the dropout will be modified
>>> block.dropout.deterministic, block.batch_norm.use_running_average
(True, False)
Args:
*filters: Filters to select the Modules to set the attributes of.
raise_if_not_found: If True (default), raises a ValueError if at least one attribute
instance is not found in one of the selected Modules.
**attributes: The attributes to set.
"""
remaining_attributes = set(attributes.keys())
if not filters:
filters = (True,)
predicates = tuple(map(filterlib.to_predicate, filters))
for path, module in self.modules():
for predicate in predicates:
if predicate(path, module):
for name, value in attributes.items():
if hasattr(module, name):
if name in remaining_attributes:
remaining_attributes.remove(name)
setattr(module, name, value)
break
if remaining_attributes and raise_if_not_found:
raise ValueError(
f'Could not find at least one instance of the following attributes: {remaining_attributes}'
)
def __init_subclass__(cls, experimental_pytree: bool = False) -> None:
super().__init_subclass__()
graph_utils.register_mutable_node_type(
type=cls,
flatten=_module_graph_flatten,
set_key=_module_graph_set_key,
pop_key=_module_graph_pop_key,
create_empty=_module_graph_create_empty,
clear=_module_graph_clear,
)
if experimental_pytree:
jtu.register_pytree_with_keys(
cls,
partial(_module_flatten, with_keys=True),
_module_unflatten,
flatten_func=partial(_module_flatten, with_keys=False),
)
def promote_dtype(
  *args: Unpack[T], dtype=None, inexact=True
) -> tuple[Unpack[T]]:
  """Promotes input arguments to a specified or inferred dtype.

  All args are cast to the same dtype. See ``canonicalize_dtype`` for how
  this dtype is determined.

  The behavior of promote_dtype is mostly a convenience wrapper around
  ``jax.numpy.promote_types``. The differences being that it automatically
  casts all input to the inferred dtypes, allows inference to be overridden by
  a forced dtype, and has an optional check to guarantee the resulting dtype
  is inexact.

  Args:
    *args: JAX array compatible values. None values
      are returned as is.
    dtype: Optional dtype override. If specified the arguments are cast to
      the specified dtype instead and dtype inference is disabled.
    inexact: When True, the output dtype must be a subdtype
      of `jnp.inexact`. Inexact dtypes are real or complex floating points.
      This is useful when you want to apply operations that don't work
      directly on integers like taking a mean for example.

  Returns:
    The arguments cast to arrays of the same dtype.
  """
  dtype = canonicalize_dtype(*args, dtype=dtype, inexact=inexact)
  # None entries pass through untouched so optional arguments stay optional.
  return tuple(jnp.asarray(x, dtype) if x is not None else None for x in args)
# Loose structural aliases used by the annotations above.
# NOTE(review): `Union` and `Tuple` are not imported anywhere in this chunk —
# presumably a `from typing import Any, Tuple, Union` exists earlier in the
# file; confirm before relying on these names.
Array = Union[jax.Array, Any]
Dtype = Union[jax.typing.DTypeLike, Any]
PrecisionLike = Union[
  None,
  str,
  jax.lax.Precision,
  Tuple[str, str],
  Tuple[jax.lax.Precision, jax.lax.Precision],
]
The provided code snippet includes necessary dependencies for implementing the `dot_product_attention` function. Write a Python function `def dot_product_attention( query: Array, key: Array, value: Array, bias: Optional[Array] = None, mask: Optional[Array] = None, broadcast_dropout: bool = True, dropout_rng: Optional[Array] = None, dropout_rate: float = 0.0, deterministic: bool = False, dtype: Optional[Dtype] = None, precision: PrecisionLike = None, module: Optional[Module] = None, )` to solve the following problem:
Computes dot-product attention given query, key, and value. This is the core function for applying attention based on https://arxiv.org/abs/1706.03762. It calculates the attention weights given query and key and combines the values using the attention weights. .. note:: ``query``, ``key``, ``value`` needn't have any batch dimensions. Args: query: queries for calculating attention with shape of ``[batch..., q_length, num_heads, qk_depth_per_head]``. key: keys for calculating attention with shape of ``[batch..., kv_length, num_heads, qk_depth_per_head]``. value: values to be used in attention with shape of ``[batch..., kv_length, num_heads, v_depth_per_head]``. bias: bias for the attention weights. This should be broadcastable to the shape `[batch..., num_heads, q_length, kv_length]`. This can be used for incorporating causal masks, padding masks, proximity bias, etc. mask: mask for the attention weights. This should be broadcastable to the shape `[batch..., num_heads, q_length, kv_length]`. This can be used for incorporating causal masks. Attention weights are masked out if their corresponding mask value is `False`. broadcast_dropout: bool: use a broadcasted dropout along batch dims. dropout_rng: JAX PRNGKey: to be used for dropout dropout_rate: dropout rate deterministic: bool, deterministic or not (to apply dropout) dtype: the dtype of the computation (default: infer from inputs) precision: numerical precision of the computation see `jax.lax.Precision` for details. module: the Module that will sow the attention weights into the ``nnx.Intermediate`` collection. If ``module`` is None, the attention weights will not be sowed. Returns: Output of shape `[batch..., q_length, num_heads, v_depth_per_head]`.
Here is the function:
def dot_product_attention(
query: Array,
key: Array,
value: Array,
bias: Optional[Array] = None,
mask: Optional[Array] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[Array] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Optional[Dtype] = None,
precision: PrecisionLike = None,
module: Optional[Module] = None,
):
"""Computes dot-product attention given query, key, and value.
This is the core function for applying attention based on
https://arxiv.org/abs/1706.03762. It calculates the attention weights given
query and key and combines the values using the attention weights.
.. note::
``query``, ``key``, ``value`` needn't have any batch dimensions.
Args:
query: queries for calculating attention with shape of ``[batch..., q_length,
num_heads, qk_depth_per_head]``.
key: keys for calculating attention with shape of ``[batch..., kv_length,
num_heads, qk_depth_per_head]``.
value: values to be used in attention with shape of ``[batch..., kv_length,
num_heads, v_depth_per_head]``.
bias: bias for the attention weights. This should be broadcastable to the
shape `[batch..., num_heads, q_length, kv_length]`. This can be used for
incorporating causal masks, padding masks, proximity bias, etc.
mask: mask for the attention weights. This should be broadcastable to the
shape `[batch..., num_heads, q_length, kv_length]`. This can be used for
incorporating causal masks. Attention weights are masked out if their
corresponding mask value is `False`.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
dtype: the dtype of the computation (default: infer from inputs)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
module: the Module that will sow the attention weights into the
``nnx.Intermediate`` collection. If ``module`` is None, the attention
weights will not be sowed.
Returns:
Output of shape `[batch..., q_length, num_heads, v_depth_per_head]`.
"""
query, key, value = promote_dtype(query, key, value, dtype=dtype)
dtype = query.dtype
assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'
assert (
query.shape[:-3] == key.shape[:-3] == value.shape[:-3]
), 'q, k, v batch dims must match.'
assert (
query.shape[-2] == key.shape[-2] == value.shape[-2]
), 'q, k, v num_heads must match.'
assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.'
# compute attention weights
attn_weights = dot_product_attention_weights(
query,
key,
bias,
mask,
broadcast_dropout,
dropout_rng,
dropout_rate,
deterministic,
dtype,
precision,
module,
)
# return weighted sum over values for each query position
return jnp.einsum(
'...hqk,...khd->...qhd', attn_weights, value, precision=precision
) | Computes dot-product attention given query, key, and value. This is the core function for applying attention based on https://arxiv.org/abs/1706.03762. It calculates the attention weights given query and key and combines the values using the attention weights. .. note:: ``query``, ``key``, ``value`` needn't have any batch dimensions. Args: query: queries for calculating attention with shape of ``[batch..., q_length, num_heads, qk_depth_per_head]``. key: keys for calculating attention with shape of ``[batch..., kv_length, num_heads, qk_depth_per_head]``. value: values to be used in attention with shape of ``[batch..., kv_length, num_heads, v_depth_per_head]``. bias: bias for the attention weights. This should be broadcastable to the shape `[batch..., num_heads, q_length, kv_length]`. This can be used for incorporating causal masks, padding masks, proximity bias, etc. mask: mask for the attention weights. This should be broadcastable to the shape `[batch..., num_heads, q_length, kv_length]`. This can be used for incorporating causal masks. Attention weights are masked out if their corresponding mask value is `False`. broadcast_dropout: bool: use a broadcasted dropout along batch dims. dropout_rng: JAX PRNGKey: to be used for dropout dropout_rate: dropout rate deterministic: bool, deterministic or not (to apply dropout) dtype: the dtype of the computation (default: infer from inputs) precision: numerical precision of the computation see `jax.lax.Precision` for details. module: the Module that will sow the attention weights into the ``nnx.Intermediate`` collection. If ``module`` is None, the attention weights will not be sowed. Returns: Output of shape `[batch..., q_length, num_heads, v_depth_per_head]`. |
22,711 | from __future__ import annotations
import functools
from typing import Any, Callable, Optional, overload
import jax
import jax.numpy as jnp
from jax import lax, random
from flax.experimental import nnx
from flax.experimental.nnx.nnx import rnglib
from flax.experimental.nnx.nnx.module import Module, first_from
from flax.experimental.nnx.nnx.nn import initializers
from flax.experimental.nnx.nnx.nn.dtypes import promote_dtype
from flax.experimental.nnx.nnx.nn.linear import (
LinearGeneral,
default_kernel_init,
)
from flax.experimental.nnx.nnx.nn.normalization import LayerNorm
from flax.typing import (
Array,
Dtype,
Shape,
Initializer,
PrecisionLike,
DotGeneralT,
)
def make_attention_mask(
  query_input: Array,
  key_input: Array,
  pairwise_fn: Callable[..., Any] = jnp.multiply,
  extra_batch_dims: int = 0,
  dtype: Dtype = jnp.float32,
):
  """Build an attention mask by comparing query and key positions pairwise.

  For 1d inputs of shape `[batch..., len_q]` and `[batch..., len_kv]`, the
  attention weights have shape `[batch..., heads, len_q, len_kv]` and this
  produces a `[batch..., 1, len_q, len_kv]` mask (the singleton axis
  broadcasts over heads).

  Args:
    query_input: a batched, flat input of query_length size.
    key_input: a batched, flat input of key_length size.
    pairwise_fn: broadcasting elementwise comparison function.
    extra_batch_dims: number of extra leading singleton batch axes to add
      (none by default).
    dtype: dtype of the returned mask.

  Returns:
    A `[batch..., 1, len_q, len_kv]` shaped mask for 1d attention.
  """
  queries = jnp.expand_dims(query_input, axis=-1)
  keys = jnp.expand_dims(key_input, axis=-2)
  # insert the broadcast-over-heads axis just before (len_q, len_kv)
  mask = jnp.expand_dims(pairwise_fn(queries, keys), axis=-3)
  if extra_batch_dims:
    mask = jnp.expand_dims(mask, axis=tuple(range(extra_batch_dims)))
  return mask.astype(dtype)
Array = Union[jax.Array, Any]
Dtype = Union[jax.typing.DTypeLike, Any]
The provided code snippet includes necessary dependencies for implementing the `make_causal_mask` function. Write a Python function `def make_causal_mask( x: Array, extra_batch_dims: int = 0, dtype: Dtype = jnp.float32 ) -> Array` to solve the following problem:
Make a causal mask for self-attention. In case of 1d inputs (i.e., `[batch..., len]`, the self-attention weights will be `[batch..., heads, len, len]` and this function will produce a causal mask of shape `[batch..., 1, len, len]`. Args: x: input array of shape `[batch..., len]` extra_batch_dims: number of batch dims to add singleton axes for, none by default dtype: mask return dtype Returns: A `[batch..., 1, len, len]` shaped causal mask for 1d attention.
Here is the function:
def make_causal_mask(
x: Array, extra_batch_dims: int = 0, dtype: Dtype = jnp.float32
) -> Array:
"""Make a causal mask for self-attention.
In case of 1d inputs (i.e., `[batch..., len]`, the self-attention weights
will be `[batch..., heads, len, len]` and this function will produce a
causal mask of shape `[batch..., 1, len, len]`.
Args:
x: input array of shape `[batch..., len]`
extra_batch_dims: number of batch dims to add singleton axes for, none by
default
dtype: mask return dtype
Returns:
A `[batch..., 1, len, len]` shaped causal mask for 1d attention.
"""
idxs = jnp.broadcast_to(jnp.arange(x.shape[-1], dtype=jnp.int32), x.shape)
return make_attention_mask(
idxs,
idxs,
jnp.greater_equal,
extra_batch_dims=extra_batch_dims,
dtype=dtype,
) | Make a causal mask for self-attention. In case of 1d inputs (i.e., `[batch..., len]`, the self-attention weights will be `[batch..., heads, len, len]` and this function will produce a causal mask of shape `[batch..., 1, len, len]`. Args: x: input array of shape `[batch..., len]` extra_batch_dims: number of batch dims to add singleton axes for, none by default dtype: mask return dtype Returns: A `[batch..., 1, len, len]` shaped causal mask for 1d attention. |
22,712 | from __future__ import annotations
import functools
from typing import Any, Callable, Optional, overload
import jax
import jax.numpy as jnp
from jax import lax, random
from flax.experimental import nnx
from flax.experimental.nnx.nnx import rnglib
from flax.experimental.nnx.nnx.module import Module, first_from
from flax.experimental.nnx.nnx.nn import initializers
from flax.experimental.nnx.nnx.nn.dtypes import promote_dtype
from flax.experimental.nnx.nnx.nn.linear import (
LinearGeneral,
default_kernel_init,
)
from flax.experimental.nnx.nnx.nn.normalization import LayerNorm
from flax.typing import (
Array,
Dtype,
Shape,
Initializer,
PrecisionLike,
DotGeneralT,
)
Array = Union[jax.Array, Any]
Dtype = Union[jax.typing.DTypeLike, Any]
The provided code snippet includes necessary dependencies for implementing the `combine_masks` function. Write a Python function `def combine_masks(*masks: Optional[Array], dtype: Dtype = jnp.float32) -> Array` to solve the following problem:
Combine attention masks. Args: *masks: set of attention mask arguments to combine, some can be None. dtype: dtype for the returned mask. Returns: Combined mask, reduced by logical and, returns None if no masks given.
Here is the function:
def combine_masks(*masks: Optional[Array], dtype: Dtype = jnp.float32) -> Array:
"""Combine attention masks.
Args:
*masks: set of attention mask arguments to combine, some can be None.
dtype: dtype for the returned mask.
Returns:
Combined mask, reduced by logical and, returns None if no masks given.
"""
masks_list = [m for m in masks if m is not None]
if not masks_list:
return None
assert all(
map(lambda x: x.ndim == masks_list[0].ndim, masks_list)
), f'masks must have same rank: {tuple(map(lambda x: x.ndim, masks_list))}'
mask, *other_masks = masks_list
for other_mask in other_masks:
mask = jnp.logical_and(mask, other_mask)
return mask.astype(dtype) | Combine attention masks. Args: *masks: set of attention mask arguments to combine, some can be None. dtype: dtype for the returned mask. Returns: Combined mask, reduced by logical and, returns None if no masks given. |
22,713 | import typing as tp
import jax
import jax.numpy as jnp
from jax import lax
from flax.experimental import nnx
from flax.experimental.nnx.nnx import rnglib
from flax.experimental.nnx.nnx.module import Module, first_from
from flax.experimental.nnx.nnx.nn import dtypes, initializers
from flax.typing import (
Array,
Dtype,
Initializer,
Axes,
)
def _canonicalize_axes(rank: int, axes: Axes) -> tp.Tuple[int, ...]:
"""Returns a tuple of deduplicated, sorted, and positive axes."""
if not isinstance(axes, tp.Iterable):
axes = (axes,)
return tuple(set([rank + axis if axis < 0 else axis for axis in axes]))
def _abs_sq(x):
"""Computes the elementwise square of the absolute value |x|^2."""
if jnp.iscomplexobj(x):
return lax.square(lax.real(x)) + lax.square(lax.imag(x))
else:
return lax.square(x)
Array = Union[jax.Array, Any]
Dtype = Union[jax.typing.DTypeLike, Any]
Axes = Union[int, Sequence[int]]
The provided code snippet includes necessary dependencies for implementing the `_compute_stats` function. Write a Python function `def _compute_stats( x: Array, axes: Axes, dtype: tp.Optional[Dtype], axis_name: tp.Optional[str] = None, axis_index_groups: tp.Any = None, use_mean: bool = True, use_fast_variance: bool = True, mask: tp.Optional[Array] = None, )` to solve the following problem:
Computes mean and variance statistics. This implementation takes care of a few important details: - Computes in float32 precision for stability in half precision training. - If `use_fast_variance` is `True`, mean and variance are computed using Var = E[|x|^2] - |E[x]|^2, instead of Var = E[|x - E[x]|^2]), in a single XLA fusion. - Clips negative variances to zero which can happen due to roundoff errors. This avoids downstream NaNs. - Supports averaging across a parallel axis and subgroups of a parallel axis with a single `lax.pmean` call to avoid latency. Arguments: x: Input array. axes: The axes in ``x`` to compute mean and variance statistics for. dtype: Optional dtype specifying the minimal precision. Statistics are always at least float32 for stability (default: dtype of x). axis_name: Optional name for the pmapped axis to compute mean over. Note, this is only used for pmap and shard map. For SPMD jit, you do not need to manually synchronize. Just make sure that the axes are correctly annotated and XLA:SPMD will insert the necessary collectives. axis_index_groups: Optional axis indices. use_mean: If true, calculate the mean from the input and use it when computing the variance. If false, set the mean to zero and compute the variance without subtracting the mean. use_fast_variance: If true, use a faster, but less numerically stable, calculation for the variance. mask: Binary array of shape broadcastable to `inputs` tensor, indicating the positions for which the mean and variance should be computed. Returns: A pair ``(mean, var)``.
Here is the function:
def _compute_stats(
x: Array,
axes: Axes,
dtype: tp.Optional[Dtype],
axis_name: tp.Optional[str] = None,
axis_index_groups: tp.Any = None,
use_mean: bool = True,
use_fast_variance: bool = True,
mask: tp.Optional[Array] = None,
):
"""Computes mean and variance statistics.
This implementation takes care of a few important details:
- Computes in float32 precision for stability in half precision training.
- If `use_fast_variance` is `True`, mean and variance are computed using
Var = E[|x|^2] - |E[x]|^2, instead of Var = E[|x - E[x]|^2]), in a single
XLA fusion.
- Clips negative variances to zero which can happen due to
roundoff errors. This avoids downstream NaNs.
- Supports averaging across a parallel axis and subgroups of a parallel axis
with a single `lax.pmean` call to avoid latency.
Arguments:
x: Input array.
axes: The axes in ``x`` to compute mean and variance statistics for.
dtype: Optional dtype specifying the minimal precision. Statistics are
always at least float32 for stability (default: dtype of x).
axis_name: Optional name for the pmapped axis to compute mean over. Note,
this is only used for pmap and shard map. For SPMD jit, you do not need to
manually synchronize. Just make sure that the axes are correctly annotated
and XLA:SPMD will insert the necessary collectives.
axis_index_groups: Optional axis indices.
use_mean: If true, calculate the mean from the input and use it when
computing the variance. If false, set the mean to zero and compute the
variance without subtracting the mean.
use_fast_variance: If true, use a faster, but less numerically stable,
calculation for the variance.
mask: Binary array of shape broadcastable to `inputs` tensor, indicating
the positions for which the mean and variance should be computed.
Returns:
A pair ``(mean, var)``.
"""
if dtype is None:
dtype = jnp.result_type(x)
# promote x to at least float32, this avoids half precision computation
# but preserves double or complex floating points
dtype = jnp.promote_types(dtype, jnp.float32)
x = jnp.asarray(x, dtype)
axes = _canonicalize_axes(x.ndim, axes)
def maybe_distributed_mean(*xs, mask=None):
mus = tuple(x.mean(axes, where=mask) for x in xs)
if axis_name is None:
return mus if len(xs) > 1 else mus[0]
else:
# In the distributed case we stack multiple arrays to speed comms.
if len(xs) > 1:
reduced_mus = lax.pmean(
jnp.stack(mus, axis=0),
axis_name,
axis_index_groups=axis_index_groups,
)
return tuple(reduced_mus[i] for i in range(len(xs)))
else:
return lax.pmean(mus[0], axis_name, axis_index_groups=axis_index_groups)
if use_mean:
if use_fast_variance:
mu, mu2 = maybe_distributed_mean(x, _abs_sq(x), mask=mask)
# mean2 - _abs_sq(mean) is not guaranteed to be non-negative due
# to floating point round-off errors.
var = jnp.maximum(0.0, mu2 - _abs_sq(mu))
else:
mu = maybe_distributed_mean(x, mask=mask)
var = maybe_distributed_mean(
_abs_sq(x - jnp.expand_dims(mu, axes)), mask=mask
)
else:
var = maybe_distributed_mean(_abs_sq(x), mask=mask)
mu = jnp.zeros_like(var)
return mu, var | Computes mean and variance statistics. This implementation takes care of a few important details: - Computes in float32 precision for stability in half precision training. - If `use_fast_variance` is `True`, mean and variance are computed using Var = E[|x|^2] - |E[x]|^2, instead of Var = E[|x - E[x]|^2]), in a single XLA fusion. - Clips negative variances to zero which can happen due to roundoff errors. This avoids downstream NaNs. - Supports averaging across a parallel axis and subgroups of a parallel axis with a single `lax.pmean` call to avoid latency. Arguments: x: Input array. axes: The axes in ``x`` to compute mean and variance statistics for. dtype: Optional dtype specifying the minimal precision. Statistics are always at least float32 for stability (default: dtype of x). axis_name: Optional name for the pmapped axis to compute mean over. Note, this is only used for pmap and shard map. For SPMD jit, you do not need to manually synchronize. Just make sure that the axes are correctly annotated and XLA:SPMD will insert the necessary collectives. axis_index_groups: Optional axis indices. use_mean: If true, calculate the mean from the input and use it when computing the variance. If false, set the mean to zero and compute the variance without subtracting the mean. use_fast_variance: If true, use a faster, but less numerically stable, calculation for the variance. mask: Binary array of shape broadcastable to `inputs` tensor, indicating the positions for which the mean and variance should be computed. Returns: A pair ``(mean, var)``. |
22,714 | import typing as tp
import jax
import jax.numpy as jnp
from jax import lax
from flax.experimental import nnx
from flax.experimental.nnx.nnx import rnglib
from flax.experimental.nnx.nnx.module import Module, first_from
from flax.experimental.nnx.nnx.nn import dtypes, initializers
from flax.typing import (
Array,
Dtype,
Initializer,
Axes,
)
def _canonicalize_axes(rank: int, axes: Axes) -> tp.Tuple[int, ...]:
"""Returns a tuple of deduplicated, sorted, and positive axes."""
if not isinstance(axes, tp.Iterable):
axes = (axes,)
return tuple(set([rank + axis if axis < 0 else axis for axis in axes]))
Array = Union[jax.Array, Any]
Dtype = Union[jax.typing.DTypeLike, Any]
Axes = Union[int, Sequence[int]]
The provided code snippet includes necessary dependencies for implementing the `_normalize` function. Write a Python function `def _normalize( x: Array, mean: Array, var: Array, scale: tp.Optional[Array], bias: tp.Optional[Array], reduction_axes: Axes, feature_axes: Axes, dtype: tp.Optional[Dtype], epsilon: float, )` to solve the following problem:
Normalizes the input of a normalization layer and optionally applies a learned scale and bias. Arguments: x: The input. mean: Mean to use for normalization. var: Variance to use for normalization. reduction_axes: The axes in ``x`` to reduce. feature_axes: Axes containing features. A separate bias and scale is learned for each specified feature. dtype: The dtype of the result (default: infer from input and params). epsilon: Normalization epsilon. Returns: The normalized input.
Here is the function:
def _normalize(
  x: Array,
  mean: Array,
  var: Array,
  scale: tp.Optional[Array],
  bias: tp.Optional[Array],
  reduction_axes: Axes,
  feature_axes: Axes,
  dtype: tp.Optional[Dtype],
  epsilon: float,
):
  """Normalizes ``x`` with the given statistics, then optionally scales/shifts.

  Args:
    x: The input.
    mean: Mean to use for normalization.
    var: Variance to use for normalization.
    scale: Optional learned scale, one entry per feature (or ``None``).
    bias: Optional learned bias, one entry per feature (or ``None``).
    reduction_axes: The axes in ``x`` that the statistics were reduced over.
    feature_axes: Axes containing features; ``scale``/``bias`` broadcast
      along all other axes.
    dtype: The dtype of the result (default: infer from input and params).
    epsilon: Normalization epsilon added to the variance.

  Returns:
    The normalized input, cast to ``dtype``.
  """
  red_axes = _canonicalize_axes(x.ndim, reduction_axes)
  feat_axes = _canonicalize_axes(x.ndim, feature_axes)

  # Statistics broadcast shape: size 1 along every reduced axis.
  stats_shape = [1 if ax in red_axes else dim for ax, dim in enumerate(x.shape)]
  mean = mean.reshape(stats_shape)
  var = var.reshape(stats_shape)

  # Scale/bias broadcast shape: full size along feature axes, 1 elsewhere.
  feature_shape = [
    x.shape[ax] if ax in feat_axes else 1 for ax in range(x.ndim)
  ]

  centered = x - mean
  multiplier = lax.rsqrt(var + epsilon)
  dtype_args = [x]
  if scale is not None:
    scale = scale.reshape(feature_shape)
    multiplier *= scale
    dtype_args.append(scale)
  centered *= multiplier
  if bias is not None:
    bias = bias.reshape(feature_shape)
    centered += bias
    dtype_args.append(bias)
  # Result dtype is inferred jointly from the input and any params used.
  dtype = dtypes.canonicalize_dtype(*dtype_args, dtype=dtype)
  return jnp.asarray(centered, dtype)
22,715 | import typing as tp
from jax.nn.initializers import constant as constant
from jax.nn.initializers import delta_orthogonal as delta_orthogonal
from jax.nn.initializers import glorot_normal as glorot_normal
from jax.nn.initializers import glorot_uniform as glorot_uniform
from jax.nn.initializers import he_normal as he_normal
from jax.nn.initializers import he_uniform as he_uniform
from jax.nn.initializers import kaiming_normal as kaiming_normal
from jax.nn.initializers import kaiming_uniform as kaiming_uniform
from jax.nn.initializers import lecun_normal as lecun_normal
from jax.nn.initializers import lecun_uniform as lecun_uniform
from jax.nn.initializers import normal as normal
from jax.nn.initializers import ones as ones
from jax.nn.initializers import orthogonal as orthogonal
from jax.nn.initializers import truncated_normal as truncated_normal
from jax.nn.initializers import uniform as uniform
from jax.nn.initializers import variance_scaling as variance_scaling
from jax.nn.initializers import xavier_normal as xavier_normal
from jax.nn.initializers import xavier_uniform as xavier_uniform
from jax.nn.initializers import zeros as zeros
from flax.typing import Initializer
Initializer = Union[jax.nn.initializers.Initializer, Callable[..., Any]]
The provided code snippet includes necessary dependencies for implementing the `zeros_init` function. Write a Python function `def zeros_init() -> Initializer` to solve the following problem:
Builds an initializer that returns a constant array full of zeros. >>> import jax, jax.numpy as jnp >>> from flax.experimental.nnx import initializers >>> zeros_initializer = initializers.zeros_init() >>> zeros_initializer(jax.random.key(42), (2, 3), jnp.float32) Array([[0., 0., 0.], [0., 0., 0.]], dtype=float32)
Here is the function:
def zeros_init() -> Initializer:
  """Build an initializer that fills arrays with zeros.

  >>> import jax, jax.numpy as jnp
  >>> from flax.experimental.nnx import initializers
  >>> initializers.zeros_init()(jax.random.key(42), (2, 3), jnp.float32)
  Array([[0., 0., 0.],
         [0., 0., 0.]], dtype=float32)
  """
  # Simply re-export jax.nn.initializers.zeros behind a factory interface.
  return zeros
22,716 | import typing as tp
from jax.nn.initializers import constant as constant
from jax.nn.initializers import delta_orthogonal as delta_orthogonal
from jax.nn.initializers import glorot_normal as glorot_normal
from jax.nn.initializers import glorot_uniform as glorot_uniform
from jax.nn.initializers import he_normal as he_normal
from jax.nn.initializers import he_uniform as he_uniform
from jax.nn.initializers import kaiming_normal as kaiming_normal
from jax.nn.initializers import kaiming_uniform as kaiming_uniform
from jax.nn.initializers import lecun_normal as lecun_normal
from jax.nn.initializers import lecun_uniform as lecun_uniform
from jax.nn.initializers import normal as normal
from jax.nn.initializers import ones as ones
from jax.nn.initializers import orthogonal as orthogonal
from jax.nn.initializers import truncated_normal as truncated_normal
from jax.nn.initializers import uniform as uniform
from jax.nn.initializers import variance_scaling as variance_scaling
from jax.nn.initializers import xavier_normal as xavier_normal
from jax.nn.initializers import xavier_uniform as xavier_uniform
from jax.nn.initializers import zeros as zeros
from flax.typing import Initializer
Initializer = Union[jax.nn.initializers.Initializer, Callable[..., Any]]
The provided code snippet includes necessary dependencies for implementing the `ones_init` function. Write a Python function `def ones_init() -> Initializer` to solve the following problem:
Builds an initializer that returns a constant array full of ones. >>> import jax, jax.numpy as jnp >>> from flax.experimental.nnx import initializers >>> ones_initializer = initializers.ones_init() >>> ones_initializer(jax.random.key(42), (3, 2), jnp.float32) Array([[1., 1.], [1., 1.], [1., 1.]], dtype=float32)
Here is the function:
def ones_init() -> Initializer:
  """Build an initializer that fills arrays with ones.

  >>> import jax, jax.numpy as jnp
  >>> from flax.experimental.nnx import initializers
  >>> initializers.ones_init()(jax.random.key(42), (3, 2), jnp.float32)
  Array([[1., 1.],
         [1., 1.],
         [1., 1.]], dtype=float32)
  """
  # Simply re-export jax.nn.initializers.ones behind a factory interface.
  return ones
22,717 | from __future__ import annotations
import typing as tp
from types import MappingProxyType
import jax
import jax.numpy as jnp
import numpy as np
from jax import lax
import opt_einsum
from flax.experimental import nnx
from flax.experimental.nnx.nnx import rnglib, variables
from flax.experimental.nnx.nnx.module import Module, first_from
from flax.experimental.nnx.nnx.nn import dtypes, initializers
from flax.typing import (
Array,
Dtype,
Shape,
Initializer,
PrecisionLike,
DotGeneralT,
ConvGeneralDilatedT,
PaddingLike,
LaxPadding,
)
PaddingLike = Union[str, int, Sequence[Union[int, Tuple[int, int]]]]
LaxPadding = Union[str, Sequence[Tuple[int, int]]]
The provided code snippet includes necessary dependencies for implementing the `canonicalize_padding` function. Write a Python function `def canonicalize_padding(padding: PaddingLike, rank: int) -> LaxPadding` to solve the following problem:
Canonicalizes conv padding to a jax.lax supported format.
Here is the function:
def canonicalize_padding(padding: 'PaddingLike', rank: int) -> 'LaxPadding':
  """Canonicalizes conv padding to a ``jax.lax``-supported format.

  Args:
    padding: A padding-mode string (e.g. ``'SAME'``), a single int applied
      symmetrically to every spatial dimension, or a sequence of length
      ``rank`` whose entries are ints or ``(low, high)`` pairs.
    rank: Number of spatial dimensions.

  Returns:
    The string unchanged, or a list of ``rank`` ``(low, high)`` pairs.

  Raises:
    ValueError: If ``padding`` matches none of the supported formats.
  """
  if isinstance(padding, str):
    return padding
  if isinstance(padding, int):
    return [(padding, padding)] * rank
  if isinstance(padding, tp.Sequence) and len(padding) == rank:
    canonical = []
    for entry in padding:
      if isinstance(entry, int):
        canonical.append((entry, entry))
      elif isinstance(entry, tuple) and len(entry) == 2:
        canonical.append(entry)
      else:
        # Malformed entry: bail out to the shared error below.
        break
    else:
      return canonical
  raise ValueError(
    f'Invalid padding format: {padding}, should be str, int,'
    f' or a sequence of len {rank} where each element is an'
    ' int or pair of ints.'
  )
22,718 | from __future__ import annotations
import typing as tp
from types import MappingProxyType
import jax
import jax.numpy as jnp
import numpy as np
from jax import lax
import opt_einsum
from flax.experimental import nnx
from flax.experimental.nnx.nnx import rnglib, variables
from flax.experimental.nnx.nnx.module import Module, first_from
from flax.experimental.nnx.nnx.nn import dtypes, initializers
from flax.typing import (
Array,
Dtype,
Shape,
Initializer,
PrecisionLike,
DotGeneralT,
ConvGeneralDilatedT,
PaddingLike,
LaxPadding,
)
The provided code snippet includes necessary dependencies for implementing the `_conv_dimension_numbers` function. Write a Python function `def _conv_dimension_numbers(input_shape)` to solve the following problem:
Computes the dimension numbers based on the input shape.
Here is the function:
def _conv_dimension_numbers(input_shape):
"""Computes the dimension numbers based on the input shape."""
ndim = len(input_shape)
lhs_spec = (0, ndim - 1) + tuple(range(1, ndim - 1))
rhs_spec = (ndim - 1, ndim - 2) + tuple(range(0, ndim - 2))
out_spec = lhs_spec
return lax.ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec) | Computes the dimension numbers based on the input shape. |
22,719 | from __future__ import annotations
import typing as tp
from types import MappingProxyType
import jax
import jax.numpy as jnp
import numpy as np
from jax import lax
import opt_einsum
from flax.experimental import nnx
from flax.experimental.nnx.nnx import rnglib, variables
from flax.experimental.nnx.nnx.module import Module, first_from
from flax.experimental.nnx.nnx.nn import dtypes, initializers
from flax.typing import (
Array,
Dtype,
Shape,
Initializer,
PrecisionLike,
DotGeneralT,
ConvGeneralDilatedT,
PaddingLike,
LaxPadding,
)
def _normalize_axes(axes: tuple[int, ...], ndim: int) -> tuple[int, ...]:
# A tuple by convention. len(axes_tuple) then also gives the rank efficiently.
return tuple(sorted(ax if ax >= 0 else ndim + ax for ax in axes)) | null |
22,720 | from __future__ import annotations
import typing as tp
from types import MappingProxyType
import jax
import jax.numpy as jnp
import numpy as np
from jax import lax
import opt_einsum
from flax.experimental import nnx
from flax.experimental.nnx.nnx import rnglib, variables
from flax.experimental.nnx.nnx.module import Module, first_from
from flax.experimental.nnx.nnx.nn import dtypes, initializers
from flax.typing import (
Array,
Dtype,
Shape,
Initializer,
PrecisionLike,
DotGeneralT,
ConvGeneralDilatedT,
PaddingLike,
LaxPadding,
)
def _canonicalize_tuple(x: tp.Sequence[int] | int) -> tuple[int, ...]:
if isinstance(x, tp.Iterable):
return tuple(x)
else:
return (x,) | null |
22,721 | import functools
import typing as tp
import jax
from jax.experimental import maps
from jax.sharding import Mesh, PartitionSpec
from flax.experimental.nnx.nnx import variables
from flax.experimental.nnx.nnx.pytreelib import TreeNode
from flax.experimental.nnx.nnx.state import State
from flax.typing import (
Array,
ArrayPytree, # pylint: disable=invalid-name
PartitionSpecPytree, # pylint: disable=invalid-name
Sharding,
)
A = tp.TypeVar('A')
def get_partition_spec(tree: A) -> A:
  """Extracts a PartitionSpec tree from a PyTree containing ``Variable`` values."""

  def _maybe_replicate(x):
    # Array-like leaves get an empty PartitionSpec (fully replicated);
    # non-array leaves carry no spec at all.
    if hasattr(x, 'shape'):
      return PartitionSpec()
    else:
      return None

  def f(x):
    if isinstance(x, variables.Variable):
      # NOTE(review): `HasSharding` is not imported in this chunk — presumably
      # a protocol/class from the spmd module; verify it is in scope.
      if isinstance(x, HasSharding) and x.sharding:
        # Variable declares sharding metadata: turn it into a PartitionSpec.
        return x.replace(raw_value=PartitionSpec(*x.sharding))
      else:
        # No sharding metadata: fall back to replication for array values.
        return x.replace(raw_value=_maybe_replicate(x.raw_value))
    return _maybe_replicate(x)

  # Treat Variables (but not TreeNodes) as leaves so `f` sees them whole.
  return jax.tree_map(
    f,
    tree,
    is_leaf=lambda x: isinstance(x, variables.Variable)
    and not isinstance(x, TreeNode),
  )
def get_named_sharding(tree: 'A', mesh: jax.sharding.Mesh) -> 'A':
  """Builds a tree of ``NamedSharding``s from the tree's partition specs.

  Each PartitionSpec produced by ``get_partition_spec`` is paired with
  ``mesh`` to form a concrete ``jax.sharding.NamedSharding``.
  """
  specs = get_partition_spec(tree)
  return jax.tree_map(
    lambda spec: jax.sharding.NamedSharding(mesh, spec), specs
  )
22,722 | import functools
import typing as tp
import jax
from jax.experimental import maps
from jax.sharding import Mesh, PartitionSpec
from flax.experimental.nnx.nnx import variables
from flax.experimental.nnx.nnx.pytreelib import TreeNode
from flax.experimental.nnx.nnx.state import State
from flax.typing import (
Array,
ArrayPytree, # pylint: disable=invalid-name
PartitionSpecPytree, # pylint: disable=invalid-name
Sharding,
)
A = tp.TypeVar('A')
F = tp.TypeVar('F', bound=tp.Callable[..., tp.Any])
def sharding_hook(
  node: variables.Variable[tp.Any],
  value: tp.Any,
  /,
) -> tp.Any:
# NOTE(review): the body of `sharding_hook` appears to have been lost in this
# copy of the file — the next `def` follows the signature directly. Presumably
# it applies/checks sharding constraints on `value`; recover from upstream.


def with_partitioning(
  initializer: F,
  sharding: Sharding,
  mesh: tp.Optional[jax.sharding.Mesh] = None,
  get_value_hooks: tp.Union[
    variables.GetValueHook[A], tp.Sequence[variables.GetValueHook[A]]
  ] = (),
  create_value_hooks: tp.Union[
    variables.CreateValueHook[A], tp.Sequence[variables.CreateValueHook[A]]
  ] = (),
  **metadata: tp.Any,
) -> F:
  # Wraps `initializer` so the created Variable carries sharding metadata and
  # runs `sharding_hook` on both value reads and value creation.
  # Normalize the hook arguments: a single callable becomes a tuple, and
  # `sharding_hook` is always appended last.
  if callable(get_value_hooks):
    get_value_hooks = (get_value_hooks, sharding_hook)
  else:
    get_value_hooks = (*get_value_hooks, sharding_hook)
  if callable(create_value_hooks):
    create_value_hooks = (create_value_hooks, sharding_hook)
  else:
    create_value_hooks = (*create_value_hooks, sharding_hook)
  return variables.with_metadata(
    initializer,
    get_value_hooks=get_value_hooks,
    create_value_hooks=create_value_hooks,
    sharding=sharding,
    mesh=mesh,
    **metadata,
  )
22,723 | from __future__ import annotations
import dataclasses
import functools
import typing as tp
from abc import abstractmethod
from types import MappingProxyType
from typing import Any
import jax
import jax.numpy as jnp
import jax.stages
from flax.experimental.nnx.nnx import (
filterlib,
rnglib,
spmd,
variables,
)
from flax.experimental.nnx.nnx.module import GraphDef, Module, ModuleMeta
from flax.experimental.nnx.nnx.proxy_caller import (
CallableProxy,
DelayedAccessor,
)
from flax.experimental.nnx.nnx.state import State
from flax.typing import Leaf
F = tp.TypeVar('F', bound=tp.Callable[..., tp.Any])
@dataclasses.dataclass
class RematOptions:
  """Options forwarded to ``jax.checkpoint`` by ``remat_apply``.

  Attributes:
    prevent_cse: Forwarded to ``jax.checkpoint``'s ``prevent_cse``.
    static_argnums: Positions of static arguments of the wrapped function;
      normalized to a tuple and shifted by the implicit (state, keys) args.
    policy: Optional ``jax.checkpoint`` rematerialization policy.
  """
  # NOTE: the @dataclasses.dataclass decorator is required — `remat` constructs
  # this class with keyword arguments and relies on __post_init__ running.
  prevent_cse: bool
  static_argnums: tp.Union[int, tuple[int, ...]]
  policy: tp.Optional[tp.Callable[..., bool]]

  def __post_init__(self):
    # Normalize a lone int to a 1-tuple so it can be iterated below.
    if isinstance(self.static_argnums, int):
      self.static_argnums = (self.static_argnums,)
    # add 2 as an offset to account for state and keys, which `remat_apply`
    # prepends to the user arguments; negative positions are left as-is.
    self.static_argnums = tuple(
      x + 2 if x >= 0 else x for x in self.static_argnums
    )
def remat_apply(
  options: RematOptions,
  f: RematCall,
  module: Module,
  args: tuple[tp.Any, ...],
  rngs: tp.Optional[rnglib.Rngs],
):
  """Applies ``f(module, *args)`` under ``jax.checkpoint`` (rematerialization).

  The module is split into (state, graphdef) so the checkpointed function is
  purely functional; the module is updated in place with the resulting state
  afterwards. Returns whatever ``f`` returns.
  """
  _check_args(args)
  state, graphdef = module.split()
  # Fork RNG streams so the checkpointed function receives plain key arrays.
  keys = rngs.fork() if rngs is not None else None

  def _remat_fn(
    state: State,
    keys: tp.Optional[dict[str, jax.Array]],
    *args,
  ) -> tuple[tuple[State, GraphDef[Module]], tp.Any]:
    kwargs = {}
    if keys is not None:
      # Rebuild an Rngs object from the forked keys inside the traced fn.
      kwargs['rngs'] = rnglib.Rngs(keys)
    module = graphdef.merge(state)
    out = f(module, *args, **kwargs)
    # Return the updated module state alongside the output so mutations
    # survive the functional jax.checkpoint boundary.
    state_and_def = module.split()
    return state_and_def, out

  state_and_def: tuple[State, GraphDef[Module]]
  # `options.static_argnums` was already shifted by 2 to account for the
  # (state, keys) arguments prepended here.
  state_and_def, out = jax.checkpoint(
    _remat_fn,
    prevent_cse=options.prevent_cse,
    static_argnums=options.static_argnums,
    policy=options.policy,
  )(state, keys, *args)
  # Propagate the post-call state back into the original module.
  module.update(state_and_def)
  return out
class Module(reprlib.Representable, metaclass=ModuleMeta):
  """Base class for NNX modules: stateful objects that can be split into a
  functional (State, GraphDef) pair and merged back.

  NOTE(review): decorators appear to have been stripped from this copy of the
  file — the repeated `split`/`extract`/`pop` signatures with `...` bodies are
  presumably `@tp.overload`s, and `init`/`create_abstract`/`partial_init` are
  presumably `@classmethod`s. Verify against upstream before relying on this
  copy.
  """

  if tp.TYPE_CHECKING:
    _module__state: ModuleState

  if not tp.TYPE_CHECKING:

    def __setattr__(self, name: str, value: Any) -> None:
      # Route all attribute writes through _setattr for trace-safety checks.
      self._setattr(name, value)

  def _setattr(self, name: str, value: tp.Any) -> None:
    if not self._module__state.trace_state.is_valid():
      raise errors.TraceContextError(
        'Cannot mutate Module from different trace level'
      )
    # Raw arrays/States are not hashable static state; require Variable wrappers.
    if isinstance(value, (jax.Array, np.ndarray, State)):
      raise ValueError(
        f"Trying to assign a '{type(value).__name__}' to the Module"
        f" attribute '{name}'. This is not supported. Non-hashable "
        'objects are not valid static state in JAX. Please wrap '
        'the value in a Variable type instead.'
      )
    object.__setattr__(self, name, value)

  def __deepcopy__(self: M, memo=None) -> M:
    # Deep-copy via the functional split/merge round trip.
    state, graphdef = self.split()
    graphdef = deepcopy(graphdef)
    state = deepcopy(state)
    return graphdef.merge(state)

  def __hash__(self) -> int:
    # Identity-based hash: two modules are equal only if they are the same object.
    return hash(self._module__state.id)

  def __nnx_repr__(self):
    # Uses a module-global "seen" set so recursive module graphs render as '...'.
    global SEEN_MODULES_REPR
    if SEEN_MODULES_REPR is None:
      SEEN_MODULES_REPR = set()
      clear_seen = True
    else:
      clear_seen = False
    if self._module__state.id in SEEN_MODULES_REPR:
      yield reprlib.Object(type=type(self), empty_repr='...')
      return
    yield reprlib.Object(type=type(self))
    SEEN_MODULES_REPR.add(self._module__state.id)
    try:
      for name, value in vars(self).items():
        # Show submodules and public non-Variable attributes.
        if isinstance(value, Module) or (
          not isinstance(value, Variable) and not name.startswith('_')
        ):
          yield reprlib.Attr(name, repr(value))
    finally:
      # Only the outermost call clears the global set.
      if clear_seen:
        SEEN_MODULES_REPR = None

  def init(cls: type[M], *args, **kwargs) -> tuple[State, GraphDef[M]]:
    # Construct an instance and immediately split it into functional form.
    return cls(*args, **kwargs).split()

  def create_abstract(cls: type[M]) -> type[M]:
    # Builds the module under jax.eval_shape so only shapes/dtypes are computed.
    def lift_rngs(kwargs: dict[str, tp.Any]):
      if 'rngs' in kwargs and isinstance(rngs := kwargs['rngs'], tp.Mapping):
        kwargs['rngs'] = Rngs(rngs)
      return kwargs

    def _create_abstract(accessor: DelayedAccessor, *args, **kwargs):
      constructor = accessor(cls)
      if 'rngs' in kwargs and isinstance(rngs := kwargs['rngs'], Rngs):
        kwargs['rngs'] = rngs.fork()
      state, graphdef = jax.eval_shape(
        lambda: constructor(*args, **lift_rngs(kwargs)).split()
      )
      return graphdef.merge(state)

    return CallableProxy(_create_abstract)  # type: ignore

  def partial_init(cls: type[M], state: State, *states: State) -> type[M]:
    """Creates a constuctor that initializes the Module with the given state.
    ``partial_init`` takes one or more States and returns a constructor that uses
    ``jax.jit`` to initialize the Module and update its state with the given
    States. Its semantically equivalent to::
      module = MyModule(*args, **kwargs)
      module.update(state, *states)
    However, thanks to dead code elimination the resulting constructor will only
    initialize the subset of ``Variable``'s that were part of the given state(s).
    Example::
      >>> import jax.numpy as jnp
      >>> import jax
      >>> from flax.experimental import nnx
      ...
      >>> bias = jax.random.normal(jax.random.key(0), (4,))
      >>> state = nnx.State({'bias': bias}) # in reality load it from a checkpoint
      >>> linear = nnx.Linear.partial_init(state)(2, 4, rngs=nnx.Rngs(1))
      >>> y = linear(jnp.ones((1, 2)))
      ...
      >>> assert jnp.allclose(linear.bias, bias)
      >>> assert y.shape == (1, 4)
    Args:
      state: The State to initialize the Module with.
      *states: Additional States to initialize the Module with.
    Returns:
      A constructor that initializes the Module with the given States.
    """
    states = (state, *states)

    def lift_rngs(kwargs: dict[str, tp.Any]):
      if 'rngs' in kwargs and isinstance(rngs := kwargs['rngs'], tp.Mapping):
        kwargs['rngs'] = Rngs(rngs)
      return kwargs

    def _partial_init(accessor: DelayedAccessor, *args, **kwargs):
      constructor: tp.Callable[[], M] = accessor(cls)
      if 'rngs' in kwargs and isinstance(rngs := kwargs['rngs'], Rngs):
        kwargs['rngs'] = rngs.fork()

      def _partial_init_constructor():
        # Construct, overwrite with the provided states, and split; under jit,
        # unused initializations are dead-code-eliminated.
        module = constructor(*args, **lift_rngs(kwargs))
        module.update(*states)
        return module.split()

      graphdef: GraphDef[M]
      state: State
      state, graphdef = jax.jit(_partial_init_constructor)()
      module = graphdef.merge(state)
      return module

    return CallableProxy(_partial_init)  # type: ignore

  def clone(self: M) -> M:
    # A clone is just a merge of a fresh split.
    return merge(self.split())

  def split(self: M) -> tuple[State, GraphDef[M]]:
    ...

  def split(self: M, first: filterlib.Filter, /) -> tuple[State, GraphDef[M]]:
    ...

  def split(
    self: M,
    first: filterlib.Filter,
    second: filterlib.Filter,
    /,
    *filters: filterlib.Filter,
  ) -> tuple[State, tpe.Unpack[tuple[State, ...]], GraphDef[M]]:
    ...

  def split(
    self: M, *filters: filterlib.Filter
  ) -> tuple[State, tpe.Unpack[tuple[State, ...]], GraphDef[M]]:
    # Implementation behind the overloads above: split the state by filters
    # (if any) and return the pieces followed by the GraphDef.
    state, graphdef, _ = graph_utils.graph_flatten(self)
    if len(filters) == 0:
      states = (state,)
    elif len(filters) == 1:
      states = (state.split(filters[0]),)
    else:
      states = state.split(filters[0], filters[1], *filters[2:])
    return *states, graphdef

  def get_state(self) -> State:
    state, _ = self.split()
    return state

  def get_graphdef(self: M) -> GraphDef[M]:
    _, graphdef = self.split()
    return graphdef

  def extract(self, first: filterlib.Filter, /) -> State:
    ...

  def extract(
    self,
    first: filterlib.Filter,
    second: filterlib.Filter,
    /,
    *filters: filterlib.Filter,
  ) -> tuple[State, ...]:
    ...

  def extract(
    self,
    first: filterlib.Filter,
    /,
    *filters: filterlib.Filter,
  ) -> tp.Union[State, tuple[State, ...]]:
    # Non-destructive: returns the matching sub-state(s) without removing them.
    state = self.get_state()
    if len(filters) == 0:
      states = state.extract(first)
    else:
      states = state.extract(first, filters[0], *filters[1:])
    return states

  def pop(
    self,
    filter: filterlib.Filter,
    /,
  ) -> State:
    ...

  def pop(
    self,
    filter: filterlib.Filter,
    filter2: filterlib.Filter,
    /,
    *filters: filterlib.Filter,
  ) -> tuple[State, ...]:
    ...

  def pop(
    self, *filters: filterlib.Filter
  ) -> tp.Union[State, tuple[State, ...]]:
    # Destructive: removes and returns the matching sub-state(s).
    if len(filters) == 0:
      raise ValueError('Expected at least one filter')
    states = graph_utils.graph_pop(self, filters)
    if len(states) == 1:
      return states[0]
    else:
      return states

  def apply(self: M) -> ApplyCaller[M]:
    # Returns a proxy whose method calls run on a clone, leaving `self` intact.
    def _apply(accessor: DelayedAccessor, *args, **kwargs) -> tuple[tp.Any, M]:
      module = self.clone()
      fn = accessor(module)
      out = fn(*args, **kwargs)
      return out, module

    return CallableProxy(_apply)  # type: ignore

  def update(self: M, update: Updates[M], /, *updates: Updates[M]) -> None:
    # Accepts any mix of States, GraphDefs and Modules (at most one of the
    # latter two) and applies the static and dynamic updates in place.
    updates = (update, *updates)

    def _states_and_moduledef(
      updates,
    ) -> tuple[list[State], tp.Optional[Module]]:
      leaves = jax.tree_util.tree_leaves(
        updates, is_leaf=lambda x: isinstance(x, (GraphDef, State))
      )
      states: list[State] = []
      module: tp.Optional[Module] = None
      for leaf in leaves:
        if isinstance(leaf, (Module, GraphDef)):
          if module is not None:
            raise ValueError(
              'Expected only one GraphDef or Module in the updates'
            )
          if isinstance(leaf, Module):
            module = leaf
            states.append(leaf.get_state())
          else:
            module = leaf.make_empty()
        elif isinstance(leaf, State):
          states.append(leaf)
        else:
          raise ValueError(
            'Expected a GraphDef, Module or State, got'
            f' {type(leaf).__name__}'
          )
      return states, module

    states, module_update = _states_and_moduledef(updates)
    if module_update is not None:
      graph_utils.graph_update_static(self, module_update)
    if states:
      graph_utils.graph_update_dynamic(self, states)

  def sow(
    self,
    variable_type: tp.Type[variableslib.Variable[tp.Any]],
    name: str,
    value: A,
    reduce_fn: tp.Callable[[B, A], B] = tuple_reduce,
    init_fn: tp.Callable[[], B] = tuple_init,  # type: ignore
  ) -> None:
    # Accumulates `value` into the attribute `name` (default: tuple append),
    # creating it with `init_fn` on first use.
    if hasattr(self, name):
      variable = getattr(self, name)
      if not isinstance(variable, variableslib.Variable):
        raise ValueError(
          f"Expected '{name}' to be a Variable, got {type(variable).__name__}"
        )
      elif type(variable) != variable_type:
        raise ValueError(
          f"Expected '{name}' to be of type '{variable_type.__name__}', "
          f"got '{type(variable).__name__}'"
        )
      variable.raw_value = reduce_fn(variable.raw_value, value)
    else:
      reduced_value = reduce_fn(init_fn(), value)
      setattr(self, name, variable_type(reduced_value))

  def modules(self) -> tp.Iterator[tuple[Path, Module]]:
    # Yields (path, module) for every Module node in the graph, including self.
    for path, value in graph_utils.iter_nodes(self):
      if isinstance(value, Module):
        yield path, value

  def set_attributes(
    self,
    *filters: filterlib.Filter,
    raise_if_not_found: bool = True,
    **attributes: tp.Any,
  ) -> None:
    """Sets the attributes of nested Modules including the current Module.
    If the attribute is not found in the Module, it is ignored.
    Example::
      >>> from flax.experimental import nnx
      ...
      >>> class Block(nnx.Module):
      ...   def __init__(self, din, dout, *, rngs: nnx.Rngs):
      ...     self.linear = nnx.Linear(din, dout, rngs=rngs)
      ...     self.dropout = nnx.Dropout(0.5, deterministic=False)
      ...     self.batch_norm = nnx.BatchNorm(10, use_running_average=False, rngs=rngs)
      ...
      >>> block = Block(2, 5, rngs=nnx.Rngs(0))
      >>> block.dropout.deterministic, block.batch_norm.use_running_average
      (False, False)
      >>> block.set_attributes(deterministic=True, use_running_average=True)
      >>> block.dropout.deterministic, block.batch_norm.use_running_average
      (True, True)
    ``Filter``'s can be used to set the attributes of specific Modules::
      >>> block = Block(2, 5, rngs=nnx.Rngs(0))
      >>> block.set_attributes(nnx.Dropout, deterministic=True, use_running_average=True)
      >>> # Only the dropout will be modified
      >>> block.dropout.deterministic, block.batch_norm.use_running_average
      (True, False)
    Args:
      *filters: Filters to select the Modules to set the attributes of.
      raise_if_not_found: If True (default), raises a ValueError if at least one attribute
        instance is not found in one of the selected Modules.
      **attributes: The attributes to set.
    """
    remaining_attributes = set(attributes.keys())
    if not filters:
      filters = (True,)
    predicates = tuple(map(filterlib.to_predicate, filters))
    for path, module in self.modules():
      for predicate in predicates:
        if predicate(path, module):
          for name, value in attributes.items():
            if hasattr(module, name):
              if name in remaining_attributes:
                remaining_attributes.remove(name)
              setattr(module, name, value)
          # First matching predicate wins for each module.
          break
    if remaining_attributes and raise_if_not_found:
      raise ValueError(
        f'Could not find at least one instance of the following attributes: {remaining_attributes}'
      )

  def __init_subclass__(cls, experimental_pytree: bool = False) -> None:
    super().__init_subclass__()
    # Every subclass participates in the NNX graph protocol.
    graph_utils.register_mutable_node_type(
      type=cls,
      flatten=_module_graph_flatten,
      set_key=_module_graph_set_key,
      pop_key=_module_graph_pop_key,
      create_empty=_module_graph_create_empty,
      clear=_module_graph_clear,
    )
    if experimental_pytree:
      # Opt-in: also register the subclass as a JAX pytree.
      jtu.register_pytree_with_keys(
        cls,
        partial(_module_flatten, with_keys=True),
        _module_unflatten,
        flatten_func=partial(_module_flatten, with_keys=False),
      )
def remat(
  f: F,
  *,
  prevent_cse: bool = True,
  static_argnums: tp.Union[int, tuple[int, ...]] = (),
  policy: tp.Optional[tp.Callable[..., bool]] = None,
  is_init: tp.Optional[bool] = None,
) -> F:
  """Wraps a module method so its application is rematerialized.

  The returned wrapper delegates to ``remat_apply``, which runs ``f`` under
  ``jax.checkpoint`` with the given options. ``__init__`` methods (detected
  by name when ``is_init`` is None) are returned unwrapped.
  """
  if is_init is None:
    is_init = f.__name__ == '__init__'

  if is_init:
    # Nothing to rematerialize for constructors.
    return f

  options = RematOptions(
    prevent_cse=prevent_cse,
    static_argnums=static_argnums,
    policy=policy,
  )

  @functools.wraps(f)
  def remat_wrapper(
    module: Module, *args, rngs: tp.Optional[rnglib.Rngs] = None
  ):
    return remat_apply(options, f, module, args, rngs)

  return remat_wrapper  # type: ignore
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.