id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
22,523
from __future__ import annotations import json import multiprocessing import os from copy import copy from logging import getLogger from pathlib import Path import PySimpleGUI as sg import sounddevice as sd import soundfile as sf import torch from pebble import ProcessFuture, ProcessPool from . import __version__ from .utils import get_optimal_device def validate_output_file_type(output_path: Path) -> bool: supported_file_types = sorted( [f".{extension.lower()}" for extension in sf.available_formats().keys()] ) if not output_path.suffix: sg.popup_ok( "Error: Output path missing file type extension, enter " + "one of the following manually:\n\n" + "\n".join(supported_file_types) ) return False if output_path.suffix.lower() not in supported_file_types: sg.popup_ok( f"Error: {output_path.suffix.lower()} is not a supported " + "extension; use one of the following:\n\n" + "\n".join(supported_file_types) ) return False return True
null
22,524
from __future__ import annotations import json import multiprocessing import os from copy import copy from logging import getLogger from pathlib import Path import PySimpleGUI as sg import sounddevice as sd import soundfile as sf import torch from pebble import ProcessFuture, ProcessPool from . import __version__ from .utils import get_optimal_device def get_devices( update: bool = True, ) -> tuple[list[str], list[str], list[int], list[int]]: if update: sd._terminate() sd._initialize() devices = sd.query_devices() hostapis = sd.query_hostapis() for hostapi in hostapis: for device_idx in hostapi["devices"]: devices[device_idx]["hostapi_name"] = hostapi["name"] input_devices = [ f"{d['name']} ({d['hostapi_name']})" for d in devices if d["max_input_channels"] > 0 ] output_devices = [ f"{d['name']} ({d['hostapi_name']})" for d in devices if d["max_output_channels"] > 0 ] input_devices_indices = [d["index"] for d in devices if d["max_input_channels"] > 0] output_devices_indices = [ d["index"] for d in devices if d["max_output_channels"] > 0 ] return input_devices, output_devices, input_devices_indices, output_devices_indices
null
22,525
from __future__ import annotations import json import multiprocessing import os from copy import copy from logging import getLogger from pathlib import Path import PySimpleGUI as sg import sounddevice as sd import soundfile as sf import torch from pebble import ProcessFuture, ProcessPool from . import __version__ from .utils import get_optimal_device LOG = getLogger(__name__) def play_audio(path: Path | str): if isinstance(path, Path): path = path.as_posix() data, sr = sf.read(path) sd.play(data, sr) def after_inference(window: sg.Window, path: Path, auto_play: bool, output_path: Path): try: LOG.info(f"Finished inference for {path.stem}{path.suffix}") window["infer"].update(disabled=False) if auto_play: play_audio(output_path) except Exception as e: LOG.exception(e)
null
22,526
import os
import sys
from logging import DEBUG, INFO, StreamHandler, basicConfig, captureWarnings, getLogger
from pathlib import Path

from rich.logging import RichHandler

# Guard so logging is configured at most once per process.
LOGGER_INIT = False


def is_notebook() -> bool:
    """Return True when running inside a Jupyter notebook kernel.

    Plain IPython consoles and VS Code's interactive window report False.
    """
    try:
        from IPython import get_ipython

        # Fixed: the original had unreachable `return False` statements
        # directly after each `raise` — dead code removed.
        if "IPKernelApp" not in get_ipython().config:  # pragma: no cover
            raise ImportError("console")
        if "VSCODE_PID" in os.environ:  # pragma: no cover
            raise ImportError("vscode")
    except Exception:
        # No IPython installed, no kernel, or VS Code — not a notebook.
        return False
    else:  # pragma: no cover
        return True


def init_logger() -> None:
    """Configure root logging once per process (idempotent)."""
    global LOGGER_INIT
    if LOGGER_INIT:
        return

    # Heuristic: running under a test working directory enables DEBUG.
    IS_TEST = "test" in Path.cwd().stem
    package_name = sys.modules[__name__].__package__
    basicConfig(
        level=INFO,
        format="%(asctime)s %(message)s",
        datefmt="[%X]",
        handlers=[
            # Plain StreamHandler inside notebooks, RichHandler elsewhere.
            StreamHandler() if is_notebook() else RichHandler(),
            # FileHandler(f"{package_name}.log"),
        ],
    )
    if IS_TEST:
        getLogger(package_name).setLevel(DEBUG)
    captureWarnings(True)
    LOGGER_INIT = True
null
22,527
from typing import List, Tuple

import sphinx
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from sphinx.util.docutils import SphinxDirective


class CodeDiffDirective(SphinxDirective):
    """Render a side-by-side code diff plus a hidden runnable doctest node."""

    has_content = True
    option_spec = {
        'title_left': directives.unchanged,
        'title_right': directives.unchanged,
        'code_sep': directives.unchanged,
        'sync': directives.flag,
    }

    def run(self):
        table_code, test_code = CodeDiffParser().parse(
            list(self.content), **self.options
        )

        # Hidden comment node carrying the runnable code. The "testnodetype"
        # attribute makes the doctest builder collect it; this is not
        # officially documented but can be seen in the Sphinx source:
        # https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/ext/doctest.py
        # (search for 'testnodetype').
        joined_test = '\n'.join(test_code)
        test_node = nodes.comment(joined_test, joined_test, testnodetype='testcode')
        # Attach source info so doctest failures point at the right location.
        self.set_source_info(test_node)
        test_node['options'] = {}
        test_node['language'] = 'python3'

        # Visible side-by-side diff view shown in the rendered docs.
        table_node = nodes.paragraph()
        self.content = ViewList(table_code, self.content.parent)
        self.state.nested_parse(self.content, self.content_offset, table_node)

        return [table_node, test_node]


def setup(app):
    app.add_directive('codediff', CodeDiffDirective)
    return {
        'version': sphinx.__display_version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
null
22,528
import importlib
from typing import Any, Dict, Optional

import sphinx
import sphinx.ext.autosummary.generate as ag
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from sphinx.util.docutils import SphinxDirective

from docs.conf_sphinx_patch import generate_autosummary_content


def render_module(modname: str, qualname: str, app) -> str:
    """Render ``modname.qualname`` via the 'flax_module' autosummary template.

    Fixed: the original re-declared ``generate_autosummary_content`` locally
    with a signature but no body, shadowing the implementation imported from
    ``docs.conf_sphinx_patch`` (and referencing ``Any``/``Dict`` without
    importing them). The broken stub is removed so the imported, patched
    implementation is called; the missing ``typing`` import is added.

    Args:
        modname: Dotted module path containing the object to document.
        qualname: Attribute name of the object inside that module.
        app: The Sphinx application object.

    Returns:
        The rendered autosummary content as a string.
    """
    parent = importlib.import_module(modname)
    obj = getattr(parent, qualname)
    template = ag.AutosummaryRenderer(app)
    template_name = 'flax_module'
    imported_members = False
    recursive = False
    context: Dict = {}
    return generate_autosummary_content(
        qualname,
        obj,
        parent,
        template,
        template_name,
        imported_members,
        app,
        recursive,
        context,
        modname,
        qualname,
    )
null
22,529
import importlib

import sphinx
import sphinx.ext.autosummary.generate as ag
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from sphinx.util.docutils import SphinxDirective

from docs.conf_sphinx_patch import generate_autosummary_content


class FlaxModuleDirective(SphinxDirective):
    """Inline autosummary-rendered documentation for a Flax module class."""

    has_content = True
    option_spec = {
        'module': directives.unchanged,
        'class': directives.unchanged,
    }

    def run(self):
        rendered = render_module(
            self.options['module'], self.options['class'], self.env.app
        )
        rendered_lines = rendered.splitlines()

        # Parse the rendered reST into a container that holds the result.
        container_node = nodes.container()
        self.content = ViewList(rendered_lines, self.content.parent)
        self.state.nested_parse(self.content, self.content_offset, container_node)

        return [container_node]


def setup(app):
    app.add_directive('flax_module', FlaxModuleDirective)
    return {
        'version': sphinx.__display_version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
null
22,530
import collections
import os

from absl import logging
from clu import metric_writers
from clu import periodic_actions
from flax import linen as nn
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.numpy as jnp
from jax.sharding import PartitionSpec as P, Mesh, NamedSharding
import ml_collections
import numpy as np
import optax
import tensorflow as tf

import input_pipeline
import models
import temperature_sampler
import utils


def per_host_sum_pmap(in_tree):
    """Execute psum on in_tree's leaves over one device per host."""
    # Keep only the first device seen for each host (process), preserving
    # device enumeration order.
    first_device_per_host = {}
    for d in jax.devices():
        first_device_per_host.setdefault(d.process_index, d)
    devices = list(first_device_per_host.values())
    host_psum = jax.pmap(lambda x: jax.lax.psum(x, "i"), "i", devices=devices)

    def pre_pmap(xs):
        # Add a leading length-1 device axis so pmap can map over it.
        return jax.tree_util.tree_map(
            lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs
        )

    def post_pmap(xs):
        # Strip the device axis again after the reduction.
        return jax.tree_util.tree_map(lambda x: x[0], xs)

    return post_pmap(host_psum(pre_pmap(in_tree)))
Execute psum on in_tree's leaves over one device per host.
22,531
import collections
import os

from absl import logging
from clu import metric_writers
from clu import periodic_actions
from flax import linen as nn
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.numpy as jnp
from jax.sharding import PartitionSpec as P, Mesh, NamedSharding
import ml_collections
import numpy as np
import optax
import tensorflow as tf

import input_pipeline
import models
import temperature_sampler
import utils

# NOTE(review): `rsqrt_schedule`, `compute_weighted_cross_entropy`,
# `compute_metrics`, `pad_examples` and `tohost` are referenced below but are
# defined elsewhere in this module (outside this excerpt).


def create_learning_rate_schedule(learning_rate: float, warmup_steps: int):
    """Creates a rsqrt schedule with linear warmup."""
    return optax.join_schedules(
        [
            optax.linear_schedule(
                init_value=0,
                end_value=learning_rate,
                transition_steps=warmup_steps,
            ),
            rsqrt_schedule(init_value=learning_rate, shift=warmup_steps),
        ],
        boundaries=[warmup_steps],
    )


def train_step(
    state,
    batch,
    config,
    learning_rate_fn,
    label_smoothing=0.0,
    dropout_rng=None,
):
    """Perform a single training step.

    Returns the updated train state and a metrics dict (including the
    learning rate used for this step).
    """
    # X_position and X_segmentation are needed only when using "packed examples"
    # where multiple sequences are packed into the same example with this
    # metadata.
    # if such features are not present they are ignored and the example is treated
    # like a normal, unpacked sequence example.
    train_keys = ["inputs", "inputs_position", "inputs_segmentation"]
    (inputs, inputs_positions, inputs_segmentation) = (
        batch.get(k, None) for k in train_keys
    )

    # Token id 0 is padding; padded positions get zero loss weight.
    weights = jnp.where(inputs > 0, 1, 0).astype(jnp.float32)

    # Fold in the step number so each step uses a distinct dropout key.
    dropout_rng = jax.random.fold_in(dropout_rng, state.step)

    def loss_fn(params):
        """loss function used for training."""
        logits = models.TransformerLM(config).apply(
            {"params": params},
            inputs,
            inputs_positions=inputs_positions,
            inputs_segmentation=inputs_segmentation,
            rngs={"dropout": dropout_rng},
        )

        loss, weight_sum = compute_weighted_cross_entropy(
            logits, inputs, weights, label_smoothing
        )
        mean_loss = loss / weight_sum
        return mean_loss, logits

    step = state.step
    lr = learning_rate_fn(step)
    grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
    (_, logits), grads = grad_fn(state.params)
    new_state = state.apply_gradients(grads=grads)
    metrics = compute_metrics(logits, inputs, weights)
    metrics["learning_rate"] = lr

    return new_state, metrics


def eval_step(params, batch, config, label_smoothing=0.0):
    """Calculate evaluation metrics on a batch."""
    inputs = batch["inputs"]
    weights = jnp.where(inputs > 0, 1.0, 0.0)
    logits = models.TransformerLM(config).apply({"params": params}, inputs)

    return compute_metrics(logits, inputs, weights, label_smoothing)


def predict_step(
    inputs, params, rngkey, eos_id, max_decode_len, config, temperature, top_k
):
    """Predict language model on a batch."""
    # Initialize a fresh autoregressive cache sized for the decode length.
    target_shape = (inputs.shape[0], max_decode_len) + inputs.shape[2:]
    initial_variables = models.TransformerLM(config).init(
        jax.random.PRNGKey(0), jnp.ones(target_shape, config.dtype)
    )
    cache = initial_variables["cache"]

    def tokens_ids_to_logits(flat_ids, flat_cache):
        """Token slice to logits from decoder model."""
        # --> [batch * beam, 1, vocab]
        flat_logits, new_vars = models.TransformerLM(config).apply(
            {"params": params, "cache": flat_cache}, flat_ids, mutable=["cache"]
        )
        new_flat_cache = new_vars["cache"]
        # Remove singleton sequence-length dimension:
        # [batch, 1, vocab] --> [batch, vocab]
        flat_logits = flat_logits.squeeze(axis=1)
        return flat_logits, new_flat_cache

    # Using the above-defined single-step decoder function, run a
    # beam search over possible sequences given input encoding.
    seqs = temperature_sampler.temperature_sample(
        inputs,
        cache,
        tokens_ids_to_logits,
        rngkey,
        temperature=temperature,
        topk=top_k,
        eos_token=eos_id,
    )

    return seqs


def evaluate(
    *,
    jit_eval_step,
    params,
    eval_ds: tf.data.Dataset,
    num_eval_steps: int,
    config,
):
    """Evaluate the target and return a dictionary with the metrics."""
    logging.info("Gathering evaluation metrics.")
    eval_metrics = []
    eval_iter = iter(eval_ds)  # pytype: disable=wrong-arg-types
    for _, eval_batch in zip(range(num_eval_steps), eval_iter):
        eval_batch = jax.tree_util.tree_map(lambda x: x._numpy(), eval_batch)  # pylint: disable=protected-access
        metrics = jit_eval_step(params, eval_batch, config)
        eval_metrics.append(metrics)
    # Sum per-batch metric totals, then normalize by the summed denominator.
    eval_metrics = common_utils.stack_forest(eval_metrics)
    eval_metrics_sums = jax.tree_util.tree_map(jnp.sum, eval_metrics)
    eval_denominator = eval_metrics_sums.pop("denominator")
    eval_summary = jax.tree_util.tree_map(
        lambda x: x / eval_denominator,  # pylint: disable=cell-var-from-loop
        eval_metrics_sums,
    )
    return eval_summary


def generate_prediction(
    *,
    jit_pred_step,
    params,
    tokenized_prompts,
    eos_id,
    inference_rng,
    decode_tokens,
    config,
    predict_config,
):
    """Generate text from the prompt.

    Returns the generated samples concatenated into a single string
    (one sample per paragraph) suitable for TensorBoard text logging.
    """
    n_devices = jax.local_device_count()

    logging.info("Generating text.")
    predictions = []
    # Use batch of prompts provided by user.
    for pred_batch in jnp.array_split(
        tokenized_prompts, int(np.ceil(len(tokenized_prompts) / n_devices))
    ):
        cur_pred_batch_size = pred_batch.shape[0]
        if cur_pred_batch_size % n_devices:
            # Pad up to a multiple of the device count so the batch shards evenly.
            padded_size = int(np.ceil(cur_pred_batch_size / n_devices) * n_devices)
            pred_batch = jax.tree_util.tree_map(
                lambda x: pad_examples(x, padded_size), pred_batch
            )  # pylint: disable=cell-var-from-loop
        pred_batch = common_utils.shard(pred_batch)
        inference_rng, sub_rng = random.split(inference_rng)
        inference_rngs = random.split(sub_rng, n_devices)

        predicted = jit_pred_step(
            pred_batch,
            params,
            inference_rngs,
            eos_id,
            config.max_predict_length,
            predict_config,
            config.sampling_temperature,
            config.sampling_top_k,
        )
        predicted = tohost(predicted)
        # Iterate through non-padding examples of batch.
        for s in predicted[:cur_pred_batch_size]:
            prediction = decode_tokens(s)
            logging.info("Sample: %s", str(prediction))
            predictions.append(prediction)

    # Save generated texts for tensorboard.
    exemplars = ""
    for prediction in predictions:
        exemplars += f"{prediction}\n\n"
    return exemplars


def train_and_evaluate(config: ml_collections.ConfigDict, workdir: str):
    """Runs a training and evaluation loop.

    Args:
      config: Configuration to use.
      workdir: Working directory for checkpoints and TF summaries. If this
        contains checkpoint training will be resumed from the latest
        checkpoint.
    """
    tf.io.gfile.makedirs(workdir)

    vocab_path = config.vocab_path
    if vocab_path is None:
        vocab_path = os.path.join(workdir, "sentencepiece_model")
        config.vocab_path = vocab_path
    tf.io.gfile.makedirs(os.path.split(vocab_path)[0])

    # Load Dataset
    # ---------------------------------------------------------------------------
    logging.info("Initializing dataset.")
    train_ds, eval_ds, _, encoder = input_pipeline.get_datasets(
        n_devices=jax.local_device_count(), config=config, vocab_path=vocab_path
    )

    train_iter = iter(train_ds)
    vocab_size = int(encoder.vocab_size())
    eos_id = temperature_sampler.EOS_ID  # Default Sentencepiece EOS token.

    def decode_tokens(toks):
        # Decode up to and including the first EOS token.
        valid_toks = toks[: np.argmax(toks == eos_id) + 1].astype(np.int32)
        return encoder.detokenize(valid_toks).numpy().decode("utf-8")

    def encode_strings(strs, max_len):
        tokenized_batch = np.zeros((len(strs), max_len), np.int32)
        for i, s in enumerate(strs):
            toks = encoder.tokenize(s).numpy()
            # Remove EOS token in prompt.
            tokenized_batch[i, : toks.shape[0] - 1] = toks[:-1]
        return tokenized_batch

    tokenized_prompts = encode_strings(
        [config.prompts], config.max_predict_length
    )

    logging.info("Initializing model, optimizer, and step functions.")
    # Build Model and Optimizer
    # ---------------------------------------------------------------------------
    train_config = models.TransformerConfig(
        vocab_size=vocab_size,
        output_vocab_size=vocab_size,
        logits_via_embedding=config.logits_via_embedding,
        dtype=jnp.bfloat16 if config.use_bfloat16 else jnp.float32,
        emb_dim=config.emb_dim,
        num_heads=config.num_heads,
        num_layers=config.num_layers,
        qkv_dim=config.qkv_dim,
        mlp_dim=config.mlp_dim,
        max_len=max(config.max_target_length, config.max_eval_target_length),
        dropout_rate=config.dropout_rate,
        attention_dropout_rate=config.attention_dropout_rate,
        deterministic=False,
        decode=False,
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
    )
    # Eval disables dropout; predict additionally enables the decode cache.
    eval_config = train_config.replace(deterministic=True)
    predict_config = train_config.replace(deterministic=True, decode=True)

    # Mesh definition
    devices_array = utils.create_device_mesh(config)
    mesh = Mesh(devices_array, config.mesh_axes)

    start_step = 0
    rng = jax.random.PRNGKey(config.seed)
    rng, init_rng = jax.random.split(rng)
    rng, inference_rng = random.split(rng)

    m = models.TransformerLM(eval_config)
    learning_rate_fn = create_learning_rate_schedule(
        learning_rate=config.learning_rate, warmup_steps=config.warmup_steps
    )

    optimizer = optax.adamw(
        learning_rate_fn,
        b1=0.9,
        b2=0.98,
        eps=1e-9,
        weight_decay=config.weight_decay,
    )

    state, state_mesh_annotations = utils.setup_initial_state(
        m, optimizer, config, init_rng, mesh
    )
    data_sharding = NamedSharding(mesh, P(config.data_sharding))

    if config.restore_checkpoints:
        # Restore unreplicated optimizer + model state from last checkpoint.
        state = checkpoints.restore_checkpoint(workdir, state)
        # Grab last step.
        start_step = int(state.step)

    writer = metric_writers.create_default_writer(
        workdir, just_logging=jax.process_index() > 0
    )
    if start_step == 0:
        writer.write_hparams(dict(config))

    # compile multidevice versions of train/eval/predict step fn.
    jit_train_step = jax.jit(
        train_step,
        in_shardings=(
            state_mesh_annotations,
            data_sharding,
            None,
        ),  # type: ignore
        out_shardings=(state_mesh_annotations, None),  # type: ignore
        static_argnums=(2, 3, 4),
        donate_argnums=0,
    )

    jit_eval_step = jax.jit(
        eval_step,
        in_shardings=(
            state_mesh_annotations.params,
            data_sharding,
        ),  # type: ignore
        out_shardings=None,  # type: ignore
        static_argnums=(2, 3),
    )

    # Since the inputs and rngkey args for predict_step will be batched,
    # we must vmap them, otherwise the global arrays will be seen in each device
    jit_pred_step = jax.jit(
        jax.vmap(
            predict_step,
            in_axes=(
                0,
                jax.tree_util.tree_map(lambda x: None, state.params),
                0,
                None,
                None,
                jax.tree_util.tree_map(lambda x: None, predict_config),
                None,
                None,
            ),
        ),
        in_shardings=(
            data_sharding,
            state_mesh_annotations.params,
            data_sharding,
        ),  # type: ignore
        out_shardings=data_sharding,  # type: ignore
        static_argnums=(3, 4, 5, 6, 7),
    )

    # Main Train Loop
    # ---------------------------------------------------------------------------

    # We init the first set of dropout PRNG keys, but update it afterwards inside
    # the main pmap'd training update for performance.
    dropout_rngs = rng

    logging.info("Starting training loop.")
    hooks = []
    report_progress = periodic_actions.ReportProgress(
        num_train_steps=config.num_train_steps, writer=writer
    )
    if jax.process_index() == 0:
        hooks += [
            report_progress,
            periodic_actions.Profile(logdir=workdir, num_profile_steps=5),
        ]
    train_metrics = []
    with metric_writers.ensure_flushes(writer):
        for step in range(start_step, config.num_train_steps):
            is_last_step = step == config.num_train_steps - 1

            # Shard data to devices and do a training step.
            with jax.profiler.StepTraceAnnotation("train", step_num=step):
                batch = next(train_iter)
                batch = jax.tree_util.tree_map(lambda x: jnp.array(x), batch)
                state, metrics = jit_train_step(
                    state, batch, train_config, learning_rate_fn, 0.0, dropout_rngs
                )
                train_metrics.append(metrics)

            # Quick indication that training is happening.
            logging.log_first_n(logging.INFO, "Finished training step %d.", 5, step)
            for h in hooks:
                h(step)

            # Periodic metric handling.
            if step % config.eval_every_steps == 0 or is_last_step:
                with report_progress.timed("training_metrics"):
                    logging.info("Gathering training metrics.")
                    train_metrics = common_utils.stack_forest(train_metrics)
                    lr = train_metrics.pop("learning_rate").mean()
                    metrics_sums = jax.tree_util.tree_map(jnp.sum, train_metrics)
                    denominator = metrics_sums.pop("denominator")
                    summary = jax.tree_util.tree_map(
                        lambda x: x / denominator, metrics_sums
                    )  # pylint: disable=cell-var-from-loop
                    summary["learning_rate"] = lr
                    # NOTE(review): `a_max` was renamed to `max` in newer JAX
                    # versions of jnp.clip — confirm against the pinned version.
                    summary["perplexity"] = jnp.clip(
                        jnp.exp(summary["loss"]), a_max=1.0e4
                    )
                    summary = {"train_" + k: v for k, v in summary.items()}
                    writer.write_scalars(step, summary)
                    train_metrics = []

                with report_progress.timed("eval"):
                    eval_results = evaluate(
                        jit_eval_step=jit_eval_step,
                        params=state.params,
                        eval_ds=eval_ds,
                        num_eval_steps=config.num_eval_steps,
                        config=eval_config,
                    )
                    # (clipped) perplexity after averaging log-perplexities
                    eval_results["perplexity"] = jnp.clip(
                        jnp.exp(eval_results["loss"]), a_max=1.0e4
                    )
                    writer.write_scalars(
                        step, {"eval_" + k: v for k, v in eval_results.items()}
                    )

                with report_progress.timed("generate_text"):
                    exemplars = generate_prediction(
                        jit_pred_step=jit_pred_step,
                        params=state.params,
                        tokenized_prompts=tokenized_prompts,
                        eos_id=eos_id,
                        inference_rng=inference_rng,
                        decode_tokens=decode_tokens,
                        config=config,
                        predict_config=predict_config,
                    )
                    writer.write_texts(step, {"samples": exemplars})

            # Save a checkpoint on one host after every checkpoint_freq steps.
            save_checkpoint = (
                step % config.checkpoint_every_steps == 0 or is_last_step
            )
            if config.save_checkpoints and save_checkpoint:
                logging.info("Saving checkpoint step %d.", step)
                with report_progress.timed("checkpoint"):
                    checkpoints.save_checkpoint_multiprocess(workdir, state, step)
Runs a training and evaluation loop. Args: config: Configuration to use. workdir: Working directory for checkpoints and TF summaries. If this contains checkpoint training will be resumed from the latest checkpoint.
22,532
from typing import Callable, Any, Optional

from flax import linen as nn
from flax import struct
from jax import lax
import jax.numpy as jnp
import numpy as np


def shift_right(x, axis=1):
    """Return x shifted one step right along axis, zero-padded at the front."""
    pad_spec = [(0, 0) for _ in range(len(x.shape))]
    pad_spec[axis] = (1, 0)
    padded = jnp.pad(
        x, pad_spec, mode='constant', constant_values=x.dtype.type(0)
    )
    # Drop the last element along `axis` so the shape is unchanged.
    return lax.dynamic_slice_in_dim(padded, 0, padded.shape[axis] - 1, axis)


def shift_inputs(x, segment_ids=None, axis=1):
    """Shift inputs and replace EOS by 0 for packed inputs."""
    shifted = shift_right(x, axis=axis)
    if segment_ids is None:
        return shifted
    # For packed targets, the first shifted token of a new sequence must be
    # 0 rather than the EOS token of the previous sequence: keep a token only
    # where the shifted segment id matches the current one.
    same_segment = segment_ids == shift_right(segment_ids, axis=axis)
    return shifted * same_segment
Shift inputs and replace EOS by 0 for packed inputs.
22,533
from typing import Callable, Any, Optional

from flax import linen as nn
from flax import struct
from jax import lax
import jax.numpy as jnp
import numpy as np


def sinusoidal_init(max_len=2048, min_scale=1.0, max_scale=10000.0):
    """1D Sinusoidal Position Embedding Initializer.

    Args:
      max_len: maximum possible length for the input.
      min_scale: float: minimum frequency-scale in sine grating.
      max_scale: float: maximum frequency-scale in sine grating.

    Returns:
      output: init function returning `(1, max_len, d_feature)`
    """

    def init(key, shape, dtype=np.float32):
        """Sinusoidal init."""
        # The key/dtype arguments exist only to satisfy the initializer
        # signature; the table is deterministic.
        del key, dtype
        d_feature = shape[-1]
        half = d_feature // 2
        positions = np.arange(0, max_len)[:, np.newaxis]
        # Geometric frequency ladder from min_scale up to max_scale.
        scale_factor = -np.log(max_scale / min_scale) / (half - 1)
        div_term = min_scale * np.exp(np.arange(0, half) * scale_factor)
        table = np.zeros((max_len, d_feature), dtype=np.float32)
        table[:, :half] = np.sin(positions * div_term)
        table[:, half : 2 * half] = np.cos(positions * div_term)
        # Prepend a batch axis: [1, max_len, d_feature].
        return jnp.array(table[np.newaxis, :, :])

    return init
1D Sinusoidal Position Embedding Initializer. Args: max_len: maximum possible length for the input. min_scale: float: minimum frequency-scale in sine grating. max_scale: float: maximum frequency-scale in sine grating. Returns: output: init function returning `(1, max_len, d_feature)`
22,534
import ml_collections


def get_config():
    """Get the default hyperparameter configuration."""
    config = ml_collections.ConfigDict()

    # Path to load or store sentencepiece vocab file.
    config.vocab_path = None

    # Vocabulary size if `vocab_path` is not given.
    config.vocab_size = 30_000
    # Upper bound on corpus characters used to train the sentencepiece vocab.
    config.max_corpus_chars = 10**7

    # Name of TFDS translation dataset to use.
    config.dataset_name = 'lm1b'

    # Optional name of TFDS translation dataset to use for evaluation.
    config.eval_dataset_name = 'lm1b'
    config.eval_split = 'test'

    # Per device batch size for training.
    config.per_device_batch_size = 32

    # Per device batch size for evaluation.
    config.eval_per_device_batch_size = 32

    # Sampling temperature for language model inference.
    config.sampling_temperature = 0.6

    # Top k cutoff for logit sampling. If 0 then no top-k cutoff is used.
    config.sampling_top_k = 20

    config.num_train_steps = 500_000

    # Number of steps to take during evaluation. Large enough to evaluate all.
    # Large enough to evaluate all samples: 306_688 / (32 * 8) = 1198
    config.num_eval_steps = 2_000

    # Number of steps to generate predictions.
    # -1 will use the whole eval dataset.
    config.num_predict_steps = -1

    # Base learning rate.
    config.learning_rate = 0.0016

    # Linear learning rate warmup.
    config.warmup_steps = 1000

    # Cross entropy loss label smoothing.
    config.label_smoothing = 0.0

    # Decay factor for AdamW style weight decay.
    config.weight_decay = 0.1

    # Maximum length cutoff for training examples.
    config.max_target_length = 128
    # Maximum length cutoff for eval examples.
    config.max_eval_target_length = 512
    # Maximum length cutoff for predicted tokens.
    config.max_predict_length = 50

    # Final logit transform uses embedding matrix transpose.
    config.logits_via_embedding = False

    # Number of transformer layers.
    config.num_layers = 6

    # Size of query/key/value for attention.
    config.qkv_dim = 512
    # Size of embeddings.
    config.emb_dim = 512
    # Size of the MLP.
    config.mlp_dim = 2048

    # Number of attention heads.
    config.num_heads = 8

    # Dropout rate.
    config.dropout_rate = 0.1

    # Attention dropout rate.
    config.attention_dropout_rate = 0.1

    # Whether to save model checkpoints.
    config.save_checkpoints = True
    # Whether to restore from existing model checkpoints.
    config.restore_checkpoints = True

    # Save a checkpoint every these number of steps.
    config.checkpoint_every_steps = 10_000
    # Frequency of eval during training, e.g. every 1_000 steps.
    config.eval_every_steps = 1_000

    # Use bfloat16 mixed precision training instead of float32.
    config.use_bfloat16 = True

    # Integer for PRNG random seed.
    config.seed = 0

    # Prompt for language model sampling,
    # taken from MaxText (https://github.com/google/maxtext/blob/main/MaxText/configs/base.yml).
    config.prompts = 'I love to '

    # Parallelism
    config.mesh_axes = ['data', 'fsdp', 'tensor']
    config.logical_axis_rules = [
        ['activation_batch', ['data', 'fsdp']],
        ['activation_length', ['data', 'fsdp']],
        ['activation_embed', 'tensor'],
        ['activation_mlp', 'tensor'],
        ['activation_heads', 'tensor'],
        ['activation_kv', 'tensor'],
        ['activation_vocab', 'tensor'],
        ['mlp', 'tensor'],
        ['vocab', 'tensor'],
        ['embed', 'fsdp'],
        ['heads', 'tensor'],
    ]
    config.data_sharding = ['data']

    # One axis for each parallelism type may hold a placeholder (-1)
    # value to auto-shard based on available slices and devices.
    # By default, product of the DCN axes should equal number of slices
    # and product of the ICI axes should equal number of devices per slice.
    # ICI (Inter-Chip Interconnection): A high-speed connection between
    # sets of TPU chips, which form the TPU network.
    # DCN (Data Center Network): A connection between the TPU networks;
    # not as fast as ICI.
    # ICI has around 100x the bandwidth of DCN, but it is not a general
    # purpose connection, which is why DCN is necessary for scaling to
    # extremely large ML models.
    config.dcn_data_parallelism = -1  # recommended DCN axis to be auto-sharded
    config.dcn_fsdp_parallelism = 1
    config.dcn_tensor_parallelism = 1
    config.ici_data_parallelism = 1
    config.ici_fsdp_parallelism = -1  # recommended ICI axis to be auto-sharded
    config.ici_tensor_parallelism = 1

    return config
Get the default hyperparameter configuration.
22,535
import functools from typing import Any, Dict, Tuple from absl import app from absl import flags from absl import logging from clu import metric_writers from flax import linen as nn from flax.training import train_state import jax import jax.numpy as jnp import optax import models from input_pipeline import CharacterTable as CTable from input_pipeline import get_sequence_lengths from input_pipeline import mask_sequences FLAGS = flags.FLAGS def get_train_state(rng: PRNGKey, ctable: CTable) -> train_state.TrainState: """Returns a train state.""" model = get_model(ctable) params = get_initial_params(model, rng, ctable) tx = optax.adam(FLAGS.learning_rate) state = train_state.TrainState.create( apply_fn=model.apply, params=params, tx=tx ) return state def train_step( state: train_state.TrainState, batch: Array, lstm_rng: PRNGKey, eos_id: int ) -> Tuple[train_state.TrainState, Dict[str, jax.Array]]: """Trains one step.""" labels = batch['answer'][:, 1:] lstm_key = jax.random.fold_in(lstm_rng, state.step) def loss_fn(params): logits, _ = state.apply_fn( {'params': params}, batch['query'], batch['answer'], rngs={'lstm': lstm_key}, ) loss = cross_entropy_loss( logits, labels, get_sequence_lengths(labels, eos_id) ) return loss, logits grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (_, logits), grads = grad_fn(state.params) state = state.apply_gradients(grads=grads) metrics = compute_metrics(logits, labels, eos_id) return state, metrics def decode_batch( state: train_state.TrainState, batch: Dict[str, Array], decode_rng: PRNGKey, ctable: CTable, ): """Decodes and log results for a batch.""" inputs, outputs = batch['query'], batch['answer'][:, 1:] decode_rng = jax.random.fold_in(decode_rng, state.step) inferred = decode(state.params, inputs, decode_rng, ctable) questions = ctable.decode_onehot(inputs) infers = ctable.decode_onehot(inferred) goldens = ctable.decode_onehot(outputs) for question, inferred, golden in zip(questions, infers, goldens): log_decode(question, 
inferred, golden) The provided code snippet includes necessary dependencies for implementing the `train_and_evaluate` function. Write a Python function `def train_and_evaluate(workdir: str) -> train_state.TrainState` to solve the following problem: Trains for a fixed number of steps and decode during training. Here is the function: def train_and_evaluate(workdir: str) -> train_state.TrainState: """Trains for a fixed number of steps and decode during training.""" # TODO(marcvanzee): Integrate ctable with train_state. ctable = CTable('0123456789+= ', FLAGS.max_len_query_digit) rng = jax.random.key(0) state = get_train_state(rng, ctable) writer = metric_writers.create_default_writer(workdir) for step in range(FLAGS.num_train_steps): batch = ctable.get_batch(FLAGS.batch_size) state, metrics = train_step(state, batch, rng, ctable.eos_id) if step and step % FLAGS.decode_frequency == 0: writer.write_scalars(step, metrics) batch = ctable.get_batch(5) decode_batch(state, batch, rng, ctable) return state
Trains for a fixed number of steps and decode during training.
22,536
import os from typing import Any, Dict, Iterable, Tuple, Optional from absl import logging from clu import checkpoint from clu import metric_writers from clu import metrics from clu import parameter_overview from clu import periodic_actions import flax import flax.core import flax.linen as nn from flax.training import train_state import jax import jax.numpy as jnp import jraph import ml_collections import numpy as np import optax import sklearn.metrics import tensorflow as tf import input_pipeline import models The provided code snippet includes necessary dependencies for implementing the `predictions_match_labels` function. Write a Python function `def predictions_match_labels( *, logits: jnp.ndarray, labels: jnp.ndarray, **kwargs ) -> jnp.ndarray` to solve the following problem: Returns a binary array indicating where predictions match the labels. Here is the function: def predictions_match_labels( *, logits: jnp.ndarray, labels: jnp.ndarray, **kwargs ) -> jnp.ndarray: """Returns a binary array indicating where predictions match the labels.""" del kwargs # Unused. preds = logits > 0 return (preds == labels).astype(jnp.float32)
Returns a binary array indicating where predictions match the labels.
22,537
import os from typing import Any, Dict, Iterable, Tuple, Optional from absl import logging from clu import checkpoint from clu import metric_writers from clu import metrics from clu import parameter_overview from clu import periodic_actions import flax import flax.core import flax.linen as nn from flax.training import train_state import jax import jax.numpy as jnp import jraph import ml_collections import numpy as np import optax import sklearn.metrics import tensorflow as tf import input_pipeline import models def create_model( config: ml_collections.ConfigDict, deterministic: bool ) -> nn.Module: """Creates a Flax model, as specified by the config.""" if config.model == 'GraphNet': return models.GraphNet( latent_size=config.latent_size, num_mlp_layers=config.num_mlp_layers, message_passing_steps=config.message_passing_steps, output_globals_size=config.num_classes, dropout_rate=config.dropout_rate, skip_connections=config.skip_connections, layer_norm=config.layer_norm, use_edge_model=config.use_edge_model, deterministic=deterministic, ) if config.model == 'GraphConvNet': return models.GraphConvNet( latent_size=config.latent_size, num_mlp_layers=config.num_mlp_layers, message_passing_steps=config.message_passing_steps, output_globals_size=config.num_classes, dropout_rate=config.dropout_rate, skip_connections=config.skip_connections, layer_norm=config.layer_norm, deterministic=deterministic, ) raise ValueError(f'Unsupported model: {config.model}.') def create_optimizer( config: ml_collections.ConfigDict, ) -> optax.GradientTransformation: """Creates an optimizer, as specified by the config.""" if config.optimizer == 'adam': return optax.adam(learning_rate=config.learning_rate) if config.optimizer == 'sgd': return optax.sgd( learning_rate=config.learning_rate, momentum=config.momentum ) raise ValueError(f'Unsupported optimizer: {config.optimizer}.') def add_prefix_to_keys(result: Dict[str, Any], prefix: str) -> Dict[str, Any]: """Adds a prefix to the keys of a dict, 
returning a new dict.""" return {f'{prefix}_{key}': val for key, val in result.items()} def replace_globals(graphs: jraph.GraphsTuple) -> jraph.GraphsTuple: """Replaces the globals attribute with a constant feature for each graph.""" return graphs._replace(globals=jnp.ones([graphs.n_node.shape[0], 1])) def train_step( state: train_state.TrainState, graphs: jraph.GraphsTuple, rngs: Dict[str, jnp.ndarray], ) -> Tuple[train_state.TrainState, metrics.Collection]: """Performs one update step over the current batch of graphs.""" def loss_fn(params, graphs): curr_state = state.replace(params=params) # Extract labels. labels = graphs.globals # Replace the global feature for graph classification. graphs = replace_globals(graphs) # Compute logits and resulting loss. logits = get_predicted_logits(curr_state, graphs, rngs) mask = get_valid_mask(labels, graphs) loss = binary_cross_entropy_with_mask( logits=logits, labels=labels, mask=mask ) mean_loss = jnp.sum(jnp.where(mask, loss, 0)) / jnp.sum(mask) return mean_loss, (loss, logits, labels, mask) grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (_, (loss, logits, labels, mask)), grads = grad_fn(state.params, graphs) state = state.apply_gradients(grads=grads) metrics_update = TrainMetrics.single_from_model_output( loss=loss, logits=logits, labels=labels, mask=mask ) return state, metrics_update def evaluate_model( state: train_state.TrainState, datasets: Dict[str, tf.data.Dataset], splits: Iterable[str], ) -> Dict[str, metrics.Collection]: """Evaluates the model on metrics over the specified splits.""" # Loop over each split independently. eval_metrics = {} for split in splits: split_metrics = None # Loop over graphs. for graphs in datasets[split].as_numpy_iterator(): split_metrics_update = evaluate_step(state, graphs) # Update metrics. 
if split_metrics is None: split_metrics = split_metrics_update else: split_metrics = split_metrics.merge(split_metrics_update) eval_metrics[split] = split_metrics return eval_metrics # pytype: disable=bad-return-type The provided code snippet includes necessary dependencies for implementing the `train_and_evaluate` function. Write a Python function `def train_and_evaluate( config: ml_collections.ConfigDict, workdir: str ) -> train_state.TrainState` to solve the following problem: Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. workdir: Directory where the TensorBoard summaries are written to. Returns: The train state (which includes the `.params`). Here is the function: def train_and_evaluate( config: ml_collections.ConfigDict, workdir: str ) -> train_state.TrainState: """Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. workdir: Directory where the TensorBoard summaries are written to. Returns: The train state (which includes the `.params`). """ # We only support single-host training. assert jax.process_count() == 1 # Create writer for logs. writer = metric_writers.create_default_writer(workdir) writer.write_hparams(config.to_dict()) # Get datasets, organized by split. logging.info('Obtaining datasets.') datasets = input_pipeline.get_datasets( config.batch_size, add_virtual_node=config.add_virtual_node, add_undirected_edges=config.add_undirected_edges, add_self_loops=config.add_self_loops, ) train_iter = iter(datasets['train']) # Create and initialize the network. 
logging.info('Initializing network.') rng = jax.random.key(0) rng, init_rng = jax.random.split(rng) init_graphs = next(datasets['train'].as_numpy_iterator()) init_graphs = replace_globals(init_graphs) init_net = create_model(config, deterministic=True) params = jax.jit(init_net.init)(init_rng, init_graphs) parameter_overview.log_parameter_overview(params) # Create the optimizer. tx = create_optimizer(config) # Create the training state. net = create_model(config, deterministic=False) state = train_state.TrainState.create( apply_fn=net.apply, params=params, tx=tx ) # Set up checkpointing of the model. # The input pipeline cannot be checkpointed in its current form, # due to the use of stateful operations. checkpoint_dir = os.path.join(workdir, 'checkpoints') ckpt = checkpoint.Checkpoint(checkpoint_dir, max_to_keep=2) state = ckpt.restore_or_initialize(state) initial_step = int(state.step) + 1 # Create the evaluation state, corresponding to a deterministic model. eval_net = create_model(config, deterministic=True) eval_state = state.replace(apply_fn=eval_net.apply) # Hooks called periodically during training. report_progress = periodic_actions.ReportProgress( num_train_steps=config.num_train_steps, writer=writer ) profiler = periodic_actions.Profile(num_profile_steps=5, logdir=workdir) hooks = [report_progress, profiler] # Begin training loop. logging.info('Starting training.') train_metrics = None for step in range(initial_step, config.num_train_steps + 1): # Split PRNG key, to ensure different 'randomness' for every step. rng, dropout_rng = jax.random.split(rng) # Perform one step of training. with jax.profiler.StepTraceAnnotation('train', step_num=step): graphs = jax.tree_util.tree_map(np.asarray, next(train_iter)) state, metrics_update = train_step( state, graphs, rngs={'dropout': dropout_rng} ) # Update metrics. 
if train_metrics is None: train_metrics = metrics_update else: train_metrics = train_metrics.merge(metrics_update) # Quick indication that training is happening. logging.log_first_n(logging.INFO, 'Finished training step %d.', 10, step) for hook in hooks: hook(step) # Log, if required. is_last_step = step == config.num_train_steps - 1 if step % config.log_every_steps == 0 or is_last_step: writer.write_scalars( step, add_prefix_to_keys(train_metrics.compute(), 'train') ) train_metrics = None # Evaluate on validation and test splits, if required. if step % config.eval_every_steps == 0 or is_last_step: eval_state = eval_state.replace(params=state.params) splits = ['validation', 'test'] with report_progress.timed('eval'): eval_metrics = evaluate_model(eval_state, datasets, splits=splits) for split in splits: writer.write_scalars( step, add_prefix_to_keys(eval_metrics[split].compute(), split) ) # Checkpoint model, if required. if step % config.checkpoint_every_steps == 0 or is_last_step: with report_progress.timed('checkpoint'): ckpt.save(state) return state
Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. workdir: Directory where the TensorBoard summaries are written to. Returns: The train state (which includes the `.params`).
22,538
from typing import Callable, Sequence from flax import linen as nn import jax.numpy as jnp import jraph The provided code snippet includes necessary dependencies for implementing the `add_graphs_tuples` function. Write a Python function `def add_graphs_tuples( graphs: jraph.GraphsTuple, other_graphs: jraph.GraphsTuple ) -> jraph.GraphsTuple` to solve the following problem: Adds the nodes, edges and global features from other_graphs to graphs. Here is the function: def add_graphs_tuples( graphs: jraph.GraphsTuple, other_graphs: jraph.GraphsTuple ) -> jraph.GraphsTuple: """Adds the nodes, edges and global features from other_graphs to graphs.""" return graphs._replace( nodes=graphs.nodes + other_graphs.nodes, edges=graphs.edges + other_graphs.edges, globals=graphs.globals + other_graphs.globals, )
Adds the nodes, edges and global features from other_graphs to graphs.
22,539
import ml_collections The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the default hyperparameter configuration. Here is the function: def get_config(): """Get the default hyperparameter configuration.""" config = ml_collections.ConfigDict() # Optimizer. config.optimizer = 'adam' config.learning_rate = 1e-3 # Training hyperparameters. config.batch_size = 256 config.num_train_steps = 500_000 config.log_every_steps = 50 config.eval_every_steps = 1_000 config.checkpoint_every_steps = 10_000 config.add_virtual_node = True config.add_undirected_edges = True config.add_self_loops = True # GNN hyperparameters. config.model = 'GraphConvNet' config.message_passing_steps = 5 config.latent_size = 256 config.dropout_rate = 0.1 config.num_mlp_layers = 2 config.num_classes = 128 config.skip_connections = True config.layer_norm = True return config
Get the default hyperparameter configuration.
22,540
import ml_collections def sweep(add): for add_virtual_node in (True, False): for add_undirected_edges in (True, False): for add_self_loops in (True, False): for layer_norm in (True, False): for skip_connections in (True, False): add( add_virtual_node=add_virtual_node, add_undirected_edges=add_undirected_edges, add_self_loops=add_self_loops, layer_norm=layer_norm, skip_connections=skip_connections, )
null
22,541
import ml_collections The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the hyperparameter configuration for the GraphNetwork model. Here is the function: def get_config(): """Get the hyperparameter configuration for the GraphNetwork model.""" config = ml_collections.ConfigDict() # Optimizer. config.optimizer = 'adam' config.learning_rate = 1e-3 # Training hyperparameters. config.batch_size = 256 config.num_train_steps = 100_000 config.log_every_steps = 100 config.eval_every_steps = 10_000 config.checkpoint_every_steps = 10_000 config.add_virtual_node = True config.add_undirected_edges = True config.add_self_loops = True # GNN hyperparameters. config.model = 'GraphNet' config.message_passing_steps = 5 config.latent_size = 256 config.dropout_rate = 0.1 config.num_mlp_layers = 1 config.num_classes = 128 config.use_edge_model = True config.skip_connections = True config.layer_norm = True return config
Get the hyperparameter configuration for the GraphNetwork model.
22,542
import ml_collections The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the default hyperparameter configuration. Here is the function: def get_config(): """Get the default hyperparameter configuration.""" config = ml_collections.ConfigDict() # Optimizer. config.optimizer = 'adam' config.learning_rate = 1e-3 # Training hyperparameters. config.batch_size = 256 config.num_train_steps = 100_000 config.log_every_steps = 100 config.eval_every_steps = 1_000 config.checkpoint_every_steps = 10_000 config.add_virtual_node = False config.add_undirected_edges = True config.add_self_loops = True # GNN hyperparameters. config.model = 'GraphConvNet' config.message_passing_steps = 5 config.latent_size = 256 config.dropout_rate = 0.1 config.num_mlp_layers = 2 config.num_classes = 128 config.skip_connections = True config.layer_norm = True return config
Get the default hyperparameter configuration.
22,543
import collections import gymnasium as gym import numpy as np import seed_rl_atari_preprocessing The provided code snippet includes necessary dependencies for implementing the `get_num_actions` function. Write a Python function `def get_num_actions(game: str)` to solve the following problem: Get the number of possible actions of a given Atari game. This determines the number of outputs in the actor part of the actor-critic model. Here is the function: def get_num_actions(game: str): """Get the number of possible actions of a given Atari game. This determines the number of outputs in the actor part of the actor-critic model. """ env = gym.make(game) return env.action_space.n
Get the number of possible actions of a given Atari game. This determines the number of outputs in the actor part of the actor-critic model.
22,544
import functools from typing import Any, Callable from absl import logging import flax from flax import linen as nn import agent import models import test_episodes from flax.metrics import tensorboard from flax.training import checkpoints from flax.training import train_state import jax import jax.numpy as jnp import ml_collections import numpy as np import optax def train_step( state: train_state.TrainState, trajectories: tuple, batch_size: int, *, clip_param: float, vf_coeff: float, entropy_coeff: float, ): """Compilable train step. Runs an entire epoch of training (i.e. the loop over minibatches within an epoch is included here for performance reasons). Args: state: the train state trajectories: tuple of the following five elements forming the experience: states: shape (steps_per_agent*num_agents, 84, 84, 4) actions: shape (steps_per_agent*num_agents, 84, 84, 4) old_log_probs: shape (steps_per_agent*num_agents, ) returns: shape (steps_per_agent*num_agents, ) advantages: (steps_per_agent*num_agents, ) batch_size: the minibatch size, static argument clip_param: the PPO clipping parameter used to clamp ratios in loss function vf_coeff: weighs value function loss in total loss entropy_coeff: weighs entropy bonus in the total loss Returns: optimizer: new optimizer after the parameters update loss: loss summed over training steps """ iterations = trajectories[0].shape[0] // batch_size trajectories = jax.tree_util.tree_map( lambda x: x.reshape((iterations, batch_size) + x.shape[1:]), trajectories ) loss = 0.0 for batch in zip(*trajectories): grad_fn = jax.value_and_grad(loss_fn) l, grads = grad_fn( state.params, state.apply_fn, batch, clip_param, vf_coeff, entropy_coeff ) loss += l state = state.apply_gradients(grads=grads) return state, loss def get_experience( state: train_state.TrainState, simulators: list[agent.RemoteSimulator], steps_per_actor: int, ): """Collect experience from agents. Runs `steps_per_actor` time steps of the game for each of the `simulators`. 
""" all_experience = [] # Range up to steps_per_actor + 1 to get one more value needed for GAE. for _ in range(steps_per_actor + 1): sim_states = [] for sim in simulators: sim_state = sim.conn.recv() sim_states.append(sim_state) sim_states = np.concatenate(sim_states, axis=0) log_probs, values = agent.policy_action( state.apply_fn, state.params, sim_states ) log_probs, values = jax.device_get((log_probs, values)) probs = np.exp(np.array(log_probs)) for i, sim in enumerate(simulators): probabilities = probs[i] action = np.random.choice(probs.shape[1], p=probabilities) sim.conn.send(action) experiences = [] for i, sim in enumerate(simulators): sim_state, action, reward, done = sim.conn.recv() value = values[i, 0] log_prob = log_probs[i][action] sample = agent.ExpTuple(sim_state, action, reward, value, log_prob, done) experiences.append(sample) all_experience.append(experiences) return all_experience def process_experience( experience: list[list[agent.ExpTuple]], actor_steps: int, num_agents: int, gamma: float, lambda_: float, ): """Process experience for training, including advantage estimation. Args: experience: collected from agents in the form of nested lists/namedtuple actor_steps: number of steps each agent has completed num_agents: number of agents that collected experience gamma: dicount parameter lambda_: GAE parameter Returns: trajectories: trajectories readily accessible for `train_step()` function """ obs_shape = (84, 84, 4) exp_dims = (actor_steps, num_agents) values_dims = (actor_steps + 1, num_agents) states = np.zeros(exp_dims + obs_shape, dtype=np.float32) actions = np.zeros(exp_dims, dtype=np.int32) rewards = np.zeros(exp_dims, dtype=np.float32) values = np.zeros(values_dims, dtype=np.float32) log_probs = np.zeros(exp_dims, dtype=np.float32) dones = np.zeros(exp_dims, dtype=np.float32) for t in range(len(experience) - 1): # experience[-1] only for next_values for agent_id, exp_agent in enumerate(experience[t]): states[t, agent_id, ...] 
= exp_agent.state actions[t, agent_id] = exp_agent.action rewards[t, agent_id] = exp_agent.reward values[t, agent_id] = exp_agent.value log_probs[t, agent_id] = exp_agent.log_prob # Dones need to be 0 for terminal states. dones[t, agent_id] = float(not exp_agent.done) for a in range(num_agents): values[-1, a] = experience[-1][a].value advantages = gae_advantages(rewards, dones, values, gamma, lambda_) returns = advantages + values[:-1, :] # After preprocessing, concatenate data from all agents. trajectories = (states, actions, log_probs, returns, advantages) trajectory_len = num_agents * actor_steps trajectories = tuple( map( lambda x: np.reshape(x, (trajectory_len,) + x.shape[2:]), trajectories ) ) return trajectories def get_initial_params(key: jax.Array, model: nn.Module): input_dims = (1, 84, 84, 4) # (minibatch, height, width, stacked frames) init_shape = jnp.ones(input_dims, jnp.float32) initial_params = model.init(key, init_shape)['params'] return initial_params def create_train_state( params, model: nn.Module, config: ml_collections.ConfigDict, train_steps: int, ) -> train_state.TrainState: if config.decaying_lr_and_clip_param: lr = optax.linear_schedule( init_value=config.learning_rate, end_value=0.0, transition_steps=train_steps, ) else: lr = config.learning_rate tx = optax.adam(lr) state = train_state.TrainState.create( apply_fn=model.apply, params=params, tx=tx ) return state from tensorboard.plugins.hparams import api as hparams_api The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train( model: models.ActorCritic, config: ml_collections.ConfigDict, model_dir: str )` to solve the following problem: Main training loop. 
Args: model: the actor-critic model config: object holding hyperparameters and the training information model_dir: path to dictionary where checkpoints and logging info are stored Returns: optimizer: the trained optimizer Here is the function: def train( model: models.ActorCritic, config: ml_collections.ConfigDict, model_dir: str ): """Main training loop. Args: model: the actor-critic model config: object holding hyperparameters and the training information model_dir: path to dictionary where checkpoints and logging info are stored Returns: optimizer: the trained optimizer """ game = config.game + 'NoFrameskip-v4' simulators = [agent.RemoteSimulator(game) for _ in range(config.num_agents)] summary_writer = tensorboard.SummaryWriter(model_dir) summary_writer.hparams(dict(config)) loop_steps = config.total_frames // (config.num_agents * config.actor_steps) log_frequency = 40 checkpoint_frequency = 500 # train_step does multiple steps per call for better performance # compute number of steps per call here to convert between the number of # train steps and the inner number of optimizer steps iterations_per_step = ( config.num_agents * config.actor_steps // config.batch_size ) initial_params = get_initial_params(jax.random.key(0), model) state = create_train_state( initial_params, model, config, loop_steps * config.num_epochs * iterations_per_step, ) del initial_params state = checkpoints.restore_checkpoint(model_dir, state) # number of train iterations done by each train_step start_step = int(state.step) // config.num_epochs // iterations_per_step logging.info('Start training from step: %s', start_step) for step in range(start_step, loop_steps): # Bookkeeping and testing. 
if step % log_frequency == 0: score = test_episodes.policy_test(1, state.apply_fn, state.params, game) frames = step * config.num_agents * config.actor_steps summary_writer.scalar('game_score', score, frames) logging.info( 'Step %s:\nframes seen %s\nscore %s\n\n', step, frames, score ) # Core training code. alpha = ( 1.0 - step / loop_steps if config.decaying_lr_and_clip_param else 1.0 ) all_experiences = get_experience(state, simulators, config.actor_steps) trajectories = process_experience( all_experiences, config.actor_steps, config.num_agents, config.gamma, config.lambda_, ) clip_param = config.clip_param * alpha for _ in range(config.num_epochs): permutation = np.random.permutation( config.num_agents * config.actor_steps ) trajectories = tuple(x[permutation] for x in trajectories) state, _ = train_step( state, trajectories, config.batch_size, clip_param=clip_param, vf_coeff=config.vf_coeff, entropy_coeff=config.entropy_coeff, ) if (step + 1) % checkpoint_frequency == 0: checkpoints.save_checkpoint(model_dir, state, step + 1) return state
Main training loop. Args: model: the actor-critic model config: object holding hyperparameters and the training information model_dir: path to dictionary where checkpoints and logging info are stored Returns: optimizer: the trained optimizer
22,545
import collections import functools import multiprocessing from typing import Any, Callable import flax import jax import numpy as np import env_utils The provided code snippet includes necessary dependencies for implementing the `rcv_action_send_exp` function. Write a Python function `def rcv_action_send_exp(conn, game: str)` to solve the following problem: Run the remote agents. Receive action from the main learner, perform one step of simulation and send back collected experience. Here is the function: def rcv_action_send_exp(conn, game: str): """Run the remote agents. Receive action from the main learner, perform one step of simulation and send back collected experience. """ env = env_utils.create_env(game, clip_rewards=True) while True: obs = env.reset() done = False # Observations fetched from Atari env need additional batch dimension. state = obs[None, ...] while not done: conn.send(state) action = conn.recv() obs, reward, done, _ = env.step(action) next_state = obs[None, ...] if not done else None experience = (state, action, reward, done) conn.send(experience) if done: break state = next_state
Run the remote agents. Receive action from the main learner, perform one step of simulation and send back collected experience.
22,546
import ml_collections The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the default configuration. The default hyperparameters originate from PPO paper arXiv:1707.06347 and openAI baselines 2:: https://github.com/openai/baselines/blob/master/baselines/ppo2/defaults.py Here is the function: def get_config(): """Get the default configuration. The default hyperparameters originate from PPO paper arXiv:1707.06347 and openAI baselines 2:: https://github.com/openai/baselines/blob/master/baselines/ppo2/defaults.py """ config = ml_collections.ConfigDict() # The Atari game used. config.game = 'Pong' # Total number of frames seen during training. config.total_frames = 40000000 # The learning rate for the Adam optimizer. config.learning_rate = 2.5e-4 # Batch size used in training. config.batch_size = 256 # Number of agents playing in parallel. config.num_agents = 8 # Number of steps each agent performs in one policy unroll. config.actor_steps = 128 # Number of training epochs per each unroll of the policy. config.num_epochs = 3 # RL discount parameter. config.gamma = 0.99 # Generalized Advantage Estimation parameter. config.lambda_ = 0.95 # The PPO clipping parameter used to clamp ratios in loss function. config.clip_param = 0.1 # Weight of value function loss in the total loss. config.vf_coeff = 0.5 # Weight of entropy bonus in the total loss. config.entropy_coeff = 0.01 # Linearly decay learning rate and clipping parameter to zero during # the training. config.decaying_lr_and_clip_param = True return config
Get the default configuration. The default hyperparameters originate from PPO paper arXiv:1707.06347 and openAI baselines 2:: https://github.com/openai/baselines/blob/master/baselines/ppo2/defaults.py
22,547
import datetime import os import re import subprocess import time from typing import Sequence from absl import app from absl import flags FLAGS = flags.FLAGS timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S') def generate_startup_file(vm_name: str) -> str: directory = os.path.dirname(os.path.abspath(__file__)) startup_script_src = os.path.join(directory, 'startup_script.sh') startup_script_dst = os.path.join(directory, f'{vm_name}-startup_script.sh') assert not os.path.exists(startup_script_dst) with open(startup_script_src, encoding='utf8') as f: startup_script_content = f.read() for from_str, to_str in ( ('__REPO__', FLAGS.repo), ('__BRANCH__', FLAGS.branch), ('__EXAMPLE__', FLAGS.example), ('__TIMESTAMP__', timestamp), ('__NAME__', FLAGS.name), ('__ARGS__', FLAGS.args), ('__GCS_WORKDIR_BASE__', FLAGS.gcs_workdir_base), ('__TFDS_DATA_DIR__', FLAGS.tfds_data_dir), ('__ACCELERATOR_TYPE__', FLAGS.accelerator_type), ('__SHUTDOWN_SECS__', str(FLAGS.shutdown_secs)), ): startup_script_content = startup_script_content.replace(from_str, to_str) with open(startup_script_dst, 'w', encoding='utf8') as f: f.write(startup_script_content) return startup_script_dst
null
22,548
import datetime import os import re import subprocess import time from typing import Sequence from absl import app from absl import flags FLAGS = flags.FLAGS def launch_gce(*, vm_name: str, startup_script: str): # Note : Use `gcloud compute images list --project ml-images` to get a list # of available VM images. args = [ 'gcloud', 'compute', 'instances', 'create', vm_name, f'--project={FLAGS.project}', f'--zone={FLAGS.zone}', '--image=c1-deeplearning-tf-2-10-cu113-v20221107-debian-10', '--image-project=ml-images', f'--machine-type={FLAGS.machine_type}', '--scopes=cloud-platform,storage-full', '--boot-disk-size=256GB', '--boot-disk-type=pd-ssd', '--metadata=install-nvidia-driver=True', f'--metadata-from-file=startup-script={startup_script}', ] if FLAGS.accelerator_type and FLAGS.accelerator_count: args.extend([ '--maintenance-policy=TERMINATE', f'--accelerator=type={FLAGS.accelerator_type},count={FLAGS.accelerator_count}', ]) if FLAGS.dry_run: print() print('Would run the following command without --dry-run:') print() print(' \\\n '.join(args)) print() return print() print('Creating instance on GCE... This will take some minutes...') print() result = subprocess.run(args) if result.returncode: raise RuntimeError('Could not create VM!')
null
22,549
import datetime import os import re import subprocess import time from typing import Sequence from absl import app from absl import flags FLAGS = flags.FLAGS def print_howto(login_args: Sequence[str]): print(f""" ############################################################################### ############################################################################### You can start/stop the instace via the web UI: https://console.cloud.google.com/compute/instances?project={FLAGS.project} Once the VM has started, you can login and connect to the training session: {' '.join(login_args)} Note that you can disconnect from the tmux session without stopping the training with the keystrokes 'CTRL-B A'. See "man tmux" for help about tmux. To observe the training via Tensorboard, simply run in your local computer: $ tensorboard --logdir={FLAGS.gcs_workdir_base} You can also browse the files at https://console.cloud.google.com/storage/browser/{FLAGS.gcs_workdir_base.replace('gs://', '')} ############################################################################### ############################################################################### """)
null
22,550
from absl import logging from flax import linen as nn from flax.metrics import tensorboard from flax.training import train_state import jax import jax.numpy as jnp import ml_collections import numpy as np import optax import tensorflow_datasets as tfds def apply_model(state, images, labels): """Computes gradients, loss and accuracy for a single batch.""" def loss_fn(params): logits = state.apply_fn({'params': params}, images) one_hot = jax.nn.one_hot(labels, 10) loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot)) return loss, logits grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (loss, logits), grads = grad_fn(state.params) accuracy = jnp.mean(jnp.argmax(logits, -1) == labels) return grads, loss, accuracy def train_epoch(state, train_ds, batch_size, rng): """Train for a single epoch.""" train_ds_size = len(train_ds['image']) steps_per_epoch = train_ds_size // batch_size perms = jax.random.permutation(rng, len(train_ds['image'])) perms = perms[: steps_per_epoch * batch_size] # skip incomplete batch perms = perms.reshape((steps_per_epoch, batch_size)) epoch_loss = [] epoch_accuracy = [] for perm in perms: batch_images = train_ds['image'][perm, ...] batch_labels = train_ds['label'][perm, ...] 
grads, loss, accuracy = apply_model(state, batch_images, batch_labels) state = update_model(state, grads) epoch_loss.append(loss) epoch_accuracy.append(accuracy) train_loss = np.mean(epoch_loss) train_accuracy = np.mean(epoch_accuracy) return state, train_loss, train_accuracy def get_datasets(): """Load MNIST train and test datasets into memory.""" ds_builder = tfds.builder('mnist') ds_builder.download_and_prepare() train_ds = tfds.as_numpy(ds_builder.as_dataset(split='train', batch_size=-1)) test_ds = tfds.as_numpy(ds_builder.as_dataset(split='test', batch_size=-1)) train_ds['image'] = jnp.float32(train_ds['image']) / 255.0 test_ds['image'] = jnp.float32(test_ds['image']) / 255.0 return train_ds, test_ds def create_train_state(rng, config): """Creates initial `TrainState`.""" cnn = CNN() params = cnn.init(rng, jnp.ones([1, 28, 28, 1]))['params'] tx = optax.sgd(config.learning_rate, config.momentum) return train_state.TrainState.create(apply_fn=cnn.apply, params=params, tx=tx) from tensorboard.plugins.hparams import api as hparams_api The provided code snippet includes necessary dependencies for implementing the `train_and_evaluate` function. Write a Python function `def train_and_evaluate( config: ml_collections.ConfigDict, workdir: str ) -> train_state.TrainState` to solve the following problem: Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. workdir: Directory where the tensorboard summaries are written to. Returns: The train state (which includes the `.params`). Here is the function: def train_and_evaluate( config: ml_collections.ConfigDict, workdir: str ) -> train_state.TrainState: """Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. workdir: Directory where the tensorboard summaries are written to. Returns: The train state (which includes the `.params`). 
""" train_ds, test_ds = get_datasets() rng = jax.random.key(0) summary_writer = tensorboard.SummaryWriter(workdir) summary_writer.hparams(dict(config)) rng, init_rng = jax.random.split(rng) state = create_train_state(init_rng, config) for epoch in range(1, config.num_epochs + 1): rng, input_rng = jax.random.split(rng) state, train_loss, train_accuracy = train_epoch( state, train_ds, config.batch_size, input_rng ) _, test_loss, test_accuracy = apply_model( state, test_ds['image'], test_ds['label'] ) logging.info( 'epoch:% 3d, train_loss: %.4f, train_accuracy: %.2f, test_loss: %.4f,' ' test_accuracy: %.2f' % ( epoch, train_loss, train_accuracy * 100, test_loss, test_accuracy * 100, ) ) summary_writer.scalar('train_loss', train_loss, epoch) summary_writer.scalar('train_accuracy', train_accuracy, epoch) summary_writer.scalar('test_loss', test_loss, epoch) summary_writer.scalar('test_accuracy', test_accuracy, epoch) summary_writer.flush() return state
Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. workdir: Directory where the tensorboard summaries are written to. Returns: The train state (which includes the `.params`).
22,551
import ml_collections The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the default hyperparameter configuration. Here is the function: def get_config(): """Get the default hyperparameter configuration.""" config = ml_collections.ConfigDict() config.learning_rate = 0.1 config.momentum = 0.9 config.batch_size = 128 config.num_epochs = 10 return config
Get the default hyperparameter configuration.
22,552
import ml_collections def metrics(): return []
null
22,553
from typing import Any, Callable, Dict, Iterable, Optional, Sequence, Tuple, Union from absl import logging from flax import struct from flax.metrics import tensorboard from flax.training import train_state import jax import jax.numpy as jnp import ml_collections import numpy as np import optax import tensorflow as tf import input_pipeline import models TrainState = train_state.TrainState def create_train_state(rng, config: ml_collections.ConfigDict, model): """Create initial training state.""" params = get_initial_params(rng, model) tx = optax.chain( optax.sgd(learning_rate=config.learning_rate, momentum=config.momentum), optax.add_decayed_weights(weight_decay=config.weight_decay), ) state = TrainState.create(apply_fn=model.apply, params=params, tx=tx) return state def model_from_config(config: ml_collections.ConfigDict): """Builds a text classification model from a config.""" model = models.TextClassifier( embedding_size=config.embedding_size, hidden_size=config.hidden_size, vocab_size=config.vocab_size, output_size=config.output_size, dropout_rate=config.dropout_rate, word_dropout_rate=config.word_dropout_rate, unk_idx=config.unk_idx, ) return model def train_step( state: TrainState, batch: Dict[str, Array], rngs: Dict[str, Any], ) -> Tuple[TrainState, Metrics]: """Train for a single step.""" # Make sure to get a new RNG at every step. 
step = state.step rngs = {name: jax.random.fold_in(rng, step) for name, rng in rngs.items()} def loss_fn(params): variables = {'params': params} logits = state.apply_fn( variables, batch['token_ids'], batch['length'], deterministic=False, rngs=rngs, ) labels = batch['label'] if labels.ndim == 1: labels = jnp.expand_dims(labels, 1) loss = jnp.mean( sigmoid_cross_entropy_with_logits(labels=labels, logits=logits) ) return loss, logits grad_fn = jax.value_and_grad(loss_fn, has_aux=True) value, grads = grad_fn(state.params) (_, logits) = value new_state = state.apply_gradients(grads=grads) metrics = compute_metrics(labels=batch['label'], logits=logits) return new_state, metrics def eval_step( state: TrainState, batch: Dict[str, Array], rngs: Dict[str, Any] ) -> Metrics: """Evaluate for a single step. Model should be in deterministic mode.""" variables = {'params': state.params} logits = state.apply_fn( variables, batch['token_ids'], batch['length'], deterministic=True, rngs=rngs, ) metrics = compute_metrics(labels=batch['label'], logits=logits) return metrics def evaluate_model( eval_step_fn: Callable[..., Any], state: TrainState, batches: Union[Iterable[Example], tf.data.Dataset], epoch: int, rngs: Optional[Dict[str, Any]] = None, ) -> Metrics: """Evaluate a model on a dataset.""" batch_metrics = [] for i, batch in enumerate(batches): batch = batch_to_numpy(batch) if rngs is not None: # New RNG for each step. 
rngs = {name: jax.random.fold_in(rng, i) for name, rng in rngs.items()} metrics = eval_step_fn(state, batch, rngs) batch_metrics.append(metrics) batch_metrics = jax.device_get(batch_metrics) metrics = normalize_batch_metrics(batch_metrics) logging.info( 'eval epoch %03d loss %.4f accuracy %.2f', epoch, metrics.loss, metrics.accuracy * 100, ) return metrics def train_epoch( train_step_fn: Callable[..., Tuple[TrainState, Metrics]], state: TrainState, train_batches: tf.data.Dataset, epoch: int, rngs: Optional[Dict[str, Any]] = None, ) -> Tuple[TrainState, Metrics]: """Train for a single epoch.""" batch_metrics = [] for batch in train_batches: batch = batch_to_numpy(batch) state, metrics = train_step_fn(state, batch, rngs) batch_metrics.append(metrics) # Compute the metrics for this epoch. batch_metrics = jax.device_get(batch_metrics) metrics = normalize_batch_metrics(batch_metrics) logging.info( 'train epoch %03d loss %.4f accuracy %.2f', epoch, metrics.loss, metrics.accuracy * 100, ) return state, metrics from tensorboard.plugins.hparams import api as hparams_api The provided code snippet includes necessary dependencies for implementing the `train_and_evaluate` function. Write a Python function `def train_and_evaluate( config: ml_collections.ConfigDict, workdir: str ) -> TrainState` to solve the following problem: Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. workdir: Directory where the tensorboard summaries are written to. Returns: The final train state that includes the trained parameters. Here is the function: def train_and_evaluate( config: ml_collections.ConfigDict, workdir: str ) -> TrainState: """Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. workdir: Directory where the tensorboard summaries are written to. Returns: The final train state that includes the trained parameters. """ # Prepare datasets. 
train_dataset = input_pipeline.TextDataset( tfds_name='glue/sst2', split='train' ) eval_dataset = input_pipeline.TextDataset( tfds_name='glue/sst2', split='validation' ) train_batches = train_dataset.get_bucketed_batches( config.batch_size, config.bucket_size, max_input_length=config.max_input_length, drop_remainder=True, shuffle=True, shuffle_seed=config.seed, ) eval_batches = eval_dataset.get_batches(batch_size=config.batch_size) # Keep track of vocab size in the config so that the embedder knows it. config.vocab_size = len(train_dataset.vocab) # Compile step functions. train_step_fn = jax.jit(train_step) eval_step_fn = jax.jit(eval_step) # Create model and a state that contains the parameters. rng = jax.random.key(config.seed) model = model_from_config(config) state = create_train_state(rng, config, model) summary_writer = tensorboard.SummaryWriter(workdir) summary_writer.hparams(dict(config)) # Main training loop. logging.info('Starting training...') for epoch in range(1, config.num_epochs + 1): # Train for one epoch. rng, epoch_rng = jax.random.split(rng) rngs = {'dropout': epoch_rng} state, train_metrics = train_epoch( train_step_fn, state, train_batches, epoch, rngs ) # Evaluate current model on the validation data. eval_metrics = evaluate_model(eval_step_fn, state, eval_batches, epoch) # Write metrics to TensorBoard. summary_writer.scalar('train_loss', train_metrics.loss, epoch) summary_writer.scalar('train_accuracy', train_metrics.accuracy * 100, epoch) summary_writer.scalar('eval_loss', eval_metrics.loss, epoch) summary_writer.scalar('eval_accuracy', eval_metrics.accuracy * 100, epoch) summary_writer.flush() return state
Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. workdir: Directory where the tensorboard summaries are written to. Returns: The final train state that includes the trained parameters.
22,554
import time from typing import Iterable, Sequence from absl import logging import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_text as tftext import vocabulary The provided code snippet includes necessary dependencies for implementing the `get_tokenized_sequences` function. Write a Python function `def get_tokenized_sequences( dataset: tf.data.Dataset, tokenizer: tftext.Tokenizer = tftext.WhitespaceTokenizer(), input_key: str = 'sentence', ) -> Iterable[Sequence[bytes]]` to solve the following problem: Returns tokenized sequences for vocabulary building. Here is the function: def get_tokenized_sequences( dataset: tf.data.Dataset, tokenizer: tftext.Tokenizer = tftext.WhitespaceTokenizer(), input_key: str = 'sentence', ) -> Iterable[Sequence[bytes]]: """Returns tokenized sequences for vocabulary building.""" dataset = dataset.map( lambda example: tokenizer.tokenize(example[input_key]), num_parallel_calls=tf.data.experimental.AUTOTUNE, ) yield from tfds.as_numpy(dataset)
Returns tokenized sequences for vocabulary building.
22,555
import functools from typing import Any, Callable, Optional from flax import linen as nn import jax from jax import numpy as jnp Array = jnp.ndarray The provided code snippet includes necessary dependencies for implementing the `sequence_mask` function. Write a Python function `def sequence_mask(lengths: Array, max_length: int) -> Array` to solve the following problem: Computes a boolean mask over sequence positions for each given length. Example: ``` sequence_mask([1, 2], 3) [[True, False, False], [True, True, False]] ``` Args: lengths: The length of each sequence. <int>[batch_size] max_length: The width of the boolean mask. Must be >= max(lengths). Returns: A mask with shape: <bool>[batch_size, max_length] indicating which positions are valid for each sequence. Here is the function: def sequence_mask(lengths: Array, max_length: int) -> Array: """Computes a boolean mask over sequence positions for each given length. Example: ``` sequence_mask([1, 2], 3) [[True, False, False], [True, True, False]] ``` Args: lengths: The length of each sequence. <int>[batch_size] max_length: The width of the boolean mask. Must be >= max(lengths). Returns: A mask with shape: <bool>[batch_size, max_length] indicating which positions are valid for each sequence. """ return jnp.arange(max_length)[None] < lengths[:, None]
Computes a boolean mask over sequence positions for each given length. Example: ``` sequence_mask([1, 2], 3) [[True, False, False], [True, True, False]] ``` Args: lengths: The length of each sequence. <int>[batch_size] max_length: The width of the boolean mask. Must be >= max(lengths). Returns: A mask with shape: <bool>[batch_size, max_length] indicating which positions are valid for each sequence.
22,556
import functools from typing import Any, Callable, Optional from flax import linen as nn import jax from jax import numpy as jnp Array = jnp.ndarray The provided code snippet includes necessary dependencies for implementing the `flip_sequences` function. Write a Python function `def flip_sequences(inputs: Array, lengths: Array) -> Array` to solve the following problem: Flips a sequence of inputs along the time dimension. This function can be used to prepare inputs for the reverse direction of a bidirectional LSTM. It solves the issue that, when naively flipping multiple padded sequences stored in a matrix, the first elements would be padding values for those sequences that were padded. This function keeps the padding at the end, while flipping the rest of the elements. Example: ```python inputs = [[1, 0, 0], [2, 3, 0] [4, 5, 6]] lengths = [1, 2, 3] flip_sequences(inputs, lengths) = [[1, 0, 0], [3, 2, 0], [6, 5, 4]] ``` Args: inputs: An array of input IDs <int>[batch_size, seq_length]. lengths: The length of each sequence <int>[batch_size]. Returns: An ndarray with the flipped inputs. Here is the function: def flip_sequences(inputs: Array, lengths: Array) -> Array: """Flips a sequence of inputs along the time dimension. This function can be used to prepare inputs for the reverse direction of a bidirectional LSTM. It solves the issue that, when naively flipping multiple padded sequences stored in a matrix, the first elements would be padding values for those sequences that were padded. This function keeps the padding at the end, while flipping the rest of the elements. Example: ```python inputs = [[1, 0, 0], [2, 3, 0] [4, 5, 6]] lengths = [1, 2, 3] flip_sequences(inputs, lengths) = [[1, 0, 0], [3, 2, 0], [6, 5, 4]] ``` Args: inputs: An array of input IDs <int>[batch_size, seq_length]. lengths: The length of each sequence <int>[batch_size]. Returns: An ndarray with the flipped inputs. 
""" # Note: since this function is vmapped, the code below is effectively for # a single example. max_length = inputs.shape[0] return jnp.flip(jnp.roll(inputs, max_length - lengths, axis=0), axis=0)
Flips a sequence of inputs along the time dimension. This function can be used to prepare inputs for the reverse direction of a bidirectional LSTM. It solves the issue that, when naively flipping multiple padded sequences stored in a matrix, the first elements would be padding values for those sequences that were padded. This function keeps the padding at the end, while flipping the rest of the elements. Example: ```python inputs = [[1, 0, 0], [2, 3, 0] [4, 5, 6]] lengths = [1, 2, 3] flip_sequences(inputs, lengths) = [[1, 0, 0], [3, 2, 0], [6, 5, 4]] ``` Args: inputs: An array of input IDs <int>[batch_size, seq_length]. lengths: The length of each sequence <int>[batch_size]. Returns: An ndarray with the flipped inputs.
22,557
import ml_collections The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the default hyperparameter configuration. Here is the function: def get_config(): """Get the default hyperparameter configuration.""" config = ml_collections.ConfigDict() config.embedding_size = 300 config.hidden_size = 256 config.vocab_size = None config.output_size = 1 config.vocab_path = 'vocab.txt' config.max_input_length = 60 config.dropout_rate = 0.5 config.word_dropout_rate = 0.1 config.unk_idx = 1 config.learning_rate = 0.1 config.momentum = 0.9 config.weight_decay = 3e-6 config.batch_size = 64 config.bucket_size = 8 config.num_epochs = 10 config.seed = 0 return config
Get the default hyperparameter configuration.
22,558
from typing import Any, Dict, Optional from absl import logging import numpy as np import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_text as text import vocabulary The provided code snippet includes necessary dependencies for implementing the `vocab_to_hashtable` function. Write a Python function `def vocab_to_hashtable( vocab: vocabulary.Vocabulary, unk_idx: int ) -> tf.lookup.StaticHashTable` to solve the following problem: Returns a TF lookup table (token -> ID) from a vocabulary. Here is the function: def vocab_to_hashtable( vocab: vocabulary.Vocabulary, unk_idx: int ) -> tf.lookup.StaticHashTable: """Returns a TF lookup table (token -> ID) from a vocabulary.""" return tf.lookup.StaticHashTable( tf.lookup.KeyValueTensorInitializer( list(vocab.keys()), list(vocab.values()) ), default_value=unk_idx, )
Returns a TF lookup table (token -> ID) from a vocabulary.
22,559
from typing import Any, Dict, Optional from absl import logging import numpy as np import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_text as text import vocabulary The provided code snippet includes necessary dependencies for implementing the `vocab_to_inverse_hashtable` function. Write a Python function `def vocab_to_inverse_hashtable( vocab: vocabulary.Vocabulary, unk_token: bytes ) -> tf.lookup.StaticHashTable` to solve the following problem: Returns an inverse TF lookup table (ID -> token) from a vocabulary. Here is the function: def vocab_to_inverse_hashtable( vocab: vocabulary.Vocabulary, unk_token: bytes ) -> tf.lookup.StaticHashTable: """Returns an inverse TF lookup table (ID -> token) from a vocabulary.""" return tf.lookup.StaticHashTable( tf.lookup.KeyValueTensorInitializer( list(vocab.values()), list(vocab.keys()), key_dtype=tf.int64, value_dtype=tf.string, ), default_value=unk_token, )
Returns an inverse TF lookup table (ID -> token) from a vocabulary.
22,560
from typing import Any, Dict, Optional from absl import logging import numpy as np import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_text as text import vocabulary The provided code snippet includes necessary dependencies for implementing the `_is_text_field` function. Write a Python function `def _is_text_field(feature_name_and_type)` to solve the following problem: Identifies a text field when given a feature (name, type) pair. Here is the function: def _is_text_field(feature_name_and_type): """Identifies a text field when given a feature (name, type) pair.""" _, feature_type = feature_name_and_type return isinstance(feature_type, tfds.features.Text)
Identifies a text field when given a feature (name, type) pair.
22,561
from typing import Any, Dict, Optional from absl import logging import numpy as np import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_text as text import vocabulary The provided code snippet includes necessary dependencies for implementing the `_is_class_label` function. Write a Python function `def _is_class_label(feature_name_and_type)` to solve the following problem: Identifies a class label field when given a feature (name, type) pair. Here is the function: def _is_class_label(feature_name_and_type): """Identifies a class label field when given a feature (name, type) pair.""" _, feature_type = feature_name_and_type return isinstance(feature_type, tfds.features.ClassLabel)
Identifies a class label field when given a feature (name, type) pair.
22,562
from absl import logging from flax import linen as nn import input_pipeline import models import utils as vae_utils from flax.training import train_state import jax from jax import random import jax.numpy as jnp import ml_collections import optax import tensorflow_datasets as tfds def train_step(state, batch, z_rng, latents): def loss_fn(params): recon_x, mean, logvar = models.model(latents).apply( {'params': params}, batch, z_rng ) bce_loss = binary_cross_entropy_with_logits(recon_x, batch).mean() kld_loss = kl_divergence(mean, logvar).mean() loss = bce_loss + kld_loss return loss grads = jax.grad(loss_fn)(state.params) return state.apply_gradients(grads=grads) def eval_f(params, images, z, z_rng, latents): def eval_model(vae): recon_images, mean, logvar = vae(images, z_rng) comparison = jnp.concatenate([ images[:8].reshape(-1, 28, 28, 1), recon_images[:8].reshape(-1, 28, 28, 1), ]) generate_images = vae.generate(z) generate_images = generate_images.reshape(-1, 28, 28, 1) metrics = compute_metrics(recon_images, images, mean, logvar) return metrics, comparison, generate_images return nn.apply(eval_model, models.model(latents))({'params': params}) The provided code snippet includes necessary dependencies for implementing the `train_and_evaluate` function. Write a Python function `def train_and_evaluate(config: ml_collections.ConfigDict)` to solve the following problem: Train and evaulate pipeline. 
Here is the function: def train_and_evaluate(config: ml_collections.ConfigDict): """Train and evaulate pipeline.""" rng = random.key(0) rng, key = random.split(rng) ds_builder = tfds.builder('binarized_mnist') ds_builder.download_and_prepare() logging.info('Initializing dataset.') train_ds = input_pipeline.build_train_set(config.batch_size, ds_builder) test_ds = input_pipeline.build_test_set(ds_builder) logging.info('Initializing model.') init_data = jnp.ones((config.batch_size, 784), jnp.float32) params = models.model(config.latents).init(key, init_data, rng)['params'] state = train_state.TrainState.create( apply_fn=models.model(config.latents).apply, params=params, tx=optax.adam(config.learning_rate), ) rng, z_key, eval_rng = random.split(rng, 3) z = random.normal(z_key, (64, config.latents)) steps_per_epoch = ( ds_builder.info.splits['train'].num_examples // config.batch_size ) for epoch in range(config.num_epochs): for _ in range(steps_per_epoch): batch = next(train_ds) rng, key = random.split(rng) state = train_step(state, batch, key, config.latents) metrics, comparison, sample = eval_f( state.params, test_ds, z, eval_rng, config.latents ) vae_utils.save_image( comparison, f'results/reconstruction_{epoch}.png', nrow=8 ) vae_utils.save_image(sample, f'results/sample_{epoch}.png', nrow=8) print( 'eval epoch: {}, loss: {:.4f}, BCE: {:.4f}, KLD: {:.4f}'.format( epoch + 1, metrics['loss'], metrics['bce'], metrics['kld'] ) )
Train and evaulate pipeline.
22,563
from flax import linen as nn from jax import random import jax.numpy as jnp def reparameterize(rng, mean, logvar): std = jnp.exp(0.5 * logvar) eps = random.normal(rng, logvar.shape) return mean + eps * std
null
22,564
import ml_collections The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the default hyperparameter configuration. Here is the function: def get_config(): """Get the default hyperparameter configuration.""" config = ml_collections.ConfigDict() config.learning_rate = 0.001 config.latents = 20 config.batch_size = 128 config.num_epochs = 30 return config
Get the default hyperparameter configuration.
22,565
import collections import functools import os from absl import logging from clu import metric_writers from clu import periodic_actions from flax import jax_utils from flax import linen as nn from flax.training import checkpoints from flax.training import common_utils from flax.training import dynamic_scale as dynamic_scale_lib from flax.training import train_state import jax import jax.numpy as jnp import ml_collections import numpy as np import optax import tensorflow as tf import bleu import decode import input_pipeline import models class TrainState(train_state.TrainState): dynamic_scale: dynamic_scale_lib.DynamicScale def create_learning_rate_schedule(learning_rate: float, warmup_steps: int): """Creates a rsqrt schedule with linear warmup.""" return optax.join_schedules( [ optax.linear_schedule( init_value=0, end_value=learning_rate, transition_steps=warmup_steps, ), rsqrt_schedule(init_value=learning_rate, shift=warmup_steps), ], boundaries=[warmup_steps], ) def train_step( state, batch, config, learning_rate_fn, label_smoothing=0.0, dropout_rng=None, ): """Perform a single training step.""" # X_position and X_segmentation are needed only when using "packed examples" # where multiple sequences are packed into the same example with this # metadata. # if such features are not present they are ignored and the example is treated # like a normal, unpacked sequence example. 
train_keys = [ "inputs", "targets", "inputs_position", "targets_position", "inputs_segmentation", "targets_segmentation", ] ( inputs, targets, inputs_positions, targets_positions, inputs_segmentation, targets_segmentation, ) = (batch.get(k, None) for k in train_keys) weights = jnp.where(targets > 0, 1, 0).astype(jnp.float32) dropout_rng = jax.random.fold_in(dropout_rng, state.step) def loss_fn(params): """loss function used for training.""" logits = models.Transformer(config).apply( {"params": params}, inputs, targets, inputs_positions=inputs_positions, targets_positions=targets_positions, inputs_segmentation=inputs_segmentation, targets_segmentation=targets_segmentation, rngs={"dropout": dropout_rng}, ) loss, weight_sum = compute_weighted_cross_entropy( logits, targets, weights, label_smoothing ) mean_loss = loss / weight_sum return mean_loss, logits step = state.step if state.dynamic_scale: # dynamic scale takes care of averaging gradients across replicas grad_fn = state.dynamic_scale.value_and_grad( loss_fn, has_aux=True, axis_name="batch" ) dynamic_scale, is_fin, (_, logits), grads = grad_fn(state.params) state = state.replace(dynamic_scale=dynamic_scale) else: grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (_, logits), grads = grad_fn(state.params) grads = jax.lax.pmean(grads, axis_name="batch") new_state = state.apply_gradients(grads=grads) metrics = compute_metrics(logits, targets, weights) metrics["learning_rate"] = learning_rate_fn(step) if state.dynamic_scale: # if is_fin == False the gradients contain Inf/NaNs and optimizer state and # params should be restored (= skip this step). 
select_fn = functools.partial(jnp.where, is_fin) new_state = new_state.replace( opt_state=jax.tree_util.tree_map( select_fn, new_state.opt_state, state.opt_state ), params=jax.tree_util.tree_map( select_fn, new_state.params, state.params ), ) metrics["loss_scale"] = dynamic_scale.scale * metrics["denominator"] return new_state, metrics def eval_step(params, batch, config, label_smoothing=0.0): """Calculate evaluation metrics on a batch.""" inputs, targets = batch["inputs"], batch["targets"] weights = jnp.where(targets > 0, 1.0, 0.0) logits = models.Transformer(config).apply({"params": params}, inputs, targets) return compute_metrics(logits, targets, weights, label_smoothing) def initialize_cache(inputs, max_decode_len, config): """Initialize a cache for a given input shape and max decode length.""" target_shape = (inputs.shape[0], max_decode_len) + inputs.shape[2:] initial_variables = models.Transformer(config).init( jax.random.key(0), jnp.ones(inputs.shape, config.dtype), jnp.ones(target_shape, config.dtype), ) return initial_variables["cache"] def predict_step( inputs, params, cache, eos_id, max_decode_len, config, beam_size=4 ): """Predict translation with fast decoding beam search on a batch.""" # Prepare transformer fast-decoder call for beam search: for beam search, we # need to set up our decoder model to handle a batch size equal to # batch_size * beam_size, where each batch item"s data is expanded in-place # rather than tiled. # i.e. 
if we denote each batch element subtensor as el[n]: # [el0, el1, el2] --> beamsize=2 --> [el0,el0,el1,el1,el2,el2] encoded_inputs = decode.flat_batch_beam_expand( models.Transformer(config).apply( {"params": params}, inputs, method=models.Transformer.encode ), beam_size, ) raw_inputs = decode.flat_batch_beam_expand(inputs, beam_size) def tokens_ids_to_logits(flat_ids, flat_cache): """Token slice to logits from decoder model.""" # --> [batch * beam, 1, vocab] flat_logits, new_vars = models.Transformer(config).apply( {"params": params, "cache": flat_cache}, encoded_inputs, raw_inputs, # only needed for input padding mask flat_ids, mutable=["cache"], method=models.Transformer.decode, ) new_flat_cache = new_vars["cache"] # Remove singleton sequence-length dimension: # [batch * beam, 1, vocab] --> [batch * beam, vocab] flat_logits = flat_logits.squeeze(axis=1) return flat_logits, new_flat_cache # Using the above-defined single-step decoder function, run a # beam search over possible sequences given input encoding. beam_seqs, _ = decode.beam_search( inputs, cache, tokens_ids_to_logits, beam_size=beam_size, alpha=0.6, eos_id=eos_id, max_decode_len=max_decode_len, ) # Beam search returns [n_batch, n_beam, n_length + 1] with beam dimension # sorted in increasing order of log-probability. # Return the highest scoring beam sequence, drop first dummy 0 token. 
return beam_seqs[:, -1, 1:] def evaluate( *, p_eval_step, params, eval_ds: tf.data.Dataset, num_eval_steps: int ): """Evaluate the params an return a dictionary with the metrics.""" logging.info("Gathering evaluation metrics.") eval_metrics = [] eval_iter = iter(eval_ds) # pytype: disable=wrong-arg-types for _, eval_batch in zip(range(num_eval_steps), eval_iter): eval_batch = jax.tree_util.tree_map(lambda x: x._numpy(), eval_batch) # pylint: disable=protected-access eval_batch = common_utils.shard(eval_batch) metrics = p_eval_step(params, eval_batch) eval_metrics.append(metrics) eval_metrics = common_utils.get_metrics(eval_metrics) eval_metrics_sums = jax.tree_util.tree_map(jnp.sum, eval_metrics) eval_denominator = eval_metrics_sums.pop("denominator") eval_summary = jax.tree_util.tree_map( lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop eval_metrics_sums, ) return eval_summary def translate_and_calculate_bleu( *, p_pred_step, p_init_cache, params, predict_ds: tf.data.Dataset, decode_tokens, max_predict_length: int, ): """Translates the `predict_ds` and calculates the BLEU score.""" n_devices = jax.local_device_count() logging.info("Translating evaluation dataset.") sources, references, predictions = [], [], [] for pred_batch in predict_ds: pred_batch = jax.tree_util.tree_map(lambda x: x._numpy(), pred_batch) # pylint: disable=protected-access # Handle final odd-sized batch by padding instead of dropping it. 
cur_pred_batch_size = pred_batch["inputs"].shape[0] if cur_pred_batch_size % n_devices: padded_size = int(np.ceil(cur_pred_batch_size / n_devices) * n_devices) pred_batch = jax.tree_util.tree_map( lambda x: pad_examples(x, padded_size), # pylint: disable=cell-var-from-loop pred_batch, ) pred_batch = common_utils.shard(pred_batch) cache = p_init_cache(pred_batch["inputs"]) predicted = p_pred_step( pred_batch["inputs"], params, cache, decode.EOS_ID, max_predict_length ) predicted = tohost(predicted) inputs = tohost(pred_batch["inputs"]) targets = tohost(pred_batch["targets"]) # Iterate through non-padding examples of batch. for i, s in enumerate(predicted[:cur_pred_batch_size]): sources.append(decode_tokens(inputs[i])) references.append(decode_tokens(targets[i])) predictions.append(decode_tokens(s)) logging.info( "Translation: %d predictions %d references %d sources.", len(predictions), len(references), len(sources), ) # Calculate BLEU score for translated eval corpus against reference. bleu_matches = bleu.bleu_partial(references, predictions) all_bleu_matches = per_host_sum_pmap(bleu_matches) bleu_score = bleu.complete_bleu(*all_bleu_matches) # Save translation samples for tensorboard. exemplars = "" for n in np.random.choice(np.arange(len(predictions)), 8): exemplars += f"{sources[n]}\n\n{references[n]}\n\n{predictions[n]}\n\n" return exemplars, bleu_score def preferred_dtype(config): platform = jax.local_devices()[0].platform if config.use_mixed_precision: if platform == "tpu": return jnp.bfloat16 elif platform == "gpu": return jnp.float16 return jnp.float32 The provided code snippet includes necessary dependencies for implementing the `train_and_evaluate` function. Write a Python function `def train_and_evaluate(config: ml_collections.ConfigDict, workdir: str)` to solve the following problem: Runs a training and evaluation loop. Args: config: Configuration to use. workdir: Working directory for checkpoints and TF summaries. 
If this contains checkpoint training will be resumed from the latest checkpoint. Here is the function: def train_and_evaluate(config: ml_collections.ConfigDict, workdir: str): """Runs a training and evaluation loop. Args: config: Configuration to use. workdir: Working directory for checkpoints and TF summaries. If this contains checkpoint training will be resumed from the latest checkpoint. """ tf.io.gfile.makedirs(workdir) vocab_path = config.vocab_path if vocab_path is None: vocab_path = os.path.join(workdir, "sentencepiece_model") config.vocab_path = vocab_path tf.io.gfile.makedirs(os.path.split(vocab_path)[0]) # Load Dataset # --------------------------------------------------------------------------- logging.info("Initializing dataset.") train_ds, eval_ds, predict_ds, encoder = input_pipeline.get_wmt_datasets( n_devices=jax.local_device_count(), config=config, reverse_translation=config.reverse_translation, vocab_path=vocab_path, ) train_iter = iter(train_ds) vocab_size = int(encoder.vocab_size()) eos_id = decode.EOS_ID # Default Sentencepiece EOS token. 
def decode_tokens(toks): valid_toks = toks[: np.argmax(toks == eos_id) + 1].astype(np.int32) return encoder.detokenize(valid_toks).numpy().decode("utf-8") if config.num_predict_steps > 0: predict_ds = predict_ds.take(config.num_predict_steps) logging.info("Initializing model, optimizer, and step functions.") dtype = preferred_dtype(config) # Build Model and Optimizer # --------------------------------------------------------------------------- train_config = models.TransformerConfig( vocab_size=vocab_size, output_vocab_size=vocab_size, share_embeddings=config.share_embeddings, logits_via_embedding=config.logits_via_embedding, dtype=dtype, emb_dim=config.emb_dim, num_heads=config.num_heads, num_layers=config.num_layers, qkv_dim=config.qkv_dim, mlp_dim=config.mlp_dim, max_len=max(config.max_target_length, config.max_eval_target_length), dropout_rate=config.dropout_rate, attention_dropout_rate=config.attention_dropout_rate, deterministic=False, decode=False, kernel_init=nn.initializers.xavier_uniform(), bias_init=nn.initializers.normal(stddev=1e-6), ) eval_config = train_config.replace(deterministic=True) predict_config = train_config.replace(deterministic=True, decode=True) start_step = 0 rng = jax.random.key(config.seed) rng, init_rng = jax.random.split(rng) input_shape = (config.per_device_batch_size, config.max_target_length) target_shape = (config.per_device_batch_size, config.max_target_length) m = models.Transformer(eval_config) initial_variables = jax.jit(m.init)( init_rng, jnp.ones(input_shape, jnp.float32), jnp.ones(target_shape, jnp.float32), ) # Create train state with Adam optimizer and weight decay. 
learning_rate_fn = create_learning_rate_schedule( learning_rate=config.learning_rate, warmup_steps=config.warmup_steps ) dynamic_scale = None if dtype == jnp.float16: dynamic_scale = dynamic_scale_lib.DynamicScale() state = TrainState.create( apply_fn=m.apply, params=initial_variables["params"], tx=optax.adamw( learning_rate=learning_rate_fn, b1=0.9, b2=0.98, eps=1e-9, weight_decay=config.weight_decay, ), dynamic_scale=dynamic_scale, ) # We access model params only via state.params del initial_variables if config.restore_checkpoints: # Restore unreplicated optimizer + model state from last checkpoint. state = checkpoints.restore_checkpoint(workdir, state) # Grab last step. start_step = int(state.step) writer = metric_writers.create_default_writer( workdir, just_logging=jax.process_index() > 0 ) if start_step == 0: writer.write_hparams(dict(config)) # Replicate state. state = jax_utils.replicate(state) # compile multidevice versions of train/eval/predict step and cache init fn. p_train_step = jax.pmap( functools.partial( train_step, config=train_config, learning_rate_fn=learning_rate_fn, label_smoothing=config.label_smoothing, ), axis_name="batch", donate_argnums=(0,), ) # pytype: disable=wrong-arg-types p_eval_step = jax.pmap( functools.partial(eval_step, config=eval_config), axis_name="batch" ) p_init_cache = jax.pmap( functools.partial( initialize_cache, max_decode_len=config.max_predict_length, config=predict_config, ), axis_name="batch", ) p_pred_step = jax.pmap( functools.partial( predict_step, config=predict_config, beam_size=config.beam_size ), axis_name="batch", static_broadcasted_argnums=(3, 4), ) # eos token, max_length are constant # Main Train Loop # --------------------------------------------------------------------------- # We init the first set of dropout PRNG keys, but update it afterwards inside # the main pmap"d training update for performance. 
dropout_rngs = jax.random.split(rng, jax.local_device_count()) del rng logging.info("Starting training loop.") hooks = [] report_progress = periodic_actions.ReportProgress( num_train_steps=config.num_train_steps, writer=writer ) if jax.process_index() == 0: hooks += [ report_progress, periodic_actions.Profile(logdir=workdir, num_profile_steps=5), ] train_metrics = [] with metric_writers.ensure_flushes(writer): for step in range(start_step, config.num_train_steps): is_last_step = step == config.num_train_steps - 1 # Shard data to devices and do a training step. with jax.profiler.StepTraceAnnotation("train", step_num=step): batch = common_utils.shard( jax.tree_util.tree_map(np.asarray, next(train_iter)) ) state, metrics = p_train_step(state, batch, dropout_rng=dropout_rngs) train_metrics.append(metrics) # Quick indication that training is happening. logging.log_first_n(logging.INFO, "Finished training step %d.", 5, step) for h in hooks: h(step) # Periodic metric handling. if step % config.eval_every_steps == 0 or is_last_step: with report_progress.timed("training_metrics"): logging.info("Gathering training metrics.") train_metrics = common_utils.get_metrics(train_metrics) lr = train_metrics.pop("learning_rate").mean() metrics_sums = jax.tree_util.tree_map(jnp.sum, train_metrics) denominator = metrics_sums.pop("denominator") summary = jax.tree_util.tree_map( lambda x: x / denominator, metrics_sums ) # pylint: disable=cell-var-from-loop summary["learning_rate"] = lr summary = {"train_" + k: v for k, v in summary.items()} writer.write_scalars(step, summary) train_metrics = [] with report_progress.timed("eval"): eval_results = evaluate( p_eval_step=p_eval_step, params=state.params, eval_ds=eval_ds, num_eval_steps=config.num_eval_steps, ) writer.write_scalars( step, {"eval_" + k: v for k, v in eval_results.items()} ) with report_progress.timed("translate_and_bleu"): exemplars, bleu_score = translate_and_calculate_bleu( p_pred_step=p_pred_step, p_init_cache=p_init_cache, 
params=state.params, predict_ds=predict_ds, decode_tokens=decode_tokens, max_predict_length=config.max_predict_length, ) writer.write_scalars(step, {"bleu": bleu_score}) writer.write_texts(step, {"samples": exemplars}) # Save a checkpoint on one host after every checkpoint_freq steps. save_checkpoint = ( step % config.checkpoint_every_steps == 0 or is_last_step ) if config.save_checkpoints and save_checkpoint: logging.info("Saving checkpoint step %d.", step) with report_progress.timed("checkpoint"): checkpoints.save_checkpoint_multiprocess( workdir, jax_utils.unreplicate(state), step )
Runs a training and evaluation loop. Args: config: Configuration to use. workdir: Working directory for checkpoints and TF summaries. If this contains checkpoint training will be resumed from the latest checkpoint.
22,566
import collections import math import re import sys import unicodedata import numpy as np def bleu_partial(ref_lines, hyp_lines, case_sensitive=False): """Compute n-gram statistics for two lists of references and translations.""" if len(ref_lines) != len(hyp_lines): raise ValueError( "Reference and translation lists have different numbers of lines." ) if not case_sensitive: ref_lines = [x.lower() for x in ref_lines] hyp_lines = [x.lower() for x in hyp_lines] ref_tokens = [bleu_tokenize(x) for x in ref_lines] hyp_tokens = [bleu_tokenize(x) for x in hyp_lines] return compute_bleu_matches(ref_tokens, hyp_tokens) def complete_bleu( matches_by_order, possible_matches_by_order, reference_length, translation_length, max_order=4, use_bp=True, ): """Compute BLEU score from aggregated n-gram statistics.""" precisions = [0] * max_order smooth = 1.0 geo_mean = 0.0 for i in range(0, max_order): if possible_matches_by_order[i] > 0: precisions[i] = matches_by_order[i] / possible_matches_by_order[i] if matches_by_order[i] > 0: precisions[i] = matches_by_order[i] / possible_matches_by_order[i] else: smooth *= 2 precisions[i] = 1.0 / (smooth * possible_matches_by_order[i]) else: precisions[i] = 0.0 if max(precisions) > 0: p_log_sum = sum(math.log(p) for p in precisions if p) geo_mean = math.exp(p_log_sum / max_order) if use_bp: if not reference_length: bp = 1.0 else: ratio = translation_length / reference_length if ratio <= 0.0: bp = 0.0 elif ratio >= 1.0: bp = 1.0 else: bp = math.exp(1 - 1.0 / ratio) bleu = geo_mean * bp return float(bleu) * 100.0 The provided code snippet includes necessary dependencies for implementing the `bleu_local` function. Write a Python function `def bleu_local(ref_lines, hyp_lines, case_sensitive=False)` to solve the following problem: Compute BLEU for two lists of reference and hypothesis translations. 
Here is the function: def bleu_local(ref_lines, hyp_lines, case_sensitive=False): """Compute BLEU for two lists of reference and hypothesis translations.""" stats = bleu_partial(ref_lines, hyp_lines, case_sensitive=case_sensitive) return complete_bleu(*stats) * 100
Compute BLEU for two lists of reference and hypothesis translations.
22,567
from typing import Callable, Any, Optional from flax import linen as nn from flax import struct from jax import lax import jax.numpy as jnp import numpy as np The provided code snippet includes necessary dependencies for implementing the `shift_right` function. Write a Python function `def shift_right(x, axis=1)` to solve the following problem: Shift the input to the right by padding on axis 1. Here is the function: def shift_right(x, axis=1): """Shift the input to the right by padding on axis 1.""" pad_widths = [(0, 0)] * len(x.shape) pad_widths[axis] = (1, 0) padded = jnp.pad( x, pad_widths, mode='constant', constant_values=x.dtype.type(0) ) return padded[:, :-1]
Shift the input to the right by padding on axis 1.
22,569
import ml_collections The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the default hyperparameter configuration. Here is the function: def get_config(): """Get the default hyperparameter configuration.""" config = ml_collections.ConfigDict() # Path to load or store sentencepiece vocab file. config.vocab_path = None # Vocabulary size if `vocab_path` is not given. config.vocab_size = 32_000 config.max_corpus_chars = 10**7 # Name of TFDS translation dataset to use. config.dataset_name = 'wmt17_translate/de-en' # Optional name of TFDS translation dataset to use for evaluation. config.eval_dataset_name = 'wmt14_translate/de-en' config.eval_split = 'test' # Reverse the direction of translation. config.reverse_translation = False # Per device batch size for training. config.per_device_batch_size = 32 # Beam size for inference. config.beam_size = 4 config.num_train_steps = 100_000 # Number of steps to take during evaluation. config.num_eval_steps = 20 # Number of steps to generate predictions (used for BLEU score). # -1 will use the whole eval dataset. config.num_predict_steps = -1 # Base learning rate. config.learning_rate = 0.002 # Linear learning rate warmup. config.warmup_steps = 1000 # Cross entropy loss label smoothing. config.label_smoothing = 0.1 # Decay factor for AdamW style weight decay. config.weight_decay = 0.0 # Maximum length cutoff for training examples. config.max_target_length = 256 # Maximum length cutoff for eval examples. config.max_eval_target_length = 256 # Maximum length cutoff for predicted tokens. config.max_predict_length = 256 # Inputs and targets share embedding. config.share_embeddings = True # Final logit transform uses embedding matrix transpose. config.logits_via_embedding = True # Number of transformer layers. config.num_layers = 6 # Size of query/key/value for attention. 
config.qkv_dim = 1024 # Size of embeddings. config.emb_dim = 1024 # Size of the MLP. config.mlp_dim = 4096 # Number of attention heads. config.num_heads = 16 # Dropout rate. config.dropout_rate = 0.1 # Attention dropout rate. config.attention_dropout_rate = 0.1 # Whether to save model checkpoints. config.save_checkpoints = True # Whether to restore from existing model checkpoints. config.restore_checkpoints = True # Save a checkpoint every these number of steps. config.checkpoint_every_steps = 10_000 # Frequency of eval during training, e.g. every 1000 steps. config.eval_every_steps = 1_000 # Use float16/bfloat16 (GPU/TPU) mixed precision training instead of float32. config.use_mixed_precision = True # Integer for PRNG random seed. config.seed = 0 return config
Get the default hyperparameter configuration.
22,570
import ml_collections def metrics(): return [ 'train_loss', 'eval_loss', 'bleu', 'eval_accuracy', 'train_accuracy', 'uptime', 'steps_per_sec', 'train_learning_rate', ]
null
22,571
import functools import os import time from absl import app from absl import flags from absl import logging from flax import jax_utils from flax import linen as nn from flax.metrics import tensorboard from flax.training import common_utils from flax.training import train_state import jax import jax.numpy as jnp from jax import random import numpy as np import optax import tensorflow as tf import input_pipeline import models The provided code snippet includes necessary dependencies for implementing the `create_learning_rate_scheduler` function. Write a Python function `def create_learning_rate_scheduler( factors='constant * linear_warmup * rsqrt_decay', base_learning_rate=0.5, warmup_steps=8000, decay_factor=0.5, steps_per_decay=20000, steps_per_cycle=100000, )` to solve the following problem: creates learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * decay_every: Every k steps decay the learning rate by decay_factor. * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter. Args: factors: a string with factors separated by '*' that defines the schedule. base_learning_rate: float, the starting constant for the lr schedule. warmup_steps: how many steps to warm up for in the warmup schedule. decay_factor: The amount to decay the learning rate by. steps_per_decay: How often to decay the learning rate. steps_per_cycle: Steps per cycle when using cosine decay. Returns: a function learning_rate(step): float -> {'learning_rate': float}, the step-dependent lr. Here is the function: def create_learning_rate_scheduler( factors='constant * linear_warmup * rsqrt_decay', base_learning_rate=0.5, warmup_steps=8000, decay_factor=0.5, steps_per_decay=20000, steps_per_cycle=100000, ): """creates learning rate schedule. 
Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * decay_every: Every k steps decay the learning rate by decay_factor. * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter. Args: factors: a string with factors separated by '*' that defines the schedule. base_learning_rate: float, the starting constant for the lr schedule. warmup_steps: how many steps to warm up for in the warmup schedule. decay_factor: The amount to decay the learning rate by. steps_per_decay: How often to decay the learning rate. steps_per_cycle: Steps per cycle when using cosine decay. Returns: a function learning_rate(step): float -> {'learning_rate': float}, the step-dependent lr. """ factors = [n.strip() for n in factors.split('*')] def step_fn(step): """Step to learning rate function.""" ret = 1.0 for name in factors: if name == 'constant': ret *= base_learning_rate elif name == 'linear_warmup': ret *= jnp.minimum(1.0, step / warmup_steps) elif name == 'rsqrt_decay': ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) elif name == 'rsqrt_normalized_decay': ret *= jnp.sqrt(warmup_steps) ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) elif name == 'decay_every': ret *= decay_factor ** (step // steps_per_decay) elif name == 'cosine_decay': progress = jnp.maximum( 0.0, (step - warmup_steps) / float(steps_per_cycle) ) ret *= jnp.maximum( 0.0, 0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))) ) else: raise ValueError('Unknown factor %s.' % name) return jnp.asarray(ret, dtype=jnp.float32) return step_fn
creates learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * decay_every: Every k steps decay the learning rate by decay_factor. * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter. Args: factors: a string with factors separated by '*' that defines the schedule. base_learning_rate: float, the starting constant for the lr schedule. warmup_steps: how many steps to warm up for in the warmup schedule. decay_factor: The amount to decay the learning rate by. steps_per_decay: How often to decay the learning rate. steps_per_cycle: Steps per cycle when using cosine decay. Returns: a function learning_rate(step): float -> {'learning_rate': float}, the step-dependent lr.
22,572
import functools import os import time from absl import app from absl import flags from absl import logging from flax import jax_utils from flax import linen as nn from flax.metrics import tensorboard from flax.training import common_utils from flax.training import train_state import jax import jax.numpy as jnp from jax import random import numpy as np import optax import tensorflow as tf import input_pipeline import models def compute_weighted_cross_entropy(logits, targets, weights=None): """Compute weighted cross entropy and entropy for log probs and targets. Args: logits: [batch, length, num_classes] float array. targets: categorical targets [batch, length] int array. weights: None or array of shape [batch x length] Returns: Tuple of scalar loss and batch normalizing factor. """ if logits.ndim != targets.ndim + 1: raise ValueError( 'Incorrect shapes. Got shape %s logits and %s targets' % (str(logits.shape), str(targets.shape)) ) onehot_targets = common_utils.onehot(targets, logits.shape[-1]) loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1) normalizing_factor = onehot_targets.sum() if weights is not None: loss = loss * weights normalizing_factor = weights.sum() return loss.sum(), normalizing_factor def compute_metrics(logits, labels, weights): """Compute summary metrics.""" loss, weight_sum = compute_weighted_cross_entropy(logits, labels, weights) acc, _ = compute_weighted_accuracy(logits, labels, weights) metrics = { 'loss': loss, 'accuracy': acc, 'denominator': weight_sum, } metrics = np.sum(metrics, -1) return metrics The provided code snippet includes necessary dependencies for implementing the `train_step` function. Write a Python function `def train_step(state, batch, model, learning_rate_fn, dropout_rng=None)` to solve the following problem: Perform a single training step. 
Here is the function: def train_step(state, batch, model, learning_rate_fn, dropout_rng=None): """Perform a single training step.""" train_keys = ['inputs', 'targets'] (inputs, targets) = (batch.get(k, None) for k in train_keys) weights = jnp.where(targets > 0, 1, 0).astype(jnp.float32) dropout_rng = jax.random.fold_in(dropout_rng, state.step) def loss_fn(params): """loss function used for training.""" logits = model.apply( {'params': params}, inputs=inputs, train=True, rngs={'dropout': dropout_rng}, ) loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights) mean_loss = loss / weight_sum return mean_loss, logits lr = learning_rate_fn(state.step) grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (_, logits), grads = grad_fn(state.params) grads = jax.lax.pmean(grads, 'batch') new_state = state.apply_gradients(grads=grads) metrics = compute_metrics(logits, targets, weights) metrics['learning_rate'] = lr return new_state, metrics
Perform a single training step.
22,573
import functools import os import time from absl import app from absl import flags from absl import logging from flax import jax_utils from flax import linen as nn from flax.metrics import tensorboard from flax.training import common_utils from flax.training import train_state import jax import jax.numpy as jnp from jax import random import numpy as np import optax import tensorflow as tf import input_pipeline import models The provided code snippet includes necessary dependencies for implementing the `pad_examples` function. Write a Python function `def pad_examples(x, desired_batch_size)` to solve the following problem: Expand batch to desired size by zeros with the shape of last slice. Here is the function: def pad_examples(x, desired_batch_size): """Expand batch to desired size by zeros with the shape of last slice.""" batch_pad = desired_batch_size - x.shape[0] # Padding with zeros to avoid that they get counted in compute_metrics. return np.concatenate([x, np.tile(np.zeros_like(x[-1]), (batch_pad, 1))])
Expand batch to desired size by zeros with the shape of last slice.
22,574
from typing import Callable, Any, Optional from flax import linen as nn from flax import struct import jax.numpy as jnp import numpy as np The provided code snippet includes necessary dependencies for implementing the `sinusoidal_init` function. Write a Python function `def sinusoidal_init(max_len=2048)` to solve the following problem: 1D Sinusoidal Position Embedding Initializer. Args: max_len: maximum possible length for the input Returns: output: init function returning `(1, max_len, d_feature)` Here is the function: def sinusoidal_init(max_len=2048): """1D Sinusoidal Position Embedding Initializer. Args: max_len: maximum possible length for the input Returns: output: init function returning `(1, max_len, d_feature)` """ def init(key, shape, dtype=np.float32): """Sinusoidal init.""" del key, dtype d_feature = shape[-1] pe = np.zeros((max_len, d_feature), dtype=np.float32) position = np.arange(0, max_len)[:, np.newaxis] div_term = np.exp( np.arange(0, d_feature, 2) * -(np.log(10000.0) / d_feature) ) pe[:, 0::2] = np.sin(position * div_term) pe[:, 1::2] = np.cos(position * div_term) pe = pe[np.newaxis, :, :] # [1, max_len, d_feature] return jnp.array(pe) return init
1D Sinusoidal Position Embedding Initializer. Args: max_len: maximum possible length for the input Returns: output: init function returning `(1, max_len, d_feature)`
22,575
import codecs import collections import enum import tensorflow as tf PAD = '<p>' PAD_ID = 0 UNKNOWN = '<u>' UNKNOWN_ID = 1 ROOT = '<r>' ROOT_ID = 2 class CoNLLAttributes(enum.Enum): """CoNLL attributre names and indices. A UD CoNLL file looks like: 1 They they PRON PRP Case=Nom|Number=Plur 2 nsubj 2 buy buy VERB VBP Number=Plur|PTense=Pres 0 root 3 books book NOUN NNS Number=Plur 2 obj 4 . . PUNCT . _ 2 punct For details, please see: http://universaldependencies.org/format.html. """ ID = 0 FORM = 1 LEMMA = 2 UPOS = 3 XPOS = 4 FEATS = 5 HEAD = 6 DEPREL = 7 The provided code snippet includes necessary dependencies for implementing the `create_vocabs` function. Write a Python function `def create_vocabs(filename, max_num_forms=100000)` to solve the following problem: Loads corpus and create vocabulary lists. Args: filename: file name of a corpus. max_num_forms: maximum number of tokens included. Returns: Dictionary containing named vocab dictionaries. Here is the function: def create_vocabs(filename, max_num_forms=100000): """Loads corpus and create vocabulary lists. Args: filename: file name of a corpus. max_num_forms: maximum number of tokens included. Returns: Dictionary containing named vocab dictionaries. 
""" form_counter = collections.Counter() xpos_counter = collections.Counter() with tf.io.gfile.GFile(filename, 'rb') as f: for line in codecs.getreader('utf-8')(f): line = line.strip() split = line.split('\t') if not line.startswith('#') and split[0]: form_counter[split[CoNLLAttributes.FORM.value]] += 1 xpos_counter[split[CoNLLAttributes.XPOS.value]] += 1 special_tokens = {PAD: PAD_ID, UNKNOWN: UNKNOWN_ID, ROOT: ROOT_ID} # create word form vocab vocabs = {'forms': {}, 'xpos': {}} vocabs['forms'].update(special_tokens) vocabs['forms'].update( { form[0]: id for id, form in enumerate( form_counter.most_common(max_num_forms), start=ROOT_ID + 1 ) } ) # create xpos vocab vocabs['xpos'].update(special_tokens) vocabs['xpos'].update( { tag[0]: id for id, tag in enumerate( xpos_counter.most_common(), start=ROOT_ID + 1 ) } ) return vocabs
Loads corpus and create vocabulary lists. Args: filename: file name of a corpus. max_num_forms: maximum number of tokens included. Returns: Dictionary containing named vocab dictionaries.
22,576
import codecs import collections import enum import tensorflow as tf def sentences_from_conll_data( corpus_filename, vocabs, attributes, max_sentence_length=1000 ): """Load and returns conll data in list format. Args: corpus_filename: filename of corpus. vocabs: dictionary of vocabs attributes: list of conll attributes to include into the batch max_sentence_length: cut off sentences longer as max tokens Yields: A sentence as a list of tokens while tokens are lists of attributes. """ with tf.io.gfile.GFile(corpus_filename, 'rb') as f: sentence = create_sentence_with_root(attributes, vocabs) for line in codecs.getreader('utf-8')(f): line = line.strip() if line.startswith('#'): continue split = line.split('\t') if split[0]: # Not an empty line, process next token: if len(sentence) < max_sentence_length: if len(attributes) == 1: sentence.append(create_token(split, attributes, vocabs)[0]) else: sentence.append(create_token(split, attributes, vocabs)) else: # Sentences start with an empty line, yield sentence: yield sentence # Reset sentence. sentence = create_sentence_with_root(attributes, vocabs) if len(sentence) > 1: # sentences does not only contain a root. yield sentence The provided code snippet includes necessary dependencies for implementing the `sentence_dataset_dict` function. Write a Python function `def sentence_dataset_dict( filename, vocabs, attributes_input, attributes_target, batch_size, bucket_size, repeat=None, prefetch_size=tf.data.experimental.AUTOTUNE, )` to solve the following problem: Combines sentences into a dataset of padded batches. Args: filename: file name of a corpus. vocabs: dictionary of dictionaries to map from strings to ids. attributes_input: attributes for the input. attributes_target: target attributes empty targets is not included. batch_size: the size of a batch. bucket_size: the size of a bucket. repeat: number of times the dataset is repeated. prefetch_size: prefetch size of the data. 
Returns: Returns dataset as dictionary containing the data as key value pairs. Here is the function: def sentence_dataset_dict( filename, vocabs, attributes_input, attributes_target, batch_size, bucket_size, repeat=None, prefetch_size=tf.data.experimental.AUTOTUNE, ): """Combines sentences into a dataset of padded batches. Args: filename: file name of a corpus. vocabs: dictionary of dictionaries to map from strings to ids. attributes_input: attributes for the input. attributes_target: target attributes empty targets is not included. batch_size: the size of a batch. bucket_size: the size of a bucket. repeat: number of times the dataset is repeated. prefetch_size: prefetch size of the data. Returns: Returns dataset as dictionary containing the data as key value pairs. """ data_keys = ['inputs'] if attributes_target: data_keys.append('targets') def generator(): """Generator to create the data.""" input_generator = sentences_from_conll_data( filename, vocabs, attributes_input, max_sentence_length=bucket_size ) if attributes_target: target_generator = sentences_from_conll_data( filename, vocabs, attributes_target, max_sentence_length=bucket_size ) for inputs in input_generator: data = {'inputs': inputs} if attributes_target: data['targets'] = next(target_generator) yield data output_types = {k: tf.float32 for k in data_keys} output_shapes = {k: (None,) for k in data_keys} dataset = tf.data.Dataset.from_generator( generator, output_types=output_types, output_shapes=output_shapes ) # cache the dataset in memory and repeat. dataset = dataset.cache() dataset = dataset.repeat(repeat) # static padding up to bucket size. padded_shapes = {k: [bucket_size] for k in data_keys} dataset = dataset.padded_batch( batch_size=batch_size, padded_shapes=(padded_shapes) ) dataset = dataset.prefetch(prefetch_size) return dataset
Combines sentences into a dataset of padded batches. Args: filename: file name of a corpus. vocabs: dictionary of dictionaries to map from strings to ids. attributes_input: attributes for the input. attributes_target: target attributes empty targets is not included. batch_size: the size of a batch. bucket_size: the size of a bucket. repeat: number of times the dataset is repeated. prefetch_size: prefetch size of the data. Returns: Returns dataset as dictionary containing the data as key value pairs.
22,577
import functools import time from typing import Any from absl import logging from clu import metric_writers from clu import periodic_actions from flax import jax_utils from flax.training import checkpoints from flax.training import common_utils from flax.training import dynamic_scale as dynamic_scale_lib from flax.training import train_state import jax from jax import lax import jax.numpy as jnp from jax import random import ml_collections import optax import tensorflow as tf import tensorflow_datasets as tfds import input_pipeline import models def create_model(*, model_cls, half_precision, **kwargs): platform = jax.local_devices()[0].platform if half_precision: if platform == 'tpu': model_dtype = jnp.bfloat16 else: model_dtype = jnp.float16 else: model_dtype = jnp.float32 return model_cls(num_classes=NUM_CLASSES, dtype=model_dtype, **kwargs) def create_learning_rate_fn( config: ml_collections.ConfigDict, base_learning_rate: float, steps_per_epoch: int, ): """Create learning rate schedule.""" warmup_fn = optax.linear_schedule( init_value=0.0, end_value=base_learning_rate, transition_steps=config.warmup_epochs * steps_per_epoch, ) cosine_epochs = max(config.num_epochs - config.warmup_epochs, 1) cosine_fn = optax.cosine_decay_schedule( init_value=base_learning_rate, decay_steps=cosine_epochs * steps_per_epoch ) schedule_fn = optax.join_schedules( schedules=[warmup_fn, cosine_fn], boundaries=[config.warmup_epochs * steps_per_epoch], ) return schedule_fn def train_step(state, batch, learning_rate_fn): """Perform a single training step.""" def loss_fn(params): """loss function used for training.""" logits, new_model_state = state.apply_fn( {'params': params, 'batch_stats': state.batch_stats}, batch['image'], mutable=['batch_stats'], ) loss = cross_entropy_loss(logits, batch['label']) weight_penalty_params = jax.tree_util.tree_leaves(params) weight_decay = 0.0001 weight_l2 = sum( jnp.sum(x**2) for x in weight_penalty_params if x.ndim > 1 ) weight_penalty = weight_decay 
* 0.5 * weight_l2 loss = loss + weight_penalty return loss, (new_model_state, logits) step = state.step dynamic_scale = state.dynamic_scale lr = learning_rate_fn(step) if dynamic_scale: grad_fn = dynamic_scale.value_and_grad( loss_fn, has_aux=True, axis_name='batch' ) dynamic_scale, is_fin, aux, grads = grad_fn(state.params) # dynamic loss takes care of averaging gradients across replicas else: grad_fn = jax.value_and_grad(loss_fn, has_aux=True) aux, grads = grad_fn(state.params) # Re-use same axis_name as in the call to `pmap(...train_step...)` below. grads = lax.pmean(grads, axis_name='batch') new_model_state, logits = aux[1] metrics = compute_metrics(logits, batch['label']) metrics['learning_rate'] = lr new_state = state.apply_gradients( grads=grads, batch_stats=new_model_state['batch_stats'] ) if dynamic_scale: # if is_fin == False the gradients contain Inf/NaNs and optimizer state and # params should be restored (= skip this step). new_state = new_state.replace( opt_state=jax.tree_util.tree_map( functools.partial(jnp.where, is_fin), new_state.opt_state, state.opt_state, ), params=jax.tree_util.tree_map( functools.partial(jnp.where, is_fin), new_state.params, state.params ), dynamic_scale=dynamic_scale, ) metrics['scale'] = dynamic_scale.scale return new_state, metrics def eval_step(state, batch): variables = {'params': state.params, 'batch_stats': state.batch_stats} logits = state.apply_fn(variables, batch['image'], train=False, mutable=False) return compute_metrics(logits, batch['label']) def create_input_iter( dataset_builder, batch_size, image_size, dtype, train, cache, shuffle_buffer_size, prefetch, ): ds = input_pipeline.create_split( dataset_builder, batch_size, image_size=image_size, dtype=dtype, train=train, cache=cache, shuffle_buffer_size=shuffle_buffer_size, prefetch=prefetch, ) it = map(prepare_tf_data, ds) it = jax_utils.prefetch_to_device(it, 2) return it class TrainState(train_state.TrainState): batch_stats: Any dynamic_scale: 
dynamic_scale_lib.DynamicScale def restore_checkpoint(state, workdir): return checkpoints.restore_checkpoint(workdir, state) def save_checkpoint(state, workdir): state = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state)) step = int(state.step) logging.info('Saving checkpoint step %d.', step) checkpoints.save_checkpoint_multiprocess(workdir, state, step, keep=3) def sync_batch_stats(state): """Sync the batch statistics across replicas.""" # Each device has its own version of the running average batch statistics and # we sync them before evaluation. return state.replace(batch_stats=cross_replica_mean(state.batch_stats)) def create_train_state( rng, config: ml_collections.ConfigDict, model, image_size, learning_rate_fn ): """Create initial training state.""" dynamic_scale = None platform = jax.local_devices()[0].platform if config.half_precision and platform == 'gpu': dynamic_scale = dynamic_scale_lib.DynamicScale() else: dynamic_scale = None params, batch_stats = initialized(rng, image_size, model) tx = optax.sgd( learning_rate=learning_rate_fn, momentum=config.momentum, nesterov=True, ) state = TrainState.create( apply_fn=model.apply, params=params, tx=tx, batch_stats=batch_stats, dynamic_scale=dynamic_scale, ) return state The provided code snippet includes necessary dependencies for implementing the `train_and_evaluate` function. Write a Python function `def train_and_evaluate( config: ml_collections.ConfigDict, workdir: str ) -> TrainState` to solve the following problem: Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. workdir: Directory where the tensorboard summaries are written to. Returns: Final TrainState. Here is the function: def train_and_evaluate( config: ml_collections.ConfigDict, workdir: str ) -> TrainState: """Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. 
workdir: Directory where the tensorboard summaries are written to. Returns: Final TrainState. """ writer = metric_writers.create_default_writer( logdir=workdir, just_logging=jax.process_index() != 0 ) rng = random.key(0) image_size = 224 if config.batch_size % jax.device_count() > 0: raise ValueError('Batch size must be divisible by the number of devices') local_batch_size = config.batch_size // jax.process_count() platform = jax.local_devices()[0].platform if config.half_precision: if platform == 'tpu': input_dtype = tf.bfloat16 else: input_dtype = tf.float16 else: input_dtype = tf.float32 dataset_builder = tfds.builder(config.dataset) train_iter = create_input_iter( dataset_builder, local_batch_size, image_size, input_dtype, train=True, cache=config.cache, shuffle_buffer_size=config.shuffle_buffer_size, prefetch=config.prefetch, ) eval_iter = create_input_iter( dataset_builder, local_batch_size, image_size, input_dtype, train=False, cache=config.cache, shuffle_buffer_size=None, prefetch=config.prefetch, ) steps_per_epoch = ( dataset_builder.info.splits['train'].num_examples // config.batch_size ) if config.num_train_steps <= 0: num_steps = int(steps_per_epoch * config.num_epochs) else: num_steps = config.num_train_steps if config.steps_per_eval == -1: num_validation_examples = dataset_builder.info.splits[ 'validation' ].num_examples steps_per_eval = num_validation_examples // config.batch_size else: steps_per_eval = config.steps_per_eval steps_per_checkpoint = steps_per_epoch * 10 base_learning_rate = config.learning_rate * config.batch_size / 256.0 model_cls = getattr(models, config.model) model = create_model( model_cls=model_cls, half_precision=config.half_precision ) learning_rate_fn = create_learning_rate_fn( config, base_learning_rate, steps_per_epoch ) state = create_train_state(rng, config, model, image_size, learning_rate_fn) state = restore_checkpoint(state, workdir) # step_offset > 0 if restarting from checkpoint step_offset = int(state.step) state = 
jax_utils.replicate(state) p_train_step = jax.pmap( functools.partial(train_step, learning_rate_fn=learning_rate_fn), axis_name='batch', ) p_eval_step = jax.pmap(eval_step, axis_name='batch') train_metrics = [] hooks = [] if jax.process_index() == 0: hooks += [periodic_actions.Profile(num_profile_steps=5, logdir=workdir)] train_metrics_last_t = time.time() logging.info('Initial compilation, this might take some minutes...') for step, batch in zip(range(step_offset, num_steps), train_iter): state, metrics = p_train_step(state, batch) for h in hooks: h(step) if step == step_offset: logging.info('Initial compilation completed.') if config.get('log_every_steps'): train_metrics.append(metrics) if (step + 1) % config.log_every_steps == 0: train_metrics = common_utils.get_metrics(train_metrics) summary = { f'train_{k}': v for k, v in jax.tree_util.tree_map( lambda x: x.mean(), train_metrics ).items() } summary['steps_per_second'] = config.log_every_steps / ( time.time() - train_metrics_last_t ) writer.write_scalars(step + 1, summary) train_metrics = [] train_metrics_last_t = time.time() if (step + 1) % steps_per_epoch == 0: epoch = step // steps_per_epoch eval_metrics = [] # sync batch statistics across replicas state = sync_batch_stats(state) for _ in range(steps_per_eval): eval_batch = next(eval_iter) metrics = p_eval_step(state, eval_batch) eval_metrics.append(metrics) eval_metrics = common_utils.get_metrics(eval_metrics) summary = jax.tree_util.tree_map(lambda x: x.mean(), eval_metrics) logging.info( 'eval epoch: %d, loss: %.4f, accuracy: %.2f', epoch, summary['loss'], summary['accuracy'] * 100, ) writer.write_scalars( step + 1, {f'eval_{key}': val for key, val in summary.items()} ) writer.flush() if (step + 1) % steps_per_checkpoint == 0 or step + 1 == num_steps: state = sync_batch_stats(state) save_checkpoint(state, workdir) # Wait until computations are done before exiting jax.random.normal(jax.random.key(0), ()).block_until_ready() return state
Execute model training and evaluation loop. Args: config: Hyperparameter configuration for training and evaluation. workdir: Directory where the tensorboard summaries are written to. Returns: Final TrainState.
22,578
from configs import default as default_lib The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the hyperparameter configuration to train on 8 x Nvidia V100 GPUs. Here is the function: def get_config(): """Get the hyperparameter configuration to train on 8 x Nvidia V100 GPUs.""" # Override default configuration to avoid duplication of field definition. config = default_lib.get_config() config.batch_size = 512 config.shuffle_buffer_size = 16 * 512 config.cache = True return config
Get the hyperparameter configuration to train on 8 x Nvidia V100 GPUs.
22,579
import ml_collections The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the default hyperparameter configuration. Here is the function: def get_config(): """Get the default hyperparameter configuration.""" config = ml_collections.ConfigDict() # As defined in the `models` module. config.model = 'ResNet50' # `name` argument of tensorflow_datasets.builder() config.dataset = 'imagenet2012:5.*.*' config.learning_rate = 0.1 config.warmup_epochs = 5.0 config.momentum = 0.9 config.batch_size = 128 config.shuffle_buffer_size = 16 * 128 config.prefetch = 10 config.num_epochs = 100.0 config.log_every_steps = 100 config.cache = False config.half_precision = False # If num_train_steps==-1 then the number of training steps is calculated from # num_epochs using the entire dataset. Similarly for steps_per_eval. config.num_train_steps = -1 config.steps_per_eval = -1 return config
Get the default hyperparameter configuration.
22,580
import ml_collections def metrics(): return [ 'train_loss', 'eval_loss', 'train_accuracy', 'eval_accuracy', 'steps_per_second', 'train_learning_rate', ]
null
22,581
from configs import default as default_lib The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the hyperparameter configuration to train on 8 x Nvidia V100 GPUs. Here is the function: def get_config(): """Get the hyperparameter configuration to train on 8 x Nvidia V100 GPUs.""" # Override default configuration to avoid duplication of field definition. config = default_lib.get_config() config.batch_size = 2048 config.shuffle_buffer_size = 16 * 2048 config.cache = True config.half_precision = True return config
Get the hyperparameter configuration to train on 8 x Nvidia V100 GPUs.
22,582
from configs import default as default_lib The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the hyperparameter configuration to train on TPUs. Here is the function: def get_config(): """Get the hyperparameter configuration to train on TPUs.""" config = default_lib.get_config() # Consider setting the batch size to max(tpu_chips * 256, 8 * 1024) if you # train on a larger pod slice. config.batch_size = 1024 config.shuffle_buffer_size = 16 * 1024 config.cache = True config.half_precision = True return config
Get the hyperparameter configuration to train on TPUs.
22,583
import jax from configs import default as default_lib The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config()` to solve the following problem: Get the hyperparameter configuration for Fake data benchmark. Here is the function: def get_config(): """Get the hyperparameter configuration for Fake data benchmark.""" # Override default configuration to avoid duplication of field definition. config = default_lib.get_config() config.batch_size = 256 * jax.device_count() config.half_precision = True config.num_epochs = 5 # Run for a single step: config.num_train_steps = 1 config.steps_per_eval = 1 return config
Get the hyperparameter configuration for Fake data benchmark.
22,584
from jax._src import traceback_util as jax_traceback_util from flax import config _flax_filter_tracebacks = config.flax_filter_frames _flax_exclusions = set() The provided code snippet includes necessary dependencies for implementing the `register_exclusion` function. Write a Python function `def register_exclusion(path)` to solve the following problem: Marks a Flax source file for exclusion. Here is the function: def register_exclusion(path): """Marks a Flax source file for exclusion.""" global _flax_exclusions, _flax_filter_tracebacks # Record flax exclusions so we can dynamically add and remove them. _flax_exclusions.add(path) if _flax_filter_tracebacks: jax_traceback_util.register_exclusion(path)
Marks a Flax source file for exclusion.
22,585
from jax._src import traceback_util as jax_traceback_util from flax import config _flax_filter_tracebacks = config.flax_filter_frames _flax_exclusions = set() The provided code snippet includes necessary dependencies for implementing the `hide_flax_in_tracebacks` function. Write a Python function `def hide_flax_in_tracebacks()` to solve the following problem: Hides Flax internal stack frames in tracebacks. Here is the function: def hide_flax_in_tracebacks(): """Hides Flax internal stack frames in tracebacks.""" global _flax_exclusions, _flax_filter_tracebacks _flax_filter_tracebacks = True for exclusion in _flax_exclusions: if exclusion not in jax_traceback_util._exclude_paths: jax_traceback_util._exclude_paths.append(exclusion)
Hides Flax internal stack frames in tracebacks.
22,586
from jax._src import traceback_util as jax_traceback_util from flax import config _flax_filter_tracebacks = config.flax_filter_frames _flax_exclusions = set() The provided code snippet includes necessary dependencies for implementing the `show_flax_in_tracebacks` function. Write a Python function `def show_flax_in_tracebacks()` to solve the following problem: Shows Flax internal stack frames in tracebacks. Here is the function: def show_flax_in_tracebacks(): """Shows Flax internal stack frames in tracebacks.""" global _flax_exclusions, _flax_filter_tracebacks _flax_filter_tracebacks = False for exclusion in _flax_exclusions: if exclusion in jax_traceback_util._exclude_paths: jax_traceback_util._exclude_paths.remove(exclusion)
Shows Flax internal stack frames in tracebacks.
22,587
import contextlib import functools import os import numpy as np import tensorflow as tf from tensorboard.plugins.hparams import api as hparams_api from flax import io The provided code snippet includes necessary dependencies for implementing the `_flatten_dict` function. Write a Python function `def _flatten_dict(input_dict, parent_key='', sep='.')` to solve the following problem: Flattens and simplifies dict such that it can be used by hparams. Args: input_dict: Input dict, e.g., from ConfigDict. parent_key: String used in recursion. sep: String used to separate parent and child keys. Returns: Flattened dict. Here is the function: def _flatten_dict(input_dict, parent_key='', sep='.'): """Flattens and simplifies dict such that it can be used by hparams. Args: input_dict: Input dict, e.g., from ConfigDict. parent_key: String used in recursion. sep: String used to separate parent and child keys. Returns: Flattened dict. """ items = [] for k, v in input_dict.items(): new_key = parent_key + sep + k if parent_key else k # Valid types according to https://github.com/tensorflow/tensorboard/blob/1204566da5437af55109f7a4af18f9f8b7c4f864/tensorboard/plugins/hparams/summary_v2.py valid_types = ( bool, int, float, str, np.bool_, np.integer, np.floating, np.character, ) if isinstance(v, dict): # Recursively flatten the dict. items.extend(_flatten_dict(v, new_key, sep=sep).items()) continue elif not isinstance(v, valid_types): # Cast any incompatible values as strings such that they can be handled by hparams v = str(v) items.append((new_key, v)) return dict(items)
Flattens and simplifies dict such that it can be used by hparams. Args: input_dict: Input dict, e.g., from ConfigDict. parent_key: String used in recursion. sep: String used to separate parent and child keys. Returns: Flattened dict.
22,588
import contextlib import functools import os import numpy as np import tensorflow as tf from tensorboard.plugins.hparams import api as hparams_api from flax import io class SummaryWriter: """Saves data in event and summary protos for tensorboard.""" def __init__(self, log_dir, auto_flush=True): """Create a new SummaryWriter. Args: log_dir: path to record tfevents files in. auto_flush: if true, flush after every reported metric. """ log_dir = os.fspath(log_dir) # If needed, create log_dir directory as well as missing parent directories. if not io.isdir(log_dir): io.makedirs(log_dir) self._event_writer = tf.summary.create_file_writer(log_dir) self._as_default = functools.partial(_as_default, auto_flush=auto_flush) self._closed = False def close(self): """Close SummaryWriter. Final!""" if not self._closed: self._event_writer.close() self._closed = True del self._event_writer def flush(self): self._event_writer.flush() def scalar(self, tag, value, step): """Saves scalar value. Args: tag: str: label for this data value: int/float: number to log step: int: training step """ value = float(np.array(value)) with self._as_default(self._event_writer): tf.summary.scalar(name=tag, data=value, step=step) def image(self, tag, image, step, max_outputs=3): """Saves RGB image summary from np.ndarray [H,W], [H,W,1], or [H,W,3]. Args: tag: str: label for this data image: ndarray: [H,W], [H,W,1], [H,W,3], [K,H,W], [K,H,W,1], [K,H,W,3] Save image in greyscale or colors. Pixel values could be either uint8 or float. Floating point values should be in range [0, 1). step: int: training step max_outputs: At most this many images will be emitted at each step. Defaults to 3. """ image = np.array(image) # tf.summary.image expects image to have shape [k, h, w, c] where, # k = number of samples, h = height, w = width, c = number of channels. 
if len(np.shape(image)) == 2: image = image[np.newaxis, :, :, np.newaxis] elif len(np.shape(image)) == 3: # this could be either [k, h, w] or [h, w, c] if np.shape(image)[-1] in (1, 3): image = image[np.newaxis, :, :, :] else: image = image[:, :, :, np.newaxis] if np.shape(image)[-1] == 1: image = np.repeat(image, 3, axis=-1) # Convert to tensor value as tf.summary.image expects data to be a tensor. image = tf.convert_to_tensor(image) with self._as_default(self._event_writer): tf.summary.image(name=tag, data=image, step=step, max_outputs=max_outputs) def audio(self, tag, audiodata, step, sample_rate=44100, max_outputs=3): """Saves audio as wave. NB: single channel only right now. Args: tag: str: label for this data audiodata: ndarray [Nsamples, Nframes, Nchannels]: audio data to be saved as wave. The data will be clipped to [-1.0, 1.0]. step: int: training step sample_rate: sample rate of passed in audio buffer max_outputs: At most this many audio clips will be emitted at each step. Defaults to 3. """ # tf.summary.audio expects the audio data to have floating values in # [-1.0, 1.0]. audiodata = np.clip(np.array(audiodata), -1, 1) # Convert to tensor value as tf.summary.audio expects data to be a tensor. audio = tf.convert_to_tensor(audiodata, dtype=tf.float32) with self._as_default(self._event_writer): tf.summary.audio( name=tag, data=audio, sample_rate=sample_rate, step=step, max_outputs=max_outputs, encoding='wav', ) def histogram(self, tag, values, step, bins=None): """Saves histogram of values. Args: tag: str: label for this data values: ndarray: will be flattened by this routine step: int: training step bins: number of bins in histogram """ values = np.array(values) values = np.reshape(values, -1) with self._as_default(self._event_writer): tf.summary.histogram(name=tag, data=values, step=step, buckets=bins) def text(self, tag, textdata, step): """Saves a text summary. 
Args: tag: str: label for this data textdata: string step: int: training step Note: markdown formatting is rendered by tensorboard. """ if not isinstance(textdata, (str, bytes)): raise ValueError('`textdata` should be of the type `str` or `bytes`.') with self._as_default(self._event_writer): tf.summary.text(name=tag, data=tf.constant(textdata), step=step) def write(self, tag, tensor, step, metadata=None): """Saves an arbitrary tensor summary. Useful when working with custom plugins or constructing a summary directly. Args: tag: str: label for this data tensor: ndarray: tensor data to save. step: int: training step metadata: Optional SummaryMetadata, as a proto or serialized bytes. Note: markdown formatting is rendered by tensorboard. """ with self._as_default(self._event_writer): tf.summary.write(tag=tag, tensor=tensor, step=step, metadata=metadata) def hparams(self, hparams): """Saves hyper parameters. Args: hparams: Flat mapping from hyper parameter name to value. """ with self._as_default(self._event_writer): hparams_api.hparams(hparams=_flatten_dict(hparams)) The provided code snippet includes necessary dependencies for implementing the `_as_default` function. Write a Python function `def _as_default(summary_writer: tf.summary.SummaryWriter, auto_flush: bool)` to solve the following problem: No-flush variation of summary_writer.as_default(). Here is the function: def _as_default(summary_writer: tf.summary.SummaryWriter, auto_flush: bool): """No-flush variation of summary_writer.as_default().""" context_manager = summary_writer.as_default() try: context_manager.__enter__() yield summary_writer finally: old_flush = summary_writer.flush new_flush = old_flush if auto_flush else lambda: None summary_writer.flush = new_flush context_manager.__exit__(None, None, None) summary_writer.flush = old_flush
No-flush variation of summary_writer.as_default().
22,589
import enum import threading from contextlib import contextmanager from typing import Any, Dict, List import jax import msgpack import numpy as np def to_state_dict(target) -> Dict[str, Any]: """Returns a dictionary with the state of the given target.""" if _is_namedtuple(target): ty = _NamedTuple else: ty = type(target) if ty not in _STATE_DICT_REGISTRY: return target ty_to_state_dict = _STATE_DICT_REGISTRY[ty][0] state_dict = ty_to_state_dict(target) if isinstance(state_dict, dict): for key in state_dict.keys(): assert isinstance(key, str), 'A state dict must only have string keys.' return state_dict def _list_state_dict(xs: List[Any]) -> Dict[str, Any]: return {str(i): to_state_dict(x) for i, x in enumerate(xs)}
null
22,590
import enum import threading from contextlib import contextmanager from typing import Any, Dict, List import jax import msgpack import numpy as np def current_path(): """Current state_dict path during deserialization for error messages.""" return '/'.join(_error_context.path) def from_state_dict(target, state: Dict[str, Any], name: str = '.'): """Restores the state of the given target using a state dict. This function takes the current target as an argument. This lets us know the exact structure of the target, as well as lets us add assertions that shapes and dtypes don't change. In practice, none of the leaf values in ``target`` are actually used. Only the tree structure, shapes and dtypes. Args: target: the object of which the state should be restored. state: a dictionary generated by ``to_state_dict`` with the desired new state for ``target``. name: name of branch taken, used to improve deserialization error messages. Returns: A copy of the object with the restored state. """ if _is_namedtuple(target): ty = _NamedTuple else: ty = type(target) if ty not in _STATE_DICT_REGISTRY: return state ty_from_state_dict = _STATE_DICT_REGISTRY[ty][1] with _record_path(name): return ty_from_state_dict(target, state) def _restore_list(xs, state_dict: Dict[str, Any]) -> List[Any]: if len(state_dict) != len(xs): raise ValueError( 'The size of the list and the state dict do not match,' f' got {len(xs)} and {len(state_dict)} ' f'at path {current_path()}' ) ys = [] for i in range(len(state_dict)): y = from_state_dict(xs[i], state_dict[str(i)], name=str(i)) ys.append(y) return ys
null
22,591
import enum import threading from contextlib import contextmanager from typing import Any, Dict, List import jax import msgpack import numpy as np def to_state_dict(target) -> Dict[str, Any]: """Returns a dictionary with the state of the given target.""" if _is_namedtuple(target): ty = _NamedTuple else: ty = type(target) if ty not in _STATE_DICT_REGISTRY: return target ty_to_state_dict = _STATE_DICT_REGISTRY[ty][0] state_dict = ty_to_state_dict(target) if isinstance(state_dict, dict): for key in state_dict.keys(): assert isinstance(key, str), 'A state dict must only have string keys.' return state_dict def _dict_state_dict(xs: Dict[str, Any]) -> Dict[str, Any]: str_keys = set(str(k) for k in xs.keys()) if len(str_keys) != len(xs): raise ValueError( 'Dict keys do not have a unique string representation: ' f'{str_keys} vs given: {xs}' ) return {str(key): to_state_dict(value) for key, value in xs.items()}
null
22,592
import enum import threading from contextlib import contextmanager from typing import Any, Dict, List import jax import msgpack import numpy as np def current_path(): """Current state_dict path during deserialization for error messages.""" return '/'.join(_error_context.path) def from_state_dict(target, state: Dict[str, Any], name: str = '.'): """Restores the state of the given target using a state dict. This function takes the current target as an argument. This lets us know the exact structure of the target, as well as lets us add assertions that shapes and dtypes don't change. In practice, none of the leaf values in ``target`` are actually used. Only the tree structure, shapes and dtypes. Args: target: the object of which the state should be restored. state: a dictionary generated by ``to_state_dict`` with the desired new state for ``target``. name: name of branch taken, used to improve deserialization error messages. Returns: A copy of the object with the restored state. """ if _is_namedtuple(target): ty = _NamedTuple else: ty = type(target) if ty not in _STATE_DICT_REGISTRY: return state ty_from_state_dict = _STATE_DICT_REGISTRY[ty][1] with _record_path(name): return ty_from_state_dict(target, state) def _restore_dict(xs, states: Dict[str, Any]) -> Dict[str, Any]: diff = set(map(str, xs.keys())).difference(states.keys()) if diff: raise ValueError( 'The target dict keys and state dict keys do not match, target dict' f' contains keys {diff} which are not present in state dict at path' f' {current_path()}' ) return { key: from_state_dict(value, states[str(key)], name=str(key)) for key, value in xs.items() }
null
22,593
import enum import threading from contextlib import contextmanager from typing import Any, Dict, List import jax import msgpack import numpy as np def to_state_dict(target) -> Dict[str, Any]: """Returns a dictionary with the state of the given target.""" if _is_namedtuple(target): ty = _NamedTuple else: ty = type(target) if ty not in _STATE_DICT_REGISTRY: return target ty_to_state_dict = _STATE_DICT_REGISTRY[ty][0] state_dict = ty_to_state_dict(target) if isinstance(state_dict, dict): for key in state_dict.keys(): assert isinstance(key, str), 'A state dict must only have string keys.' return state_dict def _namedtuple_state_dict(nt) -> Dict[str, Any]: return {key: to_state_dict(getattr(nt, key)) for key in nt._fields}
null
22,594
import enum import threading from contextlib import contextmanager from typing import Any, Dict, List import jax import msgpack import numpy as np def current_path(): """Current state_dict path during deserialization for error messages.""" return '/'.join(_error_context.path) def from_state_dict(target, state: Dict[str, Any], name: str = '.'): """Restores the state of the given target using a state dict. This function takes the current target as an argument. This lets us know the exact structure of the target, as well as lets us add assertions that shapes and dtypes don't change. In practice, none of the leaf values in ``target`` are actually used. Only the tree structure, shapes and dtypes. Args: target: the object of which the state should be restored. state: a dictionary generated by ``to_state_dict`` with the desired new state for ``target``. name: name of branch taken, used to improve deserialization error messages. Returns: A copy of the object with the restored state. """ if _is_namedtuple(target): ty = _NamedTuple else: ty = type(target) if ty not in _STATE_DICT_REGISTRY: return state ty_from_state_dict = _STATE_DICT_REGISTRY[ty][1] with _record_path(name): return ty_from_state_dict(target, state) The provided code snippet includes necessary dependencies for implementing the `_restore_namedtuple` function. Write a Python function `def _restore_namedtuple(xs, state_dict: Dict[str, Any])` to solve the following problem: Rebuild namedtuple from serialized dict. 
Here is the function: def _restore_namedtuple(xs, state_dict: Dict[str, Any]): """Rebuild namedtuple from serialized dict.""" if set(state_dict.keys()) == {'name', 'fields', 'values'}: # TODO(jheek): remove backward compatible named tuple restoration early 2022 state_dict = { state_dict['fields'][str(i)]: state_dict['values'][str(i)] for i in range(len(state_dict['fields'])) } sd_keys = set(state_dict.keys()) nt_keys = set(xs._fields) if sd_keys != nt_keys: raise ValueError( 'The field names of the state dict and the named tuple do not match,' f' got {sd_keys} and {nt_keys} at path {current_path()}' ) fields = { k: from_state_dict(getattr(xs, k), v, name=k) for k, v in state_dict.items() } return type(xs)(**fields)
Rebuild namedtuple from serialized dict.
22,595
import enum import threading from contextlib import contextmanager from typing import Any, Dict, List import jax import msgpack import numpy as np def from_state_dict(target, state: Dict[str, Any], name: str = '.'): """Restores the state of the given target using a state dict. This function takes the current target as an argument. This lets us know the exact structure of the target, as well as lets us add assertions that shapes and dtypes don't change. In practice, none of the leaf values in ``target`` are actually used. Only the tree structure, shapes and dtypes. Args: target: the object of which the state should be restored. state: a dictionary generated by ``to_state_dict`` with the desired new state for ``target``. name: name of branch taken, used to improve deserialization error messages. Returns: A copy of the object with the restored state. """ if _is_namedtuple(target): ty = _NamedTuple else: ty = type(target) if ty not in _STATE_DICT_REGISTRY: return state ty_from_state_dict = _STATE_DICT_REGISTRY[ty][1] with _record_path(name): return ty_from_state_dict(target, state) def msgpack_restore(encoded_pytree: bytes): """Restore data structure from bytes in msgpack format. Low-level function that only supports python trees with array leaves, for custom objects use ``from_bytes``. Args: encoded_pytree: msgpack-encoded bytes of python tree. Returns: Python tree of dict, list, tuple with python primitive and array leaves. """ state_dict = msgpack.unpackb( encoded_pytree, ext_hook=_msgpack_ext_unpack, raw=False ) return _unchunk_array_leaves_in_place(state_dict) The provided code snippet includes necessary dependencies for implementing the `from_bytes` function. Write a Python function `def from_bytes(target, encoded_bytes: bytes)` to solve the following problem: Restore optimizer or other object from msgpack-serialized state-dict. Args: target: template object with state-dict registrations that matches the structure being deserialized from ``encoded_bytes``. 
encoded_bytes: msgpack serialized object structurally isomorphic to ``target``. Typically a flax model or optimizer. Returns: A new object structurally isomorphic to ``target`` containing the updated leaf data from saved data. Here is the function: def from_bytes(target, encoded_bytes: bytes): """Restore optimizer or other object from msgpack-serialized state-dict. Args: target: template object with state-dict registrations that matches the structure being deserialized from ``encoded_bytes``. encoded_bytes: msgpack serialized object structurally isomorphic to ``target``. Typically a flax model or optimizer. Returns: A new object structurally isomorphic to ``target`` containing the updated leaf data from saved data. """ state_dict = msgpack_restore(encoded_bytes) return from_state_dict(target, state_dict)
Restore optimizer or other object from msgpack-serialized state-dict. Args: target: template object with state-dict registrations that matches the structure being deserialized from ``encoded_bytes``. encoded_bytes: msgpack serialized object structurally isomorphic to ``target``. Typically a flax model or optimizer. Returns: A new object structurally isomorphic to ``target`` containing the updated leaf data from saved data.
22,596
import functools import re from typing import (Any, Callable, Mapping, Optional, Tuple) import flax from flax import linen as nn from flax import struct from flax.core.frozen_dict import freeze from flax.core.frozen_dict import unfreeze from flax.core.scope import ( CollectionFilter as CollectionFilter, PRNGSequenceFilter as PRNGSequenceFilter, ) from flax.linen.spmd import _axis_rules from flax.linen.spmd import _AxisRules from flax.linen.spmd import _is_logical_spec from flax.linen.spmd import _with_sharding_constraint from flax.linen.spmd import get_logical_axis_rules as get_axis_rules from flax.linen.spmd import logical_axis_rules as axis_rules from flax.linen.spmd import logical_to_mesh from flax.linen.spmd import logical_to_mesh_axes from flax.linen.spmd import RulesFallback from flax.linen.spmd import set_logical_axis_rules as set_axis_rules from flax.linen.spmd import with_logical_constraint as with_sharding_constraint from flax.traverse_util import flatten_dict from flax.traverse_util import unflatten_dict from flax.typing import ( Array, In as ScanIn, # pylint: disable=unused-import Out as ScanOut, # pylint: disable=unused-import InOutAxis, InOutScanAxis, LogicalRules, # pylint: disable=unused-import ArrayPytree, # pylint: disable=unused-import LogicalPartitionSpec, # pylint: disable=unused-import LogicalPartitionSpecPytree, PartitionSpecPytree, # pylint: disable=unused-import ) import jax class AxisMetadata: """Contains a tuple of axis names, which is passed through FLAX.""" names: LogicalPartitionSpecPytree = struct.field(pytree_node=False) def _param_with_axes_sow_reduce_fn(x, y): """Reduction function for sow() calls. Args: x: Existing value, or () if there was none. y: New axis names sown. Returns: New axis names. Raises: TypeError: If the newly sown value is not an AxisMetadata. ValueError: If the newly sown axis names don't match previously sown axis names. AssertionError: If a previously sown value was truthy and not an AxisMetadata. 
""" if not isinstance(y, AxisMetadata): raise TypeError('Expected newly sown value to be an AxisMetadata') if isinstance(x, AxisMetadata): if x != y: raise ValueError( 'If axis names are sown twice, expected them to match. ' f'Got {x} and {y}.' ) elif x: # Shouldn't happen, so raise a fairly internal error. raise AssertionError(f'Non-initial-or-AxisMetadata value encountered: {x}') return y def _core_variable_with_axes( scope, col: str, name: str, init_fn: Callable[..., Any], *init_args, axes: Optional[Tuple[str, ...]] = None, fallback: RulesFallback = RulesFallback.AXIS_IS_UNSHARDED, **init_kwargs, ): """Variant of flax core variable scope call with sharding constraints.""" scope.reserve(name) if not scope.has_variable(col, name): if not scope.is_mutable_collection(col): raise flax.errors.ScopeVariableNotFoundError(name, col, scope.path_text) init_value = init_fn(*init_args, **init_kwargs) if axes is not None: init_value = with_sharding_constraint(init_value, axes, fallback=fallback) scope.put_variable(col, name, init_value) return PartitionedVariable(scope, col, name, axes, fallback) class RulesFallback(enum.Enum): """How a sharding constraint should behave when no matching rule is found.""" AXIS_IS_UNSHARDED = 'axis_is_unsharded' RAISE_ERROR = 'raise_error' NO_CONSTRAINT = 'no_constraint' The provided code snippet includes necessary dependencies for implementing the `variable_with_axes` function. Write a Python function `def variable_with_axes( collection: str, name: str, init_fn, *init_args, axes: Optional[Tuple[str, ...]] = None, module: Optional['nn.Module'] = None, fallback: RulesFallback = RulesFallback.AXIS_IS_UNSHARDED, **init_kwargs, )` to solve the following problem: Declares and returns a variable with logical axes in the current Module. See :mod:`flax.linen.module.variable` for original docstring. Args: collection: The name of the variable collection. name: The variable name. 
init_fn: The function that will be called to compute the initial value of this variable. This function will only be called the first time this parameter is used in this module. *init_args: The positional arguments to pass to init_fn. axes: A tuple of axis names, must match the rank of the variable array. module: Use an explicit module instead of deriving the most recent from dynamic module context. fallback: How sharding should behave if there is no rule covering some axis. **init_kwargs: The key-word arguments to pass to init_fn. Returns: A flax `PartitionedVariable` object referencing the initialized variable array. Raises: TypeError: if axes specification is mal-formed. ValueError: if specified logical axes don't match parameter rank. Here is the function: def variable_with_axes( collection: str, name: str, init_fn, *init_args, axes: Optional[Tuple[str, ...]] = None, module: Optional['nn.Module'] = None, fallback: RulesFallback = RulesFallback.AXIS_IS_UNSHARDED, **init_kwargs, ): """Declares and returns a variable with logical axes in the current Module. See :mod:`flax.linen.module.variable` for original docstring. Args: collection: The name of the variable collection. name: The variable name. init_fn: The function that will be called to compute the initial value of this variable. This function will only be called the first time this parameter is used in this module. *init_args: The positional arguments to pass to init_fn. axes: A tuple of axis names, must match the rank of the variable array. module: Use an explicit module instead of deriving the most recent from dynamic module context. fallback: How sharding should behave if there is no rule covering some axis. **init_kwargs: The key-word arguments to pass to init_fn. Returns: A flax `PartitionedVariable` object referencing the initialized variable array. Raises: TypeError: if axes specification is mal-formed. ValueError: if specified logical axes don't match parameter rank. 
""" # get current module if not explicitly provided if module is None: module = nn.module._context.module_stack[-1] # pylint: disable=protected-access assert module is not None module_var = _core_variable_with_axes( module.scope, collection, name, init_fn, *init_args, axes=axes, fallback=fallback, **init_kwargs, ) if axes is not None: # record logical axis constraint for global axis metadata module.sow( f'{collection}_axes', f'{name}_axes', AxisMetadata(axes), # type: ignore reduce_fn=_param_with_axes_sow_reduce_fn, ) return module_var
Declares and returns a variable with logical axes in the current Module. See :mod:`flax.linen.module.variable` for original docstring. Args: collection: The name of the variable collection. name: The variable name. init_fn: The function that will be called to compute the initial value of this variable. This function will only be called the first time this parameter is used in this module. *init_args: The positional arguments to pass to init_fn. axes: A tuple of axis names, must match the rank of the variable array. module: Use an explicit module instead of deriving the most recent from dynamic module context. fallback: How sharding should behave if there is no rule covering some axis. **init_kwargs: The key-word arguments to pass to init_fn. Returns: A flax `PartitionedVariable` object referencing the initialized variable array. Raises: TypeError: if axes specification is mal-formed. ValueError: if specified logical axes don't match parameter rank.
22,597
import functools import re from typing import (Any, Callable, Mapping, Optional, Tuple) import flax from flax import linen as nn from flax import struct from flax.core.frozen_dict import freeze from flax.core.frozen_dict import unfreeze from flax.core.scope import ( CollectionFilter as CollectionFilter, PRNGSequenceFilter as PRNGSequenceFilter, ) from flax.linen.spmd import _axis_rules from flax.linen.spmd import _AxisRules from flax.linen.spmd import _is_logical_spec from flax.linen.spmd import _with_sharding_constraint from flax.linen.spmd import get_logical_axis_rules as get_axis_rules from flax.linen.spmd import logical_axis_rules as axis_rules from flax.linen.spmd import logical_to_mesh from flax.linen.spmd import logical_to_mesh_axes from flax.linen.spmd import RulesFallback from flax.linen.spmd import set_logical_axis_rules as set_axis_rules from flax.linen.spmd import with_logical_constraint as with_sharding_constraint from flax.traverse_util import flatten_dict from flax.traverse_util import unflatten_dict from flax.typing import ( Array, In as ScanIn, # pylint: disable=unused-import Out as ScanOut, # pylint: disable=unused-import InOutAxis, InOutScanAxis, LogicalRules, # pylint: disable=unused-import ArrayPytree, # pylint: disable=unused-import LogicalPartitionSpec, # pylint: disable=unused-import LogicalPartitionSpecPytree, PartitionSpecPytree, # pylint: disable=unused-import ) import jax def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]: """Freeze a nested dict. Makes a nested ``dict`` immutable by transforming it into ``FrozenDict``. Args: xs: Dictionary to freeze (a regualr Python dict). Returns: The frozen dictionary. """ return FrozenDict(xs) def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]: """Unfreeze a FrozenDict. Makes a mutable copy of a ``FrozenDict`` mutable by transforming it into (nested) dict. Args: x: Frozen dictionary to unfreeze. Returns: The unfrozen dictionary (a regular Python dict). 
""" if isinstance(x, FrozenDict): # deep copy internal state of a FrozenDict # the dict branch would also work here but # it is much less performant because jax.tree_util.tree_map # uses an optimized C implementation. return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore elif isinstance(x, dict): ys = {} for key, value in x.items(): ys[key] = unfreeze(value) return ys else: return x def _is_logical_spec(x): return x is None or ( isinstance(x, tuple) and all(isinstance(e, str) or e is None for e in x) ) def flatten_dict(xs, keep_empty_nodes=False, is_leaf=None, sep=None): """Flatten a nested dictionary. The nested keys are flattened to a tuple. See ``unflatten_dict`` on how to restore the nested dictionary structure. Example:: >>> from flax.traverse_util import flatten_dict >>> xs = {'foo': 1, 'bar': {'a': 2, 'b': {}}} >>> flat_xs = flatten_dict(xs) >>> flat_xs {('foo',): 1, ('bar', 'a'): 2} Note that empty dictionaries are ignored and will not be restored by ``unflatten_dict``. Args: xs: a nested dictionary keep_empty_nodes: replaces empty dictionaries with ``traverse_util.empty_node``. is_leaf: an optional function that takes the next nested dictionary and nested keys and returns True if the nested dictionary is a leaf (i.e., should not be flattened further). sep: if specified, then the keys of the returned dictionary will be ``sep``-joined strings (if ``None``, then keys will be tuples). Returns: The flattened dictionary. 
""" assert isinstance( xs, (flax.core.FrozenDict, dict) ), f'expected (frozen)dict; got {type(xs)}' def _key(path): if sep is None: return path return sep.join(path) def _flatten(xs, prefix): if not isinstance(xs, (flax.core.FrozenDict, dict)) or ( is_leaf and is_leaf(prefix, xs) ): return {_key(prefix): xs} result = {} is_empty = True for key, value in xs.items(): is_empty = False path = prefix + (key,) result.update(_flatten(value, path)) if keep_empty_nodes and is_empty: if prefix == (): # when the whole input is empty return {} return {_key(prefix): empty_node} return result return _flatten(xs, ()) def unflatten_dict(xs, sep=None): """Unflatten a dictionary. See ``flatten_dict`` Example:: >>> flat_xs = { ... ('foo',): 1, ... ('bar', 'a'): 2, ... } >>> xs = unflatten_dict(flat_xs) >>> xs {'foo': 1, 'bar': {'a': 2}} Args: xs: a flattened dictionary sep: separator (same as used with ``flatten_dict()``). Returns: The nested dictionary. """ assert isinstance(xs, dict), f'input is not a dict; it is a {type(xs)}' result = {} for path, value in xs.items(): if sep is not None: path = path.split(sep) if value is empty_node: value = {} cursor = result for key in path[:-1]: if key not in cursor: cursor[key] = {} cursor = cursor[key] cursor[path[-1]] = value return result The provided code snippet includes necessary dependencies for implementing the `get_axis_names` function. Write a Python function `def get_axis_names(axes_metadata)` to solve the following problem: Gets axis names for variables as logical PartitionSpecs. Args: axes_metadata: a single axes-metadata collection from a flax-initialized set of collections. Returns: Collection of Partitionspecs with logical axis names, with the "_axes" suffix on variable names removed to match original variable collection for annotations. Here is the function: def get_axis_names(axes_metadata): """Gets axis names for variables as logical PartitionSpecs. 
Args: axes_metadata: a single axes-metadata collection from a flax-initialized set of collections. Returns: Collection of Partitionspecs with logical axis names, with the "_axes" suffix on variable names removed to match original variable collection for annotations. """ def leaf_rewrite(x): return None if x is None else jax.sharding.PartitionSpec(*x) def rewrite(tree): return jax.tree_util.tree_map(leaf_rewrite, tree, is_leaf=_is_logical_spec) axes_metadata = unfreeze(axes_metadata) # pytype: disable=wrong-arg-types flat_dict = { re.sub(r'_axes$', '', '/'.join(k)): rewrite(v.names) for k, v in flatten_dict(axes_metadata).items() } return freeze( unflatten_dict({tuple(k.split('/')): v for k, v in flat_dict.items()}) )
Gets axis names for variables as logical PartitionSpecs. Args: axes_metadata: a single axes-metadata collection from a flax-initialized set of collections. Returns: Collection of Partitionspecs with logical axis names, with the "_axes" suffix on variable names removed to match original variable collection for annotations.
22,598
import functools import re from typing import (Any, Callable, Mapping, Optional, Tuple) import flax from flax import linen as nn from flax import struct from flax.core.frozen_dict import freeze from flax.core.frozen_dict import unfreeze from flax.core.scope import ( CollectionFilter as CollectionFilter, PRNGSequenceFilter as PRNGSequenceFilter, ) from flax.linen.spmd import _axis_rules from flax.linen.spmd import _AxisRules from flax.linen.spmd import _is_logical_spec from flax.linen.spmd import _with_sharding_constraint from flax.linen.spmd import get_logical_axis_rules as get_axis_rules from flax.linen.spmd import logical_axis_rules as axis_rules from flax.linen.spmd import logical_to_mesh from flax.linen.spmd import logical_to_mesh_axes from flax.linen.spmd import RulesFallback from flax.linen.spmd import set_logical_axis_rules as set_axis_rules from flax.linen.spmd import with_logical_constraint as with_sharding_constraint from flax.traverse_util import flatten_dict from flax.traverse_util import unflatten_dict from flax.typing import ( Array, In as ScanIn, # pylint: disable=unused-import Out as ScanOut, # pylint: disable=unused-import InOutAxis, InOutScanAxis, LogicalRules, # pylint: disable=unused-import ArrayPytree, # pylint: disable=unused-import LogicalPartitionSpec, # pylint: disable=unused-import LogicalPartitionSpecPytree, PartitionSpecPytree, # pylint: disable=unused-import ) import jax def _add_axis_to_metadata(fn, axis_pos, axis_name, axis_col='params_axes'): """Insert a named axis to axes metadata.""" # Handle In() / Out() scan axis marker types. 
if hasattr(axis_pos, 'axis'): axis_pos = axis_pos.axis def insert_fn_leaf(names): if names is None: return names names = list(names) names.insert(axis_pos, axis_name) return tuple(names) def insert_fn(x): new_names = jax.tree_util.tree_map( insert_fn_leaf, x.names, is_leaf=_is_logical_spec ) return x.replace(names=new_names) def remove_fn_leaf(names): if names is None: return names names = list(names) if names[axis_pos] != axis_name: raise ValueError( f'Expected axis {axis_name} at position {axis_pos} in ' f'axis metadata {names}.' ) names.pop(axis_pos) return tuple(names) def remove_fn(x): new_names = jax.tree_util.tree_map( remove_fn_leaf, x.names, is_leaf=_is_logical_spec ) return x.replace(names=new_names) return nn.transforms.map_variables( fn, axis_col, mutable=_is_mutable(axis_col), trans_in_fn=lambda tree: _tree_map_axes(remove_fn, tree), trans_out_fn=lambda tree: _tree_map_axes(insert_fn, tree), ) CollectionFilter = Filter The provided code snippet includes necessary dependencies for implementing the `scan_with_axes` function. Write a Python function `def scan_with_axes( target: 'flax.linen.transforms.Target', variable_axes: Mapping[ CollectionFilter, InOutScanAxis ] = {}, variable_broadcast: CollectionFilter = False, variable_carry: CollectionFilter = False, split_rngs: Mapping[PRNGSequenceFilter, bool] = {}, in_axes=0, out_axes=0, length: Optional[int] = None, reverse: bool = False, unroll: int = 1, axis_name: str = 'layers', axes_collections: Tuple[str, ...] = ('params',), data_transform: Optional[Callable[..., Any]] = None, methods=None, ) -> 'flax.linen.transforms.Target'` to solve the following problem: Wrapped version of nn.scan that handles logical axis metadata. 
Here is the function: def scan_with_axes( target: 'flax.linen.transforms.Target', variable_axes: Mapping[ CollectionFilter, InOutScanAxis ] = {}, variable_broadcast: CollectionFilter = False, variable_carry: CollectionFilter = False, split_rngs: Mapping[PRNGSequenceFilter, bool] = {}, in_axes=0, out_axes=0, length: Optional[int] = None, reverse: bool = False, unroll: int = 1, axis_name: str = 'layers', axes_collections: Tuple[str, ...] = ('params',), data_transform: Optional[Callable[..., Any]] = None, methods=None, ) -> 'flax.linen.transforms.Target': """Wrapped version of nn.scan that handles logical axis metadata.""" # we broadcast the static metadata collections. axes_filters = tuple(f'{col}_axes' for col in axes_collections) variable_broadcast = flax.core.scope.union_filters( variable_broadcast, axes_filters ) # perform usual lifted scan scanned = flax.linen.transforms.lift_transform( flax.core.lift.scan, target, variable_axes=variable_axes, variable_broadcast=variable_broadcast, variable_carry=variable_carry, split_rngs=split_rngs, in_axes=in_axes, out_axes=out_axes, length=length, reverse=reverse, unroll=unroll, data_transform=data_transform, methods=methods, ) # add scan axis to logical axes metadata for col in axes_collections: if col in variable_axes: scanned = _add_axis_to_metadata( scanned, axis_pos=variable_axes[col], axis_name=axis_name, axis_col=f'{col}_axes', ) return scanned
Wrapped version of nn.scan that handles logical axis metadata.
22,599
import functools import re from typing import (Any, Callable, Mapping, Optional, Tuple) import flax from flax import linen as nn from flax import struct from flax.core.frozen_dict import freeze from flax.core.frozen_dict import unfreeze from flax.core.scope import ( CollectionFilter as CollectionFilter, PRNGSequenceFilter as PRNGSequenceFilter, ) from flax.linen.spmd import _axis_rules from flax.linen.spmd import _AxisRules from flax.linen.spmd import _is_logical_spec from flax.linen.spmd import _with_sharding_constraint from flax.linen.spmd import get_logical_axis_rules as get_axis_rules from flax.linen.spmd import logical_axis_rules as axis_rules from flax.linen.spmd import logical_to_mesh from flax.linen.spmd import logical_to_mesh_axes from flax.linen.spmd import RulesFallback from flax.linen.spmd import set_logical_axis_rules as set_axis_rules from flax.linen.spmd import with_logical_constraint as with_sharding_constraint from flax.traverse_util import flatten_dict from flax.traverse_util import unflatten_dict from flax.typing import ( Array, In as ScanIn, # pylint: disable=unused-import Out as ScanOut, # pylint: disable=unused-import InOutAxis, InOutScanAxis, LogicalRules, # pylint: disable=unused-import ArrayPytree, # pylint: disable=unused-import LogicalPartitionSpec, # pylint: disable=unused-import LogicalPartitionSpecPytree, PartitionSpecPytree, # pylint: disable=unused-import ) import jax def _add_axis_to_metadata(fn, axis_pos, axis_name, axis_col='params_axes'): """Insert a named axis to axes metadata.""" # Handle In() / Out() scan axis marker types. 
if hasattr(axis_pos, 'axis'): axis_pos = axis_pos.axis def insert_fn_leaf(names): if names is None: return names names = list(names) names.insert(axis_pos, axis_name) return tuple(names) def insert_fn(x): new_names = jax.tree_util.tree_map( insert_fn_leaf, x.names, is_leaf=_is_logical_spec ) return x.replace(names=new_names) def remove_fn_leaf(names): if names is None: return names names = list(names) if names[axis_pos] != axis_name: raise ValueError( f'Expected axis {axis_name} at position {axis_pos} in ' f'axis metadata {names}.' ) names.pop(axis_pos) return tuple(names) def remove_fn(x): new_names = jax.tree_util.tree_map( remove_fn_leaf, x.names, is_leaf=_is_logical_spec ) return x.replace(names=new_names) return nn.transforms.map_variables( fn, axis_col, mutable=_is_mutable(axis_col), trans_in_fn=lambda tree: _tree_map_axes(remove_fn, tree), trans_out_fn=lambda tree: _tree_map_axes(insert_fn, tree), ) CollectionFilter = Filter The provided code snippet includes necessary dependencies for implementing the `vmap_with_axes` function. Write a Python function `def vmap_with_axes( target: 'flax.linen.transforms.Target', variable_axes: Mapping[ CollectionFilter, InOutAxis ], split_rngs: Mapping[PRNGSequenceFilter, bool] = {}, in_axes=0, out_axes=0, axis_size: Optional[int] = None, axis_name: Optional[str] = None, partitioning_axis_names: Mapping[Any, str] = {}, spmd_axis_name: Optional[str] = None, methods=None, ) -> 'flax.linen.transforms.Target'` to solve the following problem: Wrapped version of nn.vmap that handles logical axis metadata. 
Here is the function: def vmap_with_axes( target: 'flax.linen.transforms.Target', variable_axes: Mapping[ CollectionFilter, InOutAxis ], split_rngs: Mapping[PRNGSequenceFilter, bool] = {}, in_axes=0, out_axes=0, axis_size: Optional[int] = None, axis_name: Optional[str] = None, partitioning_axis_names: Mapping[Any, str] = {}, spmd_axis_name: Optional[str] = None, methods=None, ) -> 'flax.linen.transforms.Target': """Wrapped version of nn.vmap that handles logical axis metadata.""" # tell normal vmap to broadcast axis metadata. variable_axes = dict(variable_axes) # shallow copy for name in partitioning_axis_names: variable_axes[f'{name}_axes'] = None # perform usual lifted vmap vmapped = flax.linen.transforms.lift_transform( flax.core.lift.vmap, target, variable_axes=variable_axes, split_rngs=split_rngs, in_axes=in_axes, out_axes=out_axes, axis_size=axis_size, axis_name=axis_name, spmd_axis_name=spmd_axis_name, methods=methods, ) for collection_name, axis in variable_axes.items(): if collection_name in partitioning_axis_names: vmapped = _add_axis_to_metadata( # pylint: disable=protected-access vmapped, axis_pos=axis, axis_name=partitioning_axis_names[collection_name], axis_col=f'{collection_name}_axes', ) return vmapped
Wrapped version of nn.vmap that handles logical axis metadata.
22,600
import collections import contextlib import dataclasses import enum import functools import threading from typing import Any, Callable, List, Optional, Sequence, Tuple, Union import jax from jax import lax from jax.experimental import maps from flax import struct from flax.core import meta from flax.typing import ( Array, LogicalNames, LogicalRules, ArrayPytree, # pylint: disable=invalid-name LogicalPartitionSpec, # pylint: disable=unused-import LogicalPartitionSpecPytree, # pylint: disable=invalid-name ) _axis_rules = _AxisRules() LogicalRules = Sequence[Tuple[str, Union[str, Tuple[str], None]]] The provided code snippet includes necessary dependencies for implementing the `set_logical_axis_rules` function. Write a Python function `def set_logical_axis_rules(rules: LogicalRules)` to solve the following problem: Sets the global logical axis to mesh axis binding. Here is the function: def set_logical_axis_rules(rules: LogicalRules): """Sets the global logical axis to mesh axis binding.""" _axis_rules.rules = rules
Sets the global logical axis to mesh axis binding.
22,601
import collections import contextlib import dataclasses import enum import functools import threading from typing import Any, Callable, List, Optional, Sequence, Tuple, Union import jax from jax import lax from jax.experimental import maps from flax import struct from flax.core import meta from flax.typing import ( Array, LogicalNames, LogicalRules, ArrayPytree, # pylint: disable=invalid-name LogicalPartitionSpec, # pylint: disable=unused-import LogicalPartitionSpecPytree, # pylint: disable=invalid-name ) _axis_rules = _AxisRules() LogicalRules = Sequence[Tuple[str, Union[str, Tuple[str], None]]] The provided code snippet includes necessary dependencies for implementing the `get_logical_axis_rules` function. Write a Python function `def get_logical_axis_rules() -> LogicalRules` to solve the following problem: Returns the global logical axis to mesh axis binding. Here is the function: def get_logical_axis_rules() -> LogicalRules: """Returns the global logical axis to mesh axis binding.""" return _axis_rules.rules
Returns the global logical axis to mesh axis binding.
22,602
import collections import contextlib import dataclasses import enum import functools import threading from typing import Any, Callable, List, Optional, Sequence, Tuple, Union import jax from jax import lax from jax.experimental import maps from flax import struct from flax.core import meta from flax.typing import ( Array, LogicalNames, LogicalRules, ArrayPytree, # pylint: disable=invalid-name LogicalPartitionSpec, # pylint: disable=unused-import LogicalPartitionSpecPytree, # pylint: disable=invalid-name ) _axis_rules = _AxisRules() LogicalRules = Sequence[Tuple[str, Union[str, Tuple[str], None]]] The provided code snippet includes necessary dependencies for implementing the `logical_axis_rules` function. Write a Python function `def logical_axis_rules(rules: LogicalRules)` to solve the following problem: Context manager for setting the logical to mesh axis bindings. Here is the function: def logical_axis_rules(rules: LogicalRules): """Context manager for setting the logical to mesh axis bindings.""" old_rules = _axis_rules.rules try: _axis_rules.rules = rules yield finally: _axis_rules.rules = old_rules
Context manager for setting the logical to mesh axis bindings.
22,603
import collections import contextlib import dataclasses import enum import functools import threading from typing import Any, Callable, List, Optional, Sequence, Tuple, Union import jax from jax import lax from jax.experimental import maps from flax import struct from flax.core import meta from flax.typing import ( Array, LogicalNames, LogicalRules, ArrayPytree, # pylint: disable=invalid-name LogicalPartitionSpec, # pylint: disable=unused-import LogicalPartitionSpecPytree, # pylint: disable=invalid-name ) def logical_to_mesh(tree: Any, rules: Optional[LogicalRules] = None) -> Any: """Applies logical_to_mesh_axes to pytrees of logical PartitionSpecs.""" return jax.tree_util.tree_map( lambda x: logical_to_mesh_axes(x, rules), tree, is_leaf=lambda x: isinstance(x, jax.sharding.PartitionSpec), ) LogicalRules = Sequence[Tuple[str, Union[str, Tuple[str], None]]] The provided code snippet includes necessary dependencies for implementing the `logical_to_mesh_sharding` function. Write a Python function `def logical_to_mesh_sharding( tree: Any, mesh: jax.sharding.Mesh, rules: Optional[LogicalRules] = None, ) -> Any` to solve the following problem: Convert pytrees of logical PartitionSpecs to shardings. Here is the function: def logical_to_mesh_sharding( tree: Any, mesh: jax.sharding.Mesh, rules: Optional[LogicalRules] = None, ) -> Any: """Convert pytrees of logical PartitionSpecs to shardings.""" return jax.tree_util.tree_map( lambda x: jax.sharding.NamedSharding(mesh, x), logical_to_mesh(tree, rules), is_leaf=lambda x: isinstance(x, jax.sharding.PartitionSpec), )
Convert pytrees of logical PartitionSpecs to shardings.
22,604
import collections import contextlib import dataclasses import enum import functools import threading from typing import Any, Callable, List, Optional, Sequence, Tuple, Union import jax from jax import lax from jax.experimental import maps from flax import struct from flax.core import meta from flax.typing import ( Array, LogicalNames, LogicalRules, ArrayPytree, # pylint: disable=invalid-name LogicalPartitionSpec, # pylint: disable=unused-import LogicalPartitionSpecPytree, # pylint: disable=invalid-name ) _axis_rules = _AxisRules() class RulesFallback(enum.Enum): """How a sharding constraint should behave when no matching rule is found.""" AXIS_IS_UNSHARDED = 'axis_is_unsharded' RAISE_ERROR = 'raise_error' NO_CONSTRAINT = 'no_constraint' def _with_sharding_constraint_one_fallback( axis_resources: LogicalPartitionSpec, x: Array, fallback: RulesFallback = RulesFallback.AXIS_IS_UNSHARDED, rules: Optional[LogicalRules] = None, mesh: Optional[jax.sharding.Mesh] = None, ): """Either imposes a sharding constraint or applies fallback.""" mesh_axes = _logical_to_mesh_axes(axis_resources, rules) if mesh_axes is None: return _with_sharding_constraint(x, None, mesh=mesh) if fallback == RulesFallback.AXIS_IS_UNSHARDED: mesh_axes = [None if x is _unassigned_axis else x for x in mesh_axes] else: if any(x is _unassigned_axis for x in mesh_axes): if fallback == RulesFallback.RAISE_ERROR: raise ValueError(f'Axis names {axis_resources} did not match a rule') else: return x return _with_sharding_constraint( x, jax.sharding.PartitionSpec(*mesh_axes), mesh=mesh ) def _is_logical_spec(x): return x is None or ( isinstance(x, tuple) and all(isinstance(e, str) or e is None for e in x) ) LogicalRules = Sequence[Tuple[str, Union[str, Tuple[str], None]]] ArrayPytree = Any The provided code snippet includes necessary dependencies for implementing the `with_logical_constraint` function. 
Write a Python function `def with_logical_constraint( x: ArrayPytree, logical_axis_resources: LogicalPartitionSpecPytree, rules: Optional[LogicalRules] = None, mesh: Optional[jax.sharding.Mesh] = None, fallback: RulesFallback = RulesFallback.AXIS_IS_UNSHARDED, )` to solve the following problem: Version of jit's with_sharding_constraint that uses logical axis names. Here is the function: def with_logical_constraint( x: ArrayPytree, logical_axis_resources: LogicalPartitionSpecPytree, rules: Optional[LogicalRules] = None, mesh: Optional[jax.sharding.Mesh] = None, fallback: RulesFallback = RulesFallback.AXIS_IS_UNSHARDED, ): """Version of jit's with_sharding_constraint that uses logical axis names.""" # If no axis binding is set, this is a no-op. if rules is None: rules = _axis_rules.rules if not rules or logical_axis_resources is None: return x # Translate logical names to mesh assignments. return jax.tree_util.tree_map( functools.partial( _with_sharding_constraint_one_fallback, fallback=fallback, rules=rules, mesh=mesh, ), logical_axis_resources, x, is_leaf=_is_logical_spec, )
Version of jit's with_sharding_constraint that uses logical axis names.
22,605
import collections import contextlib import dataclasses import enum import functools import threading from typing import Any, Callable, List, Optional, Sequence, Tuple, Union import jax from jax import lax from jax.experimental import maps from flax import struct from flax.core import meta from flax.typing import ( Array, LogicalNames, LogicalRules, ArrayPytree, # pylint: disable=invalid-name LogicalPartitionSpec, # pylint: disable=unused-import LogicalPartitionSpecPytree, # pylint: disable=invalid-name ) class LogicallyPartitioned(meta.Partitioned): rules: Optional[LogicalRules] = struct.field(default=None, pytree_node=False) def unbox(self, apply_constraint=True) -> Any: """Returns the wrapped value with the partitioning constraint applied.""" if apply_constraint and (_global_mesh_defined() or self.mesh is not None): return with_logical_constraint( self.value, self.get_partition_spec(), rules=self.rules, mesh=self.mesh, ) else: return self.value LogicalNames = Tuple[Union[str, None], ...] LogicalRules = Sequence[Tuple[str, Union[str, Tuple[str], None]]] The provided code snippet includes necessary dependencies for implementing the `with_logical_partitioning` function. Write a Python function `def with_logical_partitioning( fn: Callable[..., Any], names: LogicalNames, mesh: Optional[jax.sharding.Mesh] = None, rules: Optional[LogicalRules] = None, ) -> Callable[..., LogicallyPartitioned]` to solve the following problem: Wraps a function's return value with LogicallyPartitioned. Example:: >>> import flax.linen as nn >>> kernel_init = nn.with_logical_partitioning( ... nn.initializers.lecun_normal(), (None, "data")) >>> partitioned_dense = nn.Dense(features=3, kernel_init=kernel_init) Args: fn: The function to be wrapped. Typically this is an initializer. names: The logical axis passed to ``LogicallyPartitioned``. mesh: The mesh to use for the partitioning. If None, the global mesh resource is used if available. rules: Optional logical to mesh rules use. 
If None, the global rules are used if available. Returns: A function wrapping ``fn`` that will return an instance of ``LogicallyPartitioned``. Here is the function: def with_logical_partitioning( fn: Callable[..., Any], names: LogicalNames, mesh: Optional[jax.sharding.Mesh] = None, rules: Optional[LogicalRules] = None, ) -> Callable[..., LogicallyPartitioned]: """Wraps a function's return value with LogicallyPartitioned. Example:: >>> import flax.linen as nn >>> kernel_init = nn.with_logical_partitioning( ... nn.initializers.lecun_normal(), (None, "data")) >>> partitioned_dense = nn.Dense(features=3, kernel_init=kernel_init) Args: fn: The function to be wrapped. Typically this is an initializer. names: The logical axis passed to ``LogicallyPartitioned``. mesh: The mesh to use for the partitioning. If None, the global mesh resource is used if available. rules: Optional logical to mesh rules use. If None, the global rules are used if available. Returns: A function wrapping ``fn`` that will return an instance of ``LogicallyPartitioned``. """ @functools.wraps(fn) def wrapper(*args, **kwargs): return LogicallyPartitioned( fn(*args, **kwargs), names, mesh=mesh, rules=rules ) # pytype: disable=wrong-keyword-args return wrapper
Wraps a function's return value with LogicallyPartitioned. Example:: >>> import flax.linen as nn >>> kernel_init = nn.with_logical_partitioning( ... nn.initializers.lecun_normal(), (None, "data")) >>> partitioned_dense = nn.Dense(features=3, kernel_init=kernel_init) Args: fn: The function to be wrapped. Typically this is an initializer. names: The logical axis passed to ``LogicallyPartitioned``. mesh: The mesh to use for the partitioning. If None, the global mesh resource is used if available. rules: Optional logical to mesh rules use. If None, the global rules are used if available. Returns: A function wrapping ``fn`` that will return an instance of ``LogicallyPartitioned``.
22,606
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax class Module(ModuleBase): """Base class for all neural network modules. Layers and models should subclass this class. All Flax Modules are Python 3.7 `dataclasses <https://docs.python.org/3/library/dataclasses.html>`_. Since dataclasses take over ``__init__``, you should instead override :meth:`setup`, which is automatically called to initialize the module. Modules can contain submodules, and in this way can be nested in a tree structure. Submodels can be assigned as regular attributes inside the :meth:`setup` method. You can define arbitrary "forward pass" methods on your Module subclass. While no methods are special-cased, ``__call__`` is a popular choice because it allows you to use module instances as if they are functions:: >>> from flax import linen as nn >>> from typing import Tuple >>> class Module(nn.Module): ... features: Tuple[int, ...] = (16, 4) ... def setup(self): ... self.dense1 = nn.Dense(self.features[0]) ... self.dense2 = nn.Dense(self.features[1]) ... def __call__(self, x): ... return self.dense2(nn.relu(self.dense1(x))) Optionally, for more concise module implementations where submodules definitions are co-located with their usage, you can use the :meth:`compact` wrapper. 
""" if typing.TYPE_CHECKING: name: Optional[str] = module_field(kw_only=True, default=None) parent: Union['Module', _Sentinel, None] = module_field( kw_only=True, default=None ) def __init__(self, *args, **kwargs): # this stub makes sure pytype accepts constructor arguments. pass def __call__(self, *args, **kwargs) -> Any: # this stub allows pytype to accept Modules as Callables. pass def __init_subclass__(cls, kw_only: bool = False, **kwargs: Any) -> None: """Automatically initializes all subclasses as custom dataclasses.""" super().__init_subclass__(**kwargs) # All Flax Modules are dataclasses. We force this convention since # it encourages the stateless behavior needed to clone module instances for # functional transformation. Instead of using a python metaclass, we # automatically transform Modules into dataclasses at subclass creation # time, and we set the last dataclass arguments to `parent` and `name`. cls._customized_dataclass_transform(kw_only) # We wrap user-defined methods including setup and __call__ to enforce # a number of different checks and to provide clear error messages. cls._verify_single_or_no_compact() cls._find_compact_name_scope_methods() cls._wrap_module_attributes() # Set empty class defaults. cls._state = _uninitialized_module_internal_state # type: ignore[attr-defined] cls.scope: Optional[Scope] = None # type: ignore # Handles weak referencing of parent Modules to prevent reference cycles. cls._parent_ref = None # type: ignore[attr-defined] cls.parent = ParentDescriptor() # type: ignore[assignment] def _customized_dataclass_transform(cls, kw_only: bool): """Transforms `cls` into a dataclass, with custom additional behavior. 1. Inject `parent` and `name` fields. (If they are already present, then check that they have the expected types.) 2. Set compare, hash, and repr to False for non-init fields. 3. Generate a hash function (if not provided by cls). """ # Check reserved attributes have expected type annotations. 
annotations = dict(cls.__dict__.get('__annotations__', {})) if annotations.get('parent', _ParentType) != _ParentType: raise errors.ReservedModuleAttributeError(annotations) if annotations.get('name', str) not in ('str', str, Optional[str]): raise errors.ReservedModuleAttributeError(annotations) # any non-init field will only be set in setup # During __hash__ and __eq__ the field is not set yet # so it should not be used in compare, hash or repr. for field in annotations: field_meta = getattr(cls, field, None) if isinstance(field_meta, dataclasses.Field) and not field_meta.init: field_meta.compare = False field_meta.hash = False field_meta.repr = False extra_fields = [ ( 'parent', _ParentType, kw_only_dataclasses.field( repr=False, default=_unspecified_parent, kw_only=True ), ), ( 'name', Optional[str], kw_only_dataclasses.field(default=None, kw_only=True), ), ] if kw_only: if tuple(sys.version_info)[:3] >= (3, 10, 0): for ( name, annotation, # pytype: disable=invalid-annotation default, ) in extra_fields: setattr(cls, name, default) cls.__annotations__[name] = annotation dataclasses.dataclass( # type: ignore[call-overload] unsafe_hash='__hash__' not in cls.__dict__, repr=False, kw_only=True, )(cls) else: raise TypeError('`kw_only` is not available before Py 3.10.') else: # Now apply dataclass transform (which operates in-place). # Do generate a hash function only if not provided by the class. 
kw_only_dataclasses.dataclass( cls, unsafe_hash='__hash__' not in cls.__dict__, repr=False, extra_fields=extra_fields, ) # pytype: disable=wrong-keyword-args cls.__hash__ = _wrap_hash(cls.__hash__) # type: ignore[method-assign] def _verify_single_or_no_compact(cls): """Statically verifies that at most a single method is labelled compact.""" methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)] n_compact_fns = len( [ method_name for method_name in methods if hasattr(getattr(cls, method_name), 'compact') ] ) if n_compact_fns > 1: raise errors.MultipleMethodsCompactError() def _find_compact_name_scope_methods(cls): """Finds all compact_name_scope methods in the class.""" methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)] compact_name_scope_fns = tuple( method_name for method_name in methods if hasattr(getattr(cls, method_name), 'compact_name_scope') ) cls._compact_name_scope_methods = compact_name_scope_fns def _wrap_module_attributes(cls): """Wraps user-defined non-inherited methods and descriptors with state management functions. """ # wrap methods method_exclusions = [f.name for f in dataclasses.fields(cls)] + [ '__eq__', '__repr__', '__init__', '__hash__', '__post_init__', ] for key in _get_local_method_names(cls, exclude=method_exclusions): method = getattr(cls, key) if hasattr(method, 'nowrap'): continue setattr(cls, key, wrap_method_once(method)) # wrap descriptors descriptor_exclusions = [f.name for f in dataclasses.fields(cls)] + [ 'parent', '__dict__', ] for key in _get_local_descriptor_names(cls, descriptor_exclusions): # don't use getattr here, since it will call the descriptor descriptor = cls.__dict__[key] if hasattr(descriptor, 'nowrap'): continue setattr(cls, key, wrap_descriptor_once(descriptor)) return cls def _call_wrapped_method(self, fun, args, kwargs): """Calls a wrapped method. This function is responsible for setting up the thread local state correctly before calling the method and cleaning up afterwards. 
This includes storing intermediates, setup of the compact scope, and making sure setup is called before any other method. Args: fun: The wrapped method. args: Named arguments passed to ``fun``. kwargs: Keyword arguments passed to ``fun``. Returns: The results of calling ``fun``. """ is_compact_method = hasattr(fun, 'compact') fun_name = _get_fn_name(fun) is_setup_method = fun_name == 'setup' add_call_info = not is_setup_method and len(_context.call_info_stack) > 0 # We lazily call setup() only when needed. if is_setup_method: if self.scope is None: raise errors.CallSetupUnboundModuleError() is_recurrent = self._state.in_setup self._state.in_setup = True else: self._try_setup() if is_compact_method: if self.scope is None: raise errors.CallCompactUnboundModuleError() is_recurrent = self._state.in_compact_method self._state.in_compact_method = True _context.module_stack.append(self) try: # get call info if add_call_info: assert self.scope is not None call_index = _context.call_info_stack[-1].get_call_index() if _global_interceptor_stack: run_fun = functools.partial(run_interceptors, fun) else: run_fun = fun # call method if _use_named_call: with jax.named_scope(_derive_profiling_name(self, fun)): y = run_fun(self, *args, **kwargs) else: y = run_fun(self, *args, **kwargs) if _context.capture_stack: filter_fn = _context.capture_stack[-1] if filter_fn and filter_fn(self, fun_name): self.sow('intermediates', fun_name, y) if add_call_info: _args, _kwargs, _y = flax.linen.summary._represent_tree( (args, kwargs, y) ) _context.call_info_stack[-1].calls.append( _CallInfo( call_index, self.path, self.clone(), self.scope.rngs, self.scope.mutable, fun.__name__, _args, _kwargs, _y, ) ) return y finally: _context.module_stack.pop() if is_compact_method: object.__setattr__(self, 'scope', self.scope.rewound()) # setup or compact calls can be recurrent for example due to super calls # resetting the state would cause is compact/setup method # to be set to False prematurely. 
if (is_compact_method or is_setup_method) and not is_recurrent: self._state.reset() def __setattr__(self, name: str, val: Any): """Sets an attribute on this Module. We overload setattr solely to support pythonic naming via assignment of submodules in the special :meth:`setup` function:: self.submodule_name = MyModule(...) We also support lists and other general pytrees, e.g.:: self.submodules = [MyModule0(..), MyModule1(..), ...] Args: name: Attribute to set. val: Value of the attribute. """ fields = self.__dataclass_fields__ # pytype: disable=attribute-error is_dataclass_attr = name in fields and fields[name].init if not self._state.in_setup: if not self._state.is_initialized: # Setting attributes before end of Module.__post_init__() object.__setattr__(self, name, val) return else: # We're past all initialization and setup logic: # Raises a TypeError just like frozen python dataclasses. raise errors.SetAttributeFrozenModuleError( self.__class__.__name__, name, val ) # We're inside the setup() method: if is_dataclass_attr: # These names are specified as dataclass fields. They should not be # initialized within the setup() method, but can be modified freely # before it. raise errors.SetAttributeInModuleSetupError() # Values (that may be variables or submodules) are being defined and # attached in setup(), we run some extra logic in that case. self._register_submodules(name, val) def __getattr__(self, name: str) -> Any: """Call setup() before getting any setup-defined attributes.""" # We don't want to return anything for python copy / pickle methods. if name in _UNDEFINED_COPY_PICKLE_METHODS: raise AttributeError() self._try_setup() if name in self.__dict__: return self.__dict__[name] else: msg = f'"{self.__class__.__name__}" object has no attribute "{name}".' if self.scope is None: msg += ( f' If "{name}" is defined in \'.setup()\', remember these fields ' "are only accessible from inside 'init' or 'apply'." 
) raise AttributeError(msg) def __dir__(self) -> List[str]: """Call setup() before listing attributes.""" self._try_setup() return object.__dir__(self) # type: ignore def __post_init__(self) -> None: # DO NOT REMOVE - Marker for internal logging. # In dataclasses, __init__ is overridden to process dataclass arguments, # and __post_init__ is called immediately afterwards. Here, depending on the # type of `parent` passed to initialize the Module, we either defer # initialization, attach this Module as a submodule of a parent, or bind # this Module at the top-level to variables and rngs. object.__setattr__(self, '_id', uuid()) object.__setattr__(self, '_state', _ModuleInternalState()) # Typically we set the parent based on the dynamic module context. if self.parent is _unspecified_parent: # pytype: disable=attribute-error object.__setattr__(self, 'parent', _context.module_stack[-1]) # Initialization is deferred for top level Modules or any other "orphan" # Modules until attachment by __setattr__ i.e. MyModule(..., parent=None) if self.parent is None: return # Register submodule on parent Module. if isinstance(self.parent, Module): # When initializing an unnamed Module inside setup() # initialization is deferred until attachment by __setattr__ # i.e. self.mymodule = MyModule(...) self.name: Optional[str] if ( self.parent._state.in_setup and self.name is None ): # pytype: disable=attribute-error return if not self.parent._initialization_allowed: raise errors.AssignSubModuleError(self.__class__.__name__) # Autonaming of submodules. if self.name is None: # pytype: disable=attribute-error prefix = f'{self.__class__.__name__}' cursor = self.parent._state.autoname_cursor.get(prefix, 0) self.name = f'{prefix}_{cursor}' self.parent._state.autoname_cursor[prefix] = cursor + 1 # Allow scope aliasing under transforms for submodules defined in setup. 
reuse_scopes = ( self.parent._state.in_setup and self.parent._state.setup_called == SetupState.TRANSFORMED ) # Perform name-collision check. if self.parent._name_taken(self.name, reuse_scopes=reuse_scopes): parent_class = self.parent.__class__.__name__ raise errors.NameInUseError('submodule', self.name, parent_class) # Finalize attachment to parent and scope initialization. self.parent._state.children[self.name] = self assert self.parent.scope is not None object.__setattr__( self, 'scope', self.parent.scope.push(self.name, reuse=reuse_scopes) ) # Top-level invocation with a functional Scope. elif isinstance(self.parent, Scope): object.__setattr__(self, 'scope', self.parent) else: raise ValueError('parent must be None, Module or Scope') # eagerly bind submodules if scope is available if self.scope is not None: for field in dataclasses.fields(self): if field.name not in ('parent', 'name') and field.init: self._register_submodules(field.name, getattr(self, field.name)) self._state.is_initialized = True def __repr__(self) -> str: return _module_repr(self) def setup(self) -> None: """Initializes a Module lazily (similar to a lazy ``__init__``). ``setup`` is called once lazily on a module instance when a module is bound, immediately before any other methods like ``__call__`` are invoked, or before a ``setup``-defined attribute on ``self`` is accessed. This can happen in three cases: 1. Immediately when invoking :meth:`apply`, :meth:`init` or :meth:`init_and_output`. 2. Once the module is given a name by being assigned to an attribute of another module inside the other module's ``setup`` method (see :meth:`__setattr__`):: >>> class MyModule(nn.Module): ... def setup(self): ... submodule = nn.Conv(...) ... # Accessing `submodule` attributes does not yet work here. ... # The following line invokes `self.__setattr__`, which gives ... # `submodule` the name "conv1". ... self.conv1 = submodule ... # Accessing `submodule` attributes or methods is now safe and ... 
      # either causes setup() to be called once.

    3. Once a module is constructed inside a method wrapped with
       :meth:`compact`, immediately before another method is called or
       ``setup`` defined attribute is accessed.
    """
    pass

  def _register_submodules(self, name, val):
    """Registers a submodule."""
    assert self.scope, 'Trying to register submodules on unbound scope.'
    root = self.scope.root
    # Per-root cache of adopted submodule clones. Weak values mean cached
    # clones are freed once nothing else references them.
    cache = _caches.get(root, weakref.WeakValueDictionary())
    _caches[root] = cache
    queue = []
    preserve_adopted_names = config.flax_preserve_adopted_names
    if hasattr(type(self), 'preserve_adopted_names'):
      preserve_adopted_names = type(self).preserve_adopted_names

    def adopt_attr_modules(cache, queue, suffix, subvalue):
      if isinstance(subvalue, Module):
        current_name = subvalue.name
        adopted_name = None
        if subvalue.parent is None:
          # Preserve sharing-by-reference relationships during adoption
          # via cache keyed on unique instance ids.
          key = subvalue._id
          # Module was passed from outside. It needs to be cloned.
          # Outside modules are named by attachment, not an outer name,
          # UNLESS we're using new adopted name policy, in which case an existing
          # name will be used, as is often supplied by config systems.
          if preserve_adopted_names:
            adopted_name = object.__getattribute__(subvalue, 'name')
          if key in cache:
            subvalue = cache[key]
          else:
            subvalue = subvalue.clone(name=None)
            cache[key] = subvalue
        if subvalue.name is None:
          object.__setattr__(subvalue, 'parent', self)
          if adopted_name is None:
            adopted_name = (
              f'{name}{suffix}'
              if not isinstance(subvalue, CompactNameScope)
              else current_name
            )
          object.__setattr__(subvalue, 'name', adopted_name)
          queue.append(subvalue)
      return subvalue

    val = _freeze_attr(
      _map_over_modules_in_tree(
        functools.partial(adopt_attr_modules, cache, queue), val
      )
    )
    object.__setattr__(self, name, val)
    # Run __post_init__ on newly adopted submodules now that parent and name
    # have been assigned above.
    for x in queue:
      x.__post_init__()

  def _try_setup(self, shallow: bool = False) -> None:
    """Tries to setup module if scope is available and setup has not been called yet."""
    if (
      self.scope
      and not self._state.in_setup
      and self._state.setup_called != SetupState.DONE
    ):
      try:
        self._state.in_setup = True
        # A shallow setup will only register attribute submodules but it does
        # not call the user's setup. This avoids running before a
        # transformation.
        for field in dataclasses.fields(self):
          if field.name not in ('parent', 'name') and field.init:
            self._register_submodules(field.name, getattr(self, field.name))
        if not shallow:
          self.setup()
          # create NonTransparent Modules
          self._compact_name_scope_modules = {
            name: CompactNameScope(
              getattr(type(self), name).inner_fun, lambda: self, name=name
            )
            for name in self._compact_name_scope_methods
          }
        # We run static checks abstractly once for setup before any transforms
        # to detect name collisions and other python errors.
        elif self._state.setup_called == SetupState.NEW:
          self._validate_setup()
      finally:
        self._state.in_setup = False
        if not shallow:
          self._state.setup_called = SetupState.DONE

  def _validate_setup(self) -> None:
    """Abstractly evaluates setup only to run static checks."""

    def run_setup_only(x):
      wrapped_id = wrap_method_once(lambda m, x: x)
      with TestScope({}, rngs={}, mutable=True).temporary() as root:
        return wrapped_id(self.clone(parent=root), x)

    # eval_shape traces without real compute, so setup-time python errors and
    # name collisions surface cheaply.
    _ = jax.eval_shape(run_setup_only, 0)

  def _name_taken(
    self,
    name: str,
    reuse_scopes: bool = False,
    collection: Optional[str] = None,
  ) -> bool:
    assert self.scope is not None
    # Scope reuse (self-transforms) deliberately bypasses the collision check.
    if reuse_scopes:
      return False
    return self.scope.name_reserved(name, collection)

  # NOTE(review): call sites use `self._initialization_allowed` without
  # parentheses, so upstream this is presumably a @property; decorator lines
  # appear to have been stripped by extraction -- verify before relying on
  # this text.
  def _initialization_allowed(self):
    return (
      not self._state.is_initialized  # allow eager attachment in post-init
      or self._state.in_setup
      or self._state.in_compact_method
    )

  # NOTE(review): presumably a @property upstream as well -- verify.
  def path(self):
    if self.scope is None:
      raise ValueError("Can't access module paths on unbound modules.")
    return self.scope.path

  def clone(
    self: M,
    *,
    parent: Optional[Union[Scope, 'Module', _Sentinel]] = None,
    _deep_clone: Union[bool, weakref.WeakValueDictionary] = False,
    _reset_names: bool = False,
    **updates,
  ) -> M:
    """Creates a clone of this Module, with optionally updated arguments.

    NOTE: end users are encouraged to use the ``copy`` method. ``clone`` is
    used primarily for internal routines, and ``copy`` offers simpler arguments
    and better defaults.

    Args:
      parent: The parent of the clone. The clone will have no parent if no
        explicit parent is specified.
      _deep_clone: A boolean or a weak value dictionary to control deep cloning
        of submodules. If True, submodules will be cloned recursively. If a
        weak value dictionary is passed, it will be used to cache cloned
        submodules. This flag is used by init/apply/bind to avoid scope
        leakage.
      _reset_names: If True, ``name=None`` is also passed to submodules when
        cloning. Resetting names in submodules is necessary when calling
        ``.unbind``.
      **updates: Attribute updates.
    Returns:
      A clone of the this Module with the updated attributes and parent.
    """
    attrs = {
      f.name: getattr(self, f.name) for f in dataclasses.fields(self) if f.init
    }
    attrs.update(parent=parent, **updates)

    # Here we implement deep cloning of submodules, this is necessary to avoid scope leakage
    # from external submodules into init/apply/bind while preserving sharing-by-reference
    # relationships between submodules.
    if _deep_clone != False:
      # We use a weak value dictionary to cache cloned submodules. When a shared
      # submodule is cloned, its only cloned once else its fetched from the cache.
      cache = (
        weakref.WeakValueDictionary()
        if isinstance(_deep_clone, bool)
        else _deep_clone
      )

      def clone_fn(m: Module) -> Module:
        if hasattr(m, '_id'):
          key = m._id
          if key in cache:
            return cache[key]
          else:
            if _reset_names:
              clone = m.clone(
                _deep_clone=cache, _reset_names=_reset_names, name=None
              )
            else:
              clone = m.clone(_deep_clone=cache)
            cache[key] = clone
            return clone
        else:
          # If the module doesn't have an _id attribute it could be a mock object
          # so we return it as is.
          return m

      # _map_submodules will map over all submodules inside attrs
      # value here can be any pytree, non-module values are ignored
      for field_name, value in attrs.items():
        if field_name == 'parent':
          continue
        attrs[field_name] = _map_submodules(clone_fn, value)

    module = self.__class__(**attrs)
    return module

  def copy(
    self: M,
    *,
    parent: Optional[Union[Scope, 'Module', _Sentinel]] = _unspecified_parent,
    name: Optional[str] = None,
    **updates,
  ) -> M:
    """Creates a copy of this Module, with optionally updated arguments.

    Args:
      parent: The parent of the copy. By default the current module is taken
        as parent if not explicitly specified.
      name: A new name for the copied Module, by default a new automatic name
        will be given.
      **updates: Attribute updates.

    Returns:
      A copy of the this Module with the updated name, parent, and attributes.
    """
    # copy is the user-facing variant of clone: always deep-clones, never
    # resets submodule names.
    return self.clone(
      parent=parent, name=name, _deep_clone=True, _reset_names=False, **updates
    )

  # NOTE(review): the following stub signatures look like typing overloads;
  # the @overload decorators appear to have been stripped by extraction --
  # verify against upstream before editing.
  def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
  ) -> Variable[T]:
    ...

  def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: Literal[True],
    **init_kwargs,
  ) -> Variable[T]:
    ...

  def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: Literal[False],
    **init_kwargs,
  ) -> Variable[meta.AxisMetadata[T]]:
    ...

  def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: bool = True,
    **init_kwargs,
  ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
    ...

  def variable(
    self,
    col: str,
    name: str,
    init_fn: Optional[Callable[..., T]] = None,
    *init_args,
    unbox: bool = True,
    **init_kwargs,
  ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]:
    """Declares and returns a variable in this Module.

    See :mod:`flax.core.variables` for more information. See also :meth:`param`
    for a shorthand way to define read-only variables in the "params"
    collection.

    Contrary to :meth:`param`, all arguments passing using ``init_fn`` should
    be passed on explicitly::

      >>> class Foo(nn.Module):
      ...   @nn.compact
      ...   def __call__(self, x):
      ...     x = nn.Dense(4)(x)
      ...     key = self.make_rng('stats')
      ...     mean = self.variable('stats', 'mean', nn.initializers.lecun_normal(), key, x.shape)
      ...     ...
      ...     return x * mean.value
      >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3)))
      >>> jax.tree_util.tree_map(jnp.shape, variables)
      {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}}, 'stats': {'mean': (2, 4)}}

    In the example above, the function ``lecun_normal`` expects two arguments:
    ``key`` and ``shape``, and both have to be passed on. The PRNG for ``stats``
    has to be provided explicitly when calling :meth:`init` and :meth:`apply`.

    Args:
      col: The variable collection name.
      name: The variable name.
      init_fn: The function that will be called to compute the initial value of
        this variable. This function will only be called the first time this
        variable is used in this module. If None, the variable must already be
        initialized otherwise an error is raised.
      *init_args: The positional arguments to pass to init_fn.
      unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
        value, see ``flax.nn.meta.unbox`` (default: True).
      **init_kwargs: The key-word arguments to pass to init_fn

    Returns:
      A :class:`flax.core.variables.Variable` that can be read or set via
      ".value" attribute. Throws an error if the variable exists already.
    """
    # Variables may only be created during setup() or inside a @compact
    # method (see _initialization_allowed).
    if not self._initialization_allowed:
      raise ValueError(
        'Variables must be initialized in `setup()` or in a method '
        'wrapped in `@compact`'
      )
    if self._name_taken(name, collection=col):
      raise errors.NameInUseError('variable', name, self.__class__.__name__)
    assert self.scope is not None
    v = self.scope.variable(
      col, name, init_fn, *init_args, unbox=unbox, **init_kwargs
    )
    # Record which collection this name belongs to, for later collision checks.
    self._state.children[name] = col
    return v

  # NOTE(review): typing overload stubs; @overload decorators appear to have
  # been stripped by extraction -- verify against upstream before editing.
  def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
  ) -> T:
    ...

  def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: Literal[True],
    **init_kwargs,
  ) -> T:
    ...

  def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: Literal[False],
    **init_kwargs,
  ) -> meta.AxisMetadata[T]:
    ...

  def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: bool,
    **init_kwargs,
  ) -> Union[T, meta.AxisMetadata[T]]:
    ...

  def param(
    self,
    name: str,
    init_fn: Callable[..., T],
    *init_args,
    unbox: bool = True,
    **init_kwargs,
  ) -> Union[T, meta.AxisMetadata[T]]:
    """Declares and returns a parameter in this Module.

    Parameters are read-only variables in the collection named "params". See
    :mod:`flax.core.variables` for more details on variables.

    The first argument of ``init_fn`` is assumed to be a PRNG key, which is
    provided automatically and does not have to be passed using ``init_args``
    or ``init_kwargs``::

      >>> class Foo(nn.Module):
      ...   @nn.compact
      ...   def __call__(self, x):
      ...     x = nn.Dense(4)(x)
      ...     mean = self.param('mean', nn.initializers.lecun_normal(), x.shape)
      ...     ...
      ...     return x * mean
      >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3)))
      >>> jax.tree_util.tree_map(jnp.shape, variables)
      {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}, 'mean': (2, 4)}}

    In the example above, the function ``lecun_normal`` expects two arguments:
    ``key`` and ``shape``, but only ``shape`` has to be provided explicitly;
    ``key`` is set automatically using the PRNG for ``params`` that is passed
    when initializing the module using :meth:`init`.

    Args:
      name: The parameter name.
      init_fn: The function that will be called to compute the initial value of
        this variable. This function will only be called the first time this
        parameter is used in this module.
      *init_args: The positional arguments to pass to init_fn.
      unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed
        value, see ``flax.nn.meta.unbox`` (default: True).
      **init_kwargs: The key-word arguments to pass to init_fn.

    Returns:
      The value of the initialized parameter. Throws an error if the parameter
      exists already.
    """
    if not self._initialization_allowed:
      raise ValueError(
        'Parameters must be initialized in `setup()` or in a method '
        'wrapped in `@compact`'
      )
    if self._name_taken(name, collection='params'):
      raise errors.NameInUseError('param', name, self.__class__.__name__)
    assert self.scope is not None
    v = self.scope.param(name, init_fn, *init_args, unbox=unbox, **init_kwargs)
    self._state.children[name] = 'params'
    return v

  def has_variable(self, col: str, name: str) -> bool:
    """Checks if a variable of given collection and name exists in this Module.
    See :mod:`flax.core.variables` for more explanation on variables and
    collections.

    Args:
      col: The variable collection name.
      name: The name of the variable.

    Returns:
      True if the variable exists.
    """
    # All variable/rng queries require a bound module (i.e. a scope).
    if self.scope is None:
      raise ValueError("Can't access variables on unbound modules")
    return self.scope.has_variable(col, name)

  def is_mutable_collection(self, col: str) -> bool:
    """Returns true if the collection ``col`` is mutable."""
    if self.scope is None:
      raise ValueError("Can't check mutability on unbound modules")
    return self.scope.is_mutable_collection(col)

  def has_rng(self, name: str) -> bool:
    """Returns true if a PRNGSequence with name ``name`` exists."""
    if self.scope is None:
      raise ValueError("Can't query for RNGs on unbound modules")
    return self.scope.has_rng(name)

  def make_rng(self, name: str = 'params') -> PRNGKey:
    """Returns a new RNG key from a given RNG sequence for this Module.

    The new RNG key is split from the previous one. Thus, every call to
    ``make_rng`` returns a new RNG key, while still guaranteeing full
    reproducibility.

    .. note::
      If an invalid name is passed (i.e. no RNG key was passed by
      the user in ``.init`` or ``.apply`` for this name), then ``name``
      will default to ``'params'``.

    Example::

      >>> import jax
      >>> import flax.linen as nn

      >>> class ParamsModule(nn.Module):
      ...   def __call__(self):
      ...     return self.make_rng('params')
      >>> class OtherModule(nn.Module):
      ...   def __call__(self):
      ...     return self.make_rng('other')

      >>> key = jax.random.key(0)
      >>> params_out, _ = ParamsModule().init_with_output({'params': key})
      >>> # self.make_rng('other') will default to using the 'params' RNG stream
      >>> other_out, _ = OtherModule().init_with_output({'params': key})
      >>> assert params_out == other_out

    Learn more about RNG's by reading the Flax RNG guide:
    https://flax.readthedocs.io/en/latest/guides/flax_fundamentals/rng_guide.html

    Args:
      name: The RNG sequence name.

    Returns:
      The newly generated RNG key.
""" if self.scope is None: raise ValueError("Can't use RNGs on unbound modules") return self.scope.make_rng(name) def is_initializing(self) -> bool: """Returns True if running under self.init(...) or nn.init(...)(). This is a helper method to handle the common case of simple initialization where we wish to have setup logic occur when only called under ``module.init`` or ``nn.init``. For more complicated multi-phase initialization scenarios it is better to test for the mutability of particular variable collections or for the presence of particular variables that potentially need to be initialized. """ if self.scope is None: raise ValueError("Can't check if running under init() on unbound modules") return self.scope.get_flag('initializing', False) def _module_checks(self): """Run standard runtime checks.""" if not isinstance(self, Module): raise errors.InvalidInstanceModuleError() overridden_post_init = self.__post_init__ != Module.__post_init__ if overridden_post_init and not hasattr(self, '_id'): raise errors.IncorrectPostInitOverrideError() def bind( self: M, variables: VariableDict, *args, rngs: Optional[RNGSequences] = None, mutable: CollectionFilter = False, ) -> M: """Creates an interactive Module instance by binding variables and RNGs. ``bind`` provides an "interactive" instance of a Module directly without transforming a function with ``apply``. This is particularly useful for debugging and interactive use cases like notebooks where a function would limit the ability to split up code into different cells. Once the variables (and optionally RNGs) are bound to a ``Module`` it becomes a stateful object. Note that idiomatic JAX is functional and therefore an interactive instance does not mix well with vanilla JAX APIs. ``bind()`` should only be used for interactive experimentation, and in all other cases we strongly encourage users to use ``apply()`` instead. 
Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn >>> class AutoEncoder(nn.Module): ... def setup(self): ... self.encoder = nn.Dense(3) ... self.decoder = nn.Dense(5) ... ... def __call__(self, x): ... return self.decoder(self.encoder(x)) >>> x = jnp.ones((16, 9)) >>> ae = AutoEncoder() >>> variables = ae.init(jax.random.key(0), x) >>> model = ae.bind(variables) >>> z = model.encoder(x) >>> x_reconstructed = model.decoder(z) Args: variables: A dictionary containing variables keyed by variable collections. See :mod:`flax.core.variables` for more details about variables. *args: Named arguments (not used). rngs: a dict of PRNGKeys to initialize the PRNG sequences. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. Returns: A copy of this instance with bound variables and RNGs. """ Module._module_checks(self) del args scope = core.bind(variables, rngs=rngs, mutable=mutable) return self.clone(parent=scope, _deep_clone=True) def unbind(self: M) -> Tuple[M, VariableDict]: """Returns an unbound copy of a Module and its variables. ``unbind`` helps create a stateless version of a bound Module. An example of a common use case: to extract a sub-Module defined inside ``setup()`` and its corresponding variables: 1) temporarily ``bind`` the parent Module; and then 2) ``unbind`` the desired sub-Module. (Recall that ``setup()`` is only called when the Module is bound.):: >>> class Encoder(nn.Module): ... @nn.compact ... def __call__(self, x): ... ... ... return nn.Dense(256)(x) >>> class Decoder(nn.Module): ... @nn.compact ... def __call__(self, x): ... ... ... return nn.Dense(784)(x) >>> class AutoEncoder(nn.Module): ... def setup(self): ... self.encoder = Encoder() ... self.decoder = Decoder() ... ... def __call__(self, x): ... 
return self.decoder(self.encoder(x)) >>> module = AutoEncoder() >>> variables = module.init(jax.random.key(0), jnp.ones((1, 784))) >>> # Extract the Encoder sub-Module and its variables >>> encoder, encoder_vars = module.bind(variables).encoder.unbind() Returns: A tuple with an unbound copy of this Module and its variables. """ Module._module_checks(self) if self.scope is None: raise errors.CallUnbindOnUnboundModuleError() variables = self.variables module = self.clone(_deep_clone=True, _reset_names=True, name=None) return module, variables def apply( self, variables: VariableDict, *args, rngs: Optional[Union[PRNGKey, RNGSequences]] = None, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = False, capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[Any, Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]]: """Applies a module method to variables and returns output and modified variables. Note that ``method`` should be set if one would like to call ``apply`` on a different class method than ``__call__``. For instance, suppose a Transformer modules has a method called ``encode``, then the following calls ``apply`` on that method:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> import numpy as np >>> class Transformer(nn.Module): ... def encode(self, x): ... ... >>> x = jnp.ones((16, 9)) >>> model = Transformer() >>> variables = model.init(jax.random.key(0), x, method=Transformer.encode) >>> encoded = model.apply(variables, x, method=Transformer.encode) If a function instance is provided, the unbound function is used. For instance, the example below is equivalent to the one above:: >>> encoded = model.apply(variables, x, method=model.encode) You can also pass a string to a callable attribute of the module. 
For example, the previous can be written as:: >>> encoded = model.apply(variables, x, method='encode') Note ``method`` can also be a function that is not defined in ``Transformer``. In that case, the function should have at least one argument representing an instance of the Module class:: >>> def other_fn(instance, x): ... # instance.some_module_attr(...) ... instance.encode ... ... >>> model.apply(variables, x, method=other_fn) If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'`` RNG stream. If you want to use a different RNG stream or need to use multiple streams, you can pass a dictionary mapping each RNG stream name to its corresponding ``PRNGKey`` to ``apply``. If ``self.make_rng(name)`` is called on an RNG stream name that isn't passed by the user, it will default to using the ``'params'`` RNG stream. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, add_noise=False): ... x = nn.Dense(16)(x) ... x = nn.relu(x) ... ... if add_noise: ... # Add gaussian noise ... noise_key = self.make_rng('noise') ... x = x + jax.random.normal(noise_key, x.shape) ... ... 
return nn.Dense(1)(x) >>> x = jnp.empty((1, 7)) >>> module = Foo() >>> rngs = {'params': jax.random.key(0), 'noise': jax.random.key(1)} >>> variables = module.init(rngs, x) >>> out0 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> rngs['noise'] = jax.random.key(0) >>> out1 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> # different output (key(1) vs key(0)) >>> np.testing.assert_raises(AssertionError, np.testing.assert_allclose, out0, out1) >>> del rngs['noise'] >>> # self.make_rng('noise') will default to using the 'params' RNG stream >>> out2 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> # same output (key(0)) >>> np.testing.assert_allclose(out1, out2) >>> # passing in a single key is equivalent to passing in {'params': key} >>> out3 = module.apply(variables, x, add_noise=True, rngs=jax.random.key(0)) >>> # same output (key(0)) >>> np.testing.assert_allclose(out2, out3) Args: variables: A dictionary containing variables keyed by variable collections. See :mod:`flax.core.variables` for more details about variables. *args: Named arguments passed to the specified apply method. rngs: a dict of PRNGKeys to initialize the PRNG sequences. The "params" PRNG sequence is used to initialize parameters. method: A function to call apply on. This is generally a function in the module. If provided, applies this method. If not provided, applies the ``__call__`` method of the module. A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default, only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. 
        The filter function takes the Module instance and method name and
        returns a bool indicating whether the output of that method invocation
        should be stored.
      **kwargs: Keyword arguments passed to the specified apply method.

    Returns:
      If ``mutable`` is False, returns output. If any collections are mutable,
      returns ``(output, vars)``, where ``vars`` are is a dict of the modified
      collections.
    """
    Module._module_checks(self)

    # A bare PRNGKey is shorthand for {'params': key}; anything else that is
    # not a dict is rejected here.
    if rngs is not None and not isinstance(rngs, dict):
      if not core.scope._is_valid_rng(rngs):
        raise errors.InvalidRngError(
          'RNGs should be of shape (2,) or PRNGKey in Module '
          f'{self.__class__.__name__}, but rngs are: {rngs}'
        )
      rngs = {'params': rngs}

    # Resolve a string method name to the attribute it names.
    if isinstance(method, str):
      attribute_name = method
      method = getattr(self, attribute_name)
      if not callable(method):
        class_name = type(self).__name__
        raise TypeError(
          f"'{class_name}.{attribute_name}' must be a callable, got"
          f' {type(method)}.'
        )
      # if the `method` string is a submodule, we create a lambda function
      # that calls the submodule, forwarding all arguments.
      if isinstance(method, Module):
        method = lambda self, *args, **kwargs: getattr(self, attribute_name)(
          *args, **kwargs
        )
    elif method is None:
      method = self.__call__
    method = _get_unbound_fn(method)
    return apply(
      method,
      self,
      mutable=mutable,
      capture_intermediates=capture_intermediates,
    )(variables, *args, **kwargs, rngs=rngs)

  def init_with_output(
    self,
    rngs: Union[PRNGKey, RNGSequences],
    *args,
    method: Union[Callable[..., Any], str, None] = None,
    mutable: CollectionFilter = DenyList('intermediates'),
    capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False,
    **kwargs,
  ) -> Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]:
    """Initializes a module method with variables and returns output and modified variables.

    Args:
      rngs: The rngs for the variable collections.
      *args: Named arguments passed to the init function.
      method: An optional method. If provided, applies this method. If not
        provided, applies the ``__call__`` method.
      A string can also be provided to specify a method by name.
      mutable: Can be bool, str, or list. Specifies which collections should
        be treated as mutable: ``bool``: all/no collections are mutable.
        ``str``: The name of a single mutable collection. ``list``: A list of
        names of mutable collections. By default, all collections except
        "intermediates" are mutable.
      capture_intermediates: If ``True``, captures intermediate return values
        of all Modules inside the "intermediates" collection. By default only
        the return values of all ``__call__`` methods are stored. A function
        can be passed to change the filter behavior. The filter function takes
        the Module instance and method name and returns a bool indicating
        whether the output of that method invocation should be stored.
      **kwargs: Keyword arguments passed to the init function.

    Returns:
      ``(output, vars)``, where ``vars`` are is a dict of the modified
      collections.
    """
    Module._module_checks(self)

    # Same PRNGKey-to-dict promotion as in apply().
    if not isinstance(rngs, dict):
      if not core.scope._is_valid_rng(rngs):
        raise errors.InvalidRngError(
          'RNGs should be of shape (2,) or PRNGKey in Module '
          f'{self.__class__.__name__}, but rngs are: {rngs}'
        )
      rngs = {'params': rngs}

    if isinstance(method, str):
      attribute_name = method
      method = getattr(self, attribute_name)
      if not callable(method):
        class_name = type(self).__name__
        raise TypeError(
          f"'{class_name}.{attribute_name}' must be a callable, got"
          f' {type(method)}.'
) elif method is None: method = self.__call__ method = _get_unbound_fn(method) return init_with_output( method, self, mutable=mutable, capture_intermediates=capture_intermediates, )(rngs, *args, **kwargs) def init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = DenyList('intermediates'), capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[FrozenVariableDict, Dict[str, Any]]: """Initializes a module method with variables and returns modified variables. ``init`` takes as first argument either a single ``PRNGKey``, or a dictionary mapping variable collections names to their ``PRNGKeys``, and will call ``method`` (which is the module's ``__call__`` function by default) passing ``*args`` and ``**kwargs``, and returns a dictionary of initialized variables. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> import numpy as np >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, train): ... x = nn.Dense(16)(x) ... x = nn.BatchNorm(use_running_average=not train)(x) ... x = nn.relu(x) ... return nn.Dense(1)(x) >>> x = jnp.empty((1, 7)) >>> module = Foo() >>> key = jax.random.key(0) >>> variables = module.init(key, x, train=False) If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'`` RNG stream. If you want to use a different RNG stream or need to use multiple streams, you can pass a dictionary mapping each RNG stream name to its corresponding ``PRNGKey`` to ``init``. If ``self.make_rng(name)`` is called on an RNG stream name that isn't passed by the user, it will default to using the ``'params'`` RNG stream. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(16)(x) ... x = nn.relu(x) ... ... other_variable = self.variable( ... 'other_collection', ... 'other_variable', ... lambda x: jax.random.normal(self.make_rng('other_rng'), x.shape), ... 
x, ... ) ... x = x + other_variable.value ... ... return nn.Dense(1)(x) >>> module = Foo() >>> rngs = {'params': jax.random.key(0), 'other_rng': jax.random.key(1)} >>> variables0 = module.init(rngs, x) >>> rngs['other_rng'] = jax.random.key(0) >>> variables1 = module.init(rngs, x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables0['params'], variables1['params'] ... ) >>> # different other_variable (key(1) vs key(0)) >>> np.testing.assert_raises( ... AssertionError, ... np.testing.assert_allclose, ... variables0['other_collection']['other_variable'], ... variables1['other_collection']['other_variable'], ... ) >>> del rngs['other_rng'] >>> # self.make_rng('other_rng') will default to using the 'params' RNG stream >>> variables2 = module.init(rngs, x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables1['params'], variables2['params'] ... ) >>> # equivalent other_variable (key(0)) >>> np.testing.assert_allclose( ... variables1['other_collection']['other_variable'], ... variables2['other_collection']['other_variable'], ... ) >>> # passing in a single key is equivalent to passing in {'params': key} >>> variables3 = module.init(jax.random.key(0), x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables2['params'], variables3['params'] ... ) >>> # equivalent other_variable (key(0)) >>> np.testing.assert_allclose( ... variables2['other_collection']['other_variable'], ... variables3['other_collection']['other_variable'], ... ) Jitting ``init`` initializes a model lazily using only the shapes of the provided arguments, and avoids computing the forward pass with actual values. 
Example:: >>> module = nn.Dense(1) >>> init_jit = jax.jit(module.init) >>> variables = init_jit(jax.random.key(0), x) ``init`` is a light wrapper over ``apply``, so other ``apply`` arguments like ``method``, ``mutable``, and ``capture_intermediates`` are also available. Args: rngs: The rngs for the variable collections. *args: Named arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except "intermediates" are mutable. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the init function. Returns: The initialized variable dict. """ Module._module_checks(self) _, v_out = self.init_with_output( rngs, *args, method=method, mutable=mutable, capture_intermediates=capture_intermediates, **kwargs, ) return v_out def lazy_init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Optional[Callable[..., Any]] = None, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> FrozenVariableDict: """Initializes a module without computing on an actual input. lazy_init will initialize the variables without doing unnecessary compute. 
    The input data should be passed as a ``jax.ShapeDtypeStruct`` which
    specifies the shape and dtype of the input but no concrete data.

    Example::

      >>> model = nn.Dense(features=256)
      >>> variables = model.lazy_init(
      ...     jax.random.key(0), jax.ShapeDtypeStruct((1, 128), jnp.float32))

    The args and kwargs args passed to ``lazy_init`` can be a mix of
    concrete (jax arrays, scalars, bools) and abstract (ShapeDtypeStruct)
    values. Concrete values are only necessary for arguments that affect
    the initialization of variables. For example, the model might expect
    a keyword arg that enables/disables a subpart of the model. In this case,
    an explicit value (True/Flase) should be passed otherwise ``lazy_init``
    cannot infer which variables should be initialized.

    Args:
      rngs: The rngs for the variable collections.
      *args: arguments passed to the init function.
      method: An optional method. If provided, applies this method. If not
        provided, applies the ``__call__`` method.
      mutable: Can be bool, str, or list. Specifies which collections should
        be treated as mutable: ``bool``: all/no collections are mutable.
        ``str``: The name of a single mutable collection. ``list``: A list of
        names of mutable collections. By default all collections except
        "intermediates" are mutable.
      **kwargs: Keyword arguments passed to the init function.

    Returns:
      The initialized variable dict.
    """
    Module._module_checks(self)

    # Wrap init so partial_eval can trace it with abstract (shape-only) args.
    def lazy_wrapper(rngs, *args, **kwargs):
      return self.init(rngs, *args, method=method, mutable=mutable, **kwargs)

    return partial_eval.lazy_init(lazy_wrapper)(rngs, *args, **kwargs)

  # NOTE(review): upstream this is presumably a @property; the decorator
  # appears to have been stripped by extraction -- verify.
  def variables(self) -> VariableDict:
    """Returns the variables in this module."""
    if self.scope is None:
      raise ValueError("Can't access variables on unbound modules")
    return self.scope.variables()

  def get_variable(self, col: str, name: str, default: Optional[T] = None) -> T:
    """Retrieves the value of a Variable.

    Args:
      col: the variable collection.
      name: the name of the variable.
default: the default value to return if the variable does not exist in this scope. Returns: The value of the input variable, of the default value if the variable doesn't exist in this scope. """ if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.get_variable(col, name, default) def put_variable(self, col: str, name: str, value: Any): """Updates the value of the given variable if it is mutable, or an error otherwise. Args: col: the variable collection. name: the name of the variable. value: the new value of the variable. """ if self.scope is None: raise ValueError("Can't access variables on unbound modules") self.scope.put_variable(col, name, value) def sow(self, col: str, name: str, value: Any) -> bool: ... def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: ... def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: """Stores a value in a collection. Collections can be used to collect intermediate values without the overhead of explicitly passing a container through each Module call. If the target collection is not mutable ``sow`` behaves like a no-op and returns ``False``. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... self.sow('intermediates', 'h', h) ... 
return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> model = Foo() >>> variables = model.init(jax.random.key(0), x) >>> y, state = model.apply(variables, x, mutable=['intermediates']) >>> print(state['intermediates']) {'h': (Array([[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ]], dtype=float32),)} By default the values are stored in a tuple and each stored value is appended at the end. This way all intermediates can be tracked when the same module is called multiple times. Alternatively, a custom init/reduce function can be passed:: >>> class Foo2(nn.Module): ... @nn.compact ... def __call__(self, x): ... init_fn = lambda: 0 ... reduce_fn = lambda a, b: a + b ... self.sow('intermediates', 'h', x, ... init_fn=init_fn, reduce_fn=reduce_fn) ... self.sow('intermediates', 'h', x * 2, ... init_fn=init_fn, reduce_fn=reduce_fn) ... return x >>> x = jnp.ones((1, 1)) >>> model = Foo2() >>> variables = model.init(jax.random.key(0), x) >>> y, state = model.apply( ... variables, x, mutable=['intermediates']) >>> print(state['intermediates']) {'h': Array([[3.]], dtype=float32)} Args: col: The name of the variable collection. name: The name of the variable. value: The value of the variable. 
reduce_fn: The function used to combine the existing value with the new value. The default is to append the value to a tuple. init_fn: For the first value stored, ``reduce_fn`` will be passed the result of ``init_fn`` together with the value to be stored. The default is an empty tuple. Returns: ``True`` if the value has been stored successfully, ``False`` otherwise. """ if self.scope is None: raise ValueError("Can't store variables on unbound modules") if not self.scope.is_mutable_collection(col): return False if self.scope.has_variable(col, name): xs = self.scope.get_variable(col, name) else: self.scope.reserve(name, col) self._state.children[name] = col xs = init_fn() xs = reduce_fn(xs, value) self.scope.put_variable(col, name, xs) return True def perturb( self, name: str, value: T, collection: str = 'perturbations' ) -> T: """Add an zero-value variable ('perturbation') to the intermediate value. The gradient of ``value`` would be the same as the gradient of this perturbation variable. Therefore, if you define your loss function with both params and perturbations as standalone arguments, you can get the intermediate gradients of ``value`` by running ``jax.grad`` on the perturbation argument. .. note:: This is an experimental API and may be tweaked later for better performance and usability. At its current stage, it creates extra dummy variables that occupies extra memory space. Use it only to debug gradients in training. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(3)(x) ... x = self.perturb('dense3', x) ... return nn.Dense(2)(x) >>> def loss(variables, inputs, targets): ... preds = model.apply(variables, inputs) ... 
return jnp.square(preds - targets).mean() >>> x = jnp.ones((2, 9)) >>> y = jnp.ones((2, 2)) >>> model = Foo() >>> variables = model.init(jax.random.key(0), x) >>> intm_grads = jax.grad(loss, argnums=0)(variables, x, y) >>> print(intm_grads['perturbations']['dense3']) [[-1.456924 -0.44332537 0.02422847] [-1.456924 -0.44332537 0.02422847]] If perturbations are not passed to ``apply``, ``perturb`` behaves like a no-op so you can easily disable the behavior when not needed:: >>> model.apply(variables, x) # works as expected Array([[-1.0980128 , -0.67961735], [-1.0980128 , -0.67961735]], dtype=float32) >>> model.apply({'params': variables['params']}, x) # behaves like a no-op Array([[-1.0980128 , -0.67961735], [-1.0980128 , -0.67961735]], dtype=float32) >>> intm_grads = jax.grad(loss, argnums=0)({'params': variables['params']}, x, y) >>> 'perturbations' not in intm_grads True """ if self.scope is None: raise ValueError("Can't store variables on unbound modules") if self.is_mutable_collection(collection): if not self.scope.has_variable(collection, name): self.scope.reserve(name, collection) self._state.children[name] = collection self.scope.put_variable(collection, name, jnp.zeros_like(value)) # type: ignore if collection in self.scope.root._variables: if self.scope.has_variable(collection, name): value += self.scope.get_variable(collection, name) # type: ignore else: raise ValueError(f"Perturbation collection {collection} present, but " f"missing perturbation variable {name}") return value def tabulate( self, rngs: Union[PRNGKey, RNGSequences], *args, depth: Optional[int] = None, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), console_kwargs: Optional[Mapping[str, Any]] = None, table_kwargs: Mapping[str, Any] = MappingProxyType({}), column_kwargs: Mapping[str, Any] = MappingProxyType({}), compute_flops: bool = False, compute_vjp_flops: bool = False, **kwargs, ) -> str: """Creates a summary of the Module represented as a table. 
This method has the same signature and internally calls ``Module.init``, but instead of returning the variables, it returns the string summarizing the Module in a table. ``tabulate`` uses ``jax.eval_shape`` to run the forward computation without consuming any FLOPs or allocating memory. Additional arguments can be passed into the ``console_kwargs`` argument, for example, ``{'width': 120}``. For a full list of ``console_kwargs`` arguments, see: https://rich.readthedocs.io/en/stable/reference/console.html#rich.console.Console Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> # print(Foo().tabulate( >>> # jax.random.key(0), x, compute_flops=True, compute_vjp_flops=True)) This gives the following output:: Foo Summary ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ ┃ path ┃ module ┃ inputs ┃ outputs ┃ flops ┃ vjp_flops ┃ params ┃ ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ │ │ Foo │ float32[16,9] │ float32[16,2] │ 1504 │ 4460 │ │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_0 │ Dense │ float32[16,9] │ float32[16,4] │ 1216 │ 3620 │ bias: │ │ │ │ │ │ │ │ float32[4] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[9,4] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 40 (160 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_1 │ Dense │ float32[16,4] │ float32[16,2] │ 288 │ 840 │ bias: │ │ │ │ │ │ │ │ float32[2] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[4,2] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 10 (40 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ │ │ │ │ │ Total │ 50 (200 B) │ └─────────┴────────┴───────────────┴───────────────┴───────┴───────────┴─────────────────┘ Total 
Parameters: 50 (200 B) **Note**: rows order in the table does not represent execution order, instead it aligns with the order of keys in ``variables`` which are sorted alphabetically. **Note**: ``vjp_flops`` returns ``0`` if the module is not differentiable. Args: rngs: The rngs for the variable collections as passed to ``Module.init``. *args: The arguments to the forward computation. depth: controls how many submodule deep the summary can go. By default, its ``None`` which means no limit. If a submodule is not shown because of the depth limit, its parameter count and bytes will be added to the row of its first shown ancestor such that the sum of all rows always adds up to the total number of parameters of the Module. show_repeated: If ``True``, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. Default is ``False``. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except 'intermediates' are mutable. console_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.console.Console`` when rendering the table. Default arguments are ``{'force_terminal': True, 'force_jupyter': False}``. table_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.table.Table`` constructor. column_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.table.Table.add_column`` when adding columns to the table. compute_flops: whether to include a ``flops`` column in the table listing the estimated FLOPs cost of each module forward pass. Does incur actual on-device computation / compilation / memory allocation, but still introduces overhead for large modules (e.g. 
extra 20 seconds for a Stable Diffusion's UNet, whereas otherwise tabulation would finish in 5 seconds). compute_vjp_flops: whether to include a ``vjp_flops`` column in the table listing the estimated FLOPs cost of each module backward pass. Introduces a compute overhead of about 2-3X of ``compute_flops``. **kwargs: keyword arguments to pass to the forward computation. Returns: A string summarizing the Module. """ from flax.linen import summary tabulate_fn = summary.tabulate( self, rngs, depth=depth, show_repeated=show_repeated, mutable=mutable, console_kwargs=console_kwargs, table_kwargs=table_kwargs, column_kwargs=column_kwargs, compute_flops=compute_flops, compute_vjp_flops=compute_vjp_flops, ) return tabulate_fn(*args, **kwargs) def module_paths( self, rngs: Union[PRNGKey, RNGSequences], *args, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> dict[str, 'Module']: """Returns a dictionary mapping module paths to module instances. This method has the same signature and internally calls ``Module.init``, but instead of returning the variables, it returns a dictionary mapping module paths to unbounded copies of module instances that were used at runtime. ``module_paths`` uses ``jax.eval_shape`` to run the forward computation without consuming any FLOPs or allocating memory. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> modules = Foo().module_paths(jax.random.key(0), x) >>> print({ ... p: type(m).__name__ for p, m in modules.items() ... }) {'': 'Foo', 'Dense_0': 'Dense', 'Dense_1': 'Dense'} Args: rngs: The rngs for the variable collections as passed to ``Module.init``. *args: The arguments to the forward computation. show_repeated: If ``True``, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. 
Default is ``False``. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except 'intermediates' are mutable. **kwargs: keyword arguments to pass to the forward computation. Returns: A dict`ionary mapping module paths to module instances. """ from flax.linen import summary table = summary._get_module_table( module=self, depth=None, show_repeated=show_repeated, compute_flops=False, compute_vjp_flops=False, )(rngs, *args, **kwargs, mutable=mutable) return {'/'.join(row.path): row.module_copy for row in table} The provided code snippet includes necessary dependencies for implementing the `clean_clone` function. Write a Python function `def clean_clone(x)` to solve the following problem: Remove scopes and tracers from children. Here is the function: def clean_clone(x): """Remove scopes and tracers from children.""" if isinstance(x, Module): object.__setattr__( x, 'children', {k: clean_clone(v) for k, v in x.children.items()} ) object.__setattr__(x, 'scope', None) return x
Remove scopes and tracers from children.
22,607
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax def _fingerprint_recursive( obj: Any, path: tuple[str, ...], seen_modules: dict[FlaxId, int] ) -> Any: class Module(ModuleBase): def __init__(self, *args, **kwargs): def __call__(self, *args, **kwargs) -> Any: def __init_subclass__(cls, kw_only: bool = False, **kwargs: Any) -> None: def _customized_dataclass_transform(cls, kw_only: bool): def _verify_single_or_no_compact(cls): def _find_compact_name_scope_methods(cls): def _wrap_module_attributes(cls): def _call_wrapped_method(self, fun, args, kwargs): def __setattr__(self, name: str, val: Any): def __getattr__(self, name: str) -> Any: def __dir__(self) -> List[str]: def __post_init__(self) -> None: def __repr__(self) -> str: def setup(self) -> None: def _register_submodules(self, name, val): def adopt_attr_modules(cache, queue, suffix, subvalue): def _try_setup(self, shallow: bool = False) -> None: def _validate_setup(self) -> None: def run_setup_only(x): def _name_taken( self, name: str, reuse_scopes: bool = False, collection: Optional[str] = None, ) -> bool: def _initialization_allowed(self): def path(self): def clone( self: M, *, parent: Optional[Union[Scope, 'Module', _Sentinel]] = None, _deep_clone: Union[bool, weakref.WeakValueDictionary] = False, _reset_names: bool = False, **updates, ) -> M: def clone_fn(m: Module) -> Module: def copy( self: M, *, 
parent: Optional[Union[Scope, 'Module', _Sentinel]] = _unspecified_parent, name: Optional[str] = None, **updates, ) -> M: def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, ) -> Variable[T]: def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: Literal[True], **init_kwargs, ) -> Variable[T]: def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: Literal[False], **init_kwargs, ) -> Variable[meta.AxisMetadata[T]]: def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: bool = True, **init_kwargs, ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]: def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: bool = True, **init_kwargs, ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]: def param( self, name: str, init_fn: Callable[..., T], *init_args, ) -> T: def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: Literal[True], **init_kwargs, ) -> T: def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: Literal[False], **init_kwargs, ) -> meta.AxisMetadata[T]: def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: bool, **init_kwargs, ) -> Union[T, meta.AxisMetadata[T]]: def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: bool = True, **init_kwargs, ) -> Union[T, meta.AxisMetadata[T]]: def has_variable(self, col: str, name: str) -> bool: def is_mutable_collection(self, col: str) -> bool: def has_rng(self, name: str) -> bool: def make_rng(self, name: str = 'params') -> PRNGKey: def is_initializing(self) -> bool: def _module_checks(self): def bind( self: M, variables: VariableDict, *args, rngs: Optional[RNGSequences] = None, mutable: CollectionFilter = False, ) -> M: def unbind(self: M) -> Tuple[M, VariableDict]: def apply( self, variables: 
VariableDict, *args, rngs: Optional[Union[PRNGKey, RNGSequences]] = None, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = False, capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[Any, Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]]: def init_with_output( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = DenyList('intermediates'), capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]: def init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = DenyList('intermediates'), capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[FrozenVariableDict, Dict[str, Any]]: def lazy_init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Optional[Callable[..., Any]] = None, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> FrozenVariableDict: def lazy_wrapper(rngs, *args, **kwargs): def variables(self) -> VariableDict: def get_variable(self, col: str, name: str, default: Optional[T] = None) -> T: def put_variable(self, col: str, name: str, value: Any): def sow(self, col: str, name: str, value: Any) -> bool: def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: def perturb( self, name: str, value: T, collection: str = 'perturbations' ) -> T: def tabulate( self, rngs: Union[PRNGKey, RNGSequences], *args, depth: Optional[int] = None, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), 
console_kwargs: Optional[Mapping[str, Any]] = None, table_kwargs: Mapping[str, Any] = MappingProxyType({}), column_kwargs: Mapping[str, Any] = MappingProxyType({}), compute_flops: bool = False, compute_vjp_flops: bool = False, **kwargs, ) -> str: def module_paths( self, rngs: Union[PRNGKey, RNGSequences], *args, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> dict[str, 'Module']: def _module_fingerprint(module: Module) -> tuple[type[Any], Any]: return _fingerprint_recursive(module, (), {})
null
22,608
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax Target = TypeVar('Target', bound=TransformTarget) def lift_transform( transform, target, *trafo_args, methods=None, **trafo_kwargs ): """Applies to class or as a decorator on class fns.""" # TODO(marcvanzee): Improve docstrings (#1977). if _is_module_class(target): return module_class_lift_transform( transform, target, *trafo_args, methods=methods, **trafo_kwargs ) # we presume this is being used as a function decorator in class definition elif callable(target) and not isinstance(target, Module): return decorator_lift_transform( transform, target, *trafo_args, **trafo_kwargs ) else: raise errors.TransformTargetError(target) class FrozenDict(Mapping[K, V]): """An immutable variant of the Python dict.""" __slots__ = ('_dict', '_hash') def __init__(self, *args, __unsafe_skip_copy__=False, **kwargs): # pylint: disable=invalid-name # make sure the dict is as xs = dict(*args, **kwargs) if __unsafe_skip_copy__: self._dict = xs else: self._dict = _prepare_freeze(xs) self._hash = None def __getitem__(self, key): v = self._dict[key] if isinstance(v, dict): return FrozenDict(v) return v def __setitem__(self, key, value): raise ValueError('FrozenDict is immutable.') def __contains__(self, key): return key in self._dict def __iter__(self): return iter(self._dict) def __len__(self): return len(self._dict) def 
__repr__(self): return self.pretty_repr() def __reduce__(self): return FrozenDict, (self.unfreeze(),) def pretty_repr(self, num_spaces=4): """Returns an indented representation of the nested dictionary.""" def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return f'FrozenDict({pretty_dict(self._dict)})' def __hash__(self): if self._hash is None: h = 0 for key, value in self.items(): h ^= hash((key, value)) self._hash = h return self._hash def copy( self, add_or_replace: Mapping[K, V] = MappingProxyType({}) ) -> 'FrozenDict[K, V]': """Create a new FrozenDict with additional or replaced entries.""" return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type] def keys(self): return FrozenKeysView(self) def values(self): return FrozenValuesView(self) def items(self): for key in self._dict: yield (key, self[key]) def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]: """Create a new FrozenDict where one entry is removed. Example:: >>> from flax.core import FrozenDict >>> variables = FrozenDict({'params': {...}, 'batch_stats': {...}}) >>> new_variables, params = variables.pop('params') Args: key: the key to remove from the dict Returns: A pair with the new FrozenDict and the removed value. """ value = self[key] new_dict = dict(self._dict) new_dict.pop(key) new_self = type(self)(new_dict) return new_self, value def unfreeze(self) -> Dict[K, V]: """Unfreeze this FrozenDict. Returns: An unfrozen version of this FrozenDict instance. """ return unfreeze(self) def tree_flatten_with_keys(self) -> Tuple[Tuple[Any, ...], Hashable]: """Flattens this FrozenDict. Returns: A flattened version of this FrozenDict instance. 
""" sorted_keys = sorted(self._dict) return tuple( [(jax.tree_util.DictKey(k), self._dict[k]) for k in sorted_keys] ), tuple(sorted_keys) def tree_unflatten(cls, keys, values): # data is already deep copied due to tree map mechanism # we can skip the deep copy in the constructor return cls({k: v for k, v in zip(keys, values)}, __unsafe_skip_copy__=True) CollectionFilter = Filter PRNGSequenceFilter = Filter InOutAxis = Union[Axis, In[Axis], Out[Axis]] The provided code snippet includes necessary dependencies for implementing the `vmap` function. Write a Python function `def vmap( target: Target, variable_axes: Mapping[CollectionFilter, InOutAxis] = FrozenDict(), split_rngs: Mapping[PRNGSequenceFilter, bool] = FrozenDict(), in_axes=0, out_axes=0, axis_size: Optional[int] = None, axis_name: Optional[str] = None, spmd_axis_name: Optional[str] = None, metadata_params: Mapping[Any, Any] = {}, methods=None, ) -> Target` to solve the following problem: A lifted version of ``jax.vmap``. See ``jax.vmap`` for the unlifted batch transform in Jax. ``vmap`` can be used to add a batch axis to a ``Module``. For example we could create a version of ``Dense`` with a batch axis that does not share parameters:: >>> import flax.linen as nn >>> BatchDense = nn.vmap( ... nn.Dense, ... in_axes=0, out_axes=0, ... variable_axes={'params': 0}, ... split_rngs={'params': True}) By using ``variable_axes={'params': 0}``, we indicate that the parameters themselves are mapped over and therefore not shared along the mapped axis. Consequently, we also split the 'params' RNG, otherwise the parameters would be initialized identically along the mapped axis. Similarly, ``vmap`` could be used to add a batch axis with parameter sharing:: >>> import flax.linen as nn >>> BatchDense = nn.vmap( ... nn.Dense, ... in_axes=0, out_axes=0, ... variable_axes={'params': None}, ... 
split_rngs={'params': False}) Here we use ``variable_axes={'params': None}`` to indicate the parameter variables are shared along the mapped axis. Consequently, the 'params' RNG must also be shared. Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. variable_axes: the variable collections that are lifted into the batching transformation. Use ``None`` to indicate a broadcasted collection or an integer to map over an axis. For example, passing in ``variable_axes={'params': None}`` will indicate that the parameter variables should be shared along the mapped axis. split_rngs: Split PRNG sequences will be different for each index of the batch dimension. Unsplit PRNGs will be broadcasted. in_axes: Specifies the mapping of the input arguments (see ``jax.vmap``). out_axes: Specifies the mapping of the return value (see ``jax.vmap``). axis_size: Specifies the size of the batch axis. This only needs to be specified if it cannot be derived from the input arguments. axis_name: Specifies a name for the batch axis. Can be used together with parallel reduction primitives (e.g. ``jax.lax.pmean``, ``jax.lax.ppermute``, etc.). Note, this is only used for pmap and shard map. For SPMD jit, you do not need to manually synchronize. Just make sure that the axes are correctly annotated and XLA:SPMD will insert the necessary collectives. methods: If ``target`` is a ``Module``, the methods of ``Module`` to vmap over. spmd_axis_name: Axis name added to any pjit sharding constraints appearing in ``fn``. See also https://github.com/google/flax/blob/main/flax/linen/partitioning.py. metadata_params: arguments dict passed to AxisMetadata instances in the variable tree. Returns: A batched/vectorized version of ``target``, with the same arguments but with extra axes at positions indicated by ``in_axes``, and the same return value, but with extra axes at positions indicated by ``out_axes``. 
Here is the function: def vmap( target: Target, variable_axes: Mapping[CollectionFilter, InOutAxis] = FrozenDict(), split_rngs: Mapping[PRNGSequenceFilter, bool] = FrozenDict(), in_axes=0, out_axes=0, axis_size: Optional[int] = None, axis_name: Optional[str] = None, spmd_axis_name: Optional[str] = None, metadata_params: Mapping[Any, Any] = {}, methods=None, ) -> Target: """A lifted version of ``jax.vmap``. See ``jax.vmap`` for the unlifted batch transform in Jax. ``vmap`` can be used to add a batch axis to a ``Module``. For example we could create a version of ``Dense`` with a batch axis that does not share parameters:: >>> import flax.linen as nn >>> BatchDense = nn.vmap( ... nn.Dense, ... in_axes=0, out_axes=0, ... variable_axes={'params': 0}, ... split_rngs={'params': True}) By using ``variable_axes={'params': 0}``, we indicate that the parameters themselves are mapped over and therefore not shared along the mapped axis. Consequently, we also split the 'params' RNG, otherwise the parameters would be initialized identically along the mapped axis. Similarly, ``vmap`` could be used to add a batch axis with parameter sharing:: >>> import flax.linen as nn >>> BatchDense = nn.vmap( ... nn.Dense, ... in_axes=0, out_axes=0, ... variable_axes={'params': None}, ... split_rngs={'params': False}) Here we use ``variable_axes={'params': None}`` to indicate the parameter variables are shared along the mapped axis. Consequently, the 'params' RNG must also be shared. Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. variable_axes: the variable collections that are lifted into the batching transformation. Use ``None`` to indicate a broadcasted collection or an integer to map over an axis. For example, passing in ``variable_axes={'params': None}`` will indicate that the parameter variables should be shared along the mapped axis. split_rngs: Split PRNG sequences will be different for each index of the batch dimension. 
Unsplit PRNGs will be broadcasted. in_axes: Specifies the mapping of the input arguments (see ``jax.vmap``). out_axes: Specifies the mapping of the return value (see ``jax.vmap``). axis_size: Specifies the size of the batch axis. This only needs to be specified if it cannot be derived from the input arguments. axis_name: Specifies a name for the batch axis. Can be used together with parallel reduction primitives (e.g. ``jax.lax.pmean``, ``jax.lax.ppermute``, etc.). Note, this is only used for pmap and shard map. For SPMD jit, you do not need to manually synchronize. Just make sure that the axes are correctly annotated and XLA:SPMD will insert the necessary collectives. methods: If ``target`` is a ``Module``, the methods of ``Module`` to vmap over. spmd_axis_name: Axis name added to any pjit sharding constraints appearing in ``fn``. See also https://github.com/google/flax/blob/main/flax/linen/partitioning.py. metadata_params: arguments dict passed to AxisMetadata instances in the variable tree. Returns: A batched/vectorized version of ``target``, with the same arguments but with extra axes at positions indicated by ``in_axes``, and the same return value, but with extra axes at positions indicated by ``out_axes``. """ return lift_transform( lift.vmap, target, variable_axes, split_rngs, methods=methods, in_axes=in_axes, out_axes=out_axes, axis_size=axis_size, axis_name=axis_name, metadata_params=metadata_params, spmd_axis_name=spmd_axis_name, )
A lifted version of ``jax.vmap``. See ``jax.vmap`` for the unlifted batch transform in Jax. ``vmap`` can be used to add a batch axis to a ``Module``. For example we could create a version of ``Dense`` with a batch axis that does not share parameters:: >>> import flax.linen as nn >>> BatchDense = nn.vmap( ... nn.Dense, ... in_axes=0, out_axes=0, ... variable_axes={'params': 0}, ... split_rngs={'params': True}) By using ``variable_axes={'params': 0}``, we indicate that the parameters themselves are mapped over and therefore not shared along the mapped axis. Consequently, we also split the 'params' RNG, otherwise the parameters would be initialized identically along the mapped axis. Similarly, ``vmap`` could be used to add a batch axis with parameter sharing:: >>> import flax.linen as nn >>> BatchDense = nn.vmap( ... nn.Dense, ... in_axes=0, out_axes=0, ... variable_axes={'params': None}, ... split_rngs={'params': False}) Here we use ``variable_axes={'params': None}`` to indicate the parameter variables are shared along the mapped axis. Consequently, the 'params' RNG must also be shared. Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. variable_axes: the variable collections that are lifted into the batching transformation. Use ``None`` to indicate a broadcasted collection or an integer to map over an axis. For example, passing in ``variable_axes={'params': None}`` will indicate that the parameter variables should be shared along the mapped axis. split_rngs: Split PRNG sequences will be different for each index of the batch dimension. Unsplit PRNGs will be broadcasted. in_axes: Specifies the mapping of the input arguments (see ``jax.vmap``). out_axes: Specifies the mapping of the return value (see ``jax.vmap``). axis_size: Specifies the size of the batch axis. This only needs to be specified if it cannot be derived from the input arguments. axis_name: Specifies a name for the batch axis. 
Can be used together with parallel reduction primitives (e.g. ``jax.lax.pmean``, ``jax.lax.ppermute``, etc.). Note, this is only used for pmap and shard map. For SPMD jit, you do not need to manually synchronize. Just make sure that the axes are correctly annotated and XLA:SPMD will insert the necessary collectives. methods: If ``target`` is a ``Module``, the methods of ``Module`` to vmap over. spmd_axis_name: Axis name added to any pjit sharding constraints appearing in ``fn``. See also https://github.com/google/flax/blob/main/flax/linen/partitioning.py. metadata_params: arguments dict passed to AxisMetadata instances in the variable tree. Returns: A batched/vectorized version of ``target``, with the same arguments but with extra axes at positions indicated by ``in_axes``, and the same return value, but with extra axes at positions indicated by ``out_axes``.
22,609
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax Target = TypeVar('Target', bound=TransformTarget) def lift_transform( transform, target, *trafo_args, methods=None, **trafo_kwargs ): """Applies to class or as a decorator on class fns.""" # TODO(marcvanzee): Improve docstrings (#1977). if _is_module_class(target): return module_class_lift_transform( transform, target, *trafo_args, methods=methods, **trafo_kwargs ) # we presume this is being used as a function decorator in class definition elif callable(target) and not isinstance(target, Module): return decorator_lift_transform( transform, target, *trafo_args, **trafo_kwargs ) else: raise errors.TransformTargetError(target) CollectionFilter = Filter PRNGSequenceFilter = Filter The provided code snippet includes necessary dependencies for implementing the `checkpoint` function. Write a Python function `def checkpoint( target: Target, variables: CollectionFilter = True, rngs: PRNGSequenceFilter = True, concrete: bool = False, prevent_cse: bool = True, static_argnums: Union[int, Tuple[int, ...]] = (), policy: Optional[Callable[..., bool]] = None, methods=None, ) -> Target` to solve the following problem: Lifted version of ``jax.checkpoint``. Checkpointing is a technique for reducing memory usage by recomputing activations during backpropagation. 
When training large models, it can be helpful to checkpoint parts of the model to trade off memory usage for additional computation. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn ... >>> class CheckpointedMLP(nn.Module): ... @nn.checkpoint ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(128)(x) ... x = nn.relu(x) ... x = nn.Dense(1)(x) ... return x ... >>> model = CheckpointedMLP() >>> variables = model.init(jax.random.key(0), jnp.ones((1, 16))) This function is aliased to ``remat`` just like ``jax.remat``. Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. intermediate computations will be re-computed when computing gradients for the target. variables: The variable collections that are lifted. By default all collections are lifted. rngs: The PRNG sequences that are lifted. By default all PRNG sequences are lifted. concrete: Optional, boolean indicating whether ``fun`` may involve value-dependent Python control flow (default ``False``). Support for such control flow is optional, and disabled by default, because in some edge-case compositions with :func:`jax.jit` it can lead to some extra computation. prevent_cse: Optional, boolean indicating whether to prevent common subexpression elimination (CSE) optimizations in the HLO generated from differentiation. This CSE prevention has costs because it can foil other optimizations, and because it can incur high overheads on some backends, especially GPU. The default is True because otherwise, under a ``jit`` or ``pmap``, CSE can defeat the purpose of this decorator. But in some settings, like when used inside a ``scan``, this CSE prevention mechanism is unnecessary, in which case ``prevent_cse`` should be set to False. static_argnums: Optional, int or sequence of ints, indicates which argument values on which to specialize for tracing and caching purposes. 
Specifying arguments as static can avoid ConcretizationTypeErrors when tracing, but at the cost of more retracing overheads. policy: Experimental checkpoint policy, see ``jax.checkpoint``. methods: An optional list of method names that will be lifted, if ``methods`` is None (default) only the ``__call__`` method will be lifted. If``target`` is a function, ``methods`` is ignored. Returns: A wrapped version of ``target``. When computing gradients intermediate computations will be re-computed on the backward pass. Here is the function: def checkpoint( target: Target, variables: CollectionFilter = True, rngs: PRNGSequenceFilter = True, concrete: bool = False, prevent_cse: bool = True, static_argnums: Union[int, Tuple[int, ...]] = (), policy: Optional[Callable[..., bool]] = None, methods=None, ) -> Target: """Lifted version of ``jax.checkpoint``. Checkpointing is a technique for reducing memory usage by recomputing activations during backpropagation. When training large models, it can be helpful to checkpoint parts of the model to trade off memory usage for additional computation. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn ... >>> class CheckpointedMLP(nn.Module): ... @nn.checkpoint ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(128)(x) ... x = nn.relu(x) ... x = nn.Dense(1)(x) ... return x ... >>> model = CheckpointedMLP() >>> variables = model.init(jax.random.key(0), jnp.ones((1, 16))) This function is aliased to ``remat`` just like ``jax.remat``. Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. intermediate computations will be re-computed when computing gradients for the target. variables: The variable collections that are lifted. By default all collections are lifted. rngs: The PRNG sequences that are lifted. By default all PRNG sequences are lifted. concrete: Optional, boolean indicating whether ``fun`` may involve value-dependent Python control flow (default ``False``). 
Support for such control flow is optional, and disabled by default, because in some edge-case compositions with :func:`jax.jit` it can lead to some extra computation. prevent_cse: Optional, boolean indicating whether to prevent common subexpression elimination (CSE) optimizations in the HLO generated from differentiation. This CSE prevention has costs because it can foil other optimizations, and because it can incur high overheads on some backends, especially GPU. The default is True because otherwise, under a ``jit`` or ``pmap``, CSE can defeat the purpose of this decorator. But in some settings, like when used inside a ``scan``, this CSE prevention mechanism is unnecessary, in which case ``prevent_cse`` should be set to False. static_argnums: Optional, int or sequence of ints, indicates which argument values on which to specialize for tracing and caching purposes. Specifying arguments as static can avoid ConcretizationTypeErrors when tracing, but at the cost of more retracing overheads. policy: Experimental checkpoint policy, see ``jax.checkpoint``. methods: An optional list of method names that will be lifted, if ``methods`` is None (default) only the ``__call__`` method will be lifted. If``target`` is a function, ``methods`` is ignored. Returns: A wrapped version of ``target``. When computing gradients intermediate computations will be re-computed on the backward pass. """ # subtract 1 from each static_argnums because 'self' is not passed to the # lifted function static_argnums = jax.tree_util.tree_map(lambda x: x - 1, static_argnums) return lift_transform( lift.checkpoint, target, variables=variables, rngs=rngs, concrete=concrete, static_argnums=static_argnums, prevent_cse=prevent_cse, policy=policy, methods=methods, )
Lifted version of ``jax.checkpoint``. Checkpointing is a technique for reducing memory usage by recomputing activations during backpropagation. When training large models, it can be helpful to checkpoint parts of the model to trade off memory usage for additional computation. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn ... >>> class CheckpointedMLP(nn.Module): ... @nn.checkpoint ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(128)(x) ... x = nn.relu(x) ... x = nn.Dense(1)(x) ... return x ... >>> model = CheckpointedMLP() >>> variables = model.init(jax.random.key(0), jnp.ones((1, 16))) This function is aliased to ``remat`` just like ``jax.remat``. Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. intermediate computations will be re-computed when computing gradients for the target. variables: The variable collections that are lifted. By default all collections are lifted. rngs: The PRNG sequences that are lifted. By default all PRNG sequences are lifted. concrete: Optional, boolean indicating whether ``fun`` may involve value-dependent Python control flow (default ``False``). Support for such control flow is optional, and disabled by default, because in some edge-case compositions with :func:`jax.jit` it can lead to some extra computation. prevent_cse: Optional, boolean indicating whether to prevent common subexpression elimination (CSE) optimizations in the HLO generated from differentiation. This CSE prevention has costs because it can foil other optimizations, and because it can incur high overheads on some backends, especially GPU. The default is True because otherwise, under a ``jit`` or ``pmap``, CSE can defeat the purpose of this decorator. But in some settings, like when used inside a ``scan``, this CSE prevention mechanism is unnecessary, in which case ``prevent_cse`` should be set to False. 
static_argnums: Optional, int or sequence of ints, indicates which argument values on which to specialize for tracing and caching purposes. Specifying arguments as static can avoid ConcretizationTypeErrors when tracing, but at the cost of more retracing overheads. policy: Experimental checkpoint policy, see ``jax.checkpoint``. methods: An optional list of method names that will be lifted, if ``methods`` is None (default) only the ``__call__`` method will be lifted. If``target`` is a function, ``methods`` is ignored. Returns: A wrapped version of ``target``. When computing gradients intermediate computations will be re-computed on the backward pass.
22,610
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax Target = TypeVar('Target', bound=TransformTarget) def lift_transform( transform, target, *trafo_args, methods=None, **trafo_kwargs ): """Applies to class or as a decorator on class fns.""" # TODO(marcvanzee): Improve docstrings (#1977). if _is_module_class(target): return module_class_lift_transform( transform, target, *trafo_args, methods=methods, **trafo_kwargs ) # we presume this is being used as a function decorator in class definition elif callable(target) and not isinstance(target, Module): return decorator_lift_transform( transform, target, *trafo_args, **trafo_kwargs ) else: raise errors.TransformTargetError(target) class FrozenDict(Mapping[K, V]): """An immutable variant of the Python dict.""" __slots__ = ('_dict', '_hash') def __init__(self, *args, __unsafe_skip_copy__=False, **kwargs): # pylint: disable=invalid-name # make sure the dict is as xs = dict(*args, **kwargs) if __unsafe_skip_copy__: self._dict = xs else: self._dict = _prepare_freeze(xs) self._hash = None def __getitem__(self, key): v = self._dict[key] if isinstance(v, dict): return FrozenDict(v) return v def __setitem__(self, key, value): raise ValueError('FrozenDict is immutable.') def __contains__(self, key): return key in self._dict def __iter__(self): return iter(self._dict) def __len__(self): return len(self._dict) def 
__repr__(self): return self.pretty_repr() def __reduce__(self): return FrozenDict, (self.unfreeze(),) def pretty_repr(self, num_spaces=4): """Returns an indented representation of the nested dictionary.""" def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return f'FrozenDict({pretty_dict(self._dict)})' def __hash__(self): if self._hash is None: h = 0 for key, value in self.items(): h ^= hash((key, value)) self._hash = h return self._hash def copy( self, add_or_replace: Mapping[K, V] = MappingProxyType({}) ) -> 'FrozenDict[K, V]': """Create a new FrozenDict with additional or replaced entries.""" return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type] def keys(self): return FrozenKeysView(self) def values(self): return FrozenValuesView(self) def items(self): for key in self._dict: yield (key, self[key]) def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]: """Create a new FrozenDict where one entry is removed. Example:: >>> from flax.core import FrozenDict >>> variables = FrozenDict({'params': {...}, 'batch_stats': {...}}) >>> new_variables, params = variables.pop('params') Args: key: the key to remove from the dict Returns: A pair with the new FrozenDict and the removed value. """ value = self[key] new_dict = dict(self._dict) new_dict.pop(key) new_self = type(self)(new_dict) return new_self, value def unfreeze(self) -> Dict[K, V]: """Unfreeze this FrozenDict. Returns: An unfrozen version of this FrozenDict instance. """ return unfreeze(self) def tree_flatten_with_keys(self) -> Tuple[Tuple[Any, ...], Hashable]: """Flattens this FrozenDict. Returns: A flattened version of this FrozenDict instance. 
""" sorted_keys = sorted(self._dict) return tuple( [(jax.tree_util.DictKey(k), self._dict[k]) for k in sorted_keys] ), tuple(sorted_keys) def tree_unflatten(cls, keys, values): # data is already deep copied due to tree map mechanism # we can skip the deep copy in the constructor return cls({k: v for k, v in zip(keys, values)}, __unsafe_skip_copy__=True) CollectionFilter = Filter PRNGSequenceFilter = Filter InOutScanAxis = Union[ScanAxis, In[ScanAxis], Out[ScanAxis]] The provided code snippet includes necessary dependencies for implementing the `remat_scan` function. Write a Python function `def remat_scan( target: Target, lengths: Optional[Sequence[int]] = (), policy: Optional[Callable[..., bool]] = None, variable_broadcast: CollectionFilter = False, variable_carry: CollectionFilter = False, variable_axes: Mapping[CollectionFilter, InOutScanAxis] = FrozenDict( {True: 0} ), split_rngs: Mapping[PRNGSequenceFilter, bool] = FrozenDict({True: True}), ) -> Target` to solve the following problem: Combines remat and scan for memory efficiency and constant time compilation. ``remat_scan`` allows for constant compile times and sublinear memory usage with respect to model depth. At a small constant penalty. This is typically beneficial for very deep models. Example:: >>> import flax.linen as nn >>> class BigModel(nn.Module): ... @nn.compact ... def __call__(self, x): ... DenseStack = nn.remat_scan(nn.Dense, lengths=(10, 10)) ... # 100x dense with O(sqrt(N)) memory for gradient computation ... return DenseStack(8, name="dense_stack")(x) Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. lengths: number of loop iterations at the given level. The total number of iterations ``n = prod(lengths)``. each loop is rematerialized. This way the memory consumption is proportional to ``n^(1 / d)`` where ``d = len(lengths)``. 
Minimal memory consumptions requires tuning the lengths such that the same amount of memory is consumed at each level of the nested loop. policy: Experimental checkpoint policy, see ``jax.checkpoint``. variable_broadcast: Specifies the broadcasted variable collections. A broadcasted variable should not depend on any computation that cannot be lifted out of the loop. This is typically used to define shared parameters inside the fn. variable_carry: Specifies the variable collections that are carried through the loop. Mutations to these variables are carried to the next iteration and will be preserved when the scan finishes. variable_axes: the variable collections that are scanned over. Defaults to ``{True: 0}``. split_rngs: Split PRNG sequences will be different for each loop iterations. If split is False the PRNGs will be the same across iterations. Defaults to ``{True: True}``. Returns: A wrapped version of ``target`` that repeats itself prod(lengths) times. Here is the function: def remat_scan( target: Target, lengths: Optional[Sequence[int]] = (), policy: Optional[Callable[..., bool]] = None, variable_broadcast: CollectionFilter = False, variable_carry: CollectionFilter = False, variable_axes: Mapping[CollectionFilter, InOutScanAxis] = FrozenDict( {True: 0} ), split_rngs: Mapping[PRNGSequenceFilter, bool] = FrozenDict({True: True}), ) -> Target: """Combines remat and scan for memory efficiency and constant time compilation. ``remat_scan`` allows for constant compile times and sublinear memory usage with respect to model depth. At a small constant penalty. This is typically beneficial for very deep models. Example:: >>> import flax.linen as nn >>> class BigModel(nn.Module): ... @nn.compact ... def __call__(self, x): ... DenseStack = nn.remat_scan(nn.Dense, lengths=(10, 10)) ... # 100x dense with O(sqrt(N)) memory for gradient computation ... 
return DenseStack(8, name="dense_stack")(x) Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. lengths: number of loop iterations at the given level. The total number of iterations ``n = prod(lengths)``. each loop is rematerialized. This way the memory consumption is proportional to ``n^(1 / d)`` where ``d = len(lengths)``. Minimal memory consumptions requires tuning the lengths such that the same amount of memory is consumed at each level of the nested loop. policy: Experimental checkpoint policy, see ``jax.checkpoint``. variable_broadcast: Specifies the broadcasted variable collections. A broadcasted variable should not depend on any computation that cannot be lifted out of the loop. This is typically used to define shared parameters inside the fn. variable_carry: Specifies the variable collections that are carried through the loop. Mutations to these variables are carried to the next iteration and will be preserved when the scan finishes. variable_axes: the variable collections that are scanned over. Defaults to ``{True: 0}``. split_rngs: Split PRNG sequences will be different for each loop iterations. If split is False the PRNGs will be the same across iterations. Defaults to ``{True: True}``. Returns: A wrapped version of ``target`` that repeats itself prod(lengths) times. """ return lift_transform( lift.remat_scan, target, lengths=lengths, variable_broadcast=variable_broadcast, variable_carry=variable_carry, variable_axes=variable_axes, split_rngs=split_rngs, policy=policy, )
Combines remat and scan for memory efficiency and constant time compilation. ``remat_scan`` allows for constant compile times and sublinear memory usage with respect to model depth. At a small constant penalty. This is typically beneficial for very deep models. Example:: >>> import flax.linen as nn >>> class BigModel(nn.Module): ... @nn.compact ... def __call__(self, x): ... DenseStack = nn.remat_scan(nn.Dense, lengths=(10, 10)) ... # 100x dense with O(sqrt(N)) memory for gradient computation ... return DenseStack(8, name="dense_stack")(x) Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. lengths: number of loop iterations at the given level. The total number of iterations ``n = prod(lengths)``. each loop is rematerialized. This way the memory consumption is proportional to ``n^(1 / d)`` where ``d = len(lengths)``. Minimal memory consumptions requires tuning the lengths such that the same amount of memory is consumed at each level of the nested loop. policy: Experimental checkpoint policy, see ``jax.checkpoint``. variable_broadcast: Specifies the broadcasted variable collections. A broadcasted variable should not depend on any computation that cannot be lifted out of the loop. This is typically used to define shared parameters inside the fn. variable_carry: Specifies the variable collections that are carried through the loop. Mutations to these variables are carried to the next iteration and will be preserved when the scan finishes. variable_axes: the variable collections that are scanned over. Defaults to ``{True: 0}``. split_rngs: Split PRNG sequences will be different for each loop iterations. If split is False the PRNGs will be the same across iterations. Defaults to ``{True: True}``. Returns: A wrapped version of ``target`` that repeats itself prod(lengths) times.
22,611
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax Target = TypeVar('Target', bound=TransformTarget) def lift_transform( transform, target, *trafo_args, methods=None, **trafo_kwargs ): """Applies to class or as a decorator on class fns.""" # TODO(marcvanzee): Improve docstrings (#1977). if _is_module_class(target): return module_class_lift_transform( transform, target, *trafo_args, methods=methods, **trafo_kwargs ) # we presume this is being used as a function decorator in class definition elif callable(target) and not isinstance(target, Module): return decorator_lift_transform( transform, target, *trafo_args, **trafo_kwargs ) else: raise errors.TransformTargetError(target) class FrozenDict(Mapping[K, V]): """An immutable variant of the Python dict.""" __slots__ = ('_dict', '_hash') def __init__(self, *args, __unsafe_skip_copy__=False, **kwargs): # pylint: disable=invalid-name # make sure the dict is as xs = dict(*args, **kwargs) if __unsafe_skip_copy__: self._dict = xs else: self._dict = _prepare_freeze(xs) self._hash = None def __getitem__(self, key): v = self._dict[key] if isinstance(v, dict): return FrozenDict(v) return v def __setitem__(self, key, value): raise ValueError('FrozenDict is immutable.') def __contains__(self, key): return key in self._dict def __iter__(self): return iter(self._dict) def __len__(self): return len(self._dict) def 
__repr__(self): return self.pretty_repr() def __reduce__(self): return FrozenDict, (self.unfreeze(),) def pretty_repr(self, num_spaces=4): """Returns an indented representation of the nested dictionary.""" def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return f'FrozenDict({pretty_dict(self._dict)})' def __hash__(self): if self._hash is None: h = 0 for key, value in self.items(): h ^= hash((key, value)) self._hash = h return self._hash def copy( self, add_or_replace: Mapping[K, V] = MappingProxyType({}) ) -> 'FrozenDict[K, V]': """Create a new FrozenDict with additional or replaced entries.""" return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type] def keys(self): return FrozenKeysView(self) def values(self): return FrozenValuesView(self) def items(self): for key in self._dict: yield (key, self[key]) def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]: """Create a new FrozenDict where one entry is removed. Example:: >>> from flax.core import FrozenDict >>> variables = FrozenDict({'params': {...}, 'batch_stats': {...}}) >>> new_variables, params = variables.pop('params') Args: key: the key to remove from the dict Returns: A pair with the new FrozenDict and the removed value. """ value = self[key] new_dict = dict(self._dict) new_dict.pop(key) new_self = type(self)(new_dict) return new_self, value def unfreeze(self) -> Dict[K, V]: """Unfreeze this FrozenDict. Returns: An unfrozen version of this FrozenDict instance. """ return unfreeze(self) def tree_flatten_with_keys(self) -> Tuple[Tuple[Any, ...], Hashable]: """Flattens this FrozenDict. Returns: A flattened version of this FrozenDict instance. 
""" sorted_keys = sorted(self._dict) return tuple( [(jax.tree_util.DictKey(k), self._dict[k]) for k in sorted_keys] ), tuple(sorted_keys) def tree_unflatten(cls, keys, values): # data is already deep copied due to tree map mechanism # we can skip the deep copy in the constructor return cls({k: v for k, v in zip(keys, values)}, __unsafe_skip_copy__=True) CollectionFilter = Filter PRNGSequenceFilter = Filter InOutScanAxis = Union[ScanAxis, In[ScanAxis], Out[ScanAxis]] The provided code snippet includes necessary dependencies for implementing the `scan` function. Write a Python function `def scan( target: Target, variable_axes: Mapping[CollectionFilter, InOutScanAxis] = FrozenDict(), variable_broadcast: CollectionFilter = False, variable_carry: CollectionFilter = False, split_rngs: Mapping[PRNGSequenceFilter, bool] = FrozenDict(), in_axes=0, out_axes=0, length: Optional[int] = None, reverse: bool = False, unroll: int = 1, data_transform: Optional[Callable[..., Any]] = None, metadata_params: Mapping[Any, Any] = {}, methods=None, ) -> Target` to solve the following problem: A lifted version of ``jax.lax.scan``. See ``jax.lax.scan`` for the unlifted scan in Jax. To improve consistency with ``vmap``, this version of scan uses ``in_axes`` and ``out_axes`` to determine which arguments are scanned over and along which axis. ``scan`` distinguishes between 3 different types of values inside the loop: #. **scan**: a value that is iterated over in a loop. All scan values must have the same size in the axis they are scanned over. Scanned outputs will be stacked along the scan axis. #. **carry**: A carried value is updated at each loop iteration. It must have the same shape and dtype throughout the loop. #. **broadcast**: a value that is closed over by the loop. When a variable is broadcasted they are typically initialized inside the loop body but independent of the loop variables. 
The ``target`` should have the signature ``(module, carry, *xs) -> (carry, ys)``, where ``xs`` and ``ys`` are the scan values that go in and out of the loop. Example:: >>> import flax.linen as nn >>> import jax >>> import jax.numpy as jnp ... >>> class LSTM(nn.Module): ... features: int ... ... @nn.compact ... def __call__(self, x): ... ScanLSTM = nn.scan( ... nn.LSTMCell, variable_broadcast="params", ... split_rngs={"params": False}, in_axes=1, out_axes=1) ... ... lstm = ScanLSTM(self.features) ... input_shape = x[:, 0].shape ... carry = lstm.initialize_carry(jax.random.key(0), input_shape) ... carry, x = lstm(carry, x) ... return x ... >>> x = jnp.ones((4, 12, 7)) >>> module = LSTM(features=32) >>> y, variables = module.init_with_output(jax.random.key(0), x) Note that when providing a function to ``nn.scan``, the scanning happens over all arguments starting from the third argument, as specified by ``in_axes``. The previous example could also be written using the functional form as:: >>> class LSTM(nn.Module): ... features: int ... ... @nn.compact ... def __call__(self, x): ... ... cell = nn.LSTMCell(self.features) ... def body_fn(cell, carry, x): ... carry, y = cell(carry, x) ... return carry, y ... scan = nn.scan( ... body_fn, variable_broadcast="params", ... split_rngs={"params": False}, in_axes=1, out_axes=1) ... ... input_shape = x[:, 0].shape ... carry = cell.initialize_carry( ... jax.random.key(0), input_shape) ... carry, x = scan(cell, carry, x) ... return x ... >>> module = LSTM(features=32) >>> variables = module.init(jax.random.key(0), jnp.ones((4, 12, 7))) You can also use ``scan`` to reduce the compilation time of your JAX program by merging multiple layers into a single scan loop, you can do this when you have a sequence of identical layers that you want to apply iteratively to an input. For example:: >>> class ResidualMLPBlock(nn.Module): ... @nn.compact ... def __call__(self, x, _): ... h = nn.Dense(features=2)(x) ... h = nn.relu(h) ... 
return x + h, None ... >>> class ResidualMLP(nn.Module): ... n_layers: int = 4 ... ... @nn.compact ... def __call__(self, x): ... ScanMLP = nn.scan( ... ResidualMLPBlock, variable_axes={'params': 0}, ... variable_broadcast=False, split_rngs={'params': True}, ... length=self.n_layers) ... x, _ = ScanMLP()(x, None) ... return x ... >>> model = ResidualMLP(n_layers=4) >>> variables = model.init(jax.random.key(42), jnp.ones((1, 2))) To reduce both compilation and memory usage, you can use :func:`remat_scan` which will in addition checkpoint each layer in the scan loop. Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. variable_axes: the variable collections that are scanned over. variable_broadcast: Specifies the broadcasted variable collections. A broadcasted variable should not depend on any computation that cannot be lifted out of the loop. This is typically used to define shared parameters inside the fn. variable_carry: Specifies the variable collections that are carried through the loop. Mutations to these variables are carried to the next iteration and will be preserved when the scan finishes. split_rngs: Split PRNG sequences will be different for each loop iterations. If split is False the PRNGs will be the same across iterations. in_axes: Specifies the axis to scan over for the arguments. Should be a prefix tree of the arguments. Use ``flax.core.broadcast`` to feed an entire input to each iteration of the scan body. out_axes: Specifies the axis to scan over for the return value. Should be a prefix tree of the return value. length: Specifies the number of loop iterations. This only needs to be specified if it cannot be derived from the scan arguments. reverse: If true, scan from end to start in reverse order. unroll: how many scan iterations to unroll within a single iteration of a loop (default: 1). 
def scan(
  target: Target,
  variable_axes: Mapping[CollectionFilter, InOutScanAxis] = FrozenDict(),
  variable_broadcast: CollectionFilter = False,
  variable_carry: CollectionFilter = False,
  split_rngs: Mapping[PRNGSequenceFilter, bool] = FrozenDict(),
  in_axes=0,
  out_axes=0,
  length: Optional[int] = None,
  reverse: bool = False,
  unroll: int = 1,
  data_transform: Optional[Callable[..., Any]] = None,
  # NOTE(review): mutable default `{}` — sibling defaults use FrozenDict();
  # safe only if never mutated downstream. Consider FrozenDict() — TODO confirm
  # lift.scan does not require a plain dict here.
  metadata_params: Mapping[Any, Any] = {},
  methods=None,
) -> Target:
  """A lifted version of ``jax.lax.scan``.

  See ``jax.lax.scan`` for the unlifted scan in Jax. To improve consistency
  with ``vmap``, this version of scan uses ``in_axes`` and ``out_axes`` to
  determine which arguments are scanned over and along which axis.

  ``scan`` distinguishes between 3 different types of values inside the loop:

  #. **scan**: a value that is iterated over in a loop. All scan values must
     have the same size in the axis they are scanned over. Scanned outputs
     will be stacked along the scan axis.
  #. **carry**: A carried value is updated at each loop iteration. It must
     have the same shape and dtype throughout the loop.
  #. **broadcast**: a value that is closed over by the loop. When a variable
     is broadcasted they are typically initialized inside the loop body but
     independent of the loop variables.

  The ``target`` should have the signature
  ``(module, carry, *xs) -> (carry, ys)``, where ``xs`` and ``ys`` are the
  scan values that go in and out of the loop.

  Example::

    >>> import flax.linen as nn
    >>> import jax
    >>> import jax.numpy as jnp
    ...
    >>> class LSTM(nn.Module):
    ...   features: int
    ...
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     ScanLSTM = nn.scan(
    ...       nn.LSTMCell, variable_broadcast="params",
    ...       split_rngs={"params": False}, in_axes=1, out_axes=1)
    ...
    ...     lstm = ScanLSTM(self.features)
    ...     input_shape = x[:, 0].shape
    ...     carry = lstm.initialize_carry(jax.random.key(0), input_shape)
    ...     carry, x = lstm(carry, x)
    ...     return x
    ...
    >>> x = jnp.ones((4, 12, 7))
    >>> module = LSTM(features=32)
    >>> y, variables = module.init_with_output(jax.random.key(0), x)

  Note that when providing a function to ``nn.scan``, the scanning happens
  over all arguments starting from the third argument, as specified by
  ``in_axes``. The previous example could also be written using the
  functional form as::

    >>> class LSTM(nn.Module):
    ...   features: int
    ...
    ...   @nn.compact
    ...   def __call__(self, x):
    ...
    ...     cell = nn.LSTMCell(self.features)
    ...     def body_fn(cell, carry, x):
    ...       carry, y = cell(carry, x)
    ...       return carry, y
    ...     scan = nn.scan(
    ...       body_fn, variable_broadcast="params",
    ...       split_rngs={"params": False}, in_axes=1, out_axes=1)
    ...
    ...     input_shape = x[:, 0].shape
    ...     carry = cell.initialize_carry(
    ...       jax.random.key(0), input_shape)
    ...     carry, x = scan(cell, carry, x)
    ...     return x
    ...
    >>> module = LSTM(features=32)
    >>> variables = module.init(jax.random.key(0), jnp.ones((4, 12, 7)))

  You can also use ``scan`` to reduce the compilation time of your JAX
  program by merging multiple layers into a single scan loop, you can do
  this when you have a sequence of identical layers that you want to apply
  iteratively to an input. For example::

    >>> class ResidualMLPBlock(nn.Module):
    ...   @nn.compact
    ...   def __call__(self, x, _):
    ...     h = nn.Dense(features=2)(x)
    ...     h = nn.relu(h)
    ...     return x + h, None
    ...
    >>> class ResidualMLP(nn.Module):
    ...   n_layers: int = 4
    ...
    ...   @nn.compact
    ...   def __call__(self, x):
    ...     ScanMLP = nn.scan(
    ...       ResidualMLPBlock, variable_axes={'params': 0},
    ...       variable_broadcast=False, split_rngs={'params': True},
    ...       length=self.n_layers)
    ...     x, _ = ScanMLP()(x, None)
    ...     return x
    ...
    >>> model = ResidualMLP(n_layers=4)
    >>> variables = model.init(jax.random.key(42), jnp.ones((1, 2)))

  To reduce both compilation and memory usage, you can use
  :func:`remat_scan` which will in addition checkpoint each layer in the
  scan loop.

  Args:
    target: a ``Module`` or a function taking a ``Module`` as its first
      argument.
    variable_axes: the variable collections that are scanned over.
    variable_broadcast: Specifies the broadcasted variable collections. A
      broadcasted variable should not depend on any computation that cannot
      be lifted out of the loop. This is typically used to define shared
      parameters inside the fn.
    variable_carry: Specifies the variable collections that are carried
      through the loop. Mutations to these variables are carried to the next
      iteration and will be preserved when the scan finishes.
    split_rngs: Split PRNG sequences will be different for each loop
      iterations. If split is False the PRNGs will be the same across
      iterations.
    in_axes: Specifies the axis to scan over for the arguments. Should be a
      prefix tree of the arguments. Use ``flax.core.broadcast`` to feed an
      entire input to each iteration of the scan body.
    out_axes: Specifies the axis to scan over for the return value. Should
      be a prefix tree of the return value.
    length: Specifies the number of loop iterations. This only needs to be
      specified if it cannot be derived from the scan arguments.
    reverse: If true, scan from end to start in reverse order.
    unroll: how many scan iterations to unroll within a single iteration of
      a loop (default: 1).
    data_transform: optional function to transform raw functional-core
      variable and rng groups inside lifted scan body_fn, intended for
      inline SPMD annotations.
    metadata_params: arguments dict passed to AxisMetadata instances in the
      variable tree.
    methods: If ``target`` is a ``Module``, the methods of ``Module`` to
      scan over.

  Returns:
    The scan function with the signature
    ``(module, carry, *xs) -> (carry, ys)``, where ``xs`` and ``ys`` are the
    scan values that go in and out of the loop.
  """
  # Thin wrapper: all lifting work is delegated to the functional-core
  # lift.scan via lift_transform; arguments are forwarded unchanged.
  return lift_transform(
    lift.scan,
    target,
    variable_axes=variable_axes,
    variable_broadcast=variable_broadcast,
    variable_carry=variable_carry,
    split_rngs=split_rngs,
    in_axes=in_axes,
    out_axes=out_axes,
    length=length,
    reverse=reverse,
    unroll=unroll,
    data_transform=data_transform,
    metadata_params=metadata_params,
    methods=methods,
  )
A lifted version of ``jax.lax.scan``. See ``jax.lax.scan`` for the unlifted scan in Jax. To improve consistency with ``vmap``, this version of scan uses ``in_axes`` and ``out_axes`` to determine which arguments are scanned over and along which axis. ``scan`` distinguishes between 3 different types of values inside the loop: #. **scan**: a value that is iterated over in a loop. All scan values must have the same size in the axis they are scanned over. Scanned outputs will be stacked along the scan axis. #. **carry**: A carried value is updated at each loop iteration. It must have the same shape and dtype throughout the loop. #. **broadcast**: a value that is closed over by the loop. When a variable is broadcasted they are typically initialized inside the loop body but independent of the loop variables. The ``target`` should have the signature ``(module, carry, *xs) -> (carry, ys)``, where ``xs`` and ``ys`` are the scan values that go in and out of the loop. Example:: >>> import flax.linen as nn >>> import jax >>> import jax.numpy as jnp ... >>> class LSTM(nn.Module): ... features: int ... ... @nn.compact ... def __call__(self, x): ... ScanLSTM = nn.scan( ... nn.LSTMCell, variable_broadcast="params", ... split_rngs={"params": False}, in_axes=1, out_axes=1) ... ... lstm = ScanLSTM(self.features) ... input_shape = x[:, 0].shape ... carry = lstm.initialize_carry(jax.random.key(0), input_shape) ... carry, x = lstm(carry, x) ... return x ... >>> x = jnp.ones((4, 12, 7)) >>> module = LSTM(features=32) >>> y, variables = module.init_with_output(jax.random.key(0), x) Note that when providing a function to ``nn.scan``, the scanning happens over all arguments starting from the third argument, as specified by ``in_axes``. The previous example could also be written using the functional form as:: >>> class LSTM(nn.Module): ... features: int ... ... @nn.compact ... def __call__(self, x): ... ... cell = nn.LSTMCell(self.features) ... def body_fn(cell, carry, x): ... 
carry, y = cell(carry, x) ... return carry, y ... scan = nn.scan( ... body_fn, variable_broadcast="params", ... split_rngs={"params": False}, in_axes=1, out_axes=1) ... ... input_shape = x[:, 0].shape ... carry = cell.initialize_carry( ... jax.random.key(0), input_shape) ... carry, x = scan(cell, carry, x) ... return x ... >>> module = LSTM(features=32) >>> variables = module.init(jax.random.key(0), jnp.ones((4, 12, 7))) You can also use ``scan`` to reduce the compilation time of your JAX program by merging multiple layers into a single scan loop, you can do this when you have a sequence of identical layers that you want to apply iteratively to an input. For example:: >>> class ResidualMLPBlock(nn.Module): ... @nn.compact ... def __call__(self, x, _): ... h = nn.Dense(features=2)(x) ... h = nn.relu(h) ... return x + h, None ... >>> class ResidualMLP(nn.Module): ... n_layers: int = 4 ... ... @nn.compact ... def __call__(self, x): ... ScanMLP = nn.scan( ... ResidualMLPBlock, variable_axes={'params': 0}, ... variable_broadcast=False, split_rngs={'params': True}, ... length=self.n_layers) ... x, _ = ScanMLP()(x, None) ... return x ... >>> model = ResidualMLP(n_layers=4) >>> variables = model.init(jax.random.key(42), jnp.ones((1, 2))) To reduce both compilation and memory usage, you can use :func:`remat_scan` which will in addition checkpoint each layer in the scan loop. Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. variable_axes: the variable collections that are scanned over. variable_broadcast: Specifies the broadcasted variable collections. A broadcasted variable should not depend on any computation that cannot be lifted out of the loop. This is typically used to define shared parameters inside the fn. variable_carry: Specifies the variable collections that are carried through the loop. Mutations to these variables are carried to the next iteration and will be preserved when the scan finishes. 
split_rngs: Split PRNG sequences will be different for each loop iterations. If split is False the PRNGs will be the same across iterations. in_axes: Specifies the axis to scan over for the arguments. Should be a prefix tree of the arguments. Use ``flax.core.broadcast`` to feed an entire input to each iteration of the scan body. out_axes: Specifies the axis to scan over for the return value. Should be a prefix tree of the return value. length: Specifies the number of loop iterations. This only needs to be specified if it cannot be derived from the scan arguments. reverse: If true, scan from end to start in reverse order. unroll: how many scan iterations to unroll within a single iteration of a loop (default: 1). data_transform: optional function to transform raw functional-core variable and rng groups inside lifted scan body_fn, intended for inline SPMD annotations. metadata_params: arguments dict passed to AxisMetadata instances in the variable tree. methods: If ``target`` is a ``Module``, the methods of ``Module`` to scan over. Returns: The scan function with the signature ``(module, carry, *xs) -> (carry, ys)``, where ``xs`` and ``ys`` are the scan values that go in and out of the loop.
22,612
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax def lift_direct_transform( transform: Callable[..., Any], targets: Tuple[Callable[..., Any], ...], mdl: Module, *args, multi_scope=True, **kwargs, ): """Lift direct transform.""" # TODO(marcvanzee): Improve docstrings (#1977). for target in targets: if _is_module_class(target): raise ValueError( f'The {transform.__name__} transform can only be applied on a Module' ' method. That is function that takes a Module instance as its first' ' arg.' ) elif not callable(target): raise ValueError('transform target must be callable') # normalize self.foo bound methods to class.foo unbound methods. targets = tuple(_get_unbound_fn(target) for target in targets) aug_transform = lambda *fns: functools.partial(transform, *fns) return decorator_lift_transform( aug_transform, targets, multi_scope=multi_scope )(mdl, *args, **kwargs) CollectionFilter = Filter PRNGSequenceFilter = Filter class Module(ModuleBase): """Base class for all neural network modules. Layers and models should subclass this class. All Flax Modules are Python 3.7 `dataclasses <https://docs.python.org/3/library/dataclasses.html>`_. Since dataclasses take over ``__init__``, you should instead override :meth:`setup`, which is automatically called to initialize the module. Modules can contain submodules, and in this way can be nested in a tree structure. 
Submodels can be assigned as regular attributes inside the :meth:`setup` method. You can define arbitrary "forward pass" methods on your Module subclass. While no methods are special-cased, ``__call__`` is a popular choice because it allows you to use module instances as if they are functions:: >>> from flax import linen as nn >>> from typing import Tuple >>> class Module(nn.Module): ... features: Tuple[int, ...] = (16, 4) ... def setup(self): ... self.dense1 = nn.Dense(self.features[0]) ... self.dense2 = nn.Dense(self.features[1]) ... def __call__(self, x): ... return self.dense2(nn.relu(self.dense1(x))) Optionally, for more concise module implementations where submodules definitions are co-located with their usage, you can use the :meth:`compact` wrapper. """ if typing.TYPE_CHECKING: name: Optional[str] = module_field(kw_only=True, default=None) parent: Union['Module', _Sentinel, None] = module_field( kw_only=True, default=None ) def __init__(self, *args, **kwargs): # this stub makes sure pytype accepts constructor arguments. pass def __call__(self, *args, **kwargs) -> Any: # this stub allows pytype to accept Modules as Callables. pass def __init_subclass__(cls, kw_only: bool = False, **kwargs: Any) -> None: """Automatically initializes all subclasses as custom dataclasses.""" super().__init_subclass__(**kwargs) # All Flax Modules are dataclasses. We force this convention since # it encourages the stateless behavior needed to clone module instances for # functional transformation. Instead of using a python metaclass, we # automatically transform Modules into dataclasses at subclass creation # time, and we set the last dataclass arguments to `parent` and `name`. cls._customized_dataclass_transform(kw_only) # We wrap user-defined methods including setup and __call__ to enforce # a number of different checks and to provide clear error messages. 
cls._verify_single_or_no_compact() cls._find_compact_name_scope_methods() cls._wrap_module_attributes() # Set empty class defaults. cls._state = _uninitialized_module_internal_state # type: ignore[attr-defined] cls.scope: Optional[Scope] = None # type: ignore # Handles weak referencing of parent Modules to prevent reference cycles. cls._parent_ref = None # type: ignore[attr-defined] cls.parent = ParentDescriptor() # type: ignore[assignment] def _customized_dataclass_transform(cls, kw_only: bool): """Transforms `cls` into a dataclass, with custom additional behavior. 1. Inject `parent` and `name` fields. (If they are already present, then check that they have the expected types.) 2. Set compare, hash, and repr to False for non-init fields. 3. Generate a hash function (if not provided by cls). """ # Check reserved attributes have expected type annotations. annotations = dict(cls.__dict__.get('__annotations__', {})) if annotations.get('parent', _ParentType) != _ParentType: raise errors.ReservedModuleAttributeError(annotations) if annotations.get('name', str) not in ('str', str, Optional[str]): raise errors.ReservedModuleAttributeError(annotations) # any non-init field will only be set in setup # During __hash__ and __eq__ the field is not set yet # so it should not be used in compare, hash or repr. 
for field in annotations: field_meta = getattr(cls, field, None) if isinstance(field_meta, dataclasses.Field) and not field_meta.init: field_meta.compare = False field_meta.hash = False field_meta.repr = False extra_fields = [ ( 'parent', _ParentType, kw_only_dataclasses.field( repr=False, default=_unspecified_parent, kw_only=True ), ), ( 'name', Optional[str], kw_only_dataclasses.field(default=None, kw_only=True), ), ] if kw_only: if tuple(sys.version_info)[:3] >= (3, 10, 0): for ( name, annotation, # pytype: disable=invalid-annotation default, ) in extra_fields: setattr(cls, name, default) cls.__annotations__[name] = annotation dataclasses.dataclass( # type: ignore[call-overload] unsafe_hash='__hash__' not in cls.__dict__, repr=False, kw_only=True, )(cls) else: raise TypeError('`kw_only` is not available before Py 3.10.') else: # Now apply dataclass transform (which operates in-place). # Do generate a hash function only if not provided by the class. kw_only_dataclasses.dataclass( cls, unsafe_hash='__hash__' not in cls.__dict__, repr=False, extra_fields=extra_fields, ) # pytype: disable=wrong-keyword-args cls.__hash__ = _wrap_hash(cls.__hash__) # type: ignore[method-assign] def _verify_single_or_no_compact(cls): """Statically verifies that at most a single method is labelled compact.""" methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)] n_compact_fns = len( [ method_name for method_name in methods if hasattr(getattr(cls, method_name), 'compact') ] ) if n_compact_fns > 1: raise errors.MultipleMethodsCompactError() def _find_compact_name_scope_methods(cls): """Finds all compact_name_scope methods in the class.""" methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)] compact_name_scope_fns = tuple( method_name for method_name in methods if hasattr(getattr(cls, method_name), 'compact_name_scope') ) cls._compact_name_scope_methods = compact_name_scope_fns def _wrap_module_attributes(cls): """Wraps user-defined non-inherited methods 
and descriptors with state management functions. """ # wrap methods method_exclusions = [f.name for f in dataclasses.fields(cls)] + [ '__eq__', '__repr__', '__init__', '__hash__', '__post_init__', ] for key in _get_local_method_names(cls, exclude=method_exclusions): method = getattr(cls, key) if hasattr(method, 'nowrap'): continue setattr(cls, key, wrap_method_once(method)) # wrap descriptors descriptor_exclusions = [f.name for f in dataclasses.fields(cls)] + [ 'parent', '__dict__', ] for key in _get_local_descriptor_names(cls, descriptor_exclusions): # don't use getattr here, since it will call the descriptor descriptor = cls.__dict__[key] if hasattr(descriptor, 'nowrap'): continue setattr(cls, key, wrap_descriptor_once(descriptor)) return cls def _call_wrapped_method(self, fun, args, kwargs): """Calls a wrapped method. This function is responsible for setting up the thread local state correctly before calling the method and cleaning up afterwards. This includes storing intermediates, setup of the compact scope, and making sure setup is called before any other method. Args: fun: The wrapped method. args: Named arguments passed to ``fun``. kwargs: Keyword arguments passed to ``fun``. Returns: The results of calling ``fun``. """ is_compact_method = hasattr(fun, 'compact') fun_name = _get_fn_name(fun) is_setup_method = fun_name == 'setup' add_call_info = not is_setup_method and len(_context.call_info_stack) > 0 # We lazily call setup() only when needed. 
if is_setup_method: if self.scope is None: raise errors.CallSetupUnboundModuleError() is_recurrent = self._state.in_setup self._state.in_setup = True else: self._try_setup() if is_compact_method: if self.scope is None: raise errors.CallCompactUnboundModuleError() is_recurrent = self._state.in_compact_method self._state.in_compact_method = True _context.module_stack.append(self) try: # get call info if add_call_info: assert self.scope is not None call_index = _context.call_info_stack[-1].get_call_index() if _global_interceptor_stack: run_fun = functools.partial(run_interceptors, fun) else: run_fun = fun # call method if _use_named_call: with jax.named_scope(_derive_profiling_name(self, fun)): y = run_fun(self, *args, **kwargs) else: y = run_fun(self, *args, **kwargs) if _context.capture_stack: filter_fn = _context.capture_stack[-1] if filter_fn and filter_fn(self, fun_name): self.sow('intermediates', fun_name, y) if add_call_info: _args, _kwargs, _y = flax.linen.summary._represent_tree( (args, kwargs, y) ) _context.call_info_stack[-1].calls.append( _CallInfo( call_index, self.path, self.clone(), self.scope.rngs, self.scope.mutable, fun.__name__, _args, _kwargs, _y, ) ) return y finally: _context.module_stack.pop() if is_compact_method: object.__setattr__(self, 'scope', self.scope.rewound()) # setup or compact calls can be recurrent for example due to super calls # resetting the state would cause is compact/setup method # to be set to False prematurely. if (is_compact_method or is_setup_method) and not is_recurrent: self._state.reset() def __setattr__(self, name: str, val: Any): """Sets an attribute on this Module. We overload setattr solely to support pythonic naming via assignment of submodules in the special :meth:`setup` function:: self.submodule_name = MyModule(...) We also support lists and other general pytrees, e.g.:: self.submodules = [MyModule0(..), MyModule1(..), ...] Args: name: Attribute to set. val: Value of the attribute. 
""" fields = self.__dataclass_fields__ # pytype: disable=attribute-error is_dataclass_attr = name in fields and fields[name].init if not self._state.in_setup: if not self._state.is_initialized: # Setting attributes before end of Module.__post_init__() object.__setattr__(self, name, val) return else: # We're past all initialization and setup logic: # Raises a TypeError just like frozen python dataclasses. raise errors.SetAttributeFrozenModuleError( self.__class__.__name__, name, val ) # We're inside the setup() method: if is_dataclass_attr: # These names are specified as dataclass fields. They should not be # initialized within the setup() method, but can be modified freely # before it. raise errors.SetAttributeInModuleSetupError() # Values (that may be variables or submodules) are being defined and # attached in setup(), we run some extra logic in that case. self._register_submodules(name, val) def __getattr__(self, name: str) -> Any: """Call setup() before getting any setup-defined attributes.""" # We don't want to return anything for python copy / pickle methods. if name in _UNDEFINED_COPY_PICKLE_METHODS: raise AttributeError() self._try_setup() if name in self.__dict__: return self.__dict__[name] else: msg = f'"{self.__class__.__name__}" object has no attribute "{name}".' if self.scope is None: msg += ( f' If "{name}" is defined in \'.setup()\', remember these fields ' "are only accessible from inside 'init' or 'apply'." ) raise AttributeError(msg) def __dir__(self) -> List[str]: """Call setup() before listing attributes.""" self._try_setup() return object.__dir__(self) # type: ignore def __post_init__(self) -> None: # DO NOT REMOVE - Marker for internal logging. # In dataclasses, __init__ is overridden to process dataclass arguments, # and __post_init__ is called immediately afterwards. 
Here, depending on the # type of `parent` passed to initialize the Module, we either defer # initialization, attach this Module as a submodule of a parent, or bind # this Module at the top-level to variables and rngs. object.__setattr__(self, '_id', uuid()) object.__setattr__(self, '_state', _ModuleInternalState()) # Typically we set the parent based on the dynamic module context. if self.parent is _unspecified_parent: # pytype: disable=attribute-error object.__setattr__(self, 'parent', _context.module_stack[-1]) # Initialization is deferred for top level Modules or any other "orphan" # Modules until attachment by __setattr__ i.e. MyModule(..., parent=None) if self.parent is None: return # Register submodule on parent Module. if isinstance(self.parent, Module): # When initializing an unnamed Module inside setup() # initialization is deferred until attachment by __setattr__ # i.e. self.mymodule = MyModule(...) self.name: Optional[str] if ( self.parent._state.in_setup and self.name is None ): # pytype: disable=attribute-error return if not self.parent._initialization_allowed: raise errors.AssignSubModuleError(self.__class__.__name__) # Autonaming of submodules. if self.name is None: # pytype: disable=attribute-error prefix = f'{self.__class__.__name__}' cursor = self.parent._state.autoname_cursor.get(prefix, 0) self.name = f'{prefix}_{cursor}' self.parent._state.autoname_cursor[prefix] = cursor + 1 # Allow scope aliasing under transforms for submodules defined in setup. reuse_scopes = ( self.parent._state.in_setup and self.parent._state.setup_called == SetupState.TRANSFORMED ) # Perform name-collision check. if self.parent._name_taken(self.name, reuse_scopes=reuse_scopes): parent_class = self.parent.__class__.__name__ raise errors.NameInUseError('submodule', self.name, parent_class) # Finalize attachment to parent and scope initialization. 
self.parent._state.children[self.name] = self assert self.parent.scope is not None object.__setattr__( self, 'scope', self.parent.scope.push(self.name, reuse=reuse_scopes) ) # Top-level invocation with a functional Scope. elif isinstance(self.parent, Scope): object.__setattr__(self, 'scope', self.parent) else: raise ValueError('parent must be None, Module or Scope') # eagerly bind submodules if scope is available if self.scope is not None: for field in dataclasses.fields(self): if field.name not in ('parent', 'name') and field.init: self._register_submodules(field.name, getattr(self, field.name)) self._state.is_initialized = True def __repr__(self) -> str: return _module_repr(self) def setup(self) -> None: """Initializes a Module lazily (similar to a lazy ``__init__``). ``setup`` is called once lazily on a module instance when a module is bound, immediately before any other methods like ``__call__`` are invoked, or before a ``setup``-defined attribute on ``self`` is accessed. This can happen in three cases: 1. Immediately when invoking :meth:`apply`, :meth:`init` or :meth:`init_and_output`. 2. Once the module is given a name by being assigned to an attribute of another module inside the other module's ``setup`` method (see :meth:`__setattr__`):: >>> class MyModule(nn.Module): ... def setup(self): ... submodule = nn.Conv(...) ... # Accessing `submodule` attributes does not yet work here. ... # The following line invokes `self.__setattr__`, which gives ... # `submodule` the name "conv1". ... self.conv1 = submodule ... # Accessing `submodule` attributes or methods is now safe and ... # either causes setup() to be called once. 3. Once a module is constructed inside a method wrapped with :meth:`compact`, immediately before another method is called or ``setup`` defined attribute is accessed. """ pass def _register_submodules(self, name, val): """Registers a submodule.""" assert self.scope, 'Trying to register submodules on unbound scope.' 
root = self.scope.root cache = _caches.get(root, weakref.WeakValueDictionary()) _caches[root] = cache queue = [] preserve_adopted_names = config.flax_preserve_adopted_names if hasattr(type(self), 'preserve_adopted_names'): preserve_adopted_names = type(self).preserve_adopted_names def adopt_attr_modules(cache, queue, suffix, subvalue): if isinstance(subvalue, Module): current_name = subvalue.name adopted_name = None if subvalue.parent is None: # Preserve sharing-by-reference relationships during adoption # via cache keyed on unique instance ids. key = subvalue._id # Module was passed from outside. It needs to be cloned. # Outside modules are named by attachment, not an outer name, # UNLESS we're using new adopted name policy, in which case an existing # name will be used, as is often supplied by config systems. if preserve_adopted_names: adopted_name = object.__getattribute__(subvalue, 'name') if key in cache: subvalue = cache[key] else: subvalue = subvalue.clone(name=None) cache[key] = subvalue if subvalue.name is None: object.__setattr__(subvalue, 'parent', self) if adopted_name is None: adopted_name = ( f'{name}{suffix}' if not isinstance(subvalue, CompactNameScope) else current_name ) object.__setattr__(subvalue, 'name', adopted_name) queue.append(subvalue) return subvalue val = _freeze_attr( _map_over_modules_in_tree( functools.partial(adopt_attr_modules, cache, queue), val ) ) object.__setattr__(self, name, val) for x in queue: x.__post_init__() def _try_setup(self, shallow: bool = False) -> None: """Tries to setup module if scope is available and setup has not been called yet.""" if ( self.scope and not self._state.in_setup and self._state.setup_called != SetupState.DONE ): try: self._state.in_setup = True # A shallow setup will only register attribute submodules but it does # not call the user's setup. This avoids running before a # transformation. 
for field in dataclasses.fields(self): if field.name not in ('parent', 'name') and field.init: self._register_submodules(field.name, getattr(self, field.name)) if not shallow: self.setup() # create NonTransparent Modules self._compact_name_scope_modules = { name: CompactNameScope( getattr(type(self), name).inner_fun, lambda: self, name=name ) for name in self._compact_name_scope_methods } # We run static checks abstractly once for setup before any transforms # to detect name collisions and other python errors. elif self._state.setup_called == SetupState.NEW: self._validate_setup() finally: self._state.in_setup = False if not shallow: self._state.setup_called = SetupState.DONE def _validate_setup(self) -> None: """Abstractly evaluates setup only to run static checks.""" def run_setup_only(x): wrapped_id = wrap_method_once(lambda m, x: x) with TestScope({}, rngs={}, mutable=True).temporary() as root: return wrapped_id(self.clone(parent=root), x) _ = jax.eval_shape(run_setup_only, 0) def _name_taken( self, name: str, reuse_scopes: bool = False, collection: Optional[str] = None, ) -> bool: assert self.scope is not None if reuse_scopes: return False return self.scope.name_reserved(name, collection) def _initialization_allowed(self): return ( not self._state.is_initialized # allow eager attachment in post-init or self._state.in_setup or self._state.in_compact_method ) def path(self): if self.scope is None: raise ValueError("Can't access module paths on unbound modules.") return self.scope.path def clone( self: M, *, parent: Optional[Union[Scope, 'Module', _Sentinel]] = None, _deep_clone: Union[bool, weakref.WeakValueDictionary] = False, _reset_names: bool = False, **updates, ) -> M: """Creates a clone of this Module, with optionally updated arguments. NOTE: end users are encouraged to use the ``copy`` method. ``clone`` is used primarily for internal routines, and ``copy`` offers simpler arguments and better defaults. Args: parent: The parent of the clone. 
The clone will have no parent if no explicit parent is specified. _deep_clone: A boolean or a weak value dictionary to control deep cloning of submodules. If True, submodules will be cloned recursively. If a weak value dictionary is passed, it will be used to cache cloned submodules. This flag is used by init/apply/bind to avoid scope leakage. _reset_names: If True, ``name=None`` is also passed to submodules when cloning. Resetting names in submodules is necessary when calling ``.unbind``. **updates: Attribute updates. Returns: A clone of the this Module with the updated attributes and parent. """ attrs = { f.name: getattr(self, f.name) for f in dataclasses.fields(self) if f.init } attrs.update(parent=parent, **updates) # Here we implement deep cloning of submodules, this is necessary to avoid scope leakage # from external submodules into init/apply/bind while preserving sharing-by-reference # relationships between submodules. if _deep_clone != False: # We use a weak value dictionary to cache cloned submodules. When a shared # submodule is cloned, its only cloned once else its fetched from the cache. cache = ( weakref.WeakValueDictionary() if isinstance(_deep_clone, bool) else _deep_clone ) def clone_fn(m: Module) -> Module: if hasattr(m, '_id'): key = m._id if key in cache: return cache[key] else: if _reset_names: clone = m.clone( _deep_clone=cache, _reset_names=_reset_names, name=None ) else: clone = m.clone(_deep_clone=cache) cache[key] = clone return clone else: # If the module doesn't have an _id attribute it could be a mock object # so we return it as is. 
return m # _map_submodules will map over all submodules inside attrs # value here can be any pytree, non-module values are ignored for field_name, value in attrs.items(): if field_name == 'parent': continue attrs[field_name] = _map_submodules(clone_fn, value) module = self.__class__(**attrs) return module def copy( self: M, *, parent: Optional[Union[Scope, 'Module', _Sentinel]] = _unspecified_parent, name: Optional[str] = None, **updates, ) -> M: """Creates a copy of this Module, with optionally updated arguments. Args: parent: The parent of the copy. By default the current module is taken as parent if not explicitly specified. name: A new name for the copied Module, by default a new automatic name will be given. **updates: Attribute updates. Returns: A copy of the this Module with the updated name, parent, and attributes. """ return self.clone( parent=parent, name=name, _deep_clone=True, _reset_names=False, **updates ) def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, ) -> Variable[T]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: Literal[True], **init_kwargs, ) -> Variable[T]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: Literal[False], **init_kwargs, ) -> Variable[meta.AxisMetadata[T]]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: bool = True, **init_kwargs, ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: bool = True, **init_kwargs, ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]: """Declares and returns a variable in this Module. See :mod:`flax.core.variables` for more information. See also :meth:`param` for a shorthand way to define read-only variables in the "params" collection. 
Contrary to :meth:`param`, all arguments used by ``init_fn`` should be passed on explicitly::
""" if not self._initialization_allowed: raise ValueError( 'Variables must be initialized in `setup()` or in a method ' 'wrapped in `@compact`' ) if self._name_taken(name, collection=col): raise errors.NameInUseError('variable', name, self.__class__.__name__) assert self.scope is not None v = self.scope.variable( col, name, init_fn, *init_args, unbox=unbox, **init_kwargs ) self._state.children[name] = col return v def param( self, name: str, init_fn: Callable[..., T], *init_args, ) -> T: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: Literal[True], **init_kwargs, ) -> T: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: Literal[False], **init_kwargs, ) -> meta.AxisMetadata[T]: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: bool, **init_kwargs, ) -> Union[T, meta.AxisMetadata[T]]: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: bool = True, **init_kwargs, ) -> Union[T, meta.AxisMetadata[T]]: """Declares and returns a parameter in this Module. Parameters are read-only variables in the collection named "params". See :mod:`flax.core.variables` for more details on variables. The first argument of ``init_fn`` is assumed to be a PRNG key, which is provided automatically and does not have to be passed using ``init_args`` or ``init_kwargs``:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(4)(x) ... mean = self.param('mean', nn.initializers.lecun_normal(), x.shape) ... ... ... 
return x * mean >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3))) >>> jax.tree_util.tree_map(jnp.shape, variables) {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}, 'mean': (2, 4)}} In the example above, the function ``lecun_normal`` expects two arguments: ``key`` and ``shape``, but only ``shape`` has to be provided explicitly; ``key`` is set automatically using the PRNG for ``params`` that is passed when initializing the module using :meth:`init`. Args: name: The parameter name. init_fn: The function that will be called to compute the initial value of this variable. This function will only be called the first time this parameter is used in this module. *init_args: The positional arguments to pass to init_fn. unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed value, see ``flax.nn.meta.unbox`` (default: True). **init_kwargs: The key-word arguments to pass to init_fn. Returns: The value of the initialized parameter. Throws an error if the parameter exists already. """ if not self._initialization_allowed: raise ValueError( 'Parameters must be initialized in `setup()` or in a method ' 'wrapped in `@compact`' ) if self._name_taken(name, collection='params'): raise errors.NameInUseError('param', name, self.__class__.__name__) assert self.scope is not None v = self.scope.param(name, init_fn, *init_args, unbox=unbox, **init_kwargs) self._state.children[name] = 'params' return v def has_variable(self, col: str, name: str) -> bool: """Checks if a variable of given collection and name exists in this Module. See :mod:`flax.core.variables` for more explanation on variables and collections. Args: col: The variable collection name. name: The name of the variable. Returns: True if the variable exists. 
""" if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.has_variable(col, name) def is_mutable_collection(self, col: str) -> bool: """Returns true if the collection ``col`` is mutable.""" if self.scope is None: raise ValueError("Can't check mutability on unbound modules") return self.scope.is_mutable_collection(col) def has_rng(self, name: str) -> bool: """Returns true if a PRNGSequence with name ``name`` exists.""" if self.scope is None: raise ValueError("Can't query for RNGs on unbound modules") return self.scope.has_rng(name) def make_rng(self, name: str = 'params') -> PRNGKey: """Returns a new RNG key from a given RNG sequence for this Module. The new RNG key is split from the previous one. Thus, every call to ``make_rng`` returns a new RNG key, while still guaranteeing full reproducibility. .. note:: If an invalid name is passed (i.e. no RNG key was passed by the user in ``.init`` or ``.apply`` for this name), then ``name`` will default to ``'params'``. Example:: >>> import jax >>> import flax.linen as nn >>> class ParamsModule(nn.Module): ... def __call__(self): ... return self.make_rng('params') >>> class OtherModule(nn.Module): ... def __call__(self): ... return self.make_rng('other') >>> key = jax.random.key(0) >>> params_out, _ = ParamsModule().init_with_output({'params': key}) >>> # self.make_rng('other') will default to using the 'params' RNG stream >>> other_out, _ = OtherModule().init_with_output({'params': key}) >>> assert params_out == other_out Learn more about RNG's by reading the Flax RNG guide: https://flax.readthedocs.io/en/latest/guides/flax_fundamentals/rng_guide.html Args: name: The RNG sequence name. Returns: The newly generated RNG key. """ if self.scope is None: raise ValueError("Can't use RNGs on unbound modules") return self.scope.make_rng(name) def is_initializing(self) -> bool: """Returns True if running under self.init(...) or nn.init(...)(). 
This is a helper method to handle the common case of simple initialization where we wish to have setup logic occur when only called under ``module.init`` or ``nn.init``. For more complicated multi-phase initialization scenarios it is better to test for the mutability of particular variable collections or for the presence of particular variables that potentially need to be initialized. """ if self.scope is None: raise ValueError("Can't check if running under init() on unbound modules") return self.scope.get_flag('initializing', False) def _module_checks(self): """Run standard runtime checks.""" if not isinstance(self, Module): raise errors.InvalidInstanceModuleError() overridden_post_init = self.__post_init__ != Module.__post_init__ if overridden_post_init and not hasattr(self, '_id'): raise errors.IncorrectPostInitOverrideError() def bind( self: M, variables: VariableDict, *args, rngs: Optional[RNGSequences] = None, mutable: CollectionFilter = False, ) -> M: """Creates an interactive Module instance by binding variables and RNGs. ``bind`` provides an "interactive" instance of a Module directly without transforming a function with ``apply``. This is particularly useful for debugging and interactive use cases like notebooks where a function would limit the ability to split up code into different cells. Once the variables (and optionally RNGs) are bound to a ``Module`` it becomes a stateful object. Note that idiomatic JAX is functional and therefore an interactive instance does not mix well with vanilla JAX APIs. ``bind()`` should only be used for interactive experimentation, and in all other cases we strongly encourage users to use ``apply()`` instead. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn >>> class AutoEncoder(nn.Module): ... def setup(self): ... self.encoder = nn.Dense(3) ... self.decoder = nn.Dense(5) ... ... def __call__(self, x): ... 
return self.decoder(self.encoder(x)) >>> x = jnp.ones((16, 9)) >>> ae = AutoEncoder() >>> variables = ae.init(jax.random.key(0), x) >>> model = ae.bind(variables) >>> z = model.encoder(x) >>> x_reconstructed = model.decoder(z) Args: variables: A dictionary containing variables keyed by variable collections. See :mod:`flax.core.variables` for more details about variables. *args: Named arguments (not used). rngs: a dict of PRNGKeys to initialize the PRNG sequences. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. Returns: A copy of this instance with bound variables and RNGs. """ Module._module_checks(self) del args scope = core.bind(variables, rngs=rngs, mutable=mutable) return self.clone(parent=scope, _deep_clone=True) def unbind(self: M) -> Tuple[M, VariableDict]: """Returns an unbound copy of a Module and its variables. ``unbind`` helps create a stateless version of a bound Module. An example of a common use case: to extract a sub-Module defined inside ``setup()`` and its corresponding variables: 1) temporarily ``bind`` the parent Module; and then 2) ``unbind`` the desired sub-Module. (Recall that ``setup()`` is only called when the Module is bound.):: >>> class Encoder(nn.Module): ... @nn.compact ... def __call__(self, x): ... ... ... return nn.Dense(256)(x) >>> class Decoder(nn.Module): ... @nn.compact ... def __call__(self, x): ... ... ... return nn.Dense(784)(x) >>> class AutoEncoder(nn.Module): ... def setup(self): ... self.encoder = Encoder() ... self.decoder = Decoder() ... ... def __call__(self, x): ... 
return self.decoder(self.encoder(x)) >>> module = AutoEncoder() >>> variables = module.init(jax.random.key(0), jnp.ones((1, 784))) >>> # Extract the Encoder sub-Module and its variables >>> encoder, encoder_vars = module.bind(variables).encoder.unbind() Returns: A tuple with an unbound copy of this Module and its variables. """ Module._module_checks(self) if self.scope is None: raise errors.CallUnbindOnUnboundModuleError() variables = self.variables module = self.clone(_deep_clone=True, _reset_names=True, name=None) return module, variables def apply( self, variables: VariableDict, *args, rngs: Optional[Union[PRNGKey, RNGSequences]] = None, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = False, capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[Any, Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]]: """Applies a module method to variables and returns output and modified variables. Note that ``method`` should be set if one would like to call ``apply`` on a different class method than ``__call__``. For instance, suppose a Transformer modules has a method called ``encode``, then the following calls ``apply`` on that method:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> import numpy as np >>> class Transformer(nn.Module): ... def encode(self, x): ... ... >>> x = jnp.ones((16, 9)) >>> model = Transformer() >>> variables = model.init(jax.random.key(0), x, method=Transformer.encode) >>> encoded = model.apply(variables, x, method=Transformer.encode) If a function instance is provided, the unbound function is used. For instance, the example below is equivalent to the one above:: >>> encoded = model.apply(variables, x, method=model.encode) You can also pass a string to a callable attribute of the module. 
For example, the previous can be written as:: >>> encoded = model.apply(variables, x, method='encode') Note ``method`` can also be a function that is not defined in ``Transformer``. In that case, the function should have at least one argument representing an instance of the Module class:: >>> def other_fn(instance, x): ... # instance.some_module_attr(...) ... instance.encode ... ... >>> model.apply(variables, x, method=other_fn) If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'`` RNG stream. If you want to use a different RNG stream or need to use multiple streams, you can pass a dictionary mapping each RNG stream name to its corresponding ``PRNGKey`` to ``apply``. If ``self.make_rng(name)`` is called on an RNG stream name that isn't passed by the user, it will default to using the ``'params'`` RNG stream. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, add_noise=False): ... x = nn.Dense(16)(x) ... x = nn.relu(x) ... ... if add_noise: ... # Add gaussian noise ... noise_key = self.make_rng('noise') ... x = x + jax.random.normal(noise_key, x.shape) ... ... 
return nn.Dense(1)(x) >>> x = jnp.empty((1, 7)) >>> module = Foo() >>> rngs = {'params': jax.random.key(0), 'noise': jax.random.key(1)} >>> variables = module.init(rngs, x) >>> out0 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> rngs['noise'] = jax.random.key(0) >>> out1 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> # different output (key(1) vs key(0)) >>> np.testing.assert_raises(AssertionError, np.testing.assert_allclose, out0, out1) >>> del rngs['noise'] >>> # self.make_rng('noise') will default to using the 'params' RNG stream >>> out2 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> # same output (key(0)) >>> np.testing.assert_allclose(out1, out2) >>> # passing in a single key is equivalent to passing in {'params': key} >>> out3 = module.apply(variables, x, add_noise=True, rngs=jax.random.key(0)) >>> # same output (key(0)) >>> np.testing.assert_allclose(out2, out3) Args: variables: A dictionary containing variables keyed by variable collections. See :mod:`flax.core.variables` for more details about variables. *args: Named arguments passed to the specified apply method. rngs: a dict of PRNGKeys to initialize the PRNG sequences. The "params" PRNG sequence is used to initialize parameters. method: A function to call apply on. This is generally a function in the module. If provided, applies this method. If not provided, applies the ``__call__`` method of the module. A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default, only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. 
The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the specified apply method. Returns: If ``mutable`` is False, returns output. If any collections are mutable, returns ``(output, vars)``, where ``vars`` are is a dict of the modified collections. """ Module._module_checks(self) if rngs is not None and not isinstance(rngs, dict): if not core.scope._is_valid_rng(rngs): raise errors.InvalidRngError( 'RNGs should be of shape (2,) or PRNGKey in Module ' f'{self.__class__.__name__}, but rngs are: {rngs}' ) rngs = {'params': rngs} if isinstance(method, str): attribute_name = method method = getattr(self, attribute_name) if not callable(method): class_name = type(self).__name__ raise TypeError( f"'{class_name}.{attribute_name}' must be a callable, got" f' {type(method)}.' ) # if the `method` string is a submodule, we create a lambda function # that calls the submodule, forwarding all arguments. if isinstance(method, Module): method = lambda self, *args, **kwargs: getattr(self, attribute_name)( *args, **kwargs ) elif method is None: method = self.__call__ method = _get_unbound_fn(method) return apply( method, self, mutable=mutable, capture_intermediates=capture_intermediates, )(variables, *args, **kwargs, rngs=rngs) def init_with_output( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = DenyList('intermediates'), capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]: """Initializes a module method with variables and returns output and modified variables. Args: rngs: The rngs for the variable collections. *args: Named arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. 
A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except "intermediates" are mutable. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the init function. Returns: ``(output, vars)``, where ``vars`` are is a dict of the modified collections. """ Module._module_checks(self) if not isinstance(rngs, dict): if not core.scope._is_valid_rng(rngs): raise errors.InvalidRngError( 'RNGs should be of shape (2,) or PRNGKey in Module ' f'{self.__class__.__name__}, but rngs are: {rngs}' ) rngs = {'params': rngs} if isinstance(method, str): attribute_name = method method = getattr(self, attribute_name) if not callable(method): class_name = type(self).__name__ raise TypeError( f"'{class_name}.{attribute_name}' must be a callable, got" f' {type(method)}.' 
) elif method is None: method = self.__call__ method = _get_unbound_fn(method) return init_with_output( method, self, mutable=mutable, capture_intermediates=capture_intermediates, )(rngs, *args, **kwargs) def init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = DenyList('intermediates'), capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[FrozenVariableDict, Dict[str, Any]]: """Initializes a module method with variables and returns modified variables. ``init`` takes as first argument either a single ``PRNGKey``, or a dictionary mapping variable collections names to their ``PRNGKeys``, and will call ``method`` (which is the module's ``__call__`` function by default) passing ``*args`` and ``**kwargs``, and returns a dictionary of initialized variables. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> import numpy as np >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, train): ... x = nn.Dense(16)(x) ... x = nn.BatchNorm(use_running_average=not train)(x) ... x = nn.relu(x) ... return nn.Dense(1)(x) >>> x = jnp.empty((1, 7)) >>> module = Foo() >>> key = jax.random.key(0) >>> variables = module.init(key, x, train=False) If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'`` RNG stream. If you want to use a different RNG stream or need to use multiple streams, you can pass a dictionary mapping each RNG stream name to its corresponding ``PRNGKey`` to ``init``. If ``self.make_rng(name)`` is called on an RNG stream name that isn't passed by the user, it will default to using the ``'params'`` RNG stream. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(16)(x) ... x = nn.relu(x) ... ... other_variable = self.variable( ... 'other_collection', ... 'other_variable', ... lambda x: jax.random.normal(self.make_rng('other_rng'), x.shape), ... 
x, ... ) ... x = x + other_variable.value ... ... return nn.Dense(1)(x) >>> module = Foo() >>> rngs = {'params': jax.random.key(0), 'other_rng': jax.random.key(1)} >>> variables0 = module.init(rngs, x) >>> rngs['other_rng'] = jax.random.key(0) >>> variables1 = module.init(rngs, x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables0['params'], variables1['params'] ... ) >>> # different other_variable (key(1) vs key(0)) >>> np.testing.assert_raises( ... AssertionError, ... np.testing.assert_allclose, ... variables0['other_collection']['other_variable'], ... variables1['other_collection']['other_variable'], ... ) >>> del rngs['other_rng'] >>> # self.make_rng('other_rng') will default to using the 'params' RNG stream >>> variables2 = module.init(rngs, x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables1['params'], variables2['params'] ... ) >>> # equivalent other_variable (key(0)) >>> np.testing.assert_allclose( ... variables1['other_collection']['other_variable'], ... variables2['other_collection']['other_variable'], ... ) >>> # passing in a single key is equivalent to passing in {'params': key} >>> variables3 = module.init(jax.random.key(0), x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables2['params'], variables3['params'] ... ) >>> # equivalent other_variable (key(0)) >>> np.testing.assert_allclose( ... variables2['other_collection']['other_variable'], ... variables3['other_collection']['other_variable'], ... ) Jitting ``init`` initializes a model lazily using only the shapes of the provided arguments, and avoids computing the forward pass with actual values. 
Example:: >>> module = nn.Dense(1) >>> init_jit = jax.jit(module.init) >>> variables = init_jit(jax.random.key(0), x) ``init`` is a light wrapper over ``apply``, so other ``apply`` arguments like ``method``, ``mutable``, and ``capture_intermediates`` are also available. Args: rngs: The rngs for the variable collections. *args: Named arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except "intermediates" are mutable. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the init function. Returns: The initialized variable dict. """ Module._module_checks(self) _, v_out = self.init_with_output( rngs, *args, method=method, mutable=mutable, capture_intermediates=capture_intermediates, **kwargs, ) return v_out def lazy_init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Optional[Callable[..., Any]] = None, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> FrozenVariableDict: """Initializes a module without computing on an actual input. lazy_init will initialize the variables without doing unnecessary compute. 
The input data should be passed as a ``jax.ShapeDtypeStruct`` which specifies the shape and dtype of the input but no concrete data. Example:: >>> model = nn.Dense(features=256) >>> variables = model.lazy_init( ... jax.random.key(0), jax.ShapeDtypeStruct((1, 128), jnp.float32)) The args and kwargs args passed to ``lazy_init`` can be a mix of concrete (jax arrays, scalars, bools) and abstract (ShapeDtypeStruct) values. Concrete values are only necessary for arguments that affect the initialization of variables. For example, the model might expect a keyword arg that enables/disables a subpart of the model. In this case, an explicit value (True/Flase) should be passed otherwise ``lazy_init`` cannot infer which variables should be initialized. Args: rngs: The rngs for the variable collections. *args: arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except "intermediates" are mutable. **kwargs: Keyword arguments passed to the init function. Returns: The initialized variable dict. """ Module._module_checks(self) def lazy_wrapper(rngs, *args, **kwargs): return self.init(rngs, *args, method=method, mutable=mutable, **kwargs) return partial_eval.lazy_init(lazy_wrapper)(rngs, *args, **kwargs) def variables(self) -> VariableDict: """Returns the variables in this module.""" if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.variables() def get_variable(self, col: str, name: str, default: Optional[T] = None) -> T: """Retrieves the value of a Variable. Args: col: the variable collection. name: the name of the variable. 
default: the default value to return if the variable does not exist in this scope. Returns: The value of the input variable, of the default value if the variable doesn't exist in this scope. """ if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.get_variable(col, name, default) def put_variable(self, col: str, name: str, value: Any): """Updates the value of the given variable if it is mutable, or an error otherwise. Args: col: the variable collection. name: the name of the variable. value: the new value of the variable. """ if self.scope is None: raise ValueError("Can't access variables on unbound modules") self.scope.put_variable(col, name, value) def sow(self, col: str, name: str, value: Any) -> bool: ... def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: ... def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: """Stores a value in a collection. Collections can be used to collect intermediate values without the overhead of explicitly passing a container through each Module call. If the target collection is not mutable ``sow`` behaves like a no-op and returns ``False``. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... self.sow('intermediates', 'h', h) ... 
return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> model = Foo() >>> variables = model.init(jax.random.key(0), x) >>> y, state = model.apply(variables, x, mutable=['intermediates']) >>> print(state['intermediates']) {'h': (Array([[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ]], dtype=float32),)} By default the values are stored in a tuple and each stored value is appended at the end. This way all intermediates can be tracked when the same module is called multiple times. Alternatively, a custom init/reduce function can be passed:: >>> class Foo2(nn.Module): ... @nn.compact ... def __call__(self, x): ... init_fn = lambda: 0 ... reduce_fn = lambda a, b: a + b ... self.sow('intermediates', 'h', x, ... init_fn=init_fn, reduce_fn=reduce_fn) ... self.sow('intermediates', 'h', x * 2, ... init_fn=init_fn, reduce_fn=reduce_fn) ... return x >>> x = jnp.ones((1, 1)) >>> model = Foo2() >>> variables = model.init(jax.random.key(0), x) >>> y, state = model.apply( ... variables, x, mutable=['intermediates']) >>> print(state['intermediates']) {'h': Array([[3.]], dtype=float32)} Args: col: The name of the variable collection. name: The name of the variable. value: The value of the variable. 
reduce_fn: The function used to combine the existing value with the new value. The default is to append the value to a tuple. init_fn: For the first value stored, ``reduce_fn`` will be passed the result of ``init_fn`` together with the value to be stored. The default is an empty tuple. Returns: ``True`` if the value has been stored successfully, ``False`` otherwise. """ if self.scope is None: raise ValueError("Can't store variables on unbound modules") if not self.scope.is_mutable_collection(col): return False if self.scope.has_variable(col, name): xs = self.scope.get_variable(col, name) else: self.scope.reserve(name, col) self._state.children[name] = col xs = init_fn() xs = reduce_fn(xs, value) self.scope.put_variable(col, name, xs) return True def perturb( self, name: str, value: T, collection: str = 'perturbations' ) -> T: """Add an zero-value variable ('perturbation') to the intermediate value. The gradient of ``value`` would be the same as the gradient of this perturbation variable. Therefore, if you define your loss function with both params and perturbations as standalone arguments, you can get the intermediate gradients of ``value`` by running ``jax.grad`` on the perturbation argument. .. note:: This is an experimental API and may be tweaked later for better performance and usability. At its current stage, it creates extra dummy variables that occupies extra memory space. Use it only to debug gradients in training. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(3)(x) ... x = self.perturb('dense3', x) ... return nn.Dense(2)(x) >>> def loss(variables, inputs, targets): ... preds = model.apply(variables, inputs) ... 
return jnp.square(preds - targets).mean() >>> x = jnp.ones((2, 9)) >>> y = jnp.ones((2, 2)) >>> model = Foo() >>> variables = model.init(jax.random.key(0), x) >>> intm_grads = jax.grad(loss, argnums=0)(variables, x, y) >>> print(intm_grads['perturbations']['dense3']) [[-1.456924 -0.44332537 0.02422847] [-1.456924 -0.44332537 0.02422847]] If perturbations are not passed to ``apply``, ``perturb`` behaves like a no-op so you can easily disable the behavior when not needed:: >>> model.apply(variables, x) # works as expected Array([[-1.0980128 , -0.67961735], [-1.0980128 , -0.67961735]], dtype=float32) >>> model.apply({'params': variables['params']}, x) # behaves like a no-op Array([[-1.0980128 , -0.67961735], [-1.0980128 , -0.67961735]], dtype=float32) >>> intm_grads = jax.grad(loss, argnums=0)({'params': variables['params']}, x, y) >>> 'perturbations' not in intm_grads True """ if self.scope is None: raise ValueError("Can't store variables on unbound modules") if self.is_mutable_collection(collection): if not self.scope.has_variable(collection, name): self.scope.reserve(name, collection) self._state.children[name] = collection self.scope.put_variable(collection, name, jnp.zeros_like(value)) # type: ignore if collection in self.scope.root._variables: if self.scope.has_variable(collection, name): value += self.scope.get_variable(collection, name) # type: ignore else: raise ValueError(f"Perturbation collection {collection} present, but " f"missing perturbation variable {name}") return value def tabulate( self, rngs: Union[PRNGKey, RNGSequences], *args, depth: Optional[int] = None, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), console_kwargs: Optional[Mapping[str, Any]] = None, table_kwargs: Mapping[str, Any] = MappingProxyType({}), column_kwargs: Mapping[str, Any] = MappingProxyType({}), compute_flops: bool = False, compute_vjp_flops: bool = False, **kwargs, ) -> str: """Creates a summary of the Module represented as a table. 
This method has the same signature and internally calls ``Module.init``, but instead of returning the variables, it returns the string summarizing the Module in a table. ``tabulate`` uses ``jax.eval_shape`` to run the forward computation without consuming any FLOPs or allocating memory. Additional arguments can be passed into the ``console_kwargs`` argument, for example, ``{'width': 120}``. For a full list of ``console_kwargs`` arguments, see: https://rich.readthedocs.io/en/stable/reference/console.html#rich.console.Console Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> # print(Foo().tabulate( >>> # jax.random.key(0), x, compute_flops=True, compute_vjp_flops=True)) This gives the following output:: Foo Summary ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ ┃ path ┃ module ┃ inputs ┃ outputs ┃ flops ┃ vjp_flops ┃ params ┃ ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ │ │ Foo │ float32[16,9] │ float32[16,2] │ 1504 │ 4460 │ │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_0 │ Dense │ float32[16,9] │ float32[16,4] │ 1216 │ 3620 │ bias: │ │ │ │ │ │ │ │ float32[4] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[9,4] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 40 (160 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_1 │ Dense │ float32[16,4] │ float32[16,2] │ 288 │ 840 │ bias: │ │ │ │ │ │ │ │ float32[2] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[4,2] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 10 (40 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ │ │ │ │ │ Total │ 50 (200 B) │ └─────────┴────────┴───────────────┴───────────────┴───────┴───────────┴─────────────────┘ Total 
Parameters: 50 (200 B) **Note**: rows order in the table does not represent execution order, instead it aligns with the order of keys in ``variables`` which are sorted alphabetically. **Note**: ``vjp_flops`` returns ``0`` if the module is not differentiable. Args: rngs: The rngs for the variable collections as passed to ``Module.init``. *args: The arguments to the forward computation. depth: controls how many submodule deep the summary can go. By default, its ``None`` which means no limit. If a submodule is not shown because of the depth limit, its parameter count and bytes will be added to the row of its first shown ancestor such that the sum of all rows always adds up to the total number of parameters of the Module. show_repeated: If ``True``, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. Default is ``False``. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except 'intermediates' are mutable. console_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.console.Console`` when rendering the table. Default arguments are ``{'force_terminal': True, 'force_jupyter': False}``. table_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.table.Table`` constructor. column_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.table.Table.add_column`` when adding columns to the table. compute_flops: whether to include a ``flops`` column in the table listing the estimated FLOPs cost of each module forward pass. Does incur actual on-device computation / compilation / memory allocation, but still introduces overhead for large modules (e.g. 
extra 20 seconds for a Stable Diffusion's UNet, whereas otherwise tabulation would finish in 5 seconds). compute_vjp_flops: whether to include a ``vjp_flops`` column in the table listing the estimated FLOPs cost of each module backward pass. Introduces a compute overhead of about 2-3X of ``compute_flops``. **kwargs: keyword arguments to pass to the forward computation. Returns: A string summarizing the Module. """ from flax.linen import summary tabulate_fn = summary.tabulate( self, rngs, depth=depth, show_repeated=show_repeated, mutable=mutable, console_kwargs=console_kwargs, table_kwargs=table_kwargs, column_kwargs=column_kwargs, compute_flops=compute_flops, compute_vjp_flops=compute_vjp_flops, ) return tabulate_fn(*args, **kwargs) def module_paths( self, rngs: Union[PRNGKey, RNGSequences], *args, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> dict[str, 'Module']: """Returns a dictionary mapping module paths to module instances. This method has the same signature and internally calls ``Module.init``, but instead of returning the variables, it returns a dictionary mapping module paths to unbounded copies of module instances that were used at runtime. ``module_paths`` uses ``jax.eval_shape`` to run the forward computation without consuming any FLOPs or allocating memory. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> modules = Foo().module_paths(jax.random.key(0), x) >>> print({ ... p: type(m).__name__ for p, m in modules.items() ... }) {'': 'Foo', 'Dense_0': 'Dense', 'Dense_1': 'Dense'} Args: rngs: The rngs for the variable collections as passed to ``Module.init``. *args: The arguments to the forward computation. show_repeated: If ``True``, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. 
Default is ``False``. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except 'intermediates' are mutable. **kwargs: keyword arguments to pass to the forward computation. Returns: A dict`ionary mapping module paths to module instances. """ from flax.linen import summary table = summary._get_module_table( module=self, depth=None, show_repeated=show_repeated, compute_flops=False, compute_vjp_flops=False, )(rngs, *args, **kwargs, mutable=mutable) return {'/'.join(row.path): row.module_copy for row in table} The provided code snippet includes necessary dependencies for implementing the `vjp` function. Write a Python function `def vjp( fn: Callable[..., Any], mdl: Module, *primals, has_aux: bool = False, reduce_axes=(), vjp_variables: CollectionFilter = 'params', variables: CollectionFilter = True, rngs: PRNGSequenceFilter = True, multi_scope: bool = False, )` to solve the following problem: A lifted version of ``jax.vjp``. See ``jax.vjp`` for the unlifted vector-Jacobian product (backward gradient). Note that a gradient is returned for all variables in the collections specified by ``vjp_variables``. However, the backward function only expects a cotangent for the return value of ``fn``. If variables require a co-tangent as well they can be returned from ``fn`` using ``Module.variables``. Example:: >>> import flax.linen as nn >>> import jax.numpy as jnp >>> class LearnScale(nn.Module): ... @nn.compact ... def __call__(self, x, y): ... p = self.param('scale', nn.initializers.zeros_init(), ()) ... return p * x * y >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, y): ... z, bwd = nn.vjp(lambda mdl, x, y: mdl(x, y), LearnScale(), x, y) ... params_grad, x_grad, y_grad = bwd(jnp.ones(z.shape)) ... 
return z, params_grad, x_grad, y_grad Args: fn: Function to be differentiated. Its arguments should be arrays, scalars, or standard Python containers of arrays or scalars. It should return an array, scalar, or standard Python container of arrays or scalars. It will receive the scope and primals as arguments. mdl: The module of which the variables will be differentiated. *primals: A sequence of primal values at which the Jacobian of ``fn`` should be evaluated. The length of ``primals`` should be equal to the number of positional parameters to ``fn``. Each primal value should be a tuple of arrays, scalar, or standard Python containers thereof. has_aux: Optional, bool. Indicates whether ``fn`` returns a pair where the first element is considered the output of the mathematical function to be differentiated and the second element is auxiliary data. Default ``False``. reduce_axes: Optional, tuple of axis names. If an axis is listed here, and ``fn`` implicitly broadcasts a value over that axis, the backward pass will perform a ``psum`` of the corresponding gradient. Otherwise, the VJP will be per-example over named axes. For example, if ``'batch'`` is a named batch axis, ``vjp(f, *args, reduce_axes=('batch',))`` will create a VJP function that sums over the batch while ``vjp(f, *args)`` will create a per-example VJP. vjp_variables: The vjpfun will return a cotangent vector for all variable collections specified by this filter. variables: other variables collections that are available inside ``fn`` but do not receive a cotangent. rngs: the prngs that are available inside ``fn``. multi_scope: for Modules containing multiple scopes from outside modules passed in, allow for variable gradients to be returned for multiple scopes instead of erroring. Returns: If ``has_aux`` is ``False``, returns a ``(primals_out, vjpfun)`` pair, where ``primals_out`` is ``fn(*primals)``. 
``vjpfun`` is a function from a cotangent vector with the same shape as ``primals_out`` to a tuple of cotangent vectors with the same shape as ``primals``, representing the vector-Jacobian product of ``fn`` evaluated at ``primals``. If ``has_aux`` is ``True``, returns a ``(primals_out, vjpfun, aux)`` tuple where ``aux`` is the auxiliary data returned by ``fn``. Here is the function: def vjp( fn: Callable[..., Any], mdl: Module, *primals, has_aux: bool = False, reduce_axes=(), vjp_variables: CollectionFilter = 'params', variables: CollectionFilter = True, rngs: PRNGSequenceFilter = True, multi_scope: bool = False, ): """A lifted version of ``jax.vjp``. See ``jax.vjp`` for the unlifted vector-Jacobian product (backward gradient). Note that a gradient is returned for all variables in the collections specified by ``vjp_variables``. However, the backward function only expects a cotangent for the return value of ``fn``. If variables require a co-tangent as well they can be returned from ``fn`` using ``Module.variables``. Example:: >>> import flax.linen as nn >>> import jax.numpy as jnp >>> class LearnScale(nn.Module): ... @nn.compact ... def __call__(self, x, y): ... p = self.param('scale', nn.initializers.zeros_init(), ()) ... return p * x * y >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, y): ... z, bwd = nn.vjp(lambda mdl, x, y: mdl(x, y), LearnScale(), x, y) ... params_grad, x_grad, y_grad = bwd(jnp.ones(z.shape)) ... return z, params_grad, x_grad, y_grad Args: fn: Function to be differentiated. Its arguments should be arrays, scalars, or standard Python containers of arrays or scalars. It should return an array, scalar, or standard Python container of arrays or scalars. It will receive the scope and primals as arguments. mdl: The module of which the variables will be differentiated. *primals: A sequence of primal values at which the Jacobian of ``fn`` should be evaluated. 
The length of ``primals`` should be equal to the number of positional parameters to ``fn``. Each primal value should be a tuple of arrays, scalar, or standard Python containers thereof. has_aux: Optional, bool. Indicates whether ``fn`` returns a pair where the first element is considered the output of the mathematical function to be differentiated and the second element is auxiliary data. Default ``False``. reduce_axes: Optional, tuple of axis names. If an axis is listed here, and ``fn`` implicitly broadcasts a value over that axis, the backward pass will perform a ``psum`` of the corresponding gradient. Otherwise, the VJP will be per-example over named axes. For example, if ``'batch'`` is a named batch axis, ``vjp(f, *args, reduce_axes=('batch',))`` will create a VJP function that sums over the batch while ``vjp(f, *args)`` will create a per-example VJP. vjp_variables: The vjpfun will return a cotangent vector for all variable collections specified by this filter. variables: other variables collections that are available inside ``fn`` but do not receive a cotangent. rngs: the prngs that are available inside ``fn``. multi_scope: for Modules containing multiple scopes from outside modules passed in, allow for variable gradients to be returned for multiple scopes instead of erroring. Returns: If ``has_aux`` is ``False``, returns a ``(primals_out, vjpfun)`` pair, where ``primals_out`` is ``fn(*primals)``. ``vjpfun`` is a function from a cotangent vector with the same shape as ``primals_out`` to a tuple of cotangent vectors with the same shape as ``primals``, representing the vector-Jacobian product of ``fn`` evaluated at ``primals``. If ``has_aux`` is ``True``, returns a ``(primals_out, vjpfun, aux)`` tuple where ``aux`` is the auxiliary data returned by ``fn``. """ return lift_direct_transform( lift.vjp, (fn,), mdl, *primals, multi_scope=multi_scope, has_aux=has_aux, reduce_axes=reduce_axes, vjp_variables=vjp_variables, variables=variables, rngs=rngs, )
A lifted version of ``jax.vjp``. See ``jax.vjp`` for the unlifted vector-Jacobian product (backward gradient). Note that a gradient is returned for all variables in the collections specified by ``vjp_variables``. However, the backward function only expects a cotangent for the return value of ``fn``. If variables require a co-tangent as well they can be returned from ``fn`` using ``Module.variables``. Example:: >>> import flax.linen as nn >>> import jax.numpy as jnp >>> class LearnScale(nn.Module): ... @nn.compact ... def __call__(self, x, y): ... p = self.param('scale', nn.initializers.zeros_init(), ()) ... return p * x * y >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, y): ... z, bwd = nn.vjp(lambda mdl, x, y: mdl(x, y), LearnScale(), x, y) ... params_grad, x_grad, y_grad = bwd(jnp.ones(z.shape)) ... return z, params_grad, x_grad, y_grad Args: fn: Function to be differentiated. Its arguments should be arrays, scalars, or standard Python containers of arrays or scalars. It should return an array, scalar, or standard Python container of arrays or scalars. It will receive the scope and primals as arguments. mdl: The module of which the variables will be differentiated. *primals: A sequence of primal values at which the Jacobian of ``fn`` should be evaluated. The length of ``primals`` should be equal to the number of positional parameters to ``fn``. Each primal value should be a tuple of arrays, scalar, or standard Python containers thereof. has_aux: Optional, bool. Indicates whether ``fn`` returns a pair where the first element is considered the output of the mathematical function to be differentiated and the second element is auxiliary data. Default ``False``. reduce_axes: Optional, tuple of axis names. If an axis is listed here, and ``fn`` implicitly broadcasts a value over that axis, the backward pass will perform a ``psum`` of the corresponding gradient. Otherwise, the VJP will be per-example over named axes. 
For example, if ``'batch'`` is a named batch axis, ``vjp(f, *args, reduce_axes=('batch',))`` will create a VJP function that sums over the batch while ``vjp(f, *args)`` will create a per-example VJP. vjp_variables: The vjpfun will return a cotangent vector for all variable collections specified by this filter. variables: other variables collections that are available inside ``fn`` but do not receive a cotangent. rngs: the prngs that are available inside ``fn``. multi_scope: for Modules containing multiple scopes from outside modules passed in, allow for variable gradients to be returned for multiple scopes instead of erroring. Returns: If ``has_aux`` is ``False``, returns a ``(primals_out, vjpfun)`` pair, where ``primals_out`` is ``fn(*primals)``. ``vjpfun`` is a function from a cotangent vector with the same shape as ``primals_out`` to a tuple of cotangent vectors with the same shape as ``primals``, representing the vector-Jacobian product of ``fn`` evaluated at ``primals``. If ``has_aux`` is ``True``, returns a ``(primals_out, vjpfun, aux)`` tuple where ``aux`` is the auxiliary data returned by ``fn``.
22,613
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax def value_and_grad( fn: Callable[..., Any], mdl: Module, *primals, has_aux: bool = False, reduce_axes=(), variables: CollectionFilter = True, rngs: PRNGSequenceFilter = True, ): """A limited, lifted equivalent of ``jax.value_and_grad``. Note that for this convenience function, gradients are only calculated for the function inputs, and not with respect to any module variables. The target function must return a scalar-valued output. For a more general lifted vjp, see ``nn.vjp`` for the lifted vector-Jacobian product. Example:: class LearnScale(nn.Module): def __call__(self, x, y): p = self.param('scale', nn.initializers.zeros_init(), ()) return p * x * y class Foo(nn.Module): def __call__(self, x, y): z, (x_grad, y_grad) = nn.value_and_grad( lambda mdl, x, y: mdl(x, y), LearnScale(), x, y) return z, x_grad, y_grad Args: fn: Function to be differentiated. Its arguments should be arrays, scalars, or standard Python containers of arrays or scalars. It should return an array, scalar, or standard Python container of arrays or scalars. It will receive the scope and primals as arguments. mdl: The module of which the variables will be differentiated. *primals: A sequence of primal values at which the Jacobian of ``fn`` should be evaluated. 
The length of ``primals`` should be equal to the number of positional parameters to ``fn``. Each primal value should be a tuple of arrays, scalar, or standard Python containers thereof. has_aux: Optional, bool. Indicates whether ``fn`` returns a pair where the first element is considered the output of the mathematical function to be differentiated and the second element is auxiliary data. Default ``False``. reduce_axes: Optional, tuple of axis names. If an axis is listed here, and ``fn`` implicitly broadcasts a value over that axis, the backward pass will perform a ``psum`` of the corresponding gradient. Otherwise, the grad will be per-example over named axes. For example, if ``'batch'`` is a named batch axis, ``vjp(f, *args, reduce_axes=('batch',))`` will create a grad function that sums over the batch while ``grad(f, *args)`` will create a per-example grad. variables: variables collections that are available inside ``fn`` but do not receive a cotangent. rngs: the prngs that are available inside ``fn``. Returns: If ``has_aux`` is ``False``, returns a ``primals_out, grads`` pair, where ``primals_out`` is ``fn(*primals)``. ``grads`` are the gradients for the corresponding primals and do not include the gradients for module variables. If ``has_aux`` is ``True``, returns a ``(primals_out, aux), grads`` tuple where ``aux`` is the auxiliary data returned by ``fn``. """ grad_partial = functools.partial( lift_direct_transform, lift.value_and_grad, (fn,), mdl, *primals, has_aux=has_aux, reduce_axes=reduce_axes, variables=variables, rngs=rngs, ) if has_aux: out, aux, argument_grads = grad_partial() if out.shape != (): raise ValueError( 'grad can only work on functions with ' f'scalar-valued outputs. out shape={out.shape}' ) return (out, aux), argument_grads else: out, argument_grads = grad_partial() if out.shape != (): raise ValueError( 'grad can only work on functions with ' f'scalar-valued outputs. 
out shape={out.shape}' ) return out, argument_grads CollectionFilter = Filter PRNGSequenceFilter = Filter class Module(ModuleBase): """Base class for all neural network modules. Layers and models should subclass this class. All Flax Modules are Python 3.7 `dataclasses <https://docs.python.org/3/library/dataclasses.html>`_. Since dataclasses take over ``__init__``, you should instead override :meth:`setup`, which is automatically called to initialize the module. Modules can contain submodules, and in this way can be nested in a tree structure. Submodels can be assigned as regular attributes inside the :meth:`setup` method. You can define arbitrary "forward pass" methods on your Module subclass. While no methods are special-cased, ``__call__`` is a popular choice because it allows you to use module instances as if they are functions:: >>> from flax import linen as nn >>> from typing import Tuple >>> class Module(nn.Module): ... features: Tuple[int, ...] = (16, 4) ... def setup(self): ... self.dense1 = nn.Dense(self.features[0]) ... self.dense2 = nn.Dense(self.features[1]) ... def __call__(self, x): ... return self.dense2(nn.relu(self.dense1(x))) Optionally, for more concise module implementations where submodules definitions are co-located with their usage, you can use the :meth:`compact` wrapper. """ if typing.TYPE_CHECKING: name: Optional[str] = module_field(kw_only=True, default=None) parent: Union['Module', _Sentinel, None] = module_field( kw_only=True, default=None ) def __init__(self, *args, **kwargs): # this stub makes sure pytype accepts constructor arguments. pass def __call__(self, *args, **kwargs) -> Any: # this stub allows pytype to accept Modules as Callables. pass def __init_subclass__(cls, kw_only: bool = False, **kwargs: Any) -> None: """Automatically initializes all subclasses as custom dataclasses.""" super().__init_subclass__(**kwargs) # All Flax Modules are dataclasses. 
We force this convention since # it encourages the stateless behavior needed to clone module instances for # functional transformation. Instead of using a python metaclass, we # automatically transform Modules into dataclasses at subclass creation # time, and we set the last dataclass arguments to `parent` and `name`. cls._customized_dataclass_transform(kw_only) # We wrap user-defined methods including setup and __call__ to enforce # a number of different checks and to provide clear error messages. cls._verify_single_or_no_compact() cls._find_compact_name_scope_methods() cls._wrap_module_attributes() # Set empty class defaults. cls._state = _uninitialized_module_internal_state # type: ignore[attr-defined] cls.scope: Optional[Scope] = None # type: ignore # Handles weak referencing of parent Modules to prevent reference cycles. cls._parent_ref = None # type: ignore[attr-defined] cls.parent = ParentDescriptor() # type: ignore[assignment] def _customized_dataclass_transform(cls, kw_only: bool): """Transforms `cls` into a dataclass, with custom additional behavior. 1. Inject `parent` and `name` fields. (If they are already present, then check that they have the expected types.) 2. Set compare, hash, and repr to False for non-init fields. 3. Generate a hash function (if not provided by cls). """ # Check reserved attributes have expected type annotations. annotations = dict(cls.__dict__.get('__annotations__', {})) if annotations.get('parent', _ParentType) != _ParentType: raise errors.ReservedModuleAttributeError(annotations) if annotations.get('name', str) not in ('str', str, Optional[str]): raise errors.ReservedModuleAttributeError(annotations) # any non-init field will only be set in setup # During __hash__ and __eq__ the field is not set yet # so it should not be used in compare, hash or repr. 
for field in annotations: field_meta = getattr(cls, field, None) if isinstance(field_meta, dataclasses.Field) and not field_meta.init: field_meta.compare = False field_meta.hash = False field_meta.repr = False extra_fields = [ ( 'parent', _ParentType, kw_only_dataclasses.field( repr=False, default=_unspecified_parent, kw_only=True ), ), ( 'name', Optional[str], kw_only_dataclasses.field(default=None, kw_only=True), ), ] if kw_only: if tuple(sys.version_info)[:3] >= (3, 10, 0): for ( name, annotation, # pytype: disable=invalid-annotation default, ) in extra_fields: setattr(cls, name, default) cls.__annotations__[name] = annotation dataclasses.dataclass( # type: ignore[call-overload] unsafe_hash='__hash__' not in cls.__dict__, repr=False, kw_only=True, )(cls) else: raise TypeError('`kw_only` is not available before Py 3.10.') else: # Now apply dataclass transform (which operates in-place). # Do generate a hash function only if not provided by the class. kw_only_dataclasses.dataclass( cls, unsafe_hash='__hash__' not in cls.__dict__, repr=False, extra_fields=extra_fields, ) # pytype: disable=wrong-keyword-args cls.__hash__ = _wrap_hash(cls.__hash__) # type: ignore[method-assign] def _verify_single_or_no_compact(cls): """Statically verifies that at most a single method is labelled compact.""" methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)] n_compact_fns = len( [ method_name for method_name in methods if hasattr(getattr(cls, method_name), 'compact') ] ) if n_compact_fns > 1: raise errors.MultipleMethodsCompactError() def _find_compact_name_scope_methods(cls): """Finds all compact_name_scope methods in the class.""" methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)] compact_name_scope_fns = tuple( method_name for method_name in methods if hasattr(getattr(cls, method_name), 'compact_name_scope') ) cls._compact_name_scope_methods = compact_name_scope_fns def _wrap_module_attributes(cls): """Wraps user-defined non-inherited methods 
and descriptors with state management functions. """ # wrap methods method_exclusions = [f.name for f in dataclasses.fields(cls)] + [ '__eq__', '__repr__', '__init__', '__hash__', '__post_init__', ] for key in _get_local_method_names(cls, exclude=method_exclusions): method = getattr(cls, key) if hasattr(method, 'nowrap'): continue setattr(cls, key, wrap_method_once(method)) # wrap descriptors descriptor_exclusions = [f.name for f in dataclasses.fields(cls)] + [ 'parent', '__dict__', ] for key in _get_local_descriptor_names(cls, descriptor_exclusions): # don't use getattr here, since it will call the descriptor descriptor = cls.__dict__[key] if hasattr(descriptor, 'nowrap'): continue setattr(cls, key, wrap_descriptor_once(descriptor)) return cls def _call_wrapped_method(self, fun, args, kwargs): """Calls a wrapped method. This function is responsible for setting up the thread local state correctly before calling the method and cleaning up afterwards. This includes storing intermediates, setup of the compact scope, and making sure setup is called before any other method. Args: fun: The wrapped method. args: Named arguments passed to ``fun``. kwargs: Keyword arguments passed to ``fun``. Returns: The results of calling ``fun``. """ is_compact_method = hasattr(fun, 'compact') fun_name = _get_fn_name(fun) is_setup_method = fun_name == 'setup' add_call_info = not is_setup_method and len(_context.call_info_stack) > 0 # We lazily call setup() only when needed. 
if is_setup_method: if self.scope is None: raise errors.CallSetupUnboundModuleError() is_recurrent = self._state.in_setup self._state.in_setup = True else: self._try_setup() if is_compact_method: if self.scope is None: raise errors.CallCompactUnboundModuleError() is_recurrent = self._state.in_compact_method self._state.in_compact_method = True _context.module_stack.append(self) try: # get call info if add_call_info: assert self.scope is not None call_index = _context.call_info_stack[-1].get_call_index() if _global_interceptor_stack: run_fun = functools.partial(run_interceptors, fun) else: run_fun = fun # call method if _use_named_call: with jax.named_scope(_derive_profiling_name(self, fun)): y = run_fun(self, *args, **kwargs) else: y = run_fun(self, *args, **kwargs) if _context.capture_stack: filter_fn = _context.capture_stack[-1] if filter_fn and filter_fn(self, fun_name): self.sow('intermediates', fun_name, y) if add_call_info: _args, _kwargs, _y = flax.linen.summary._represent_tree( (args, kwargs, y) ) _context.call_info_stack[-1].calls.append( _CallInfo( call_index, self.path, self.clone(), self.scope.rngs, self.scope.mutable, fun.__name__, _args, _kwargs, _y, ) ) return y finally: _context.module_stack.pop() if is_compact_method: object.__setattr__(self, 'scope', self.scope.rewound()) # setup or compact calls can be recurrent for example due to super calls # resetting the state would cause is compact/setup method # to be set to False prematurely. if (is_compact_method or is_setup_method) and not is_recurrent: self._state.reset() def __setattr__(self, name: str, val: Any): """Sets an attribute on this Module. We overload setattr solely to support pythonic naming via assignment of submodules in the special :meth:`setup` function:: self.submodule_name = MyModule(...) We also support lists and other general pytrees, e.g.:: self.submodules = [MyModule0(..), MyModule1(..), ...] Args: name: Attribute to set. val: Value of the attribute. 
""" fields = self.__dataclass_fields__ # pytype: disable=attribute-error is_dataclass_attr = name in fields and fields[name].init if not self._state.in_setup: if not self._state.is_initialized: # Setting attributes before end of Module.__post_init__() object.__setattr__(self, name, val) return else: # We're past all initialization and setup logic: # Raises a TypeError just like frozen python dataclasses. raise errors.SetAttributeFrozenModuleError( self.__class__.__name__, name, val ) # We're inside the setup() method: if is_dataclass_attr: # These names are specified as dataclass fields. They should not be # initialized within the setup() method, but can be modified freely # before it. raise errors.SetAttributeInModuleSetupError() # Values (that may be variables or submodules) are being defined and # attached in setup(), we run some extra logic in that case. self._register_submodules(name, val) def __getattr__(self, name: str) -> Any: """Call setup() before getting any setup-defined attributes.""" # We don't want to return anything for python copy / pickle methods. if name in _UNDEFINED_COPY_PICKLE_METHODS: raise AttributeError() self._try_setup() if name in self.__dict__: return self.__dict__[name] else: msg = f'"{self.__class__.__name__}" object has no attribute "{name}".' if self.scope is None: msg += ( f' If "{name}" is defined in \'.setup()\', remember these fields ' "are only accessible from inside 'init' or 'apply'." ) raise AttributeError(msg) def __dir__(self) -> List[str]: """Call setup() before listing attributes.""" self._try_setup() return object.__dir__(self) # type: ignore def __post_init__(self) -> None: # DO NOT REMOVE - Marker for internal logging. # In dataclasses, __init__ is overridden to process dataclass arguments, # and __post_init__ is called immediately afterwards. 
Here, depending on the # type of `parent` passed to initialize the Module, we either defer # initialization, attach this Module as a submodule of a parent, or bind # this Module at the top-level to variables and rngs. object.__setattr__(self, '_id', uuid()) object.__setattr__(self, '_state', _ModuleInternalState()) # Typically we set the parent based on the dynamic module context. if self.parent is _unspecified_parent: # pytype: disable=attribute-error object.__setattr__(self, 'parent', _context.module_stack[-1]) # Initialization is deferred for top level Modules or any other "orphan" # Modules until attachment by __setattr__ i.e. MyModule(..., parent=None) if self.parent is None: return # Register submodule on parent Module. if isinstance(self.parent, Module): # When initializing an unnamed Module inside setup() # initialization is deferred until attachment by __setattr__ # i.e. self.mymodule = MyModule(...) self.name: Optional[str] if ( self.parent._state.in_setup and self.name is None ): # pytype: disable=attribute-error return if not self.parent._initialization_allowed: raise errors.AssignSubModuleError(self.__class__.__name__) # Autonaming of submodules. if self.name is None: # pytype: disable=attribute-error prefix = f'{self.__class__.__name__}' cursor = self.parent._state.autoname_cursor.get(prefix, 0) self.name = f'{prefix}_{cursor}' self.parent._state.autoname_cursor[prefix] = cursor + 1 # Allow scope aliasing under transforms for submodules defined in setup. reuse_scopes = ( self.parent._state.in_setup and self.parent._state.setup_called == SetupState.TRANSFORMED ) # Perform name-collision check. if self.parent._name_taken(self.name, reuse_scopes=reuse_scopes): parent_class = self.parent.__class__.__name__ raise errors.NameInUseError('submodule', self.name, parent_class) # Finalize attachment to parent and scope initialization. 
self.parent._state.children[self.name] = self assert self.parent.scope is not None object.__setattr__( self, 'scope', self.parent.scope.push(self.name, reuse=reuse_scopes) ) # Top-level invocation with a functional Scope. elif isinstance(self.parent, Scope): object.__setattr__(self, 'scope', self.parent) else: raise ValueError('parent must be None, Module or Scope') # eagerly bind submodules if scope is available if self.scope is not None: for field in dataclasses.fields(self): if field.name not in ('parent', 'name') and field.init: self._register_submodules(field.name, getattr(self, field.name)) self._state.is_initialized = True def __repr__(self) -> str: return _module_repr(self) def setup(self) -> None: """Initializes a Module lazily (similar to a lazy ``__init__``). ``setup`` is called once lazily on a module instance when a module is bound, immediately before any other methods like ``__call__`` are invoked, or before a ``setup``-defined attribute on ``self`` is accessed. This can happen in three cases: 1. Immediately when invoking :meth:`apply`, :meth:`init` or :meth:`init_and_output`. 2. Once the module is given a name by being assigned to an attribute of another module inside the other module's ``setup`` method (see :meth:`__setattr__`):: >>> class MyModule(nn.Module): ... def setup(self): ... submodule = nn.Conv(...) ... # Accessing `submodule` attributes does not yet work here. ... # The following line invokes `self.__setattr__`, which gives ... # `submodule` the name "conv1". ... self.conv1 = submodule ... # Accessing `submodule` attributes or methods is now safe and ... # either causes setup() to be called once. 3. Once a module is constructed inside a method wrapped with :meth:`compact`, immediately before another method is called or ``setup`` defined attribute is accessed. """ pass def _register_submodules(self, name, val): """Registers a submodule.""" assert self.scope, 'Trying to register submodules on unbound scope.' 
root = self.scope.root cache = _caches.get(root, weakref.WeakValueDictionary()) _caches[root] = cache queue = [] preserve_adopted_names = config.flax_preserve_adopted_names if hasattr(type(self), 'preserve_adopted_names'): preserve_adopted_names = type(self).preserve_adopted_names def adopt_attr_modules(cache, queue, suffix, subvalue): if isinstance(subvalue, Module): current_name = subvalue.name adopted_name = None if subvalue.parent is None: # Preserve sharing-by-reference relationships during adoption # via cache keyed on unique instance ids. key = subvalue._id # Module was passed from outside. It needs to be cloned. # Outside modules are named by attachment, not an outer name, # UNLESS we're using new adopted name policy, in which case an existing # name will be used, as is often supplied by config systems. if preserve_adopted_names: adopted_name = object.__getattribute__(subvalue, 'name') if key in cache: subvalue = cache[key] else: subvalue = subvalue.clone(name=None) cache[key] = subvalue if subvalue.name is None: object.__setattr__(subvalue, 'parent', self) if adopted_name is None: adopted_name = ( f'{name}{suffix}' if not isinstance(subvalue, CompactNameScope) else current_name ) object.__setattr__(subvalue, 'name', adopted_name) queue.append(subvalue) return subvalue val = _freeze_attr( _map_over_modules_in_tree( functools.partial(adopt_attr_modules, cache, queue), val ) ) object.__setattr__(self, name, val) for x in queue: x.__post_init__() def _try_setup(self, shallow: bool = False) -> None: """Tries to setup module if scope is available and setup has not been called yet.""" if ( self.scope and not self._state.in_setup and self._state.setup_called != SetupState.DONE ): try: self._state.in_setup = True # A shallow setup will only register attribute submodules but it does # not call the user's setup. This avoids running before a # transformation. 
for field in dataclasses.fields(self): if field.name not in ('parent', 'name') and field.init: self._register_submodules(field.name, getattr(self, field.name)) if not shallow: self.setup() # create NonTransparent Modules self._compact_name_scope_modules = { name: CompactNameScope( getattr(type(self), name).inner_fun, lambda: self, name=name ) for name in self._compact_name_scope_methods } # We run static checks abstractly once for setup before any transforms # to detect name collisions and other python errors. elif self._state.setup_called == SetupState.NEW: self._validate_setup() finally: self._state.in_setup = False if not shallow: self._state.setup_called = SetupState.DONE def _validate_setup(self) -> None: """Abstractly evaluates setup only to run static checks.""" def run_setup_only(x): wrapped_id = wrap_method_once(lambda m, x: x) with TestScope({}, rngs={}, mutable=True).temporary() as root: return wrapped_id(self.clone(parent=root), x) _ = jax.eval_shape(run_setup_only, 0) def _name_taken( self, name: str, reuse_scopes: bool = False, collection: Optional[str] = None, ) -> bool: assert self.scope is not None if reuse_scopes: return False return self.scope.name_reserved(name, collection) def _initialization_allowed(self): return ( not self._state.is_initialized # allow eager attachment in post-init or self._state.in_setup or self._state.in_compact_method ) def path(self): if self.scope is None: raise ValueError("Can't access module paths on unbound modules.") return self.scope.path def clone( self: M, *, parent: Optional[Union[Scope, 'Module', _Sentinel]] = None, _deep_clone: Union[bool, weakref.WeakValueDictionary] = False, _reset_names: bool = False, **updates, ) -> M: """Creates a clone of this Module, with optionally updated arguments. NOTE: end users are encouraged to use the ``copy`` method. ``clone`` is used primarily for internal routines, and ``copy`` offers simpler arguments and better defaults. Args: parent: The parent of the clone. 
The clone will have no parent if no explicit parent is specified. _deep_clone: A boolean or a weak value dictionary to control deep cloning of submodules. If True, submodules will be cloned recursively. If a weak value dictionary is passed, it will be used to cache cloned submodules. This flag is used by init/apply/bind to avoid scope leakage. _reset_names: If True, ``name=None`` is also passed to submodules when cloning. Resetting names in submodules is necessary when calling ``.unbind``. **updates: Attribute updates. Returns: A clone of the this Module with the updated attributes and parent. """ attrs = { f.name: getattr(self, f.name) for f in dataclasses.fields(self) if f.init } attrs.update(parent=parent, **updates) # Here we implement deep cloning of submodules, this is necessary to avoid scope leakage # from external submodules into init/apply/bind while preserving sharing-by-reference # relationships between submodules. if _deep_clone != False: # We use a weak value dictionary to cache cloned submodules. When a shared # submodule is cloned, its only cloned once else its fetched from the cache. cache = ( weakref.WeakValueDictionary() if isinstance(_deep_clone, bool) else _deep_clone ) def clone_fn(m: Module) -> Module: if hasattr(m, '_id'): key = m._id if key in cache: return cache[key] else: if _reset_names: clone = m.clone( _deep_clone=cache, _reset_names=_reset_names, name=None ) else: clone = m.clone(_deep_clone=cache) cache[key] = clone return clone else: # If the module doesn't have an _id attribute it could be a mock object # so we return it as is. 
return m # _map_submodules will map over all submodules inside attrs # value here can be any pytree, non-module values are ignored for field_name, value in attrs.items(): if field_name == 'parent': continue attrs[field_name] = _map_submodules(clone_fn, value) module = self.__class__(**attrs) return module def copy( self: M, *, parent: Optional[Union[Scope, 'Module', _Sentinel]] = _unspecified_parent, name: Optional[str] = None, **updates, ) -> M: """Creates a copy of this Module, with optionally updated arguments. Args: parent: The parent of the copy. By default the current module is taken as parent if not explicitly specified. name: A new name for the copied Module, by default a new automatic name will be given. **updates: Attribute updates. Returns: A copy of the this Module with the updated name, parent, and attributes. """ return self.clone( parent=parent, name=name, _deep_clone=True, _reset_names=False, **updates ) def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, ) -> Variable[T]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: Literal[True], **init_kwargs, ) -> Variable[T]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: Literal[False], **init_kwargs, ) -> Variable[meta.AxisMetadata[T]]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: bool = True, **init_kwargs, ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: bool = True, **init_kwargs, ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]: """Declares and returns a variable in this Module. See :mod:`flax.core.variables` for more information. See also :meth:`param` for a shorthand way to define read-only variables in the "params" collection. 
Contrary to :meth:`param`, all arguments passing using ``init_fn`` should be passed on explicitly:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(4)(x) ... key = self.make_rng('stats') ... mean = self.variable('stats', 'mean', nn.initializers.lecun_normal(), key, x.shape) ... ... ... return x * mean.value >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3))) >>> jax.tree_util.tree_map(jnp.shape, variables) {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}}, 'stats': {'mean': (2, 4)}} In the example above, the function ``lecun_normal`` expects two arguments: ``key`` and ``shape``, and both have to be passed on. The PRNG for ``stats`` has to be provided explicitly when calling :meth:`init` and :meth:`apply`. Args: col: The variable collection name. name: The variable name. init_fn: The function that will be called to compute the initial value of this variable. This function will only be called the first time this variable is used in this module. If None, the variable must already be initialized otherwise an error is raised. *init_args: The positional arguments to pass to init_fn. unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed value, see ``flax.nn.meta.unbox`` (default: True). **init_kwargs: The key-word arguments to pass to init_fn Returns: A :class:`flax.core.variables.Variable` that can be read or set via ".value" attribute. Throws an error if the variable exists already. 
""" if not self._initialization_allowed: raise ValueError( 'Variables must be initialized in `setup()` or in a method ' 'wrapped in `@compact`' ) if self._name_taken(name, collection=col): raise errors.NameInUseError('variable', name, self.__class__.__name__) assert self.scope is not None v = self.scope.variable( col, name, init_fn, *init_args, unbox=unbox, **init_kwargs ) self._state.children[name] = col return v def param( self, name: str, init_fn: Callable[..., T], *init_args, ) -> T: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: Literal[True], **init_kwargs, ) -> T: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: Literal[False], **init_kwargs, ) -> meta.AxisMetadata[T]: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: bool, **init_kwargs, ) -> Union[T, meta.AxisMetadata[T]]: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: bool = True, **init_kwargs, ) -> Union[T, meta.AxisMetadata[T]]: """Declares and returns a parameter in this Module. Parameters are read-only variables in the collection named "params". See :mod:`flax.core.variables` for more details on variables. The first argument of ``init_fn`` is assumed to be a PRNG key, which is provided automatically and does not have to be passed using ``init_args`` or ``init_kwargs``:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(4)(x) ... mean = self.param('mean', nn.initializers.lecun_normal(), x.shape) ... ... ... 
return x * mean >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3))) >>> jax.tree_util.tree_map(jnp.shape, variables) {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}, 'mean': (2, 4)}} In the example above, the function ``lecun_normal`` expects two arguments: ``key`` and ``shape``, but only ``shape`` has to be provided explicitly; ``key`` is set automatically using the PRNG for ``params`` that is passed when initializing the module using :meth:`init`. Args: name: The parameter name. init_fn: The function that will be called to compute the initial value of this variable. This function will only be called the first time this parameter is used in this module. *init_args: The positional arguments to pass to init_fn. unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed value, see ``flax.nn.meta.unbox`` (default: True). **init_kwargs: The key-word arguments to pass to init_fn. Returns: The value of the initialized parameter. Throws an error if the parameter exists already. """ if not self._initialization_allowed: raise ValueError( 'Parameters must be initialized in `setup()` or in a method ' 'wrapped in `@compact`' ) if self._name_taken(name, collection='params'): raise errors.NameInUseError('param', name, self.__class__.__name__) assert self.scope is not None v = self.scope.param(name, init_fn, *init_args, unbox=unbox, **init_kwargs) self._state.children[name] = 'params' return v def has_variable(self, col: str, name: str) -> bool: """Checks if a variable of given collection and name exists in this Module. See :mod:`flax.core.variables` for more explanation on variables and collections. Args: col: The variable collection name. name: The name of the variable. Returns: True if the variable exists. 
""" if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.has_variable(col, name) def is_mutable_collection(self, col: str) -> bool: """Returns true if the collection ``col`` is mutable.""" if self.scope is None: raise ValueError("Can't check mutability on unbound modules") return self.scope.is_mutable_collection(col) def has_rng(self, name: str) -> bool: """Returns true if a PRNGSequence with name ``name`` exists.""" if self.scope is None: raise ValueError("Can't query for RNGs on unbound modules") return self.scope.has_rng(name) def make_rng(self, name: str = 'params') -> PRNGKey: """Returns a new RNG key from a given RNG sequence for this Module. The new RNG key is split from the previous one. Thus, every call to ``make_rng`` returns a new RNG key, while still guaranteeing full reproducibility. .. note:: If an invalid name is passed (i.e. no RNG key was passed by the user in ``.init`` or ``.apply`` for this name), then ``name`` will default to ``'params'``. Example:: >>> import jax >>> import flax.linen as nn >>> class ParamsModule(nn.Module): ... def __call__(self): ... return self.make_rng('params') >>> class OtherModule(nn.Module): ... def __call__(self): ... return self.make_rng('other') >>> key = jax.random.key(0) >>> params_out, _ = ParamsModule().init_with_output({'params': key}) >>> # self.make_rng('other') will default to using the 'params' RNG stream >>> other_out, _ = OtherModule().init_with_output({'params': key}) >>> assert params_out == other_out Learn more about RNG's by reading the Flax RNG guide: https://flax.readthedocs.io/en/latest/guides/flax_fundamentals/rng_guide.html Args: name: The RNG sequence name. Returns: The newly generated RNG key. """ if self.scope is None: raise ValueError("Can't use RNGs on unbound modules") return self.scope.make_rng(name) def is_initializing(self) -> bool: """Returns True if running under self.init(...) or nn.init(...)(). 
This is a helper method to handle the common case of simple initialization where we wish to have setup logic occur when only called under ``module.init`` or ``nn.init``. For more complicated multi-phase initialization scenarios it is better to test for the mutability of particular variable collections or for the presence of particular variables that potentially need to be initialized. """ if self.scope is None: raise ValueError("Can't check if running under init() on unbound modules") return self.scope.get_flag('initializing', False) def _module_checks(self): """Run standard runtime checks.""" if not isinstance(self, Module): raise errors.InvalidInstanceModuleError() overridden_post_init = self.__post_init__ != Module.__post_init__ if overridden_post_init and not hasattr(self, '_id'): raise errors.IncorrectPostInitOverrideError() def bind( self: M, variables: VariableDict, *args, rngs: Optional[RNGSequences] = None, mutable: CollectionFilter = False, ) -> M: """Creates an interactive Module instance by binding variables and RNGs. ``bind`` provides an "interactive" instance of a Module directly without transforming a function with ``apply``. This is particularly useful for debugging and interactive use cases like notebooks where a function would limit the ability to split up code into different cells. Once the variables (and optionally RNGs) are bound to a ``Module`` it becomes a stateful object. Note that idiomatic JAX is functional and therefore an interactive instance does not mix well with vanilla JAX APIs. ``bind()`` should only be used for interactive experimentation, and in all other cases we strongly encourage users to use ``apply()`` instead. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn >>> class AutoEncoder(nn.Module): ... def setup(self): ... self.encoder = nn.Dense(3) ... self.decoder = nn.Dense(5) ... ... def __call__(self, x): ... 
return self.decoder(self.encoder(x)) >>> x = jnp.ones((16, 9)) >>> ae = AutoEncoder() >>> variables = ae.init(jax.random.key(0), x) >>> model = ae.bind(variables) >>> z = model.encoder(x) >>> x_reconstructed = model.decoder(z) Args: variables: A dictionary containing variables keyed by variable collections. See :mod:`flax.core.variables` for more details about variables. *args: Named arguments (not used). rngs: a dict of PRNGKeys to initialize the PRNG sequences. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. Returns: A copy of this instance with bound variables and RNGs. """ Module._module_checks(self) del args scope = core.bind(variables, rngs=rngs, mutable=mutable) return self.clone(parent=scope, _deep_clone=True) def unbind(self: M) -> Tuple[M, VariableDict]: """Returns an unbound copy of a Module and its variables. ``unbind`` helps create a stateless version of a bound Module. An example of a common use case: to extract a sub-Module defined inside ``setup()`` and its corresponding variables: 1) temporarily ``bind`` the parent Module; and then 2) ``unbind`` the desired sub-Module. (Recall that ``setup()`` is only called when the Module is bound.):: >>> class Encoder(nn.Module): ... @nn.compact ... def __call__(self, x): ... ... ... return nn.Dense(256)(x) >>> class Decoder(nn.Module): ... @nn.compact ... def __call__(self, x): ... ... ... return nn.Dense(784)(x) >>> class AutoEncoder(nn.Module): ... def setup(self): ... self.encoder = Encoder() ... self.decoder = Decoder() ... ... def __call__(self, x): ... 
return self.decoder(self.encoder(x)) >>> module = AutoEncoder() >>> variables = module.init(jax.random.key(0), jnp.ones((1, 784))) >>> # Extract the Encoder sub-Module and its variables >>> encoder, encoder_vars = module.bind(variables).encoder.unbind() Returns: A tuple with an unbound copy of this Module and its variables. """ Module._module_checks(self) if self.scope is None: raise errors.CallUnbindOnUnboundModuleError() variables = self.variables module = self.clone(_deep_clone=True, _reset_names=True, name=None) return module, variables def apply( self, variables: VariableDict, *args, rngs: Optional[Union[PRNGKey, RNGSequences]] = None, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = False, capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[Any, Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]]: """Applies a module method to variables and returns output and modified variables. Note that ``method`` should be set if one would like to call ``apply`` on a different class method than ``__call__``. For instance, suppose a Transformer modules has a method called ``encode``, then the following calls ``apply`` on that method:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> import numpy as np >>> class Transformer(nn.Module): ... def encode(self, x): ... ... >>> x = jnp.ones((16, 9)) >>> model = Transformer() >>> variables = model.init(jax.random.key(0), x, method=Transformer.encode) >>> encoded = model.apply(variables, x, method=Transformer.encode) If a function instance is provided, the unbound function is used. For instance, the example below is equivalent to the one above:: >>> encoded = model.apply(variables, x, method=model.encode) You can also pass a string to a callable attribute of the module. 
For example, the previous can be written as:: >>> encoded = model.apply(variables, x, method='encode') Note ``method`` can also be a function that is not defined in ``Transformer``. In that case, the function should have at least one argument representing an instance of the Module class:: >>> def other_fn(instance, x): ... # instance.some_module_attr(...) ... instance.encode ... ... >>> model.apply(variables, x, method=other_fn) If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'`` RNG stream. If you want to use a different RNG stream or need to use multiple streams, you can pass a dictionary mapping each RNG stream name to its corresponding ``PRNGKey`` to ``apply``. If ``self.make_rng(name)`` is called on an RNG stream name that isn't passed by the user, it will default to using the ``'params'`` RNG stream. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, add_noise=False): ... x = nn.Dense(16)(x) ... x = nn.relu(x) ... ... if add_noise: ... # Add gaussian noise ... noise_key = self.make_rng('noise') ... x = x + jax.random.normal(noise_key, x.shape) ... ... 
return nn.Dense(1)(x) >>> x = jnp.empty((1, 7)) >>> module = Foo() >>> rngs = {'params': jax.random.key(0), 'noise': jax.random.key(1)} >>> variables = module.init(rngs, x) >>> out0 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> rngs['noise'] = jax.random.key(0) >>> out1 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> # different output (key(1) vs key(0)) >>> np.testing.assert_raises(AssertionError, np.testing.assert_allclose, out0, out1) >>> del rngs['noise'] >>> # self.make_rng('noise') will default to using the 'params' RNG stream >>> out2 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> # same output (key(0)) >>> np.testing.assert_allclose(out1, out2) >>> # passing in a single key is equivalent to passing in {'params': key} >>> out3 = module.apply(variables, x, add_noise=True, rngs=jax.random.key(0)) >>> # same output (key(0)) >>> np.testing.assert_allclose(out2, out3) Args: variables: A dictionary containing variables keyed by variable collections. See :mod:`flax.core.variables` for more details about variables. *args: Named arguments passed to the specified apply method. rngs: a dict of PRNGKeys to initialize the PRNG sequences. The "params" PRNG sequence is used to initialize parameters. method: A function to call apply on. This is generally a function in the module. If provided, applies this method. If not provided, applies the ``__call__`` method of the module. A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default, only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. 
The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the specified apply method. Returns: If ``mutable`` is False, returns output. If any collections are mutable, returns ``(output, vars)``, where ``vars`` are is a dict of the modified collections. """ Module._module_checks(self) if rngs is not None and not isinstance(rngs, dict): if not core.scope._is_valid_rng(rngs): raise errors.InvalidRngError( 'RNGs should be of shape (2,) or PRNGKey in Module ' f'{self.__class__.__name__}, but rngs are: {rngs}' ) rngs = {'params': rngs} if isinstance(method, str): attribute_name = method method = getattr(self, attribute_name) if not callable(method): class_name = type(self).__name__ raise TypeError( f"'{class_name}.{attribute_name}' must be a callable, got" f' {type(method)}.' ) # if the `method` string is a submodule, we create a lambda function # that calls the submodule, forwarding all arguments. if isinstance(method, Module): method = lambda self, *args, **kwargs: getattr(self, attribute_name)( *args, **kwargs ) elif method is None: method = self.__call__ method = _get_unbound_fn(method) return apply( method, self, mutable=mutable, capture_intermediates=capture_intermediates, )(variables, *args, **kwargs, rngs=rngs) def init_with_output( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = DenyList('intermediates'), capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]: """Initializes a module method with variables and returns output and modified variables. Args: rngs: The rngs for the variable collections. *args: Named arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. 
A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except "intermediates" are mutable. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the init function. Returns: ``(output, vars)``, where ``vars`` are is a dict of the modified collections. """ Module._module_checks(self) if not isinstance(rngs, dict): if not core.scope._is_valid_rng(rngs): raise errors.InvalidRngError( 'RNGs should be of shape (2,) or PRNGKey in Module ' f'{self.__class__.__name__}, but rngs are: {rngs}' ) rngs = {'params': rngs} if isinstance(method, str): attribute_name = method method = getattr(self, attribute_name) if not callable(method): class_name = type(self).__name__ raise TypeError( f"'{class_name}.{attribute_name}' must be a callable, got" f' {type(method)}.' 
) elif method is None: method = self.__call__ method = _get_unbound_fn(method) return init_with_output( method, self, mutable=mutable, capture_intermediates=capture_intermediates, )(rngs, *args, **kwargs) def init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = DenyList('intermediates'), capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[FrozenVariableDict, Dict[str, Any]]: """Initializes a module method with variables and returns modified variables. ``init`` takes as first argument either a single ``PRNGKey``, or a dictionary mapping variable collections names to their ``PRNGKeys``, and will call ``method`` (which is the module's ``__call__`` function by default) passing ``*args`` and ``**kwargs``, and returns a dictionary of initialized variables. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> import numpy as np >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, train): ... x = nn.Dense(16)(x) ... x = nn.BatchNorm(use_running_average=not train)(x) ... x = nn.relu(x) ... return nn.Dense(1)(x) >>> x = jnp.empty((1, 7)) >>> module = Foo() >>> key = jax.random.key(0) >>> variables = module.init(key, x, train=False) If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'`` RNG stream. If you want to use a different RNG stream or need to use multiple streams, you can pass a dictionary mapping each RNG stream name to its corresponding ``PRNGKey`` to ``init``. If ``self.make_rng(name)`` is called on an RNG stream name that isn't passed by the user, it will default to using the ``'params'`` RNG stream. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(16)(x) ... x = nn.relu(x) ... ... other_variable = self.variable( ... 'other_collection', ... 'other_variable', ... lambda x: jax.random.normal(self.make_rng('other_rng'), x.shape), ... 
x, ... ) ... x = x + other_variable.value ... ... return nn.Dense(1)(x) >>> module = Foo() >>> rngs = {'params': jax.random.key(0), 'other_rng': jax.random.key(1)} >>> variables0 = module.init(rngs, x) >>> rngs['other_rng'] = jax.random.key(0) >>> variables1 = module.init(rngs, x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables0['params'], variables1['params'] ... ) >>> # different other_variable (key(1) vs key(0)) >>> np.testing.assert_raises( ... AssertionError, ... np.testing.assert_allclose, ... variables0['other_collection']['other_variable'], ... variables1['other_collection']['other_variable'], ... ) >>> del rngs['other_rng'] >>> # self.make_rng('other_rng') will default to using the 'params' RNG stream >>> variables2 = module.init(rngs, x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables1['params'], variables2['params'] ... ) >>> # equivalent other_variable (key(0)) >>> np.testing.assert_allclose( ... variables1['other_collection']['other_variable'], ... variables2['other_collection']['other_variable'], ... ) >>> # passing in a single key is equivalent to passing in {'params': key} >>> variables3 = module.init(jax.random.key(0), x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables2['params'], variables3['params'] ... ) >>> # equivalent other_variable (key(0)) >>> np.testing.assert_allclose( ... variables2['other_collection']['other_variable'], ... variables3['other_collection']['other_variable'], ... ) Jitting ``init`` initializes a model lazily using only the shapes of the provided arguments, and avoids computing the forward pass with actual values. 
Example:: >>> module = nn.Dense(1) >>> init_jit = jax.jit(module.init) >>> variables = init_jit(jax.random.key(0), x) ``init`` is a light wrapper over ``apply``, so other ``apply`` arguments like ``method``, ``mutable``, and ``capture_intermediates`` are also available. Args: rngs: The rngs for the variable collections. *args: Named arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except "intermediates" are mutable. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the init function. Returns: The initialized variable dict. """ Module._module_checks(self) _, v_out = self.init_with_output( rngs, *args, method=method, mutable=mutable, capture_intermediates=capture_intermediates, **kwargs, ) return v_out def lazy_init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Optional[Callable[..., Any]] = None, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> FrozenVariableDict: """Initializes a module without computing on an actual input. lazy_init will initialize the variables without doing unnecessary compute. 
The input data should be passed as a ``jax.ShapeDtypeStruct`` which specifies the shape and dtype of the input but no concrete data. Example:: >>> model = nn.Dense(features=256) >>> variables = model.lazy_init( ... jax.random.key(0), jax.ShapeDtypeStruct((1, 128), jnp.float32)) The args and kwargs args passed to ``lazy_init`` can be a mix of concrete (jax arrays, scalars, bools) and abstract (ShapeDtypeStruct) values. Concrete values are only necessary for arguments that affect the initialization of variables. For example, the model might expect a keyword arg that enables/disables a subpart of the model. In this case, an explicit value (True/Flase) should be passed otherwise ``lazy_init`` cannot infer which variables should be initialized. Args: rngs: The rngs for the variable collections. *args: arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except "intermediates" are mutable. **kwargs: Keyword arguments passed to the init function. Returns: The initialized variable dict. """ Module._module_checks(self) def lazy_wrapper(rngs, *args, **kwargs): return self.init(rngs, *args, method=method, mutable=mutable, **kwargs) return partial_eval.lazy_init(lazy_wrapper)(rngs, *args, **kwargs) def variables(self) -> VariableDict: """Returns the variables in this module.""" if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.variables() def get_variable(self, col: str, name: str, default: Optional[T] = None) -> T: """Retrieves the value of a Variable. Args: col: the variable collection. name: the name of the variable. 
default: the default value to return if the variable does not exist in this scope. Returns: The value of the input variable, of the default value if the variable doesn't exist in this scope. """ if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.get_variable(col, name, default) def put_variable(self, col: str, name: str, value: Any): """Updates the value of the given variable if it is mutable, or an error otherwise. Args: col: the variable collection. name: the name of the variable. value: the new value of the variable. """ if self.scope is None: raise ValueError("Can't access variables on unbound modules") self.scope.put_variable(col, name, value) def sow(self, col: str, name: str, value: Any) -> bool: ... def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: ... def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: """Stores a value in a collection. Collections can be used to collect intermediate values without the overhead of explicitly passing a container through each Module call. If the target collection is not mutable ``sow`` behaves like a no-op and returns ``False``. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... self.sow('intermediates', 'h', h) ... 
return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> model = Foo() >>> variables = model.init(jax.random.key(0), x) >>> y, state = model.apply(variables, x, mutable=['intermediates']) >>> print(state['intermediates']) {'h': (Array([[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ]], dtype=float32),)} By default the values are stored in a tuple and each stored value is appended at the end. This way all intermediates can be tracked when the same module is called multiple times. Alternatively, a custom init/reduce function can be passed:: >>> class Foo2(nn.Module): ... @nn.compact ... def __call__(self, x): ... init_fn = lambda: 0 ... reduce_fn = lambda a, b: a + b ... self.sow('intermediates', 'h', x, ... init_fn=init_fn, reduce_fn=reduce_fn) ... self.sow('intermediates', 'h', x * 2, ... init_fn=init_fn, reduce_fn=reduce_fn) ... return x >>> x = jnp.ones((1, 1)) >>> model = Foo2() >>> variables = model.init(jax.random.key(0), x) >>> y, state = model.apply( ... variables, x, mutable=['intermediates']) >>> print(state['intermediates']) {'h': Array([[3.]], dtype=float32)} Args: col: The name of the variable collection. name: The name of the variable. value: The value of the variable. 
reduce_fn: The function used to combine the existing value with the new value. The default is to append the value to a tuple. init_fn: For the first value stored, ``reduce_fn`` will be passed the result of ``init_fn`` together with the value to be stored. The default is an empty tuple. Returns: ``True`` if the value has been stored successfully, ``False`` otherwise. """ if self.scope is None: raise ValueError("Can't store variables on unbound modules") if not self.scope.is_mutable_collection(col): return False if self.scope.has_variable(col, name): xs = self.scope.get_variable(col, name) else: self.scope.reserve(name, col) self._state.children[name] = col xs = init_fn() xs = reduce_fn(xs, value) self.scope.put_variable(col, name, xs) return True def perturb( self, name: str, value: T, collection: str = 'perturbations' ) -> T: """Add an zero-value variable ('perturbation') to the intermediate value. The gradient of ``value`` would be the same as the gradient of this perturbation variable. Therefore, if you define your loss function with both params and perturbations as standalone arguments, you can get the intermediate gradients of ``value`` by running ``jax.grad`` on the perturbation argument. .. note:: This is an experimental API and may be tweaked later for better performance and usability. At its current stage, it creates extra dummy variables that occupies extra memory space. Use it only to debug gradients in training. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(3)(x) ... x = self.perturb('dense3', x) ... return nn.Dense(2)(x) >>> def loss(variables, inputs, targets): ... preds = model.apply(variables, inputs) ... 
return jnp.square(preds - targets).mean() >>> x = jnp.ones((2, 9)) >>> y = jnp.ones((2, 2)) >>> model = Foo() >>> variables = model.init(jax.random.key(0), x) >>> intm_grads = jax.grad(loss, argnums=0)(variables, x, y) >>> print(intm_grads['perturbations']['dense3']) [[-1.456924 -0.44332537 0.02422847] [-1.456924 -0.44332537 0.02422847]] If perturbations are not passed to ``apply``, ``perturb`` behaves like a no-op so you can easily disable the behavior when not needed:: >>> model.apply(variables, x) # works as expected Array([[-1.0980128 , -0.67961735], [-1.0980128 , -0.67961735]], dtype=float32) >>> model.apply({'params': variables['params']}, x) # behaves like a no-op Array([[-1.0980128 , -0.67961735], [-1.0980128 , -0.67961735]], dtype=float32) >>> intm_grads = jax.grad(loss, argnums=0)({'params': variables['params']}, x, y) >>> 'perturbations' not in intm_grads True """ if self.scope is None: raise ValueError("Can't store variables on unbound modules") if self.is_mutable_collection(collection): if not self.scope.has_variable(collection, name): self.scope.reserve(name, collection) self._state.children[name] = collection self.scope.put_variable(collection, name, jnp.zeros_like(value)) # type: ignore if collection in self.scope.root._variables: if self.scope.has_variable(collection, name): value += self.scope.get_variable(collection, name) # type: ignore else: raise ValueError(f"Perturbation collection {collection} present, but " f"missing perturbation variable {name}") return value def tabulate( self, rngs: Union[PRNGKey, RNGSequences], *args, depth: Optional[int] = None, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), console_kwargs: Optional[Mapping[str, Any]] = None, table_kwargs: Mapping[str, Any] = MappingProxyType({}), column_kwargs: Mapping[str, Any] = MappingProxyType({}), compute_flops: bool = False, compute_vjp_flops: bool = False, **kwargs, ) -> str: """Creates a summary of the Module represented as a table. 
This method has the same signature and internally calls ``Module.init``, but instead of returning the variables, it returns the string summarizing the Module in a table. ``tabulate`` uses ``jax.eval_shape`` to run the forward computation without consuming any FLOPs or allocating memory. Additional arguments can be passed into the ``console_kwargs`` argument, for example, ``{'width': 120}``. For a full list of ``console_kwargs`` arguments, see: https://rich.readthedocs.io/en/stable/reference/console.html#rich.console.Console Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> # print(Foo().tabulate( >>> # jax.random.key(0), x, compute_flops=True, compute_vjp_flops=True)) This gives the following output:: Foo Summary ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ ┃ path ┃ module ┃ inputs ┃ outputs ┃ flops ┃ vjp_flops ┃ params ┃ ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ │ │ Foo │ float32[16,9] │ float32[16,2] │ 1504 │ 4460 │ │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_0 │ Dense │ float32[16,9] │ float32[16,4] │ 1216 │ 3620 │ bias: │ │ │ │ │ │ │ │ float32[4] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[9,4] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 40 (160 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_1 │ Dense │ float32[16,4] │ float32[16,2] │ 288 │ 840 │ bias: │ │ │ │ │ │ │ │ float32[2] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[4,2] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 10 (40 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ │ │ │ │ │ Total │ 50 (200 B) │ └─────────┴────────┴───────────────┴───────────────┴───────┴───────────┴─────────────────┘ Total 
Parameters: 50 (200 B) **Note**: rows order in the table does not represent execution order, instead it aligns with the order of keys in ``variables`` which are sorted alphabetically. **Note**: ``vjp_flops`` returns ``0`` if the module is not differentiable. Args: rngs: The rngs for the variable collections as passed to ``Module.init``. *args: The arguments to the forward computation. depth: controls how many submodule deep the summary can go. By default, its ``None`` which means no limit. If a submodule is not shown because of the depth limit, its parameter count and bytes will be added to the row of its first shown ancestor such that the sum of all rows always adds up to the total number of parameters of the Module. show_repeated: If ``True``, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. Default is ``False``. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except 'intermediates' are mutable. console_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.console.Console`` when rendering the table. Default arguments are ``{'force_terminal': True, 'force_jupyter': False}``. table_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.table.Table`` constructor. column_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.table.Table.add_column`` when adding columns to the table. compute_flops: whether to include a ``flops`` column in the table listing the estimated FLOPs cost of each module forward pass. Does incur actual on-device computation / compilation / memory allocation, but still introduces overhead for large modules (e.g. 
extra 20 seconds for a Stable Diffusion's UNet, whereas otherwise tabulation would finish in 5 seconds). compute_vjp_flops: whether to include a ``vjp_flops`` column in the table listing the estimated FLOPs cost of each module backward pass. Introduces a compute overhead of about 2-3X of ``compute_flops``. **kwargs: keyword arguments to pass to the forward computation. Returns: A string summarizing the Module. """ from flax.linen import summary tabulate_fn = summary.tabulate( self, rngs, depth=depth, show_repeated=show_repeated, mutable=mutable, console_kwargs=console_kwargs, table_kwargs=table_kwargs, column_kwargs=column_kwargs, compute_flops=compute_flops, compute_vjp_flops=compute_vjp_flops, ) return tabulate_fn(*args, **kwargs) def module_paths( self, rngs: Union[PRNGKey, RNGSequences], *args, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> dict[str, 'Module']: """Returns a dictionary mapping module paths to module instances. This method has the same signature and internally calls ``Module.init``, but instead of returning the variables, it returns a dictionary mapping module paths to unbounded copies of module instances that were used at runtime. ``module_paths`` uses ``jax.eval_shape`` to run the forward computation without consuming any FLOPs or allocating memory. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> modules = Foo().module_paths(jax.random.key(0), x) >>> print({ ... p: type(m).__name__ for p, m in modules.items() ... }) {'': 'Foo', 'Dense_0': 'Dense', 'Dense_1': 'Dense'} Args: rngs: The rngs for the variable collections as passed to ``Module.init``. *args: The arguments to the forward computation. show_repeated: If ``True``, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. 
Default is ``False``. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except 'intermediates' are mutable. **kwargs: keyword arguments to pass to the forward computation. Returns: A dict`ionary mapping module paths to module instances. """ from flax.linen import summary table = summary._get_module_table( module=self, depth=None, show_repeated=show_repeated, compute_flops=False, compute_vjp_flops=False, )(rngs, *args, **kwargs, mutable=mutable) return {'/'.join(row.path): row.module_copy for row in table} The provided code snippet includes necessary dependencies for implementing the `grad` function. Write a Python function `def grad( fn: Callable[..., Any], mdl: Module, *primals, has_aux: bool = False, reduce_axes=(), variables: CollectionFilter = True, rngs: PRNGSequenceFilter = True, )` to solve the following problem: A limited, lifted equivalent of ``jax.grad``. Note that for this convenience function, gradients are only calculated for the function inputs, and not with respect to any module variables. The target function must return a scalar-valued output. For a more general lifted vjp, see ``nn.vjp`` for the lifted vector-Jacobian product. Example:: class LearnScale(nn.Module): @nn.compact def __call__(self, x, y): p = self.param('scale', nn.initializers.zeros_init(), ()) return p * x * y class Foo(nn.Module): @nn.compact def __call__(self, x, y): x_grad, y_grad = nn.grad( lambda mdl, x, y: mdl(x, y), LearnScale(), x, y) return x_grad, y_grad Args: fn: Function to be differentiated. Its arguments should be arrays, scalars, or standard Python containers of arrays or scalars. It should return an array, scalar, or standard Python container of arrays or scalars. It will receive the scope and primals as arguments. 
mdl: The module of which the variables will be differentiated. *primals: A sequence of primal values at which the Jacobian of ``fn`` should be evaluated. The length of ``primals`` should be equal to the number of positional parameters to ``fn``. Each primal value should be a tuple of arrays, scalar, or standard Python containers thereof. has_aux: Optional, bool. Indicates whether ``fn`` returns a pair where the first element is considered the output of the mathematical function to be differentiated and the second element is auxiliary data. Default ``False``. reduce_axes: Optional, tuple of axis names. If an axis is listed here, and ``fn`` implicitly broadcasts a value over that axis, the backward pass will perform a ``psum`` of the corresponding gradient. Otherwise, the grad will be per-example over named axes. For example, if ``'batch'`` is a named batch axis, ``vjp(f, *args, reduce_axes=('batch',))`` will create a grad function that sums over the batch while ``grad(f, *args)`` will create a per-example grad. variables: variables collections that are available inside ``fn`` but do not receive a cotangent. rngs: the prngs that are available inside ``fn``. Returns: If ``has_aux`` is ``False``, returns ``grads``, where ``grads`` are the gradients for the corresponding primals and do not include the gradients for module variables. If ``has_aux`` is ``True``, returns a ``(grads, aux)`` tuple where ``aux`` is the auxiliary data returned by ``fn``. Here is the function: def grad( fn: Callable[..., Any], mdl: Module, *primals, has_aux: bool = False, reduce_axes=(), variables: CollectionFilter = True, rngs: PRNGSequenceFilter = True, ): """A limited, lifted equivalent of ``jax.grad``. Note that for this convenience function, gradients are only calculated for the function inputs, and not with respect to any module variables. The target function must return a scalar-valued output. For a more general lifted vjp, see ``nn.vjp`` for the lifted vector-Jacobian product. 
Example:: class LearnScale(nn.Module): @nn.compact def __call__(self, x, y): p = self.param('scale', nn.initializers.zeros_init(), ()) return p * x * y class Foo(nn.Module): @nn.compact def __call__(self, x, y): x_grad, y_grad = nn.grad( lambda mdl, x, y: mdl(x, y), LearnScale(), x, y) return x_grad, y_grad Args: fn: Function to be differentiated. Its arguments should be arrays, scalars, or standard Python containers of arrays or scalars. It should return an array, scalar, or standard Python container of arrays or scalars. It will receive the scope and primals as arguments. mdl: The module of which the variables will be differentiated. *primals: A sequence of primal values at which the Jacobian of ``fn`` should be evaluated. The length of ``primals`` should be equal to the number of positional parameters to ``fn``. Each primal value should be a tuple of arrays, scalar, or standard Python containers thereof. has_aux: Optional, bool. Indicates whether ``fn`` returns a pair where the first element is considered the output of the mathematical function to be differentiated and the second element is auxiliary data. Default ``False``. reduce_axes: Optional, tuple of axis names. If an axis is listed here, and ``fn`` implicitly broadcasts a value over that axis, the backward pass will perform a ``psum`` of the corresponding gradient. Otherwise, the grad will be per-example over named axes. For example, if ``'batch'`` is a named batch axis, ``vjp(f, *args, reduce_axes=('batch',))`` will create a grad function that sums over the batch while ``grad(f, *args)`` will create a per-example grad. variables: variables collections that are available inside ``fn`` but do not receive a cotangent. rngs: the prngs that are available inside ``fn``. Returns: If ``has_aux`` is ``False``, returns ``grads``, where ``grads`` are the gradients for the corresponding primals and do not include the gradients for module variables. 
If ``has_aux`` is ``True``, returns a ``(grads, aux)`` tuple where ``aux`` is the auxiliary data returned by ``fn``. """ value_and_grad_partial = functools.partial( value_and_grad, fn, mdl, *primals, has_aux=has_aux, reduce_axes=reduce_axes, variables=variables, rngs=rngs, ) if has_aux: (_, aux), argument_grads = value_and_grad_partial() return argument_grads, aux else: _, argument_grads = value_and_grad_partial() return argument_grads
A limited, lifted equivalent of ``jax.grad``. Note that for this convenience function, gradients are only calculated for the function inputs, and not with respect to any module variables. The target function must return a scalar-valued output. For a more general lifted vjp, see ``nn.vjp`` for the lifted vector-Jacobian product. Example:: class LearnScale(nn.Module): @nn.compact def __call__(self, x, y): p = self.param('scale', nn.initializers.zeros_init(), ()) return p * x * y class Foo(nn.Module): @nn.compact def __call__(self, x, y): x_grad, y_grad = nn.grad( lambda mdl, x, y: mdl(x, y), LearnScale(), x, y) return x_grad, y_grad Args: fn: Function to be differentiated. Its arguments should be arrays, scalars, or standard Python containers of arrays or scalars. It should return an array, scalar, or standard Python container of arrays or scalars. It will receive the scope and primals as arguments. mdl: The module of which the variables will be differentiated. *primals: A sequence of primal values at which the Jacobian of ``fn`` should be evaluated. The length of ``primals`` should be equal to the number of positional parameters to ``fn``. Each primal value should be a tuple of arrays, scalar, or standard Python containers thereof. has_aux: Optional, bool. Indicates whether ``fn`` returns a pair where the first element is considered the output of the mathematical function to be differentiated and the second element is auxiliary data. Default ``False``. reduce_axes: Optional, tuple of axis names. If an axis is listed here, and ``fn`` implicitly broadcasts a value over that axis, the backward pass will perform a ``psum`` of the corresponding gradient. Otherwise, the grad will be per-example over named axes. For example, if ``'batch'`` is a named batch axis, ``vjp(f, *args, reduce_axes=('batch',))`` will create a grad function that sums over the batch while ``grad(f, *args)`` will create a per-example grad. 
variables: variables collections that are available inside ``fn`` but do not receive a cotangent. rngs: the prngs that are available inside ``fn``. Returns: If ``has_aux`` is ``False``, returns ``grads``, where ``grads`` are the gradients for the corresponding primals and do not include the gradients for module variables. If ``has_aux`` is ``True``, returns a ``(grads, aux)`` tuple where ``aux`` is the auxiliary data returned by ``fn``.
22,614
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax def lift_direct_transform( transform: Callable[..., Any], targets: Tuple[Callable[..., Any], ...], mdl: Module, *args, multi_scope=True, **kwargs, ): """Lift direct transform.""" # TODO(marcvanzee): Improve docstrings (#1977). for target in targets: if _is_module_class(target): raise ValueError( f'The {transform.__name__} transform can only be applied on a Module' ' method. That is function that takes a Module instance as its first' ' arg.' ) elif not callable(target): raise ValueError('transform target must be callable') # normalize self.foo bound methods to class.foo unbound methods. targets = tuple(_get_unbound_fn(target) for target in targets) aug_transform = lambda *fns: functools.partial(transform, *fns) return decorator_lift_transform( aug_transform, targets, multi_scope=multi_scope )(mdl, *args, **kwargs) CollectionFilter = Filter PRNGSequenceFilter = Filter class Module(ModuleBase): """Base class for all neural network modules. Layers and models should subclass this class. All Flax Modules are Python 3.7 `dataclasses <https://docs.python.org/3/library/dataclasses.html>`_. Since dataclasses take over ``__init__``, you should instead override :meth:`setup`, which is automatically called to initialize the module. Modules can contain submodules, and in this way can be nested in a tree structure. 
Submodels can be assigned as regular attributes inside the :meth:`setup` method. You can define arbitrary "forward pass" methods on your Module subclass. While no methods are special-cased, ``__call__`` is a popular choice because it allows you to use module instances as if they are functions:: >>> from flax import linen as nn >>> from typing import Tuple >>> class Module(nn.Module): ... features: Tuple[int, ...] = (16, 4) ... def setup(self): ... self.dense1 = nn.Dense(self.features[0]) ... self.dense2 = nn.Dense(self.features[1]) ... def __call__(self, x): ... return self.dense2(nn.relu(self.dense1(x))) Optionally, for more concise module implementations where submodules definitions are co-located with their usage, you can use the :meth:`compact` wrapper. """ if typing.TYPE_CHECKING: name: Optional[str] = module_field(kw_only=True, default=None) parent: Union['Module', _Sentinel, None] = module_field( kw_only=True, default=None ) def __init__(self, *args, **kwargs): # this stub makes sure pytype accepts constructor arguments. pass def __call__(self, *args, **kwargs) -> Any: # this stub allows pytype to accept Modules as Callables. pass def __init_subclass__(cls, kw_only: bool = False, **kwargs: Any) -> None: """Automatically initializes all subclasses as custom dataclasses.""" super().__init_subclass__(**kwargs) # All Flax Modules are dataclasses. We force this convention since # it encourages the stateless behavior needed to clone module instances for # functional transformation. Instead of using a python metaclass, we # automatically transform Modules into dataclasses at subclass creation # time, and we set the last dataclass arguments to `parent` and `name`. cls._customized_dataclass_transform(kw_only) # We wrap user-defined methods including setup and __call__ to enforce # a number of different checks and to provide clear error messages. 
cls._verify_single_or_no_compact() cls._find_compact_name_scope_methods() cls._wrap_module_attributes() # Set empty class defaults. cls._state = _uninitialized_module_internal_state # type: ignore[attr-defined] cls.scope: Optional[Scope] = None # type: ignore # Handles weak referencing of parent Modules to prevent reference cycles. cls._parent_ref = None # type: ignore[attr-defined] cls.parent = ParentDescriptor() # type: ignore[assignment] def _customized_dataclass_transform(cls, kw_only: bool): """Transforms `cls` into a dataclass, with custom additional behavior. 1. Inject `parent` and `name` fields. (If they are already present, then check that they have the expected types.) 2. Set compare, hash, and repr to False for non-init fields. 3. Generate a hash function (if not provided by cls). """ # Check reserved attributes have expected type annotations. annotations = dict(cls.__dict__.get('__annotations__', {})) if annotations.get('parent', _ParentType) != _ParentType: raise errors.ReservedModuleAttributeError(annotations) if annotations.get('name', str) not in ('str', str, Optional[str]): raise errors.ReservedModuleAttributeError(annotations) # any non-init field will only be set in setup # During __hash__ and __eq__ the field is not set yet # so it should not be used in compare, hash or repr. 
for field in annotations: field_meta = getattr(cls, field, None) if isinstance(field_meta, dataclasses.Field) and not field_meta.init: field_meta.compare = False field_meta.hash = False field_meta.repr = False extra_fields = [ ( 'parent', _ParentType, kw_only_dataclasses.field( repr=False, default=_unspecified_parent, kw_only=True ), ), ( 'name', Optional[str], kw_only_dataclasses.field(default=None, kw_only=True), ), ] if kw_only: if tuple(sys.version_info)[:3] >= (3, 10, 0): for ( name, annotation, # pytype: disable=invalid-annotation default, ) in extra_fields: setattr(cls, name, default) cls.__annotations__[name] = annotation dataclasses.dataclass( # type: ignore[call-overload] unsafe_hash='__hash__' not in cls.__dict__, repr=False, kw_only=True, )(cls) else: raise TypeError('`kw_only` is not available before Py 3.10.') else: # Now apply dataclass transform (which operates in-place). # Do generate a hash function only if not provided by the class. kw_only_dataclasses.dataclass( cls, unsafe_hash='__hash__' not in cls.__dict__, repr=False, extra_fields=extra_fields, ) # pytype: disable=wrong-keyword-args cls.__hash__ = _wrap_hash(cls.__hash__) # type: ignore[method-assign] def _verify_single_or_no_compact(cls): """Statically verifies that at most a single method is labelled compact.""" methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)] n_compact_fns = len( [ method_name for method_name in methods if hasattr(getattr(cls, method_name), 'compact') ] ) if n_compact_fns > 1: raise errors.MultipleMethodsCompactError() def _find_compact_name_scope_methods(cls): """Finds all compact_name_scope methods in the class.""" methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)] compact_name_scope_fns = tuple( method_name for method_name in methods if hasattr(getattr(cls, method_name), 'compact_name_scope') ) cls._compact_name_scope_methods = compact_name_scope_fns def _wrap_module_attributes(cls): """Wraps user-defined non-inherited methods 
and descriptors with state management functions. """ # wrap methods method_exclusions = [f.name for f in dataclasses.fields(cls)] + [ '__eq__', '__repr__', '__init__', '__hash__', '__post_init__', ] for key in _get_local_method_names(cls, exclude=method_exclusions): method = getattr(cls, key) if hasattr(method, 'nowrap'): continue setattr(cls, key, wrap_method_once(method)) # wrap descriptors descriptor_exclusions = [f.name for f in dataclasses.fields(cls)] + [ 'parent', '__dict__', ] for key in _get_local_descriptor_names(cls, descriptor_exclusions): # don't use getattr here, since it will call the descriptor descriptor = cls.__dict__[key] if hasattr(descriptor, 'nowrap'): continue setattr(cls, key, wrap_descriptor_once(descriptor)) return cls def _call_wrapped_method(self, fun, args, kwargs): """Calls a wrapped method. This function is responsible for setting up the thread local state correctly before calling the method and cleaning up afterwards. This includes storing intermediates, setup of the compact scope, and making sure setup is called before any other method. Args: fun: The wrapped method. args: Named arguments passed to ``fun``. kwargs: Keyword arguments passed to ``fun``. Returns: The results of calling ``fun``. """ is_compact_method = hasattr(fun, 'compact') fun_name = _get_fn_name(fun) is_setup_method = fun_name == 'setup' add_call_info = not is_setup_method and len(_context.call_info_stack) > 0 # We lazily call setup() only when needed. 
if is_setup_method: if self.scope is None: raise errors.CallSetupUnboundModuleError() is_recurrent = self._state.in_setup self._state.in_setup = True else: self._try_setup() if is_compact_method: if self.scope is None: raise errors.CallCompactUnboundModuleError() is_recurrent = self._state.in_compact_method self._state.in_compact_method = True _context.module_stack.append(self) try: # get call info if add_call_info: assert self.scope is not None call_index = _context.call_info_stack[-1].get_call_index() if _global_interceptor_stack: run_fun = functools.partial(run_interceptors, fun) else: run_fun = fun # call method if _use_named_call: with jax.named_scope(_derive_profiling_name(self, fun)): y = run_fun(self, *args, **kwargs) else: y = run_fun(self, *args, **kwargs) if _context.capture_stack: filter_fn = _context.capture_stack[-1] if filter_fn and filter_fn(self, fun_name): self.sow('intermediates', fun_name, y) if add_call_info: _args, _kwargs, _y = flax.linen.summary._represent_tree( (args, kwargs, y) ) _context.call_info_stack[-1].calls.append( _CallInfo( call_index, self.path, self.clone(), self.scope.rngs, self.scope.mutable, fun.__name__, _args, _kwargs, _y, ) ) return y finally: _context.module_stack.pop() if is_compact_method: object.__setattr__(self, 'scope', self.scope.rewound()) # setup or compact calls can be recurrent for example due to super calls # resetting the state would cause is compact/setup method # to be set to False prematurely. if (is_compact_method or is_setup_method) and not is_recurrent: self._state.reset() def __setattr__(self, name: str, val: Any): """Sets an attribute on this Module. We overload setattr solely to support pythonic naming via assignment of submodules in the special :meth:`setup` function:: self.submodule_name = MyModule(...) We also support lists and other general pytrees, e.g.:: self.submodules = [MyModule0(..), MyModule1(..), ...] Args: name: Attribute to set. val: Value of the attribute. 
""" fields = self.__dataclass_fields__ # pytype: disable=attribute-error is_dataclass_attr = name in fields and fields[name].init if not self._state.in_setup: if not self._state.is_initialized: # Setting attributes before end of Module.__post_init__() object.__setattr__(self, name, val) return else: # We're past all initialization and setup logic: # Raises a TypeError just like frozen python dataclasses. raise errors.SetAttributeFrozenModuleError( self.__class__.__name__, name, val ) # We're inside the setup() method: if is_dataclass_attr: # These names are specified as dataclass fields. They should not be # initialized within the setup() method, but can be modified freely # before it. raise errors.SetAttributeInModuleSetupError() # Values (that may be variables or submodules) are being defined and # attached in setup(), we run some extra logic in that case. self._register_submodules(name, val) def __getattr__(self, name: str) -> Any: """Call setup() before getting any setup-defined attributes.""" # We don't want to return anything for python copy / pickle methods. if name in _UNDEFINED_COPY_PICKLE_METHODS: raise AttributeError() self._try_setup() if name in self.__dict__: return self.__dict__[name] else: msg = f'"{self.__class__.__name__}" object has no attribute "{name}".' if self.scope is None: msg += ( f' If "{name}" is defined in \'.setup()\', remember these fields ' "are only accessible from inside 'init' or 'apply'." ) raise AttributeError(msg) def __dir__(self) -> List[str]: """Call setup() before listing attributes.""" self._try_setup() return object.__dir__(self) # type: ignore def __post_init__(self) -> None: # DO NOT REMOVE - Marker for internal logging. # In dataclasses, __init__ is overridden to process dataclass arguments, # and __post_init__ is called immediately afterwards. 
Here, depending on the # type of `parent` passed to initialize the Module, we either defer # initialization, attach this Module as a submodule of a parent, or bind # this Module at the top-level to variables and rngs. object.__setattr__(self, '_id', uuid()) object.__setattr__(self, '_state', _ModuleInternalState()) # Typically we set the parent based on the dynamic module context. if self.parent is _unspecified_parent: # pytype: disable=attribute-error object.__setattr__(self, 'parent', _context.module_stack[-1]) # Initialization is deferred for top level Modules or any other "orphan" # Modules until attachment by __setattr__ i.e. MyModule(..., parent=None) if self.parent is None: return # Register submodule on parent Module. if isinstance(self.parent, Module): # When initializing an unnamed Module inside setup() # initialization is deferred until attachment by __setattr__ # i.e. self.mymodule = MyModule(...) self.name: Optional[str] if ( self.parent._state.in_setup and self.name is None ): # pytype: disable=attribute-error return if not self.parent._initialization_allowed: raise errors.AssignSubModuleError(self.__class__.__name__) # Autonaming of submodules. if self.name is None: # pytype: disable=attribute-error prefix = f'{self.__class__.__name__}' cursor = self.parent._state.autoname_cursor.get(prefix, 0) self.name = f'{prefix}_{cursor}' self.parent._state.autoname_cursor[prefix] = cursor + 1 # Allow scope aliasing under transforms for submodules defined in setup. reuse_scopes = ( self.parent._state.in_setup and self.parent._state.setup_called == SetupState.TRANSFORMED ) # Perform name-collision check. if self.parent._name_taken(self.name, reuse_scopes=reuse_scopes): parent_class = self.parent.__class__.__name__ raise errors.NameInUseError('submodule', self.name, parent_class) # Finalize attachment to parent and scope initialization. 
self.parent._state.children[self.name] = self assert self.parent.scope is not None object.__setattr__( self, 'scope', self.parent.scope.push(self.name, reuse=reuse_scopes) ) # Top-level invocation with a functional Scope. elif isinstance(self.parent, Scope): object.__setattr__(self, 'scope', self.parent) else: raise ValueError('parent must be None, Module or Scope') # eagerly bind submodules if scope is available if self.scope is not None: for field in dataclasses.fields(self): if field.name not in ('parent', 'name') and field.init: self._register_submodules(field.name, getattr(self, field.name)) self._state.is_initialized = True def __repr__(self) -> str: return _module_repr(self) def setup(self) -> None: """Initializes a Module lazily (similar to a lazy ``__init__``). ``setup`` is called once lazily on a module instance when a module is bound, immediately before any other methods like ``__call__`` are invoked, or before a ``setup``-defined attribute on ``self`` is accessed. This can happen in three cases: 1. Immediately when invoking :meth:`apply`, :meth:`init` or :meth:`init_and_output`. 2. Once the module is given a name by being assigned to an attribute of another module inside the other module's ``setup`` method (see :meth:`__setattr__`):: >>> class MyModule(nn.Module): ... def setup(self): ... submodule = nn.Conv(...) ... # Accessing `submodule` attributes does not yet work here. ... # The following line invokes `self.__setattr__`, which gives ... # `submodule` the name "conv1". ... self.conv1 = submodule ... # Accessing `submodule` attributes or methods is now safe and ... # either causes setup() to be called once. 3. Once a module is constructed inside a method wrapped with :meth:`compact`, immediately before another method is called or ``setup`` defined attribute is accessed. """ pass def _register_submodules(self, name, val): """Registers a submodule.""" assert self.scope, 'Trying to register submodules on unbound scope.' 
root = self.scope.root cache = _caches.get(root, weakref.WeakValueDictionary()) _caches[root] = cache queue = [] preserve_adopted_names = config.flax_preserve_adopted_names if hasattr(type(self), 'preserve_adopted_names'): preserve_adopted_names = type(self).preserve_adopted_names def adopt_attr_modules(cache, queue, suffix, subvalue): if isinstance(subvalue, Module): current_name = subvalue.name adopted_name = None if subvalue.parent is None: # Preserve sharing-by-reference relationships during adoption # via cache keyed on unique instance ids. key = subvalue._id # Module was passed from outside. It needs to be cloned. # Outside modules are named by attachment, not an outer name, # UNLESS we're using new adopted name policy, in which case an existing # name will be used, as is often supplied by config systems. if preserve_adopted_names: adopted_name = object.__getattribute__(subvalue, 'name') if key in cache: subvalue = cache[key] else: subvalue = subvalue.clone(name=None) cache[key] = subvalue if subvalue.name is None: object.__setattr__(subvalue, 'parent', self) if adopted_name is None: adopted_name = ( f'{name}{suffix}' if not isinstance(subvalue, CompactNameScope) else current_name ) object.__setattr__(subvalue, 'name', adopted_name) queue.append(subvalue) return subvalue val = _freeze_attr( _map_over_modules_in_tree( functools.partial(adopt_attr_modules, cache, queue), val ) ) object.__setattr__(self, name, val) for x in queue: x.__post_init__() def _try_setup(self, shallow: bool = False) -> None: """Tries to setup module if scope is available and setup has not been called yet.""" if ( self.scope and not self._state.in_setup and self._state.setup_called != SetupState.DONE ): try: self._state.in_setup = True # A shallow setup will only register attribute submodules but it does # not call the user's setup. This avoids running before a # transformation. 
for field in dataclasses.fields(self): if field.name not in ('parent', 'name') and field.init: self._register_submodules(field.name, getattr(self, field.name)) if not shallow: self.setup() # create NonTransparent Modules self._compact_name_scope_modules = { name: CompactNameScope( getattr(type(self), name).inner_fun, lambda: self, name=name ) for name in self._compact_name_scope_methods } # We run static checks abstractly once for setup before any transforms # to detect name collisions and other python errors. elif self._state.setup_called == SetupState.NEW: self._validate_setup() finally: self._state.in_setup = False if not shallow: self._state.setup_called = SetupState.DONE def _validate_setup(self) -> None: """Abstractly evaluates setup only to run static checks.""" def run_setup_only(x): wrapped_id = wrap_method_once(lambda m, x: x) with TestScope({}, rngs={}, mutable=True).temporary() as root: return wrapped_id(self.clone(parent=root), x) _ = jax.eval_shape(run_setup_only, 0) def _name_taken( self, name: str, reuse_scopes: bool = False, collection: Optional[str] = None, ) -> bool: assert self.scope is not None if reuse_scopes: return False return self.scope.name_reserved(name, collection) def _initialization_allowed(self): return ( not self._state.is_initialized # allow eager attachment in post-init or self._state.in_setup or self._state.in_compact_method ) def path(self): if self.scope is None: raise ValueError("Can't access module paths on unbound modules.") return self.scope.path def clone( self: M, *, parent: Optional[Union[Scope, 'Module', _Sentinel]] = None, _deep_clone: Union[bool, weakref.WeakValueDictionary] = False, _reset_names: bool = False, **updates, ) -> M: """Creates a clone of this Module, with optionally updated arguments. NOTE: end users are encouraged to use the ``copy`` method. ``clone`` is used primarily for internal routines, and ``copy`` offers simpler arguments and better defaults. Args: parent: The parent of the clone. 
The clone will have no parent if no explicit parent is specified. _deep_clone: A boolean or a weak value dictionary to control deep cloning of submodules. If True, submodules will be cloned recursively. If a weak value dictionary is passed, it will be used to cache cloned submodules. This flag is used by init/apply/bind to avoid scope leakage. _reset_names: If True, ``name=None`` is also passed to submodules when cloning. Resetting names in submodules is necessary when calling ``.unbind``. **updates: Attribute updates. Returns: A clone of the this Module with the updated attributes and parent. """ attrs = { f.name: getattr(self, f.name) for f in dataclasses.fields(self) if f.init } attrs.update(parent=parent, **updates) # Here we implement deep cloning of submodules, this is necessary to avoid scope leakage # from external submodules into init/apply/bind while preserving sharing-by-reference # relationships between submodules. if _deep_clone != False: # We use a weak value dictionary to cache cloned submodules. When a shared # submodule is cloned, its only cloned once else its fetched from the cache. cache = ( weakref.WeakValueDictionary() if isinstance(_deep_clone, bool) else _deep_clone ) def clone_fn(m: Module) -> Module: if hasattr(m, '_id'): key = m._id if key in cache: return cache[key] else: if _reset_names: clone = m.clone( _deep_clone=cache, _reset_names=_reset_names, name=None ) else: clone = m.clone(_deep_clone=cache) cache[key] = clone return clone else: # If the module doesn't have an _id attribute it could be a mock object # so we return it as is. 
return m # _map_submodules will map over all submodules inside attrs # value here can be any pytree, non-module values are ignored for field_name, value in attrs.items(): if field_name == 'parent': continue attrs[field_name] = _map_submodules(clone_fn, value) module = self.__class__(**attrs) return module def copy( self: M, *, parent: Optional[Union[Scope, 'Module', _Sentinel]] = _unspecified_parent, name: Optional[str] = None, **updates, ) -> M: """Creates a copy of this Module, with optionally updated arguments. Args: parent: The parent of the copy. By default the current module is taken as parent if not explicitly specified. name: A new name for the copied Module, by default a new automatic name will be given. **updates: Attribute updates. Returns: A copy of the this Module with the updated name, parent, and attributes. """ return self.clone( parent=parent, name=name, _deep_clone=True, _reset_names=False, **updates ) def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, ) -> Variable[T]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: Literal[True], **init_kwargs, ) -> Variable[T]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: Literal[False], **init_kwargs, ) -> Variable[meta.AxisMetadata[T]]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: bool = True, **init_kwargs, ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: bool = True, **init_kwargs, ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]: """Declares and returns a variable in this Module. See :mod:`flax.core.variables` for more information. See also :meth:`param` for a shorthand way to define read-only variables in the "params" collection. 
Contrary to :meth:`param`, all arguments passing using ``init_fn`` should be passed on explicitly:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(4)(x) ... key = self.make_rng('stats') ... mean = self.variable('stats', 'mean', nn.initializers.lecun_normal(), key, x.shape) ... ... ... return x * mean.value >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3))) >>> jax.tree_util.tree_map(jnp.shape, variables) {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}}, 'stats': {'mean': (2, 4)}} In the example above, the function ``lecun_normal`` expects two arguments: ``key`` and ``shape``, and both have to be passed on. The PRNG for ``stats`` has to be provided explicitly when calling :meth:`init` and :meth:`apply`. Args: col: The variable collection name. name: The variable name. init_fn: The function that will be called to compute the initial value of this variable. This function will only be called the first time this variable is used in this module. If None, the variable must already be initialized otherwise an error is raised. *init_args: The positional arguments to pass to init_fn. unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed value, see ``flax.nn.meta.unbox`` (default: True). **init_kwargs: The key-word arguments to pass to init_fn Returns: A :class:`flax.core.variables.Variable` that can be read or set via ".value" attribute. Throws an error if the variable exists already. 
""" if not self._initialization_allowed: raise ValueError( 'Variables must be initialized in `setup()` or in a method ' 'wrapped in `@compact`' ) if self._name_taken(name, collection=col): raise errors.NameInUseError('variable', name, self.__class__.__name__) assert self.scope is not None v = self.scope.variable( col, name, init_fn, *init_args, unbox=unbox, **init_kwargs ) self._state.children[name] = col return v def param( self, name: str, init_fn: Callable[..., T], *init_args, ) -> T: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: Literal[True], **init_kwargs, ) -> T: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: Literal[False], **init_kwargs, ) -> meta.AxisMetadata[T]: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: bool, **init_kwargs, ) -> Union[T, meta.AxisMetadata[T]]: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: bool = True, **init_kwargs, ) -> Union[T, meta.AxisMetadata[T]]: """Declares and returns a parameter in this Module. Parameters are read-only variables in the collection named "params". See :mod:`flax.core.variables` for more details on variables. The first argument of ``init_fn`` is assumed to be a PRNG key, which is provided automatically and does not have to be passed using ``init_args`` or ``init_kwargs``:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(4)(x) ... mean = self.param('mean', nn.initializers.lecun_normal(), x.shape) ... ... ... 
return x * mean >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3))) >>> jax.tree_util.tree_map(jnp.shape, variables) {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}, 'mean': (2, 4)}} In the example above, the function ``lecun_normal`` expects two arguments: ``key`` and ``shape``, but only ``shape`` has to be provided explicitly; ``key`` is set automatically using the PRNG for ``params`` that is passed when initializing the module using :meth:`init`. Args: name: The parameter name. init_fn: The function that will be called to compute the initial value of this variable. This function will only be called the first time this parameter is used in this module. *init_args: The positional arguments to pass to init_fn. unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed value, see ``flax.nn.meta.unbox`` (default: True). **init_kwargs: The key-word arguments to pass to init_fn. Returns: The value of the initialized parameter. Throws an error if the parameter exists already. """ if not self._initialization_allowed: raise ValueError( 'Parameters must be initialized in `setup()` or in a method ' 'wrapped in `@compact`' ) if self._name_taken(name, collection='params'): raise errors.NameInUseError('param', name, self.__class__.__name__) assert self.scope is not None v = self.scope.param(name, init_fn, *init_args, unbox=unbox, **init_kwargs) self._state.children[name] = 'params' return v def has_variable(self, col: str, name: str) -> bool: """Checks if a variable of given collection and name exists in this Module. See :mod:`flax.core.variables` for more explanation on variables and collections. Args: col: The variable collection name. name: The name of the variable. Returns: True if the variable exists. 
""" if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.has_variable(col, name) def is_mutable_collection(self, col: str) -> bool: """Returns true if the collection ``col`` is mutable.""" if self.scope is None: raise ValueError("Can't check mutability on unbound modules") return self.scope.is_mutable_collection(col) def has_rng(self, name: str) -> bool: """Returns true if a PRNGSequence with name ``name`` exists.""" if self.scope is None: raise ValueError("Can't query for RNGs on unbound modules") return self.scope.has_rng(name) def make_rng(self, name: str = 'params') -> PRNGKey: """Returns a new RNG key from a given RNG sequence for this Module. The new RNG key is split from the previous one. Thus, every call to ``make_rng`` returns a new RNG key, while still guaranteeing full reproducibility. .. note:: If an invalid name is passed (i.e. no RNG key was passed by the user in ``.init`` or ``.apply`` for this name), then ``name`` will default to ``'params'``. Example:: >>> import jax >>> import flax.linen as nn >>> class ParamsModule(nn.Module): ... def __call__(self): ... return self.make_rng('params') >>> class OtherModule(nn.Module): ... def __call__(self): ... return self.make_rng('other') >>> key = jax.random.key(0) >>> params_out, _ = ParamsModule().init_with_output({'params': key}) >>> # self.make_rng('other') will default to using the 'params' RNG stream >>> other_out, _ = OtherModule().init_with_output({'params': key}) >>> assert params_out == other_out Learn more about RNG's by reading the Flax RNG guide: https://flax.readthedocs.io/en/latest/guides/flax_fundamentals/rng_guide.html Args: name: The RNG sequence name. Returns: The newly generated RNG key. """ if self.scope is None: raise ValueError("Can't use RNGs on unbound modules") return self.scope.make_rng(name) def is_initializing(self) -> bool: """Returns True if running under self.init(...) or nn.init(...)(). 
This is a helper method to handle the common case of simple initialization where we wish to have setup logic occur when only called under ``module.init`` or ``nn.init``. For more complicated multi-phase initialization scenarios it is better to test for the mutability of particular variable collections or for the presence of particular variables that potentially need to be initialized. """ if self.scope is None: raise ValueError("Can't check if running under init() on unbound modules") return self.scope.get_flag('initializing', False) def _module_checks(self): """Run standard runtime checks.""" if not isinstance(self, Module): raise errors.InvalidInstanceModuleError() overridden_post_init = self.__post_init__ != Module.__post_init__ if overridden_post_init and not hasattr(self, '_id'): raise errors.IncorrectPostInitOverrideError() def bind( self: M, variables: VariableDict, *args, rngs: Optional[RNGSequences] = None, mutable: CollectionFilter = False, ) -> M: """Creates an interactive Module instance by binding variables and RNGs. ``bind`` provides an "interactive" instance of a Module directly without transforming a function with ``apply``. This is particularly useful for debugging and interactive use cases like notebooks where a function would limit the ability to split up code into different cells. Once the variables (and optionally RNGs) are bound to a ``Module`` it becomes a stateful object. Note that idiomatic JAX is functional and therefore an interactive instance does not mix well with vanilla JAX APIs. ``bind()`` should only be used for interactive experimentation, and in all other cases we strongly encourage users to use ``apply()`` instead. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn >>> class AutoEncoder(nn.Module): ... def setup(self): ... self.encoder = nn.Dense(3) ... self.decoder = nn.Dense(5) ... ... def __call__(self, x): ... 
return self.decoder(self.encoder(x)) >>> x = jnp.ones((16, 9)) >>> ae = AutoEncoder() >>> variables = ae.init(jax.random.key(0), x) >>> model = ae.bind(variables) >>> z = model.encoder(x) >>> x_reconstructed = model.decoder(z) Args: variables: A dictionary containing variables keyed by variable collections. See :mod:`flax.core.variables` for more details about variables. *args: Named arguments (not used). rngs: a dict of PRNGKeys to initialize the PRNG sequences. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. Returns: A copy of this instance with bound variables and RNGs. """ Module._module_checks(self) del args scope = core.bind(variables, rngs=rngs, mutable=mutable) return self.clone(parent=scope, _deep_clone=True) def unbind(self: M) -> Tuple[M, VariableDict]: """Returns an unbound copy of a Module and its variables. ``unbind`` helps create a stateless version of a bound Module. An example of a common use case: to extract a sub-Module defined inside ``setup()`` and its corresponding variables: 1) temporarily ``bind`` the parent Module; and then 2) ``unbind`` the desired sub-Module. (Recall that ``setup()`` is only called when the Module is bound.):: >>> class Encoder(nn.Module): ... @nn.compact ... def __call__(self, x): ... ... ... return nn.Dense(256)(x) >>> class Decoder(nn.Module): ... @nn.compact ... def __call__(self, x): ... ... ... return nn.Dense(784)(x) >>> class AutoEncoder(nn.Module): ... def setup(self): ... self.encoder = Encoder() ... self.decoder = Decoder() ... ... def __call__(self, x): ... 
return self.decoder(self.encoder(x)) >>> module = AutoEncoder() >>> variables = module.init(jax.random.key(0), jnp.ones((1, 784))) >>> # Extract the Encoder sub-Module and its variables >>> encoder, encoder_vars = module.bind(variables).encoder.unbind() Returns: A tuple with an unbound copy of this Module and its variables. """ Module._module_checks(self) if self.scope is None: raise errors.CallUnbindOnUnboundModuleError() variables = self.variables module = self.clone(_deep_clone=True, _reset_names=True, name=None) return module, variables def apply( self, variables: VariableDict, *args, rngs: Optional[Union[PRNGKey, RNGSequences]] = None, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = False, capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[Any, Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]]: """Applies a module method to variables and returns output and modified variables. Note that ``method`` should be set if one would like to call ``apply`` on a different class method than ``__call__``. For instance, suppose a Transformer modules has a method called ``encode``, then the following calls ``apply`` on that method:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> import numpy as np >>> class Transformer(nn.Module): ... def encode(self, x): ... ... >>> x = jnp.ones((16, 9)) >>> model = Transformer() >>> variables = model.init(jax.random.key(0), x, method=Transformer.encode) >>> encoded = model.apply(variables, x, method=Transformer.encode) If a function instance is provided, the unbound function is used. For instance, the example below is equivalent to the one above:: >>> encoded = model.apply(variables, x, method=model.encode) You can also pass a string to a callable attribute of the module. 
For example, the previous can be written as:: >>> encoded = model.apply(variables, x, method='encode') Note ``method`` can also be a function that is not defined in ``Transformer``. In that case, the function should have at least one argument representing an instance of the Module class:: >>> def other_fn(instance, x): ... # instance.some_module_attr(...) ... instance.encode ... ... >>> model.apply(variables, x, method=other_fn) If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'`` RNG stream. If you want to use a different RNG stream or need to use multiple streams, you can pass a dictionary mapping each RNG stream name to its corresponding ``PRNGKey`` to ``apply``. If ``self.make_rng(name)`` is called on an RNG stream name that isn't passed by the user, it will default to using the ``'params'`` RNG stream. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, add_noise=False): ... x = nn.Dense(16)(x) ... x = nn.relu(x) ... ... if add_noise: ... # Add gaussian noise ... noise_key = self.make_rng('noise') ... x = x + jax.random.normal(noise_key, x.shape) ... ... 
return nn.Dense(1)(x) >>> x = jnp.empty((1, 7)) >>> module = Foo() >>> rngs = {'params': jax.random.key(0), 'noise': jax.random.key(1)} >>> variables = module.init(rngs, x) >>> out0 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> rngs['noise'] = jax.random.key(0) >>> out1 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> # different output (key(1) vs key(0)) >>> np.testing.assert_raises(AssertionError, np.testing.assert_allclose, out0, out1) >>> del rngs['noise'] >>> # self.make_rng('noise') will default to using the 'params' RNG stream >>> out2 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> # same output (key(0)) >>> np.testing.assert_allclose(out1, out2) >>> # passing in a single key is equivalent to passing in {'params': key} >>> out3 = module.apply(variables, x, add_noise=True, rngs=jax.random.key(0)) >>> # same output (key(0)) >>> np.testing.assert_allclose(out2, out3) Args: variables: A dictionary containing variables keyed by variable collections. See :mod:`flax.core.variables` for more details about variables. *args: Named arguments passed to the specified apply method. rngs: a dict of PRNGKeys to initialize the PRNG sequences. The "params" PRNG sequence is used to initialize parameters. method: A function to call apply on. This is generally a function in the module. If provided, applies this method. If not provided, applies the ``__call__`` method of the module. A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default, only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. 
The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the specified apply method. Returns: If ``mutable`` is False, returns output. If any collections are mutable, returns ``(output, vars)``, where ``vars`` are is a dict of the modified collections. """ Module._module_checks(self) if rngs is not None and not isinstance(rngs, dict): if not core.scope._is_valid_rng(rngs): raise errors.InvalidRngError( 'RNGs should be of shape (2,) or PRNGKey in Module ' f'{self.__class__.__name__}, but rngs are: {rngs}' ) rngs = {'params': rngs} if isinstance(method, str): attribute_name = method method = getattr(self, attribute_name) if not callable(method): class_name = type(self).__name__ raise TypeError( f"'{class_name}.{attribute_name}' must be a callable, got" f' {type(method)}.' ) # if the `method` string is a submodule, we create a lambda function # that calls the submodule, forwarding all arguments. if isinstance(method, Module): method = lambda self, *args, **kwargs: getattr(self, attribute_name)( *args, **kwargs ) elif method is None: method = self.__call__ method = _get_unbound_fn(method) return apply( method, self, mutable=mutable, capture_intermediates=capture_intermediates, )(variables, *args, **kwargs, rngs=rngs) def init_with_output( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = DenyList('intermediates'), capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]: """Initializes a module method with variables and returns output and modified variables. Args: rngs: The rngs for the variable collections. *args: Named arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. 
A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except "intermediates" are mutable. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the init function. Returns: ``(output, vars)``, where ``vars`` are is a dict of the modified collections. """ Module._module_checks(self) if not isinstance(rngs, dict): if not core.scope._is_valid_rng(rngs): raise errors.InvalidRngError( 'RNGs should be of shape (2,) or PRNGKey in Module ' f'{self.__class__.__name__}, but rngs are: {rngs}' ) rngs = {'params': rngs} if isinstance(method, str): attribute_name = method method = getattr(self, attribute_name) if not callable(method): class_name = type(self).__name__ raise TypeError( f"'{class_name}.{attribute_name}' must be a callable, got" f' {type(method)}.' 
) elif method is None: method = self.__call__ method = _get_unbound_fn(method) return init_with_output( method, self, mutable=mutable, capture_intermediates=capture_intermediates, )(rngs, *args, **kwargs) def init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = DenyList('intermediates'), capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[FrozenVariableDict, Dict[str, Any]]: """Initializes a module method with variables and returns modified variables. ``init`` takes as first argument either a single ``PRNGKey``, or a dictionary mapping variable collections names to their ``PRNGKeys``, and will call ``method`` (which is the module's ``__call__`` function by default) passing ``*args`` and ``**kwargs``, and returns a dictionary of initialized variables. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> import numpy as np >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, train): ... x = nn.Dense(16)(x) ... x = nn.BatchNorm(use_running_average=not train)(x) ... x = nn.relu(x) ... return nn.Dense(1)(x) >>> x = jnp.empty((1, 7)) >>> module = Foo() >>> key = jax.random.key(0) >>> variables = module.init(key, x, train=False) If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'`` RNG stream. If you want to use a different RNG stream or need to use multiple streams, you can pass a dictionary mapping each RNG stream name to its corresponding ``PRNGKey`` to ``init``. If ``self.make_rng(name)`` is called on an RNG stream name that isn't passed by the user, it will default to using the ``'params'`` RNG stream. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(16)(x) ... x = nn.relu(x) ... ... other_variable = self.variable( ... 'other_collection', ... 'other_variable', ... lambda x: jax.random.normal(self.make_rng('other_rng'), x.shape), ... 
x, ... ) ... x = x + other_variable.value ... ... return nn.Dense(1)(x) >>> module = Foo() >>> rngs = {'params': jax.random.key(0), 'other_rng': jax.random.key(1)} >>> variables0 = module.init(rngs, x) >>> rngs['other_rng'] = jax.random.key(0) >>> variables1 = module.init(rngs, x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables0['params'], variables1['params'] ... ) >>> # different other_variable (key(1) vs key(0)) >>> np.testing.assert_raises( ... AssertionError, ... np.testing.assert_allclose, ... variables0['other_collection']['other_variable'], ... variables1['other_collection']['other_variable'], ... ) >>> del rngs['other_rng'] >>> # self.make_rng('other_rng') will default to using the 'params' RNG stream >>> variables2 = module.init(rngs, x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables1['params'], variables2['params'] ... ) >>> # equivalent other_variable (key(0)) >>> np.testing.assert_allclose( ... variables1['other_collection']['other_variable'], ... variables2['other_collection']['other_variable'], ... ) >>> # passing in a single key is equivalent to passing in {'params': key} >>> variables3 = module.init(jax.random.key(0), x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables2['params'], variables3['params'] ... ) >>> # equivalent other_variable (key(0)) >>> np.testing.assert_allclose( ... variables2['other_collection']['other_variable'], ... variables3['other_collection']['other_variable'], ... ) Jitting ``init`` initializes a model lazily using only the shapes of the provided arguments, and avoids computing the forward pass with actual values. 
Example:: >>> module = nn.Dense(1) >>> init_jit = jax.jit(module.init) >>> variables = init_jit(jax.random.key(0), x) ``init`` is a light wrapper over ``apply``, so other ``apply`` arguments like ``method``, ``mutable``, and ``capture_intermediates`` are also available. Args: rngs: The rngs for the variable collections. *args: Named arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except "intermediates" are mutable. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the init function. Returns: The initialized variable dict. """ Module._module_checks(self) _, v_out = self.init_with_output( rngs, *args, method=method, mutable=mutable, capture_intermediates=capture_intermediates, **kwargs, ) return v_out def lazy_init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Optional[Callable[..., Any]] = None, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> FrozenVariableDict: """Initializes a module without computing on an actual input. lazy_init will initialize the variables without doing unnecessary compute. 
The input data should be passed as a ``jax.ShapeDtypeStruct`` which specifies the shape and dtype of the input but no concrete data. Example:: >>> model = nn.Dense(features=256) >>> variables = model.lazy_init( ... jax.random.key(0), jax.ShapeDtypeStruct((1, 128), jnp.float32)) The args and kwargs args passed to ``lazy_init`` can be a mix of concrete (jax arrays, scalars, bools) and abstract (ShapeDtypeStruct) values. Concrete values are only necessary for arguments that affect the initialization of variables. For example, the model might expect a keyword arg that enables/disables a subpart of the model. In this case, an explicit value (True/Flase) should be passed otherwise ``lazy_init`` cannot infer which variables should be initialized. Args: rngs: The rngs for the variable collections. *args: arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except "intermediates" are mutable. **kwargs: Keyword arguments passed to the init function. Returns: The initialized variable dict. """ Module._module_checks(self) def lazy_wrapper(rngs, *args, **kwargs): return self.init(rngs, *args, method=method, mutable=mutable, **kwargs) return partial_eval.lazy_init(lazy_wrapper)(rngs, *args, **kwargs) def variables(self) -> VariableDict: """Returns the variables in this module.""" if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.variables() def get_variable(self, col: str, name: str, default: Optional[T] = None) -> T: """Retrieves the value of a Variable. Args: col: the variable collection. name: the name of the variable. 
default: the default value to return if the variable does not exist in this scope. Returns: The value of the input variable, of the default value if the variable doesn't exist in this scope. """ if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.get_variable(col, name, default) def put_variable(self, col: str, name: str, value: Any): """Updates the value of the given variable if it is mutable, or an error otherwise. Args: col: the variable collection. name: the name of the variable. value: the new value of the variable. """ if self.scope is None: raise ValueError("Can't access variables on unbound modules") self.scope.put_variable(col, name, value) def sow(self, col: str, name: str, value: Any) -> bool: ... def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: ... def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: """Stores a value in a collection. Collections can be used to collect intermediate values without the overhead of explicitly passing a container through each Module call. If the target collection is not mutable ``sow`` behaves like a no-op and returns ``False``. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... self.sow('intermediates', 'h', h) ... 
return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> model = Foo() >>> variables = model.init(jax.random.key(0), x) >>> y, state = model.apply(variables, x, mutable=['intermediates']) >>> print(state['intermediates']) {'h': (Array([[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ]], dtype=float32),)} By default the values are stored in a tuple and each stored value is appended at the end. This way all intermediates can be tracked when the same module is called multiple times. Alternatively, a custom init/reduce function can be passed:: >>> class Foo2(nn.Module): ... @nn.compact ... def __call__(self, x): ... init_fn = lambda: 0 ... reduce_fn = lambda a, b: a + b ... self.sow('intermediates', 'h', x, ... init_fn=init_fn, reduce_fn=reduce_fn) ... self.sow('intermediates', 'h', x * 2, ... init_fn=init_fn, reduce_fn=reduce_fn) ... return x >>> x = jnp.ones((1, 1)) >>> model = Foo2() >>> variables = model.init(jax.random.key(0), x) >>> y, state = model.apply( ... variables, x, mutable=['intermediates']) >>> print(state['intermediates']) {'h': Array([[3.]], dtype=float32)} Args: col: The name of the variable collection. name: The name of the variable. value: The value of the variable. 
reduce_fn: The function used to combine the existing value with the new value. The default is to append the value to a tuple. init_fn: For the first value stored, ``reduce_fn`` will be passed the result of ``init_fn`` together with the value to be stored. The default is an empty tuple. Returns: ``True`` if the value has been stored successfully, ``False`` otherwise. """ if self.scope is None: raise ValueError("Can't store variables on unbound modules") if not self.scope.is_mutable_collection(col): return False if self.scope.has_variable(col, name): xs = self.scope.get_variable(col, name) else: self.scope.reserve(name, col) self._state.children[name] = col xs = init_fn() xs = reduce_fn(xs, value) self.scope.put_variable(col, name, xs) return True def perturb( self, name: str, value: T, collection: str = 'perturbations' ) -> T: """Add an zero-value variable ('perturbation') to the intermediate value. The gradient of ``value`` would be the same as the gradient of this perturbation variable. Therefore, if you define your loss function with both params and perturbations as standalone arguments, you can get the intermediate gradients of ``value`` by running ``jax.grad`` on the perturbation argument. .. note:: This is an experimental API and may be tweaked later for better performance and usability. At its current stage, it creates extra dummy variables that occupies extra memory space. Use it only to debug gradients in training. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(3)(x) ... x = self.perturb('dense3', x) ... return nn.Dense(2)(x) >>> def loss(variables, inputs, targets): ... preds = model.apply(variables, inputs) ... 
return jnp.square(preds - targets).mean() >>> x = jnp.ones((2, 9)) >>> y = jnp.ones((2, 2)) >>> model = Foo() >>> variables = model.init(jax.random.key(0), x) >>> intm_grads = jax.grad(loss, argnums=0)(variables, x, y) >>> print(intm_grads['perturbations']['dense3']) [[-1.456924 -0.44332537 0.02422847] [-1.456924 -0.44332537 0.02422847]] If perturbations are not passed to ``apply``, ``perturb`` behaves like a no-op so you can easily disable the behavior when not needed:: >>> model.apply(variables, x) # works as expected Array([[-1.0980128 , -0.67961735], [-1.0980128 , -0.67961735]], dtype=float32) >>> model.apply({'params': variables['params']}, x) # behaves like a no-op Array([[-1.0980128 , -0.67961735], [-1.0980128 , -0.67961735]], dtype=float32) >>> intm_grads = jax.grad(loss, argnums=0)({'params': variables['params']}, x, y) >>> 'perturbations' not in intm_grads True """ if self.scope is None: raise ValueError("Can't store variables on unbound modules") if self.is_mutable_collection(collection): if not self.scope.has_variable(collection, name): self.scope.reserve(name, collection) self._state.children[name] = collection self.scope.put_variable(collection, name, jnp.zeros_like(value)) # type: ignore if collection in self.scope.root._variables: if self.scope.has_variable(collection, name): value += self.scope.get_variable(collection, name) # type: ignore else: raise ValueError(f"Perturbation collection {collection} present, but " f"missing perturbation variable {name}") return value def tabulate( self, rngs: Union[PRNGKey, RNGSequences], *args, depth: Optional[int] = None, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), console_kwargs: Optional[Mapping[str, Any]] = None, table_kwargs: Mapping[str, Any] = MappingProxyType({}), column_kwargs: Mapping[str, Any] = MappingProxyType({}), compute_flops: bool = False, compute_vjp_flops: bool = False, **kwargs, ) -> str: """Creates a summary of the Module represented as a table. 
This method has the same signature and internally calls ``Module.init``, but instead of returning the variables, it returns the string summarizing the Module in a table. ``tabulate`` uses ``jax.eval_shape`` to run the forward computation without consuming any FLOPs or allocating memory. Additional arguments can be passed into the ``console_kwargs`` argument, for example, ``{'width': 120}``. For a full list of ``console_kwargs`` arguments, see: https://rich.readthedocs.io/en/stable/reference/console.html#rich.console.Console Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> # print(Foo().tabulate( >>> # jax.random.key(0), x, compute_flops=True, compute_vjp_flops=True)) This gives the following output:: Foo Summary ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ ┃ path ┃ module ┃ inputs ┃ outputs ┃ flops ┃ vjp_flops ┃ params ┃ ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ │ │ Foo │ float32[16,9] │ float32[16,2] │ 1504 │ 4460 │ │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_0 │ Dense │ float32[16,9] │ float32[16,4] │ 1216 │ 3620 │ bias: │ │ │ │ │ │ │ │ float32[4] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[9,4] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 40 (160 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_1 │ Dense │ float32[16,4] │ float32[16,2] │ 288 │ 840 │ bias: │ │ │ │ │ │ │ │ float32[2] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[4,2] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 10 (40 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ │ │ │ │ │ Total │ 50 (200 B) │ └─────────┴────────┴───────────────┴───────────────┴───────┴───────────┴─────────────────┘ Total 
Parameters: 50 (200 B) **Note**: rows order in the table does not represent execution order, instead it aligns with the order of keys in ``variables`` which are sorted alphabetically. **Note**: ``vjp_flops`` returns ``0`` if the module is not differentiable. Args: rngs: The rngs for the variable collections as passed to ``Module.init``. *args: The arguments to the forward computation. depth: controls how many submodule deep the summary can go. By default, its ``None`` which means no limit. If a submodule is not shown because of the depth limit, its parameter count and bytes will be added to the row of its first shown ancestor such that the sum of all rows always adds up to the total number of parameters of the Module. show_repeated: If ``True``, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. Default is ``False``. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except 'intermediates' are mutable. console_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.console.Console`` when rendering the table. Default arguments are ``{'force_terminal': True, 'force_jupyter': False}``. table_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.table.Table`` constructor. column_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.table.Table.add_column`` when adding columns to the table. compute_flops: whether to include a ``flops`` column in the table listing the estimated FLOPs cost of each module forward pass. Does incur actual on-device computation / compilation / memory allocation, but still introduces overhead for large modules (e.g. 
extra 20 seconds for a Stable Diffusion's UNet, whereas otherwise tabulation would finish in 5 seconds). compute_vjp_flops: whether to include a ``vjp_flops`` column in the table listing the estimated FLOPs cost of each module backward pass. Introduces a compute overhead of about 2-3X of ``compute_flops``. **kwargs: keyword arguments to pass to the forward computation. Returns: A string summarizing the Module. """ from flax.linen import summary tabulate_fn = summary.tabulate( self, rngs, depth=depth, show_repeated=show_repeated, mutable=mutable, console_kwargs=console_kwargs, table_kwargs=table_kwargs, column_kwargs=column_kwargs, compute_flops=compute_flops, compute_vjp_flops=compute_vjp_flops, ) return tabulate_fn(*args, **kwargs) def module_paths( self, rngs: Union[PRNGKey, RNGSequences], *args, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> dict[str, 'Module']: """Returns a dictionary mapping module paths to module instances. This method has the same signature and internally calls ``Module.init``, but instead of returning the variables, it returns a dictionary mapping module paths to unbounded copies of module instances that were used at runtime. ``module_paths`` uses ``jax.eval_shape`` to run the forward computation without consuming any FLOPs or allocating memory. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> modules = Foo().module_paths(jax.random.key(0), x) >>> print({ ... p: type(m).__name__ for p, m in modules.items() ... }) {'': 'Foo', 'Dense_0': 'Dense', 'Dense_1': 'Dense'} Args: rngs: The rngs for the variable collections as passed to ``Module.init``. *args: The arguments to the forward computation. show_repeated: If ``True``, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. 
Default is ``False``. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except 'intermediates' are mutable. **kwargs: keyword arguments to pass to the forward computation. Returns: A dict`ionary mapping module paths to module instances. """ from flax.linen import summary table = summary._get_module_table( module=self, depth=None, show_repeated=show_repeated, compute_flops=False, compute_vjp_flops=False, )(rngs, *args, **kwargs, mutable=mutable) return {'/'.join(row.path): row.module_copy for row in table} The provided code snippet includes necessary dependencies for implementing the `jvp` function. Write a Python function `def jvp( fn: Callable[..., Any], mdl: Module, primals, tangents, variable_tangents, variables: CollectionFilter = True, rngs: PRNGSequenceFilter = True, ) -> Union[Tuple[Any, Callable[..., Any]], Tuple[Any, Callable[..., Any], Any]]` to solve the following problem: A lifted version of ``jax.jvp``. See ``jax.jvp`` for the unlifted Jacobian-vector product (forward gradient). Note that no tangents are returned for variables. When variable tangents are required their value should be returned explicitly by ``fn`` using ``Module.variables``:: >>> import flax.linen as nn >>> import jax.numpy as jnp >>> class LearnScale(nn.Module): ... @nn.compact ... def __call__(self, x): ... p = self.param('test', nn.initializers._init(), ()) ... return p * x >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... scale = LearnScale() ... vars_t = jax.tree_util.tree_map(jnp.ones_like, ... scale.variables.get('params', {})) ... _, out_t = nn.jvp( ... lambda mdl, x: mdl(x), scale, (x,), (jnp.zeros_like(x),), ... variable_tangents={'params': vars_t}) ... return out_t Example:: >>> def learn_scale(scope, x): ... 
p = scope.param('scale', nn.initializers.zeros_init(), ()) ... return p * x >>> def f(scope, x): ... vars_t = jax.tree_util.tree_map(jnp.ones_like, scope.variables().get('params', {})) ... x, out_t = lift.jvp( ... learn_scale, scope, (x,), (jnp.zeros_like(x),), ... variable_tangents={'params': vars_t}) ... return out_t Args: fn: Function to be differentiated. Its arguments should be arrays, scalars, or standard Python containers of arrays or scalars. It should return an array, scalar, or standard Python container of arrays or scalars. It will receive the scope and primals as arguments. mdl: The module of which the variables will be differentiated. primals: The primal values at which the Jacobian of ``fun`` should be evaluated. Should be either a tuple or a list of arguments, and its length should be equal to the number of positional parameters of ``fun``. tangents: The tangent vector for which the Jacobian-vector product should be evaluated. Should be either a tuple or a list of tangents, with the same tree structure and array shapes as ``primals``. variable_tangents: A dict or PyTree fo dicts with the same structure as scopes. Each entry in the dict specifies the tangents for a variable collection. Not specifying a collection in variable_tangents is equivalent to passing a zero vector as the tangent. variables: other variables collections that are available in ``fn`` but do not receive a tangent. rngs: the prngs that are available inside ``fn``. Returns: A ``(primals_out, tangents_out)`` pair, where ``primals_out`` is ``fun(*primals)``, and ``tangents_out`` is the Jacobian-vector product of ``function`` evaluated at ``primals`` with ``tangents``. The ``tangents_out`` value has the same Python tree structure and shapes as ``primals_out``. 
Here is the function: def jvp( fn: Callable[..., Any], mdl: Module, primals, tangents, variable_tangents, variables: CollectionFilter = True, rngs: PRNGSequenceFilter = True, ) -> Union[Tuple[Any, Callable[..., Any]], Tuple[Any, Callable[..., Any], Any]]: """A lifted version of ``jax.jvp``. See ``jax.jvp`` for the unlifted Jacobian-vector product (forward gradient). Note that no tangents are returned for variables. When variable tangents are required their value should be returned explicitly by ``fn`` using ``Module.variables``:: >>> import flax.linen as nn >>> import jax.numpy as jnp >>> class LearnScale(nn.Module): ... @nn.compact ... def __call__(self, x): ... p = self.param('test', nn.initializers._init(), ()) ... return p * x >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... scale = LearnScale() ... vars_t = jax.tree_util.tree_map(jnp.ones_like, ... scale.variables.get('params', {})) ... _, out_t = nn.jvp( ... lambda mdl, x: mdl(x), scale, (x,), (jnp.zeros_like(x),), ... variable_tangents={'params': vars_t}) ... return out_t Example:: >>> def learn_scale(scope, x): ... p = scope.param('scale', nn.initializers.zeros_init(), ()) ... return p * x >>> def f(scope, x): ... vars_t = jax.tree_util.tree_map(jnp.ones_like, scope.variables().get('params', {})) ... x, out_t = lift.jvp( ... learn_scale, scope, (x,), (jnp.zeros_like(x),), ... variable_tangents={'params': vars_t}) ... return out_t Args: fn: Function to be differentiated. Its arguments should be arrays, scalars, or standard Python containers of arrays or scalars. It should return an array, scalar, or standard Python container of arrays or scalars. It will receive the scope and primals as arguments. mdl: The module of which the variables will be differentiated. primals: The primal values at which the Jacobian of ``fun`` should be evaluated. Should be either a tuple or a list of arguments, and its length should be equal to the number of positional parameters of ``fun``. 
tangents: The tangent vector for which the Jacobian-vector product should be evaluated. Should be either a tuple or a list of tangents, with the same tree structure and array shapes as ``primals``. variable_tangents: A dict or PyTree fo dicts with the same structure as scopes. Each entry in the dict specifies the tangents for a variable collection. Not specifying a collection in variable_tangents is equivalent to passing a zero vector as the tangent. variables: other variables collections that are available in ``fn`` but do not receive a tangent. rngs: the prngs that are available inside ``fn``. Returns: A ``(primals_out, tangents_out)`` pair, where ``primals_out`` is ``fun(*primals)``, and ``tangents_out`` is the Jacobian-vector product of ``function`` evaluated at ``primals`` with ``tangents``. The ``tangents_out`` value has the same Python tree structure and shapes as ``primals_out``. """ return lift_direct_transform( lift.jvp, (fn,), mdl, primals, tangents, variable_tangents, multi_scope=False, variables=variables, rngs=rngs, )
A lifted version of ``jax.jvp``. See ``jax.jvp`` for the unlifted Jacobian-vector product (forward gradient). Note that no tangents are returned for variables. When variable tangents are required their value should be returned explicitly by ``fn`` using ``Module.variables``:: >>> import flax.linen as nn >>> import jax.numpy as jnp >>> class LearnScale(nn.Module): ... @nn.compact ... def __call__(self, x): ... p = self.param('test', nn.initializers._init(), ()) ... return p * x >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... scale = LearnScale() ... vars_t = jax.tree_util.tree_map(jnp.ones_like, ... scale.variables.get('params', {})) ... _, out_t = nn.jvp( ... lambda mdl, x: mdl(x), scale, (x,), (jnp.zeros_like(x),), ... variable_tangents={'params': vars_t}) ... return out_t Example:: >>> def learn_scale(scope, x): ... p = scope.param('scale', nn.initializers.zeros_init(), ()) ... return p * x >>> def f(scope, x): ... vars_t = jax.tree_util.tree_map(jnp.ones_like, scope.variables().get('params', {})) ... x, out_t = lift.jvp( ... learn_scale, scope, (x,), (jnp.zeros_like(x),), ... variable_tangents={'params': vars_t}) ... return out_t Args: fn: Function to be differentiated. Its arguments should be arrays, scalars, or standard Python containers of arrays or scalars. It should return an array, scalar, or standard Python container of arrays or scalars. It will receive the scope and primals as arguments. mdl: The module of which the variables will be differentiated. primals: The primal values at which the Jacobian of ``fun`` should be evaluated. Should be either a tuple or a list of arguments, and its length should be equal to the number of positional parameters of ``fun``. tangents: The tangent vector for which the Jacobian-vector product should be evaluated. Should be either a tuple or a list of tangents, with the same tree structure and array shapes as ``primals``. 
variable_tangents: A dict or PyTree fo dicts with the same structure as scopes. Each entry in the dict specifies the tangents for a variable collection. Not specifying a collection in variable_tangents is equivalent to passing a zero vector as the tangent. variables: other variables collections that are available in ``fn`` but do not receive a tangent. rngs: the prngs that are available inside ``fn``. Returns: A ``(primals_out, tangents_out)`` pair, where ``primals_out`` is ``fun(*primals)``, and ``tangents_out`` is the Jacobian-vector product of ``function`` evaluated at ``primals`` with ``tangents``. The ``tangents_out`` value has the same Python tree structure and shapes as ``primals_out``.
22,615
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax def lift_direct_transform( transform: Callable[..., Any], targets: Tuple[Callable[..., Any], ...], mdl: Module, *args, multi_scope=True, **kwargs, ): """Lift direct transform.""" # TODO(marcvanzee): Improve docstrings (#1977). for target in targets: if _is_module_class(target): raise ValueError( f'The {transform.__name__} transform can only be applied on a Module' ' method. That is function that takes a Module instance as its first' ' arg.' ) elif not callable(target): raise ValueError('transform target must be callable') # normalize self.foo bound methods to class.foo unbound methods. 
targets = tuple(_get_unbound_fn(target) for target in targets) aug_transform = lambda *fns: functools.partial(transform, *fns) return decorator_lift_transform( aug_transform, targets, multi_scope=multi_scope )(mdl, *args, **kwargs) ModuleT = TypeVar('ModuleT', bound=Module) C = TypeVar('C') class FrozenDict(Mapping[K, V]): """An immutable variant of the Python dict.""" __slots__ = ('_dict', '_hash') def __init__(self, *args, __unsafe_skip_copy__=False, **kwargs): # pylint: disable=invalid-name # make sure the dict is as xs = dict(*args, **kwargs) if __unsafe_skip_copy__: self._dict = xs else: self._dict = _prepare_freeze(xs) self._hash = None def __getitem__(self, key): v = self._dict[key] if isinstance(v, dict): return FrozenDict(v) return v def __setitem__(self, key, value): raise ValueError('FrozenDict is immutable.') def __contains__(self, key): return key in self._dict def __iter__(self): return iter(self._dict) def __len__(self): return len(self._dict) def __repr__(self): return self.pretty_repr() def __reduce__(self): return FrozenDict, (self.unfreeze(),) def pretty_repr(self, num_spaces=4): """Returns an indented representation of the nested dictionary.""" def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return f'FrozenDict({pretty_dict(self._dict)})' def __hash__(self): if self._hash is None: h = 0 for key, value in self.items(): h ^= hash((key, value)) self._hash = h return self._hash def copy( self, add_or_replace: Mapping[K, V] = MappingProxyType({}) ) -> 'FrozenDict[K, V]': """Create a new FrozenDict with additional or replaced entries.""" return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type] def keys(self): return FrozenKeysView(self) def values(self): return FrozenValuesView(self) def items(self): for key in self._dict: yield (key, self[key]) def pop(self, key: K) 
-> Tuple['FrozenDict[K, V]', V]: """Create a new FrozenDict where one entry is removed. Example:: >>> from flax.core import FrozenDict >>> variables = FrozenDict({'params': {...}, 'batch_stats': {...}}) >>> new_variables, params = variables.pop('params') Args: key: the key to remove from the dict Returns: A pair with the new FrozenDict and the removed value. """ value = self[key] new_dict = dict(self._dict) new_dict.pop(key) new_self = type(self)(new_dict) return new_self, value def unfreeze(self) -> Dict[K, V]: """Unfreeze this FrozenDict. Returns: An unfrozen version of this FrozenDict instance. """ return unfreeze(self) def tree_flatten_with_keys(self) -> Tuple[Tuple[Any, ...], Hashable]: """Flattens this FrozenDict. Returns: A flattened version of this FrozenDict instance. """ sorted_keys = sorted(self._dict) return tuple( [(jax.tree_util.DictKey(k), self._dict[k]) for k in sorted_keys] ), tuple(sorted_keys) def tree_unflatten(cls, keys, values): # data is already deep copied due to tree map mechanism # we can skip the deep copy in the constructor return cls({k: v for k, v in zip(keys, values)}, __unsafe_skip_copy__=True) CollectionFilter = Filter PRNGSequenceFilter = Filter The provided code snippet includes necessary dependencies for implementing the `while_loop` function. Write a Python function `def while_loop( cond_fn: Callable[[ModuleT, C], bool], body_fn: Callable[[ModuleT, C], C], mdl: ModuleT, init: C, carry_variables: CollectionFilter = False, broadcast_variables: CollectionFilter = True, split_rngs: Mapping[PRNGSequenceFilter, bool] = FrozenDict(), ) -> C` to solve the following problem: Lifted version of jax.lax.while_loop. The lifted scope is passed to ``cond_fn`` and ``body_fn``. Broadcasted variables are immutable. The carry variable are mutable but cannot change shape and dtype. This also means you cannot initialize variables inside the body. 
Consider calling ``body_fn`` once manually before calling ``while_loop`` if variable initialization is required. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class WhileLoopExample(nn.Module): ... @nn.compact ... def __call__(self, x): ... def cond_fn(mdl, c): ... return mdl.variables['state']['acc'] < 10 ... def body_fn(mdl, c): ... acc = mdl.variable('state', 'acc', lambda: jnp.array(0)) ... acc.value += 1 ... y = nn.Dense(c.shape[-1])(c) ... return y ... c = x ... if self.is_mutable_collection('params'): ... return body_fn(self, c) ... else: ... return nn.while_loop(cond_fn, body_fn, self, c, ... carry_variables='state') >>> k = jax.random.key(0) >>> x = jnp.ones((2, 2)) >>> intial_vars = WhileLoopExample().init(k, x) >>> result, state = WhileLoopExample().apply(intial_vars, x, mutable=['state']) Args: cond_fn: Should return True as long as the loop should continue. body_fn: The body of the while loop. mdl: The Module which should be lifted into the loop. init: The initial state passed to the loop carry_variables: collections that are carried through the loop and are therefore mutable (default: none). broadcast_variables: collections that are closed over and are therefore read-only (default: all collections) split_rngs: Split PRNG sequences will be different for each loop iterations. If split is False the PRNGs will be the same across iterations. Returns: The final state after executing the while loop. Here is the function: def while_loop( cond_fn: Callable[[ModuleT, C], bool], body_fn: Callable[[ModuleT, C], C], mdl: ModuleT, init: C, carry_variables: CollectionFilter = False, broadcast_variables: CollectionFilter = True, split_rngs: Mapping[PRNGSequenceFilter, bool] = FrozenDict(), ) -> C: """Lifted version of jax.lax.while_loop. The lifted scope is passed to ``cond_fn`` and ``body_fn``. Broadcasted variables are immutable. The carry variable are mutable but cannot change shape and dtype. 
This also means you cannot initialize variables inside the body. Consider calling ``body_fn`` once manually before calling ``while_loop`` if variable initialization is required. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class WhileLoopExample(nn.Module): ... @nn.compact ... def __call__(self, x): ... def cond_fn(mdl, c): ... return mdl.variables['state']['acc'] < 10 ... def body_fn(mdl, c): ... acc = mdl.variable('state', 'acc', lambda: jnp.array(0)) ... acc.value += 1 ... y = nn.Dense(c.shape[-1])(c) ... return y ... c = x ... if self.is_mutable_collection('params'): ... return body_fn(self, c) ... else: ... return nn.while_loop(cond_fn, body_fn, self, c, ... carry_variables='state') >>> k = jax.random.key(0) >>> x = jnp.ones((2, 2)) >>> intial_vars = WhileLoopExample().init(k, x) >>> result, state = WhileLoopExample().apply(intial_vars, x, mutable=['state']) Args: cond_fn: Should return True as long as the loop should continue. body_fn: The body of the while loop. mdl: The Module which should be lifted into the loop. init: The initial state passed to the loop carry_variables: collections that are carried through the loop and are therefore mutable (default: none). broadcast_variables: collections that are closed over and are therefore read-only (default: all collections) split_rngs: Split PRNG sequences will be different for each loop iterations. If split is False the PRNGs will be the same across iterations. Returns: The final state after executing the while loop. """ return lift_direct_transform( lift.while_loop, (cond_fn, body_fn), mdl, init, carry_variables, broadcast_variables, split_rngs, )
Lifted version of jax.lax.while_loop. The lifted scope is passed to ``cond_fn`` and ``body_fn``. Broadcasted variables are immutable. The carry variable are mutable but cannot change shape and dtype. This also means you cannot initialize variables inside the body. Consider calling ``body_fn`` once manually before calling ``while_loop`` if variable initialization is required. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class WhileLoopExample(nn.Module): ... @nn.compact ... def __call__(self, x): ... def cond_fn(mdl, c): ... return mdl.variables['state']['acc'] < 10 ... def body_fn(mdl, c): ... acc = mdl.variable('state', 'acc', lambda: jnp.array(0)) ... acc.value += 1 ... y = nn.Dense(c.shape[-1])(c) ... return y ... c = x ... if self.is_mutable_collection('params'): ... return body_fn(self, c) ... else: ... return nn.while_loop(cond_fn, body_fn, self, c, ... carry_variables='state') >>> k = jax.random.key(0) >>> x = jnp.ones((2, 2)) >>> intial_vars = WhileLoopExample().init(k, x) >>> result, state = WhileLoopExample().apply(intial_vars, x, mutable=['state']) Args: cond_fn: Should return True as long as the loop should continue. body_fn: The body of the while loop. mdl: The Module which should be lifted into the loop. init: The initial state passed to the loop carry_variables: collections that are carried through the loop and are therefore mutable (default: none). broadcast_variables: collections that are closed over and are therefore read-only (default: all collections) split_rngs: Split PRNG sequences will be different for each loop iterations. If split is False the PRNGs will be the same across iterations. Returns: The final state after executing the while loop.
22,616
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax def _derive_profiling_name(module, fn): fn_name = _get_fn_name(fn) method_suffix = f'.{fn_name}' if fn_name != '__call__' else '' module_name = module.name or module.__class__.__name__ return f'{module_name}{method_suffix}' The provided code snippet includes necessary dependencies for implementing the `named_call` function. Write a Python function `def named_call(class_fn, force=True)` to solve the following problem: Labels a method for labelled traces in profiles. Note that it is better to use the `jax.named_scope` context manager directly to add names to JAX's metadata name stack. Args: class_fn: The class method to label. force: If True, the named_call transform is applied even if it is globally disabled. (e.g.: by calling `flax.linen.disable_named_call()`) Returns: A wrapped version of ``class_fn`` that is labeled. Here is the function: def named_call(class_fn, force=True): """Labels a method for labelled traces in profiles. Note that it is better to use the `jax.named_scope` context manager directly to add names to JAX's metadata name stack. Args: class_fn: The class method to label. force: If True, the named_call transform is applied even if it is globally disabled. (e.g.: by calling `flax.linen.disable_named_call()`) Returns: A wrapped version of ``class_fn`` that is labeled. 
""" # We use JAX's dynamic name-stack named_call. No transform boundary needed! @functools.wraps(class_fn) def wrapped_fn(self, *args, **kwargs): if (not force and not linen_module._use_named_call) or self._state.in_setup: # pylint: disable=protected-access # pylint: disable=protected-access return class_fn(self, *args, **kwargs) full_name = _derive_profiling_name(self, class_fn) return jax.named_call(class_fn, name=full_name)(self, *args, **kwargs) return wrapped_fn
Labels a method for labelled traces in profiles. Note that it is better to use the `jax.named_scope` context manager directly to add names to JAX's metadata name stack. Args: class_fn: The class method to label. force: If True, the named_call transform is applied even if it is globally disabled. (e.g.: by calling `flax.linen.disable_named_call()`) Returns: A wrapped version of ``class_fn`` that is labeled.
22,617
import dataclasses import functools import inspect from typing import ( Any, Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from flax import core from flax import errors, struct, traceback_util from flax import serialization from flax.core import Scope, lift, meta from flax.core.frozen_dict import FrozenDict from flax.core.scope import ( CollectionFilter, PRNGSequenceFilter, ) from flax.ids import FlaxId from flax.linen import module as linen_module from flax.linen.module import ( Module, Variable, _derive_profiling_name, _get_unbound_fn, wrap_method_once, ) from flax.typing import ( InOutAxis, InOutScanAxis, ) import jax Target = TypeVar('Target', bound=TransformTarget) def map_variables( target: Target, mapped_collections: CollectionFilter = True, trans_in_fn: Callable[..., Any] = lift.id_fn, trans_out_fn: Callable[..., Any] = lift.id_fn, init: bool = False, mutable: bool = False, rngs: PRNGSequenceFilter = True, variables: CollectionFilter = True, methods=None, ) -> Target: """Map Variables inside a module. ``map_variables`` can be used to transform the variables inside a module both before and after the module is applied. This is useful among other things for masking the weights of a module without having to modify the module itself. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn ... >>> class CausalDense(nn.Module): ... '''A dense layer that masks the weights such that the output is ... causal, i.e. output i only depends on input <= i. ... ''' ... features: int ... ... def apply_mask(self, variables): ... return (jax.tree_util.tree_map(jnp.triu, variables) ... if not self.is_initializing() else variables) ... ... def setup(self): ... # temporary class ... _CausalDense = nn.map_variables( ... nn.Dense, 'params', self.apply_mask, init=self.is_initializing()) ... self.dense = _CausalDense(features=self.features, use_bias=False) ... ... def __call__(self, x): ... return self.dense(x) ... 
>>> module = CausalDense(features=5) >>> variables = module.init(jax.random.key(0), jnp.ones((1, 5))) Args: target: the module or function to be transformed. mapped_collections: the collection(s) to be transformed. trans_in_fn: modifies the variables before applying the module or function. trans_out_fn: modifies the variables after applying the module or function, it is only applied if either ``init`` or ``mutable`` are not False. init: If True, variables are initialized before transformation. mutable: If True, the mapped variable collections will be mutable. rngs: PRNGSequences added to the transformed scope (default: all). variables: Additional Variable collections added to the transformed scope. Besides those specified by ``target`` (default: all). methods: If ``target`` is a ``Module``, the methods of ``Module`` to map variables for. Returns: a wrapped version of ``target`` that will map the specified collections. """ return lift_transform( lift.map_variables, target, mapped_collections, trans_in_fn, trans_out_fn, init, mutable, rngs, variables, methods=methods, ) class FrozenDict(Mapping[K, V]): """An immutable variant of the Python dict.""" __slots__ = ('_dict', '_hash') def __init__(self, *args, __unsafe_skip_copy__=False, **kwargs): # pylint: disable=invalid-name # make sure the dict is as xs = dict(*args, **kwargs) if __unsafe_skip_copy__: self._dict = xs else: self._dict = _prepare_freeze(xs) self._hash = None def __getitem__(self, key): v = self._dict[key] if isinstance(v, dict): return FrozenDict(v) return v def __setitem__(self, key, value): raise ValueError('FrozenDict is immutable.') def __contains__(self, key): return key in self._dict def __iter__(self): return iter(self._dict) def __len__(self): return len(self._dict) def __repr__(self): return self.pretty_repr() def __reduce__(self): return FrozenDict, (self.unfreeze(),) def pretty_repr(self, num_spaces=4): """Returns an indented representation of the nested dictionary.""" def pretty_dict(x): if 
not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return f'FrozenDict({pretty_dict(self._dict)})' def __hash__(self): if self._hash is None: h = 0 for key, value in self.items(): h ^= hash((key, value)) self._hash = h return self._hash def copy( self, add_or_replace: Mapping[K, V] = MappingProxyType({}) ) -> 'FrozenDict[K, V]': """Create a new FrozenDict with additional or replaced entries.""" return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type] def keys(self): return FrozenKeysView(self) def values(self): return FrozenValuesView(self) def items(self): for key in self._dict: yield (key, self[key]) def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]: """Create a new FrozenDict where one entry is removed. Example:: >>> from flax.core import FrozenDict >>> variables = FrozenDict({'params': {...}, 'batch_stats': {...}}) >>> new_variables, params = variables.pop('params') Args: key: the key to remove from the dict Returns: A pair with the new FrozenDict and the removed value. """ value = self[key] new_dict = dict(self._dict) new_dict.pop(key) new_self = type(self)(new_dict) return new_self, value def unfreeze(self) -> Dict[K, V]: """Unfreeze this FrozenDict. Returns: An unfrozen version of this FrozenDict instance. """ return unfreeze(self) def tree_flatten_with_keys(self) -> Tuple[Tuple[Any, ...], Hashable]: """Flattens this FrozenDict. Returns: A flattened version of this FrozenDict instance. 
""" sorted_keys = sorted(self._dict) return tuple( [(jax.tree_util.DictKey(k), self._dict[k]) for k in sorted_keys] ), tuple(sorted_keys) def tree_unflatten(cls, keys, values): # data is already deep copied due to tree map mechanism # we can skip the deep copy in the constructor return cls({k: v for k, v in zip(keys, values)}, __unsafe_skip_copy__=True) CollectionFilter = Filter InOutAxis = Union[Axis, In[Axis], Out[Axis]] The provided code snippet includes necessary dependencies for implementing the `add_metadata_axis` function. Write a Python function `def add_metadata_axis( target: Target, variable_axes: Mapping[CollectionFilter, InOutAxis] = FrozenDict(), metadata_params: Dict[Any, Any] = {}, ) -> Target` to solve the following problem: A helper to manipulate boxed axis metadata. This is a helper to manipulate the *metadata* in boxed variables, similar to how lifted ``vmap`` and ``scan`` will handle the introduction and stripping of the new metadata axis across a transform boundary. Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. variable_axes: the variable collections whose axis metadata is being transformed. Use `None` to indicate a broadcasted collection or an integer to specify an axis index for an introduced axis. methods: If `target` is a `Module`, the methods of `Module` to vmap over. metadata_params: arguments dict passed to AxisMetadata instances in the variable tree. Returns: A transformed version of ``target`` that performs a transform of the axis metadata on its variables. Here is the function: def add_metadata_axis( target: Target, variable_axes: Mapping[CollectionFilter, InOutAxis] = FrozenDict(), metadata_params: Dict[Any, Any] = {}, ) -> Target: """A helper to manipulate boxed axis metadata. This is a helper to manipulate the *metadata* in boxed variables, similar to how lifted ``vmap`` and ``scan`` will handle the introduction and stripping of the new metadata axis across a transform boundary. 
Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. variable_axes: the variable collections whose axis metadata is being transformed. Use `None` to indicate a broadcasted collection or an integer to specify an axis index for an introduced axis. methods: If `target` is a `Module`, the methods of `Module` to vmap over. metadata_params: arguments dict passed to AxisMetadata instances in the variable tree. Returns: A transformed version of ``target`` that performs a transform of the axis metadata on its variables. """ def add_fn(axis): return lambda x: meta.add_axis(x, axis, metadata_params) def remove_fn(axis): return lambda x: meta.remove_axis(x, axis, metadata_params) for col_name, axis in variable_axes.items(): target = map_variables( target, col_name, trans_in_fn=remove_fn(axis), trans_out_fn=add_fn(axis), mutable=True, ) return target
A helper to manipulate boxed axis metadata. This is a helper to manipulate the *metadata* in boxed variables, similar to how lifted ``vmap`` and ``scan`` will handle the introduction and stripping of the new metadata axis across a transform boundary. Args: target: a ``Module`` or a function taking a ``Module`` as its first argument. variable_axes: the variable collections whose axis metadata is being transformed. Use `None` to indicate a broadcasted collection or an integer to specify an axis index for an introduced axis. methods: If `target` is a `Module`, the methods of `Module` to vmap over. metadata_params: arguments dict passed to AxisMetadata instances in the variable tree. Returns: A transformed version of ``target`` that performs a transform of the axis metadata on its variables.
22,618
import dataclasses import enum import io from abc import ABC, abstractmethod from types import MappingProxyType from typing import ( Any, Callable, Dict, Iterable, List, Mapping, Optional, Sequence, Set, Tuple, Union, ) import jax import jax.numpy as jnp import numpy as np import rich.console import rich.table import rich.text import yaml import flax.linen.module as module_lib from flax.core import meta, unfreeze from flax.core.scope import ( CollectionFilter, DenyList, LazyRng, ) from flax.typing import ( Array, PRNGKey, RNGSequences, FrozenVariableDict, MutableVariableDict, LogicalNames, ) def _get_module_table( module: module_lib.Module, depth: Optional[int], show_repeated: bool, compute_flops: bool, compute_vjp_flops: bool, ) -> Callable[..., Table]: """A function that takes a Module and returns function with the same signature as `init` but returns the Table representation of the Module.""" def _get_table_fn(*args, **kwargs): with module_lib._tabulate_context(): def _get_variables(): return module.init(*args, **kwargs) variables = jax.eval_shape(_get_variables) calls = module_lib._context.call_info_stack[-1].calls calls.sort(key=lambda c: c.index) collections: Set[str] = set(variables.keys()) rows = [] all_paths: Set[Tuple[str, ...]] = set(call.path for call in calls) visited_paths: Set[Tuple[str, ...]] = set() for c in calls: call_depth = len(c.path) inputs = _process_inputs(c.args, c.kwargs) if c.path in visited_paths: if not show_repeated: continue module_vars = {} counted_vars = {} elif depth is not None: if call_depth > depth: continue module_vars, _ = _get_module_variables(c.path, variables, all_paths) if call_depth == depth: counted_vars = _get_path_variables(c.path, variables) else: counted_vars = module_vars else: module_vars, _ = _get_module_variables(c.path, variables, all_paths) counted_vars = module_vars visited_paths.add(c.path) rows.append( Row( c.path, c.module.copy(parent=None), c.method, inputs, c.outputs, module_vars, counted_vars, 
*_get_call_flops(c, compute_flops, compute_vjp_flops), ) ) return Table(module, tuple(collections), rows) return _get_table_fn def _render_table( table: Table, console_extras: Optional[Mapping[str, Any]], table_kwargs: Mapping[str, Any], column_kwargs: Mapping[str, Any], non_params_cols: List[str], ) -> str: """A function that renders a Table to a string representation using rich.""" console_kwargs = {'force_terminal': True, 'force_jupyter': False} if console_extras is not None: console_kwargs.update(console_extras) rich_table = rich.table.Table( show_header=True, show_lines=True, show_footer=True, title=f'{table.module.__class__.__name__} Summary', **table_kwargs, ) for c in non_params_cols: rich_table.add_column(c, **column_kwargs) for col in table.collections: rich_table.add_column(col, **column_kwargs) for row in table: collections_size_repr = [] for collection, size_bytes in row.size_and_bytes(table.collections).items(): col_repr = '' if collection in row.module_variables: module_variables = _represent_tree(row.module_variables[collection]) module_variables = _normalize_structure(module_variables) col_repr += _as_yaml_str( _summary_tree_map(_maybe_render, module_variables) ) if col_repr: col_repr += '\n\n' col_repr += f'[bold]{_size_and_bytes_repr(*size_bytes)}[/bold]' collections_size_repr.append(col_repr) no_show_methods = {'__call__', '<lambda>'} path_repr = '/'.join(row.path) method_repr = ( f' [dim]({row.method})[/dim]' if row.method not in no_show_methods else '' ) rich_table.add_row( path_repr, type(row.module_copy).__name__ + method_repr, *( _as_yaml_str( _summary_tree_map( _maybe_render, _normalize_structure(getattr(row, c)) ) ) for c in non_params_cols[2:] ), *collections_size_repr, ) # add footer with totals n_non_params_cols = len(non_params_cols) rich_table.columns[n_non_params_cols - 1].footer = rich.text.Text.from_markup( 'Total', justify='right' ) # get collection totals collection_total = {col: (0, 0) for col in table.collections} for row in 
table: for col, size_bytes in row.size_and_bytes(table.collections).items(): collection_total[col] = ( collection_total[col][0] + size_bytes[0], collection_total[col][1] + size_bytes[1], ) # add totals to footer for i, col in enumerate(table.collections): rich_table.columns[n_non_params_cols + i].footer = _size_and_bytes_repr( *collection_total[col] ) # add final totals to caption caption_totals = (0, 0) for size, num_bytes in collection_total.values(): caption_totals = ( caption_totals[0] + size, caption_totals[1] + num_bytes, ) rich_table.caption_style = 'bold' rich_table.caption = ( f'\nTotal Parameters: {_size_and_bytes_repr(*caption_totals)}' ) return '\n' + _get_rich_repr(rich_table, console_kwargs) + '\n' class DenyList: """DenyList represents an opt-out based mutability filter. DenyList can be used to make every collection mutable except the ones defined in the given filter. To for example make everything but the params collection mutable:: nn.apply(fn, mutable=nn.DenyList(["params"])) Attributes: deny: The filter representing the collections that are not mutable. """ deny: Filter CollectionFilter = Filter PRNGKey = jax.Array RNGSequences = Dict[str, PRNGKey] The provided code snippet includes necessary dependencies for implementing the `tabulate` function. Write a Python function `def tabulate( module: module_lib.Module, rngs: Union[PRNGKey, RNGSequences], depth: Optional[int] = None, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), console_kwargs: Optional[Mapping[str, Any]] = None, table_kwargs: Mapping[str, Any] = MappingProxyType({}), column_kwargs: Mapping[str, Any] = MappingProxyType({}), compute_flops: bool = False, compute_vjp_flops: bool = False, **kwargs, ) -> Callable[..., str]` to solve the following problem: Returns a function that creates a summary of the Module represented as a table. 
This function accepts most of the same arguments and internally calls `Module.init`, except that it returns a function of the form `(*args, **kwargs) -> str` where `*args` and `**kwargs` are passed to `method` (e.g. `__call__`) during the forward pass. `tabulate` uses `jax.eval_shape` under the hood to run the forward computation without consuming any FLOPs or allocating memory. Additional arguments can be passed into the `console_kwargs` argument, for example, `{'width': 120}`. For a full list of `console_kwargs` arguments, see: https://rich.readthedocs.io/en/stable/reference/console.html#rich.console.Console Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> tabulate_fn = nn.tabulate( ... Foo(), jax.random.key(0), compute_flops=True, compute_vjp_flops=True) >>> # print(tabulate_fn(x)) This gives the following output:: Foo Summary ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ ┃ path ┃ module ┃ inputs ┃ outputs ┃ flops ┃ vjp_flops ┃ params ┃ ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ │ │ Foo │ float32[16,9] │ float32[16,2] │ 1504 │ 4460 │ │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_0 │ Dense │ float32[16,9] │ float32[16,4] │ 1216 │ 3620 │ bias: │ │ │ │ │ │ │ │ float32[4] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[9,4] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 40 (160 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_1 │ Dense │ float32[16,4] │ float32[16,2] │ 288 │ 840 │ bias: │ │ │ │ │ │ │ │ float32[2] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[4,2] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 10 (40 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ │ │ │ │ │ Total 
│ 50 (200 B) │ └─────────┴────────┴───────────────┴───────────────┴───────┴───────────┴─────────────────┘ Total Parameters: 50 (200 B) **Note**: rows order in the table does not represent execution order, instead it aligns with the order of keys in `variables` which are sorted alphabetically. **Note**: `vjp_flops` returns `0` if the module is not differentiable. Args: module: The module to tabulate. rngs: The rngs for the variable collections as passed to `Module.init`. depth: controls how many submodule deep the summary can go. By default its `None` which means no limit. If a submodule is not shown because of the depth limit, its parameter count and bytes will be added to the row of its first shown ancestor such that the sum of all rows always adds up to the total number of parameters of the Module. show_repeated: If `True`, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. Default is `False`. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except 'intermediates' are mutable. console_kwargs: An optional dictionary with additional keyword arguments that are passed to `rich.console.Console` when rendering the table. Default arguments are `{'force_terminal': True, 'force_jupyter': False}`. table_kwargs: An optional dictionary with additional keyword arguments that are passed to `rich.table.Table` constructor. column_kwargs: An optional dictionary with additional keyword arguments that are passed to `rich.table.Table.add_column` when adding columns to the table. compute_flops: whether to include a `flops` column in the table listing the estimated FLOPs cost of each module forward pass. 
Does incur actual on-device computation / compilation / memory allocation, but still introduces overhead for large modules (e.g. extra 20 seconds for a Stable Diffusion's UNet, whereas otherwise tabulation would finish in 5 seconds). compute_vjp_flops: whether to include a `vjp_flops` column in the table listing the estimated FLOPs cost of each module backward pass. Introduces a compute overhead of about 2-3X of `compute_flops`. **kwargs: Additional arguments passed to `Module.init`. Returns: A function that accepts the same `*args` and `**kwargs` of the forward pass (`method`) and returns a string with a tabular representation of the Modules. Here is the function: def tabulate( module: module_lib.Module, rngs: Union[PRNGKey, RNGSequences], depth: Optional[int] = None, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), console_kwargs: Optional[Mapping[str, Any]] = None, table_kwargs: Mapping[str, Any] = MappingProxyType({}), column_kwargs: Mapping[str, Any] = MappingProxyType({}), compute_flops: bool = False, compute_vjp_flops: bool = False, **kwargs, ) -> Callable[..., str]: """Returns a function that creates a summary of the Module represented as a table. This function accepts most of the same arguments and internally calls `Module.init`, except that it returns a function of the form `(*args, **kwargs) -> str` where `*args` and `**kwargs` are passed to `method` (e.g. `__call__`) during the forward pass. `tabulate` uses `jax.eval_shape` under the hood to run the forward computation without consuming any FLOPs or allocating memory. Additional arguments can be passed into the `console_kwargs` argument, for example, `{'width': 120}`. For a full list of `console_kwargs` arguments, see: https://rich.readthedocs.io/en/stable/reference/console.html#rich.console.Console Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... 
return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> tabulate_fn = nn.tabulate( ... Foo(), jax.random.key(0), compute_flops=True, compute_vjp_flops=True) >>> # print(tabulate_fn(x)) This gives the following output:: Foo Summary ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ ┃ path ┃ module ┃ inputs ┃ outputs ┃ flops ┃ vjp_flops ┃ params ┃ ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ │ │ Foo │ float32[16,9] │ float32[16,2] │ 1504 │ 4460 │ │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_0 │ Dense │ float32[16,9] │ float32[16,4] │ 1216 │ 3620 │ bias: │ │ │ │ │ │ │ │ float32[4] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[9,4] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 40 (160 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_1 │ Dense │ float32[16,4] │ float32[16,2] │ 288 │ 840 │ bias: │ │ │ │ │ │ │ │ float32[2] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[4,2] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 10 (40 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ │ │ │ │ │ Total │ 50 (200 B) │ └─────────┴────────┴───────────────┴───────────────┴───────┴───────────┴─────────────────┘ Total Parameters: 50 (200 B) **Note**: rows order in the table does not represent execution order, instead it aligns with the order of keys in `variables` which are sorted alphabetically. **Note**: `vjp_flops` returns `0` if the module is not differentiable. Args: module: The module to tabulate. rngs: The rngs for the variable collections as passed to `Module.init`. depth: controls how many submodule deep the summary can go. By default its `None` which means no limit. 
If a submodule is not shown because of the depth limit, its parameter count and bytes will be added to the row of its first shown ancestor such that the sum of all rows always adds up to the total number of parameters of the Module. show_repeated: If `True`, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. Default is `False`. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except 'intermediates' are mutable. console_kwargs: An optional dictionary with additional keyword arguments that are passed to `rich.console.Console` when rendering the table. Default arguments are `{'force_terminal': True, 'force_jupyter': False}`. table_kwargs: An optional dictionary with additional keyword arguments that are passed to `rich.table.Table` constructor. column_kwargs: An optional dictionary with additional keyword arguments that are passed to `rich.table.Table.add_column` when adding columns to the table. compute_flops: whether to include a `flops` column in the table listing the estimated FLOPs cost of each module forward pass. Does incur actual on-device computation / compilation / memory allocation, but still introduces overhead for large modules (e.g. extra 20 seconds for a Stable Diffusion's UNet, whereas otherwise tabulation would finish in 5 seconds). compute_vjp_flops: whether to include a `vjp_flops` column in the table listing the estimated FLOPs cost of each module backward pass. Introduces a compute overhead of about 2-3X of `compute_flops`. **kwargs: Additional arguments passed to `Module.init`. Returns: A function that accepts the same `*args` and `**kwargs` of the forward pass (`method`) and returns a string with a tabular representation of the Modules. 
""" # add non-default arguments to kwargs, this prevents some issue we overloading init # see: https://github.com/google/flax/issues/3299 if mutable != DenyList('intermediates'): kwargs['mutable'] = mutable def _tabulate_fn(*fn_args, **fn_kwargs): table_fn = _get_module_table( module, depth=depth, show_repeated=show_repeated, compute_flops=compute_flops, compute_vjp_flops=compute_vjp_flops, ) table = table_fn(rngs, *fn_args, **fn_kwargs, **kwargs) non_param_cols = [ 'path', 'module', 'inputs', 'outputs', ] if compute_flops: non_param_cols.append('flops') if compute_vjp_flops: non_param_cols.append('vjp_flops') return _render_table( table, console_kwargs, table_kwargs, column_kwargs, non_param_cols ) return _tabulate_fn
Returns a function that creates a summary of the Module represented as a table. This function accepts most of the same arguments and internally calls `Module.init`, except that it returns a function of the form `(*args, **kwargs) -> str` where `*args` and `**kwargs` are passed to `method` (e.g. `__call__`) during the forward pass. `tabulate` uses `jax.eval_shape` under the hood to run the forward computation without consuming any FLOPs or allocating memory. Additional arguments can be passed into the `console_kwargs` argument, for example, `{'width': 120}`. For a full list of `console_kwargs` arguments, see: https://rich.readthedocs.io/en/stable/reference/console.html#rich.console.Console Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> tabulate_fn = nn.tabulate( ... Foo(), jax.random.key(0), compute_flops=True, compute_vjp_flops=True) >>> # print(tabulate_fn(x)) This gives the following output:: Foo Summary ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ ┃ path ┃ module ┃ inputs ┃ outputs ┃ flops ┃ vjp_flops ┃ params ┃ ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ │ │ Foo │ float32[16,9] │ float32[16,2] │ 1504 │ 4460 │ │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_0 │ Dense │ float32[16,9] │ float32[16,4] │ 1216 │ 3620 │ bias: │ │ │ │ │ │ │ │ float32[4] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[9,4] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 40 (160 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_1 │ Dense │ float32[16,4] │ float32[16,2] │ 288 │ 840 │ bias: │ │ │ │ │ │ │ │ float32[2] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[4,2] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 10 (40 B) │ 
├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ │ │ │ │ │ Total │ 50 (200 B) │ └─────────┴────────┴───────────────┴───────────────┴───────┴───────────┴─────────────────┘ Total Parameters: 50 (200 B) **Note**: rows order in the table does not represent execution order, instead it aligns with the order of keys in `variables` which are sorted alphabetically. **Note**: `vjp_flops` returns `0` if the module is not differentiable. Args: module: The module to tabulate. rngs: The rngs for the variable collections as passed to `Module.init`. depth: controls how many submodule deep the summary can go. By default its `None` which means no limit. If a submodule is not shown because of the depth limit, its parameter count and bytes will be added to the row of its first shown ancestor such that the sum of all rows always adds up to the total number of parameters of the Module. show_repeated: If `True`, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. Default is `False`. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except 'intermediates' are mutable. console_kwargs: An optional dictionary with additional keyword arguments that are passed to `rich.console.Console` when rendering the table. Default arguments are `{'force_terminal': True, 'force_jupyter': False}`. table_kwargs: An optional dictionary with additional keyword arguments that are passed to `rich.table.Table` constructor. column_kwargs: An optional dictionary with additional keyword arguments that are passed to `rich.table.Table.add_column` when adding columns to the table. compute_flops: whether to include a `flops` column in the table listing the estimated FLOPs cost of each module forward pass. 
Does incur actual on-device computation / compilation / memory allocation, but still introduces overhead for large modules (e.g. extra 20 seconds for a Stable Diffusion's UNet, whereas otherwise tabulation would finish in 5 seconds). compute_vjp_flops: whether to include a `vjp_flops` column in the table listing the estimated FLOPs cost of each module backward pass. Introduces a compute overhead of about 2-3X of `compute_flops`. **kwargs: Additional arguments passed to `Module.init`. Returns: A function that accepts the same `*args` and `**kwargs` of the forward pass (`method`) and returns a string with a tabular representation of the Modules.
22,619
import dataclasses import enum import io from abc import ABC, abstractmethod from types import MappingProxyType from typing import ( Any, Callable, Dict, Iterable, List, Mapping, Optional, Sequence, Set, Tuple, Union, ) import jax import jax.numpy as jnp import numpy as np import rich.console import rich.table import rich.text import yaml import flax.linen.module as module_lib from flax.core import meta, unfreeze from flax.core.scope import ( CollectionFilter, DenyList, LazyRng, ) from flax.typing import ( Array, PRNGKey, RNGSequences, FrozenVariableDict, MutableVariableDict, LogicalNames, ) def _size_and_bytes(pytree: Any) -> Tuple[int, int]: leaves = jax.tree_util.tree_leaves(pytree) size = sum(x.size for x in leaves if hasattr(x, 'size')) num_bytes = sum( x.size * x.dtype.itemsize for x in leaves if hasattr(x, 'size') ) return size, num_bytes
null
22,620
import functools import warnings from typing import Any, Callable, Optional, Union, overload import jax import jax.numpy as jnp from jax import lax, random from flax.linen import initializers from flax.linen.dtypes import promote_dtype from flax.linen.linear import ( DenseGeneral, default_kernel_init, ) from flax.linen.module import Module, compact, merge_param from flax.linen.normalization import LayerNorm from flax.typing import ( Array, PRNGKey, Dtype, Shape as Shape, Initializer, PrecisionLike, DotGeneralT, ) def dot_product_attention_weights( query: Array, key: Array, bias: Optional[Array] = None, mask: Optional[Array] = None, broadcast_dropout: bool = True, dropout_rng: Optional[PRNGKey] = None, dropout_rate: float = 0.0, deterministic: bool = False, dtype: Optional[Dtype] = None, precision: PrecisionLike = None, module: Optional[Module] = None, ): """Computes dot-product attention weights given query and key. Used by :func:`dot_product_attention`, which is what you'll most likely use. But if you want access to the attention weights for introspection, then you can directly call this function and call einsum yourself. Args: query: queries for calculating attention with shape of ``[batch..., q_length, num_heads, qk_depth_per_head]``. key: keys for calculating attention with shape of ``[batch..., kv_length, num_heads, qk_depth_per_head]``. bias: bias for the attention weights. This should be broadcastable to the shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for incorporating causal masks, padding masks, proximity bias, etc. mask: mask for the attention weights. This should be broadcastable to the shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for incorporating causal masks. Attention weights are masked out if their corresponding mask value is ``False``. broadcast_dropout: bool: use a broadcasted dropout along batch dims. 
dropout_rng: JAX PRNGKey: to be used for dropout dropout_rate: dropout rate deterministic: bool, deterministic or not (to apply dropout) dtype: the dtype of the computation (default: infer from inputs and params) precision: numerical precision of the computation see ``jax.lax.Precision`` for details. module: the Module that will sow the attention weights into the 'intermediates' collection. Remember to mark 'intermediates' as mutable via ``mutable=['intermediates']`` in order to have that collection returned. If ``module`` is None, the attention weights will not be sowed. Returns: Output of shape ``[batch..., num_heads, q_length, kv_length]``. """ query, key = promote_dtype(query, key, dtype=dtype) dtype = query.dtype assert query.ndim == key.ndim, 'q, k must have same rank.' assert query.shape[:-3] == key.shape[:-3], 'q, k batch dims must match.' assert query.shape[-2] == key.shape[-2], 'q, k num_heads must match.' assert query.shape[-1] == key.shape[-1], 'q, k depths must match.' # calculate attention matrix depth = query.shape[-1] query = query / jnp.sqrt(depth).astype(dtype) # attn weight shape is (batch..., num_heads, q_length, kv_length) attn_weights = jnp.einsum( '...qhd,...khd->...hqk', query, key, precision=precision ) # apply attention bias: masking, dropout, proximity bias, etc. 
if bias is not None: attn_weights = attn_weights + bias # apply attention mask if mask is not None: big_neg = jnp.finfo(dtype).min attn_weights = jnp.where(mask, attn_weights, big_neg) # normalize the attention weights attn_weights = jax.nn.softmax(attn_weights).astype(dtype) if module: module.sow('intermediates', 'attention_weights', attn_weights) # apply attention dropout if not deterministic and dropout_rate > 0.0: keep_prob = 1.0 - dropout_rate if broadcast_dropout: # dropout is broadcast across the batch + head dimensions dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:] keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape) # type: ignore else: keep = random.bernoulli(dropout_rng, keep_prob, attn_weights.shape) # type: ignore multiplier = keep.astype(dtype) / jnp.asarray(keep_prob, dtype=dtype) attn_weights = attn_weights * multiplier return attn_weights def promote_dtype(*args, dtype=None, inexact=True) -> List[Any]: """ "Promotes input arguments to a specified or inferred dtype. All args are cast to the same dtype. See ``canonicalize_dtype`` for how this dtype is determined. The behavior of promote_dtype is mostly a convinience wrapper around ``jax.numpy.promote_types``. The differences being that it automatically casts all input to the inferred dtypes, allows inference to be overridden by a forced dtype, and has an optional check to garantuee the resulting dtype is inexact. Args: *args: JAX array compatible values. None values are returned as is. dtype: Optional dtype override. If specified the arguments are cast to the specified dtype instead and dtype inference is disabled. inexact: When True, the output dtype must be a subdtype of `jnp.inexact`. Inexact dtypes are real or complex floating points. This is useful when you want to apply operations that don't work directly on integers like taking a mean for example. Returns: The arguments cast to arrays of the same dtype. 
""" dtype = canonicalize_dtype(*args, dtype=dtype, inexact=inexact) return [jnp.asarray(x, dtype) if x is not None else None for x in args] class Module(ModuleBase): """Base class for all neural network modules. Layers and models should subclass this class. All Flax Modules are Python 3.7 `dataclasses <https://docs.python.org/3/library/dataclasses.html>`_. Since dataclasses take over ``__init__``, you should instead override :meth:`setup`, which is automatically called to initialize the module. Modules can contain submodules, and in this way can be nested in a tree structure. Submodels can be assigned as regular attributes inside the :meth:`setup` method. You can define arbitrary "forward pass" methods on your Module subclass. While no methods are special-cased, ``__call__`` is a popular choice because it allows you to use module instances as if they are functions:: >>> from flax import linen as nn >>> from typing import Tuple >>> class Module(nn.Module): ... features: Tuple[int, ...] = (16, 4) ... def setup(self): ... self.dense1 = nn.Dense(self.features[0]) ... self.dense2 = nn.Dense(self.features[1]) ... def __call__(self, x): ... return self.dense2(nn.relu(self.dense1(x))) Optionally, for more concise module implementations where submodules definitions are co-located with their usage, you can use the :meth:`compact` wrapper. """ if typing.TYPE_CHECKING: name: Optional[str] = module_field(kw_only=True, default=None) parent: Union['Module', _Sentinel, None] = module_field( kw_only=True, default=None ) def __init__(self, *args, **kwargs): # this stub makes sure pytype accepts constructor arguments. pass def __call__(self, *args, **kwargs) -> Any: # this stub allows pytype to accept Modules as Callables. pass def __init_subclass__(cls, kw_only: bool = False, **kwargs: Any) -> None: """Automatically initializes all subclasses as custom dataclasses.""" super().__init_subclass__(**kwargs) # All Flax Modules are dataclasses. 
We force this convention since # it encourages the stateless behavior needed to clone module instances for # functional transformation. Instead of using a python metaclass, we # automatically transform Modules into dataclasses at subclass creation # time, and we set the last dataclass arguments to `parent` and `name`. cls._customized_dataclass_transform(kw_only) # We wrap user-defined methods including setup and __call__ to enforce # a number of different checks and to provide clear error messages. cls._verify_single_or_no_compact() cls._find_compact_name_scope_methods() cls._wrap_module_attributes() # Set empty class defaults. cls._state = _uninitialized_module_internal_state # type: ignore[attr-defined] cls.scope: Optional[Scope] = None # type: ignore # Handles weak referencing of parent Modules to prevent reference cycles. cls._parent_ref = None # type: ignore[attr-defined] cls.parent = ParentDescriptor() # type: ignore[assignment] def _customized_dataclass_transform(cls, kw_only: bool): """Transforms `cls` into a dataclass, with custom additional behavior. 1. Inject `parent` and `name` fields. (If they are already present, then check that they have the expected types.) 2. Set compare, hash, and repr to False for non-init fields. 3. Generate a hash function (if not provided by cls). """ # Check reserved attributes have expected type annotations. annotations = dict(cls.__dict__.get('__annotations__', {})) if annotations.get('parent', _ParentType) != _ParentType: raise errors.ReservedModuleAttributeError(annotations) if annotations.get('name', str) not in ('str', str, Optional[str]): raise errors.ReservedModuleAttributeError(annotations) # any non-init field will only be set in setup # During __hash__ and __eq__ the field is not set yet # so it should not be used in compare, hash or repr. 
for field in annotations: field_meta = getattr(cls, field, None) if isinstance(field_meta, dataclasses.Field) and not field_meta.init: field_meta.compare = False field_meta.hash = False field_meta.repr = False extra_fields = [ ( 'parent', _ParentType, kw_only_dataclasses.field( repr=False, default=_unspecified_parent, kw_only=True ), ), ( 'name', Optional[str], kw_only_dataclasses.field(default=None, kw_only=True), ), ] if kw_only: if tuple(sys.version_info)[:3] >= (3, 10, 0): for ( name, annotation, # pytype: disable=invalid-annotation default, ) in extra_fields: setattr(cls, name, default) cls.__annotations__[name] = annotation dataclasses.dataclass( # type: ignore[call-overload] unsafe_hash='__hash__' not in cls.__dict__, repr=False, kw_only=True, )(cls) else: raise TypeError('`kw_only` is not available before Py 3.10.') else: # Now apply dataclass transform (which operates in-place). # Do generate a hash function only if not provided by the class. kw_only_dataclasses.dataclass( cls, unsafe_hash='__hash__' not in cls.__dict__, repr=False, extra_fields=extra_fields, ) # pytype: disable=wrong-keyword-args cls.__hash__ = _wrap_hash(cls.__hash__) # type: ignore[method-assign] def _verify_single_or_no_compact(cls): """Statically verifies that at most a single method is labelled compact.""" methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)] n_compact_fns = len( [ method_name for method_name in methods if hasattr(getattr(cls, method_name), 'compact') ] ) if n_compact_fns > 1: raise errors.MultipleMethodsCompactError() def _find_compact_name_scope_methods(cls): """Finds all compact_name_scope methods in the class.""" methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)] compact_name_scope_fns = tuple( method_name for method_name in methods if hasattr(getattr(cls, method_name), 'compact_name_scope') ) cls._compact_name_scope_methods = compact_name_scope_fns def _wrap_module_attributes(cls): """Wraps user-defined non-inherited methods 
    and descriptors with state management functions.
    """
    # wrap methods
    # Dataclass fields and the dunder plumbing below must stay unwrapped.
    method_exclusions = [f.name for f in dataclasses.fields(cls)] + [
      '__eq__',
      '__repr__',
      '__init__',
      '__hash__',
      '__post_init__',
    ]
    for key in _get_local_method_names(cls, exclude=method_exclusions):
      method = getattr(cls, key)
      # Methods explicitly marked `nowrap` opt out of state management.
      if hasattr(method, 'nowrap'):
        continue
      setattr(cls, key, wrap_method_once(method))
    # wrap descriptors
    descriptor_exclusions = [f.name for f in dataclasses.fields(cls)] + [
      'parent',
      '__dict__',
    ]
    for key in _get_local_descriptor_names(cls, descriptor_exclusions):
      # don't use getattr here, since it will call the descriptor
      descriptor = cls.__dict__[key]
      if hasattr(descriptor, 'nowrap'):
        continue
      setattr(cls, key, wrap_descriptor_once(descriptor))
    return cls

  def _call_wrapped_method(self, fun, args, kwargs):
    """Calls a wrapped method.

    This function is responsible for setting up the thread local state
    correctly before calling the method and cleaning up afterwards. This
    includes storing intermediates, setup of the compact scope, and making
    sure setup is called before any other method.

    Args:
      fun: The wrapped method.
      args: Named arguments passed to ``fun``.
      kwargs: Keyword arguments passed to ``fun``.

    Returns:
      The results of calling ``fun``.
    """
    # Classify the call: compact methods carry a `compact` attribute, and the
    # `setup` method gets special lazy-invocation treatment below.
    is_compact_method = hasattr(fun, 'compact')
    fun_name = _get_fn_name(fun)
    is_setup_method = fun_name == 'setup'
    # Only record call info for non-setup calls while a call-info stack is
    # active (i.e. under module_paths/tabulate-style tracing).
    add_call_info = not is_setup_method and len(_context.call_info_stack) > 0
    # We lazily call setup() only when needed.
    if is_setup_method:
      if self.scope is None:
        raise errors.CallSetupUnboundModuleError()
      # Remember whether we were already inside setup so that recursive
      # (e.g. super()) calls do not reset state prematurely in `finally`.
      is_recurrent = self._state.in_setup
      self._state.in_setup = True
    else:
      # Any non-setup method call triggers setup() first, lazily.
      self._try_setup()

    if is_compact_method:
      if self.scope is None:
        raise errors.CallCompactUnboundModuleError()
      is_recurrent = self._state.in_compact_method
      self._state.in_compact_method = True
    _context.module_stack.append(self)
    try:
      # get call info
      if add_call_info:
        assert self.scope is not None
        call_index = _context.call_info_stack[-1].get_call_index()

      # Route the call through any active interceptors.
      if _global_interceptor_stack:
        run_fun = functools.partial(run_interceptors, fun)
      else:
        run_fun = fun

      # call method
      if _use_named_call:
        # Attach a profiling name scope around the actual call.
        with jax.named_scope(_derive_profiling_name(self, fun)):
          y = run_fun(self, *args, **kwargs)
      else:
        y = run_fun(self, *args, **kwargs)

      # Optionally sow the method output into the 'intermediates' collection,
      # subject to the capture filter installed by `capture_intermediates`.
      if _context.capture_stack:
        filter_fn = _context.capture_stack[-1]
        if filter_fn and filter_fn(self, fun_name):
          self.sow('intermediates', fun_name, y)
      if add_call_info:
        _args, _kwargs, _y = flax.linen.summary._represent_tree(
          (args, kwargs, y)
        )
        _context.call_info_stack[-1].calls.append(
          _CallInfo(
            call_index,
            self.path,
            self.clone(),
            self.scope.rngs,
            self.scope.mutable,
            fun.__name__,
            _args,
            _kwargs,
            _y,
          )
        )
      return y
    finally:
      _context.module_stack.pop()
      if is_compact_method:
        # Rewind the scope so a subsequent compact call starts fresh.
        object.__setattr__(self, 'scope', self.scope.rewound())
      # setup or compact calls can be recurrent for example due to super calls
      # resetting the state would cause is compact/setup method
      # to be set to False prematurely.
      if (is_compact_method or is_setup_method) and not is_recurrent:
        self._state.reset()

  def __setattr__(self, name: str, val: Any):
    """Sets an attribute on this Module.

    We overload setattr solely to support pythonic naming via assignment of
    submodules in the special :meth:`setup` function::

      self.submodule_name = MyModule(...)

    We also support lists and other general pytrees, e.g.::

      self.submodules = [MyModule0(..), MyModule1(..), ...]

    Args:
      name: Attribute to set.
      val: Value of the attribute.
""" fields = self.__dataclass_fields__ # pytype: disable=attribute-error is_dataclass_attr = name in fields and fields[name].init if not self._state.in_setup: if not self._state.is_initialized: # Setting attributes before end of Module.__post_init__() object.__setattr__(self, name, val) return else: # We're past all initialization and setup logic: # Raises a TypeError just like frozen python dataclasses. raise errors.SetAttributeFrozenModuleError( self.__class__.__name__, name, val ) # We're inside the setup() method: if is_dataclass_attr: # These names are specified as dataclass fields. They should not be # initialized within the setup() method, but can be modified freely # before it. raise errors.SetAttributeInModuleSetupError() # Values (that may be variables or submodules) are being defined and # attached in setup(), we run some extra logic in that case. self._register_submodules(name, val) def __getattr__(self, name: str) -> Any: """Call setup() before getting any setup-defined attributes.""" # We don't want to return anything for python copy / pickle methods. if name in _UNDEFINED_COPY_PICKLE_METHODS: raise AttributeError() self._try_setup() if name in self.__dict__: return self.__dict__[name] else: msg = f'"{self.__class__.__name__}" object has no attribute "{name}".' if self.scope is None: msg += ( f' If "{name}" is defined in \'.setup()\', remember these fields ' "are only accessible from inside 'init' or 'apply'." ) raise AttributeError(msg) def __dir__(self) -> List[str]: """Call setup() before listing attributes.""" self._try_setup() return object.__dir__(self) # type: ignore def __post_init__(self) -> None: # DO NOT REMOVE - Marker for internal logging. # In dataclasses, __init__ is overridden to process dataclass arguments, # and __post_init__ is called immediately afterwards. 
Here, depending on the # type of `parent` passed to initialize the Module, we either defer # initialization, attach this Module as a submodule of a parent, or bind # this Module at the top-level to variables and rngs. object.__setattr__(self, '_id', uuid()) object.__setattr__(self, '_state', _ModuleInternalState()) # Typically we set the parent based on the dynamic module context. if self.parent is _unspecified_parent: # pytype: disable=attribute-error object.__setattr__(self, 'parent', _context.module_stack[-1]) # Initialization is deferred for top level Modules or any other "orphan" # Modules until attachment by __setattr__ i.e. MyModule(..., parent=None) if self.parent is None: return # Register submodule on parent Module. if isinstance(self.parent, Module): # When initializing an unnamed Module inside setup() # initialization is deferred until attachment by __setattr__ # i.e. self.mymodule = MyModule(...) self.name: Optional[str] if ( self.parent._state.in_setup and self.name is None ): # pytype: disable=attribute-error return if not self.parent._initialization_allowed: raise errors.AssignSubModuleError(self.__class__.__name__) # Autonaming of submodules. if self.name is None: # pytype: disable=attribute-error prefix = f'{self.__class__.__name__}' cursor = self.parent._state.autoname_cursor.get(prefix, 0) self.name = f'{prefix}_{cursor}' self.parent._state.autoname_cursor[prefix] = cursor + 1 # Allow scope aliasing under transforms for submodules defined in setup. reuse_scopes = ( self.parent._state.in_setup and self.parent._state.setup_called == SetupState.TRANSFORMED ) # Perform name-collision check. if self.parent._name_taken(self.name, reuse_scopes=reuse_scopes): parent_class = self.parent.__class__.__name__ raise errors.NameInUseError('submodule', self.name, parent_class) # Finalize attachment to parent and scope initialization. 
self.parent._state.children[self.name] = self assert self.parent.scope is not None object.__setattr__( self, 'scope', self.parent.scope.push(self.name, reuse=reuse_scopes) ) # Top-level invocation with a functional Scope. elif isinstance(self.parent, Scope): object.__setattr__(self, 'scope', self.parent) else: raise ValueError('parent must be None, Module or Scope') # eagerly bind submodules if scope is available if self.scope is not None: for field in dataclasses.fields(self): if field.name not in ('parent', 'name') and field.init: self._register_submodules(field.name, getattr(self, field.name)) self._state.is_initialized = True def __repr__(self) -> str: return _module_repr(self) def setup(self) -> None: """Initializes a Module lazily (similar to a lazy ``__init__``). ``setup`` is called once lazily on a module instance when a module is bound, immediately before any other methods like ``__call__`` are invoked, or before a ``setup``-defined attribute on ``self`` is accessed. This can happen in three cases: 1. Immediately when invoking :meth:`apply`, :meth:`init` or :meth:`init_and_output`. 2. Once the module is given a name by being assigned to an attribute of another module inside the other module's ``setup`` method (see :meth:`__setattr__`):: >>> class MyModule(nn.Module): ... def setup(self): ... submodule = nn.Conv(...) ... # Accessing `submodule` attributes does not yet work here. ... # The following line invokes `self.__setattr__`, which gives ... # `submodule` the name "conv1". ... self.conv1 = submodule ... # Accessing `submodule` attributes or methods is now safe and ... # either causes setup() to be called once. 3. Once a module is constructed inside a method wrapped with :meth:`compact`, immediately before another method is called or ``setup`` defined attribute is accessed. """ pass def _register_submodules(self, name, val): """Registers a submodule.""" assert self.scope, 'Trying to register submodules on unbound scope.' 
root = self.scope.root cache = _caches.get(root, weakref.WeakValueDictionary()) _caches[root] = cache queue = [] preserve_adopted_names = config.flax_preserve_adopted_names if hasattr(type(self), 'preserve_adopted_names'): preserve_adopted_names = type(self).preserve_adopted_names def adopt_attr_modules(cache, queue, suffix, subvalue): if isinstance(subvalue, Module): current_name = subvalue.name adopted_name = None if subvalue.parent is None: # Preserve sharing-by-reference relationships during adoption # via cache keyed on unique instance ids. key = subvalue._id # Module was passed from outside. It needs to be cloned. # Outside modules are named by attachment, not an outer name, # UNLESS we're using new adopted name policy, in which case an existing # name will be used, as is often supplied by config systems. if preserve_adopted_names: adopted_name = object.__getattribute__(subvalue, 'name') if key in cache: subvalue = cache[key] else: subvalue = subvalue.clone(name=None) cache[key] = subvalue if subvalue.name is None: object.__setattr__(subvalue, 'parent', self) if adopted_name is None: adopted_name = ( f'{name}{suffix}' if not isinstance(subvalue, CompactNameScope) else current_name ) object.__setattr__(subvalue, 'name', adopted_name) queue.append(subvalue) return subvalue val = _freeze_attr( _map_over_modules_in_tree( functools.partial(adopt_attr_modules, cache, queue), val ) ) object.__setattr__(self, name, val) for x in queue: x.__post_init__() def _try_setup(self, shallow: bool = False) -> None: """Tries to setup module if scope is available and setup has not been called yet.""" if ( self.scope and not self._state.in_setup and self._state.setup_called != SetupState.DONE ): try: self._state.in_setup = True # A shallow setup will only register attribute submodules but it does # not call the user's setup. This avoids running before a # transformation. 
        # Register all dataclass-field submodules first (this alone is the
        # "shallow" setup used before transformations).
        for field in dataclasses.fields(self):
          if field.name not in ('parent', 'name') and field.init:
            self._register_submodules(field.name, getattr(self, field.name))
        if not shallow:
          self.setup()

          # create NonTransparent Modules
          self._compact_name_scope_modules = {
            name: CompactNameScope(
              getattr(type(self), name).inner_fun, lambda: self, name=name
            )
            for name in self._compact_name_scope_methods
          }

        # We run static checks abstractly once for setup before any transforms
        # to detect name collisions and other python errors.
        elif self._state.setup_called == SetupState.NEW:
          self._validate_setup()
      finally:
        self._state.in_setup = False
        if not shallow:
          self._state.setup_called = SetupState.DONE

  def _validate_setup(self) -> None:
    """Abstractly evaluates setup only to run static checks."""

    def run_setup_only(x):
      # Identity function run through the method wrapper, so that setup() is
      # triggered under abstract evaluation without real computation.
      wrapped_id = wrap_method_once(lambda m, x: x)
      with TestScope({}, rngs={}, mutable=True).temporary() as root:
        return wrapped_id(self.clone(parent=root), x)

    _ = jax.eval_shape(run_setup_only, 0)

  def _name_taken(
    self,
    name: str,
    reuse_scopes: bool = False,
    collection: Optional[str] = None,
  ) -> bool:
    # When scopes are reused (setup under transforms), collisions are allowed.
    assert self.scope is not None
    if reuse_scopes:
      return False
    return self.scope.name_reserved(name, collection)

  # NOTE(review): this is consumed as an attribute elsewhere in this file
  # (`if not self._initialization_allowed:`) — a lost `@property` decorator
  # is likely; confirm against upstream.
  def _initialization_allowed(self):
    return (
      not self._state.is_initialized  # allow eager attachment in post-init
      or self._state.in_setup
      or self._state.in_compact_method
    )

  # NOTE(review): also consumed as an attribute (`self.path` in
  # _call_wrapped_method) — likely a lost `@property`; confirm.
  def path(self):
    if self.scope is None:
      raise ValueError("Can't access module paths on unbound modules.")
    return self.scope.path

  def clone(
    self: M,
    *,
    parent: Optional[Union[Scope, 'Module', _Sentinel]] = None,
    _deep_clone: Union[bool, weakref.WeakValueDictionary] = False,
    _reset_names: bool = False,
    **updates,
  ) -> M:
    """Creates a clone of this Module, with optionally updated arguments.

    NOTE: end users are encouraged to use the ``copy`` method. ``clone`` is
    used primarily for internal routines, and ``copy`` offers simpler
    arguments and better defaults.

    Args:
      parent: The parent of the clone.
The clone will have no parent if no explicit parent is specified. _deep_clone: A boolean or a weak value dictionary to control deep cloning of submodules. If True, submodules will be cloned recursively. If a weak value dictionary is passed, it will be used to cache cloned submodules. This flag is used by init/apply/bind to avoid scope leakage. _reset_names: If True, ``name=None`` is also passed to submodules when cloning. Resetting names in submodules is necessary when calling ``.unbind``. **updates: Attribute updates. Returns: A clone of the this Module with the updated attributes and parent. """ attrs = { f.name: getattr(self, f.name) for f in dataclasses.fields(self) if f.init } attrs.update(parent=parent, **updates) # Here we implement deep cloning of submodules, this is necessary to avoid scope leakage # from external submodules into init/apply/bind while preserving sharing-by-reference # relationships between submodules. if _deep_clone != False: # We use a weak value dictionary to cache cloned submodules. When a shared # submodule is cloned, its only cloned once else its fetched from the cache. cache = ( weakref.WeakValueDictionary() if isinstance(_deep_clone, bool) else _deep_clone ) def clone_fn(m: Module) -> Module: if hasattr(m, '_id'): key = m._id if key in cache: return cache[key] else: if _reset_names: clone = m.clone( _deep_clone=cache, _reset_names=_reset_names, name=None ) else: clone = m.clone(_deep_clone=cache) cache[key] = clone return clone else: # If the module doesn't have an _id attribute it could be a mock object # so we return it as is. 
return m # _map_submodules will map over all submodules inside attrs # value here can be any pytree, non-module values are ignored for field_name, value in attrs.items(): if field_name == 'parent': continue attrs[field_name] = _map_submodules(clone_fn, value) module = self.__class__(**attrs) return module def copy( self: M, *, parent: Optional[Union[Scope, 'Module', _Sentinel]] = _unspecified_parent, name: Optional[str] = None, **updates, ) -> M: """Creates a copy of this Module, with optionally updated arguments. Args: parent: The parent of the copy. By default the current module is taken as parent if not explicitly specified. name: A new name for the copied Module, by default a new automatic name will be given. **updates: Attribute updates. Returns: A copy of the this Module with the updated name, parent, and attributes. """ return self.clone( parent=parent, name=name, _deep_clone=True, _reset_names=False, **updates ) def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, ) -> Variable[T]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: Literal[True], **init_kwargs, ) -> Variable[T]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: Literal[False], **init_kwargs, ) -> Variable[meta.AxisMetadata[T]]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: bool = True, **init_kwargs, ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]: ... def variable( self, col: str, name: str, init_fn: Optional[Callable[..., T]] = None, *init_args, unbox: bool = True, **init_kwargs, ) -> Union[Variable[T], Variable[meta.AxisMetadata[T]]]: """Declares and returns a variable in this Module. See :mod:`flax.core.variables` for more information. See also :meth:`param` for a shorthand way to define read-only variables in the "params" collection. 
Contrary to :meth:`param`, all arguments passing using ``init_fn`` should be passed on explicitly:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(4)(x) ... key = self.make_rng('stats') ... mean = self.variable('stats', 'mean', nn.initializers.lecun_normal(), key, x.shape) ... ... ... return x * mean.value >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3))) >>> jax.tree_util.tree_map(jnp.shape, variables) {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}}, 'stats': {'mean': (2, 4)}} In the example above, the function ``lecun_normal`` expects two arguments: ``key`` and ``shape``, and both have to be passed on. The PRNG for ``stats`` has to be provided explicitly when calling :meth:`init` and :meth:`apply`. Args: col: The variable collection name. name: The variable name. init_fn: The function that will be called to compute the initial value of this variable. This function will only be called the first time this variable is used in this module. If None, the variable must already be initialized otherwise an error is raised. *init_args: The positional arguments to pass to init_fn. unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed value, see ``flax.nn.meta.unbox`` (default: True). **init_kwargs: The key-word arguments to pass to init_fn Returns: A :class:`flax.core.variables.Variable` that can be read or set via ".value" attribute. Throws an error if the variable exists already. 
""" if not self._initialization_allowed: raise ValueError( 'Variables must be initialized in `setup()` or in a method ' 'wrapped in `@compact`' ) if self._name_taken(name, collection=col): raise errors.NameInUseError('variable', name, self.__class__.__name__) assert self.scope is not None v = self.scope.variable( col, name, init_fn, *init_args, unbox=unbox, **init_kwargs ) self._state.children[name] = col return v def param( self, name: str, init_fn: Callable[..., T], *init_args, ) -> T: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: Literal[True], **init_kwargs, ) -> T: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: Literal[False], **init_kwargs, ) -> meta.AxisMetadata[T]: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: bool, **init_kwargs, ) -> Union[T, meta.AxisMetadata[T]]: ... def param( self, name: str, init_fn: Callable[..., T], *init_args, unbox: bool = True, **init_kwargs, ) -> Union[T, meta.AxisMetadata[T]]: """Declares and returns a parameter in this Module. Parameters are read-only variables in the collection named "params". See :mod:`flax.core.variables` for more details on variables. The first argument of ``init_fn`` is assumed to be a PRNG key, which is provided automatically and does not have to be passed using ``init_args`` or ``init_kwargs``:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(4)(x) ... mean = self.param('mean', nn.initializers.lecun_normal(), x.shape) ... ... ... 
return x * mean >>> variables = Foo().init({'params': jax.random.key(0), 'stats': jax.random.key(1)}, jnp.ones((2, 3))) >>> jax.tree_util.tree_map(jnp.shape, variables) {'params': {'Dense_0': {'bias': (4,), 'kernel': (3, 4)}, 'mean': (2, 4)}} In the example above, the function ``lecun_normal`` expects two arguments: ``key`` and ``shape``, but only ``shape`` has to be provided explicitly; ``key`` is set automatically using the PRNG for ``params`` that is passed when initializing the module using :meth:`init`. Args: name: The parameter name. init_fn: The function that will be called to compute the initial value of this variable. This function will only be called the first time this parameter is used in this module. *init_args: The positional arguments to pass to init_fn. unbox: If True, ``AxisMetadata`` instances are replaced by their unboxed value, see ``flax.nn.meta.unbox`` (default: True). **init_kwargs: The key-word arguments to pass to init_fn. Returns: The value of the initialized parameter. Throws an error if the parameter exists already. """ if not self._initialization_allowed: raise ValueError( 'Parameters must be initialized in `setup()` or in a method ' 'wrapped in `@compact`' ) if self._name_taken(name, collection='params'): raise errors.NameInUseError('param', name, self.__class__.__name__) assert self.scope is not None v = self.scope.param(name, init_fn, *init_args, unbox=unbox, **init_kwargs) self._state.children[name] = 'params' return v def has_variable(self, col: str, name: str) -> bool: """Checks if a variable of given collection and name exists in this Module. See :mod:`flax.core.variables` for more explanation on variables and collections. Args: col: The variable collection name. name: The name of the variable. Returns: True if the variable exists. 
""" if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.has_variable(col, name) def is_mutable_collection(self, col: str) -> bool: """Returns true if the collection ``col`` is mutable.""" if self.scope is None: raise ValueError("Can't check mutability on unbound modules") return self.scope.is_mutable_collection(col) def has_rng(self, name: str) -> bool: """Returns true if a PRNGSequence with name ``name`` exists.""" if self.scope is None: raise ValueError("Can't query for RNGs on unbound modules") return self.scope.has_rng(name) def make_rng(self, name: str = 'params') -> PRNGKey: """Returns a new RNG key from a given RNG sequence for this Module. The new RNG key is split from the previous one. Thus, every call to ``make_rng`` returns a new RNG key, while still guaranteeing full reproducibility. .. note:: If an invalid name is passed (i.e. no RNG key was passed by the user in ``.init`` or ``.apply`` for this name), then ``name`` will default to ``'params'``. Example:: >>> import jax >>> import flax.linen as nn >>> class ParamsModule(nn.Module): ... def __call__(self): ... return self.make_rng('params') >>> class OtherModule(nn.Module): ... def __call__(self): ... return self.make_rng('other') >>> key = jax.random.key(0) >>> params_out, _ = ParamsModule().init_with_output({'params': key}) >>> # self.make_rng('other') will default to using the 'params' RNG stream >>> other_out, _ = OtherModule().init_with_output({'params': key}) >>> assert params_out == other_out Learn more about RNG's by reading the Flax RNG guide: https://flax.readthedocs.io/en/latest/guides/flax_fundamentals/rng_guide.html Args: name: The RNG sequence name. Returns: The newly generated RNG key. """ if self.scope is None: raise ValueError("Can't use RNGs on unbound modules") return self.scope.make_rng(name) def is_initializing(self) -> bool: """Returns True if running under self.init(...) or nn.init(...)(). 
This is a helper method to handle the common case of simple initialization where we wish to have setup logic occur when only called under ``module.init`` or ``nn.init``. For more complicated multi-phase initialization scenarios it is better to test for the mutability of particular variable collections or for the presence of particular variables that potentially need to be initialized. """ if self.scope is None: raise ValueError("Can't check if running under init() on unbound modules") return self.scope.get_flag('initializing', False) def _module_checks(self): """Run standard runtime checks.""" if not isinstance(self, Module): raise errors.InvalidInstanceModuleError() overridden_post_init = self.__post_init__ != Module.__post_init__ if overridden_post_init and not hasattr(self, '_id'): raise errors.IncorrectPostInitOverrideError() def bind( self: M, variables: VariableDict, *args, rngs: Optional[RNGSequences] = None, mutable: CollectionFilter = False, ) -> M: """Creates an interactive Module instance by binding variables and RNGs. ``bind`` provides an "interactive" instance of a Module directly without transforming a function with ``apply``. This is particularly useful for debugging and interactive use cases like notebooks where a function would limit the ability to split up code into different cells. Once the variables (and optionally RNGs) are bound to a ``Module`` it becomes a stateful object. Note that idiomatic JAX is functional and therefore an interactive instance does not mix well with vanilla JAX APIs. ``bind()`` should only be used for interactive experimentation, and in all other cases we strongly encourage users to use ``apply()`` instead. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn >>> class AutoEncoder(nn.Module): ... def setup(self): ... self.encoder = nn.Dense(3) ... self.decoder = nn.Dense(5) ... ... def __call__(self, x): ... 
return self.decoder(self.encoder(x)) >>> x = jnp.ones((16, 9)) >>> ae = AutoEncoder() >>> variables = ae.init(jax.random.key(0), x) >>> model = ae.bind(variables) >>> z = model.encoder(x) >>> x_reconstructed = model.decoder(z) Args: variables: A dictionary containing variables keyed by variable collections. See :mod:`flax.core.variables` for more details about variables. *args: Named arguments (not used). rngs: a dict of PRNGKeys to initialize the PRNG sequences. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. Returns: A copy of this instance with bound variables and RNGs. """ Module._module_checks(self) del args scope = core.bind(variables, rngs=rngs, mutable=mutable) return self.clone(parent=scope, _deep_clone=True) def unbind(self: M) -> Tuple[M, VariableDict]: """Returns an unbound copy of a Module and its variables. ``unbind`` helps create a stateless version of a bound Module. An example of a common use case: to extract a sub-Module defined inside ``setup()`` and its corresponding variables: 1) temporarily ``bind`` the parent Module; and then 2) ``unbind`` the desired sub-Module. (Recall that ``setup()`` is only called when the Module is bound.):: >>> class Encoder(nn.Module): ... @nn.compact ... def __call__(self, x): ... ... ... return nn.Dense(256)(x) >>> class Decoder(nn.Module): ... @nn.compact ... def __call__(self, x): ... ... ... return nn.Dense(784)(x) >>> class AutoEncoder(nn.Module): ... def setup(self): ... self.encoder = Encoder() ... self.decoder = Decoder() ... ... def __call__(self, x): ... 
return self.decoder(self.encoder(x)) >>> module = AutoEncoder() >>> variables = module.init(jax.random.key(0), jnp.ones((1, 784))) >>> # Extract the Encoder sub-Module and its variables >>> encoder, encoder_vars = module.bind(variables).encoder.unbind() Returns: A tuple with an unbound copy of this Module and its variables. """ Module._module_checks(self) if self.scope is None: raise errors.CallUnbindOnUnboundModuleError() variables = self.variables module = self.clone(_deep_clone=True, _reset_names=True, name=None) return module, variables def apply( self, variables: VariableDict, *args, rngs: Optional[Union[PRNGKey, RNGSequences]] = None, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = False, capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[Any, Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]]: """Applies a module method to variables and returns output and modified variables. Note that ``method`` should be set if one would like to call ``apply`` on a different class method than ``__call__``. For instance, suppose a Transformer modules has a method called ``encode``, then the following calls ``apply`` on that method:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> import numpy as np >>> class Transformer(nn.Module): ... def encode(self, x): ... ... >>> x = jnp.ones((16, 9)) >>> model = Transformer() >>> variables = model.init(jax.random.key(0), x, method=Transformer.encode) >>> encoded = model.apply(variables, x, method=Transformer.encode) If a function instance is provided, the unbound function is used. For instance, the example below is equivalent to the one above:: >>> encoded = model.apply(variables, x, method=model.encode) You can also pass a string to a callable attribute of the module. 
For example, the previous can be written as:: >>> encoded = model.apply(variables, x, method='encode') Note ``method`` can also be a function that is not defined in ``Transformer``. In that case, the function should have at least one argument representing an instance of the Module class:: >>> def other_fn(instance, x): ... # instance.some_module_attr(...) ... instance.encode ... ... >>> model.apply(variables, x, method=other_fn) If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'`` RNG stream. If you want to use a different RNG stream or need to use multiple streams, you can pass a dictionary mapping each RNG stream name to its corresponding ``PRNGKey`` to ``apply``. If ``self.make_rng(name)`` is called on an RNG stream name that isn't passed by the user, it will default to using the ``'params'`` RNG stream. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, add_noise=False): ... x = nn.Dense(16)(x) ... x = nn.relu(x) ... ... if add_noise: ... # Add gaussian noise ... noise_key = self.make_rng('noise') ... x = x + jax.random.normal(noise_key, x.shape) ... ... 
return nn.Dense(1)(x) >>> x = jnp.empty((1, 7)) >>> module = Foo() >>> rngs = {'params': jax.random.key(0), 'noise': jax.random.key(1)} >>> variables = module.init(rngs, x) >>> out0 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> rngs['noise'] = jax.random.key(0) >>> out1 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> # different output (key(1) vs key(0)) >>> np.testing.assert_raises(AssertionError, np.testing.assert_allclose, out0, out1) >>> del rngs['noise'] >>> # self.make_rng('noise') will default to using the 'params' RNG stream >>> out2 = module.apply(variables, x, add_noise=True, rngs=rngs) >>> # same output (key(0)) >>> np.testing.assert_allclose(out1, out2) >>> # passing in a single key is equivalent to passing in {'params': key} >>> out3 = module.apply(variables, x, add_noise=True, rngs=jax.random.key(0)) >>> # same output (key(0)) >>> np.testing.assert_allclose(out2, out3) Args: variables: A dictionary containing variables keyed by variable collections. See :mod:`flax.core.variables` for more details about variables. *args: Named arguments passed to the specified apply method. rngs: a dict of PRNGKeys to initialize the PRNG sequences. The "params" PRNG sequence is used to initialize parameters. method: A function to call apply on. This is generally a function in the module. If provided, applies this method. If not provided, applies the ``__call__`` method of the module. A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default, only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. 
The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the specified apply method. Returns: If ``mutable`` is False, returns output. If any collections are mutable, returns ``(output, vars)``, where ``vars`` are is a dict of the modified collections. """ Module._module_checks(self) if rngs is not None and not isinstance(rngs, dict): if not core.scope._is_valid_rng(rngs): raise errors.InvalidRngError( 'RNGs should be of shape (2,) or PRNGKey in Module ' f'{self.__class__.__name__}, but rngs are: {rngs}' ) rngs = {'params': rngs} if isinstance(method, str): attribute_name = method method = getattr(self, attribute_name) if not callable(method): class_name = type(self).__name__ raise TypeError( f"'{class_name}.{attribute_name}' must be a callable, got" f' {type(method)}.' ) # if the `method` string is a submodule, we create a lambda function # that calls the submodule, forwarding all arguments. if isinstance(method, Module): method = lambda self, *args, **kwargs: getattr(self, attribute_name)( *args, **kwargs ) elif method is None: method = self.__call__ method = _get_unbound_fn(method) return apply( method, self, mutable=mutable, capture_intermediates=capture_intermediates, )(variables, *args, **kwargs, rngs=rngs) def init_with_output( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = DenyList('intermediates'), capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Tuple[Any, Union[FrozenVariableDict, Dict[str, Any]]]: """Initializes a module method with variables and returns output and modified variables. Args: rngs: The rngs for the variable collections. *args: Named arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. 
A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except "intermediates" are mutable. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the init function. Returns: ``(output, vars)``, where ``vars`` are is a dict of the modified collections. """ Module._module_checks(self) if not isinstance(rngs, dict): if not core.scope._is_valid_rng(rngs): raise errors.InvalidRngError( 'RNGs should be of shape (2,) or PRNGKey in Module ' f'{self.__class__.__name__}, but rngs are: {rngs}' ) rngs = {'params': rngs} if isinstance(method, str): attribute_name = method method = getattr(self, attribute_name) if not callable(method): class_name = type(self).__name__ raise TypeError( f"'{class_name}.{attribute_name}' must be a callable, got" f' {type(method)}.' 
) elif method is None: method = self.__call__ method = _get_unbound_fn(method) return init_with_output( method, self, mutable=mutable, capture_intermediates=capture_intermediates, )(rngs, *args, **kwargs) def init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Union[Callable[..., Any], str, None] = None, mutable: CollectionFilter = DenyList('intermediates'), capture_intermediates: Union[bool, Callable[['Module', str], bool]] = False, **kwargs, ) -> Union[FrozenVariableDict, Dict[str, Any]]: """Initializes a module method with variables and returns modified variables. ``init`` takes as first argument either a single ``PRNGKey``, or a dictionary mapping variable collections names to their ``PRNGKeys``, and will call ``method`` (which is the module's ``__call__`` function by default) passing ``*args`` and ``**kwargs``, and returns a dictionary of initialized variables. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> import numpy as np >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x, train): ... x = nn.Dense(16)(x) ... x = nn.BatchNorm(use_running_average=not train)(x) ... x = nn.relu(x) ... return nn.Dense(1)(x) >>> x = jnp.empty((1, 7)) >>> module = Foo() >>> key = jax.random.key(0) >>> variables = module.init(key, x, train=False) If you pass a single ``PRNGKey``, Flax will use it to feed the ``'params'`` RNG stream. If you want to use a different RNG stream or need to use multiple streams, you can pass a dictionary mapping each RNG stream name to its corresponding ``PRNGKey`` to ``init``. If ``self.make_rng(name)`` is called on an RNG stream name that isn't passed by the user, it will default to using the ``'params'`` RNG stream. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(16)(x) ... x = nn.relu(x) ... ... other_variable = self.variable( ... 'other_collection', ... 'other_variable', ... lambda x: jax.random.normal(self.make_rng('other_rng'), x.shape), ... 
x, ... ) ... x = x + other_variable.value ... ... return nn.Dense(1)(x) >>> module = Foo() >>> rngs = {'params': jax.random.key(0), 'other_rng': jax.random.key(1)} >>> variables0 = module.init(rngs, x) >>> rngs['other_rng'] = jax.random.key(0) >>> variables1 = module.init(rngs, x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables0['params'], variables1['params'] ... ) >>> # different other_variable (key(1) vs key(0)) >>> np.testing.assert_raises( ... AssertionError, ... np.testing.assert_allclose, ... variables0['other_collection']['other_variable'], ... variables1['other_collection']['other_variable'], ... ) >>> del rngs['other_rng'] >>> # self.make_rng('other_rng') will default to using the 'params' RNG stream >>> variables2 = module.init(rngs, x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables1['params'], variables2['params'] ... ) >>> # equivalent other_variable (key(0)) >>> np.testing.assert_allclose( ... variables1['other_collection']['other_variable'], ... variables2['other_collection']['other_variable'], ... ) >>> # passing in a single key is equivalent to passing in {'params': key} >>> variables3 = module.init(jax.random.key(0), x) >>> # equivalent params (key(0)) >>> _ = jax.tree_util.tree_map( ... np.testing.assert_allclose, variables2['params'], variables3['params'] ... ) >>> # equivalent other_variable (key(0)) >>> np.testing.assert_allclose( ... variables2['other_collection']['other_variable'], ... variables3['other_collection']['other_variable'], ... ) Jitting ``init`` initializes a model lazily using only the shapes of the provided arguments, and avoids computing the forward pass with actual values. 
Example:: >>> module = nn.Dense(1) >>> init_jit = jax.jit(module.init) >>> variables = init_jit(jax.random.key(0), x) ``init`` is a light wrapper over ``apply``, so other ``apply`` arguments like ``method``, ``mutable``, and ``capture_intermediates`` are also available. Args: rngs: The rngs for the variable collections. *args: Named arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. A string can also be provided to specify a method by name. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except "intermediates" are mutable. capture_intermediates: If ``True``, captures intermediate return values of all Modules inside the "intermediates" collection. By default only the return values of all ``__call__`` methods are stored. A function can be passed to change the filter behavior. The filter function takes the Module instance and method name and returns a bool indicating whether the output of that method invocation should be stored. **kwargs: Keyword arguments passed to the init function. Returns: The initialized variable dict. """ Module._module_checks(self) _, v_out = self.init_with_output( rngs, *args, method=method, mutable=mutable, capture_intermediates=capture_intermediates, **kwargs, ) return v_out def lazy_init( self, rngs: Union[PRNGKey, RNGSequences], *args, method: Optional[Callable[..., Any]] = None, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> FrozenVariableDict: """Initializes a module without computing on an actual input. lazy_init will initialize the variables without doing unnecessary compute. 
The input data should be passed as a ``jax.ShapeDtypeStruct`` which specifies the shape and dtype of the input but no concrete data. Example:: >>> model = nn.Dense(features=256) >>> variables = model.lazy_init( ... jax.random.key(0), jax.ShapeDtypeStruct((1, 128), jnp.float32)) The args and kwargs args passed to ``lazy_init`` can be a mix of concrete (jax arrays, scalars, bools) and abstract (ShapeDtypeStruct) values. Concrete values are only necessary for arguments that affect the initialization of variables. For example, the model might expect a keyword arg that enables/disables a subpart of the model. In this case, an explicit value (True/Flase) should be passed otherwise ``lazy_init`` cannot infer which variables should be initialized. Args: rngs: The rngs for the variable collections. *args: arguments passed to the init function. method: An optional method. If provided, applies this method. If not provided, applies the ``__call__`` method. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default all collections except "intermediates" are mutable. **kwargs: Keyword arguments passed to the init function. Returns: The initialized variable dict. """ Module._module_checks(self) def lazy_wrapper(rngs, *args, **kwargs): return self.init(rngs, *args, method=method, mutable=mutable, **kwargs) return partial_eval.lazy_init(lazy_wrapper)(rngs, *args, **kwargs) def variables(self) -> VariableDict: """Returns the variables in this module.""" if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.variables() def get_variable(self, col: str, name: str, default: Optional[T] = None) -> T: """Retrieves the value of a Variable. Args: col: the variable collection. name: the name of the variable. 
default: the default value to return if the variable does not exist in this scope. Returns: The value of the input variable, of the default value if the variable doesn't exist in this scope. """ if self.scope is None: raise ValueError("Can't access variables on unbound modules") return self.scope.get_variable(col, name, default) def put_variable(self, col: str, name: str, value: Any): """Updates the value of the given variable if it is mutable, or an error otherwise. Args: col: the variable collection. name: the name of the variable. value: the new value of the variable. """ if self.scope is None: raise ValueError("Can't access variables on unbound modules") self.scope.put_variable(col, name, value) def sow(self, col: str, name: str, value: Any) -> bool: ... def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: ... def sow( self, col: str, name: str, value: T, reduce_fn: Callable[[K, T], K] = tuple_reduce, init_fn: Callable[[], K] = tuple_init, # type: ignore ) -> bool: """Stores a value in a collection. Collections can be used to collect intermediate values without the overhead of explicitly passing a container through each Module call. If the target collection is not mutable ``sow`` behaves like a no-op and returns ``False``. Example:: >>> import jax >>> import jax.numpy as jnp >>> import flax.linen as nn >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... self.sow('intermediates', 'h', h) ... 
return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> model = Foo() >>> variables = model.init(jax.random.key(0), x) >>> y, state = model.apply(variables, x, mutable=['intermediates']) >>> print(state['intermediates']) {'h': (Array([[-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ], [-1.503171 , 0.7377704 , -0.59388214, -1.0079019 ]], dtype=float32),)} By default the values are stored in a tuple and each stored value is appended at the end. This way all intermediates can be tracked when the same module is called multiple times. Alternatively, a custom init/reduce function can be passed:: >>> class Foo2(nn.Module): ... @nn.compact ... def __call__(self, x): ... init_fn = lambda: 0 ... reduce_fn = lambda a, b: a + b ... self.sow('intermediates', 'h', x, ... init_fn=init_fn, reduce_fn=reduce_fn) ... self.sow('intermediates', 'h', x * 2, ... init_fn=init_fn, reduce_fn=reduce_fn) ... return x >>> x = jnp.ones((1, 1)) >>> model = Foo2() >>> variables = model.init(jax.random.key(0), x) >>> y, state = model.apply( ... variables, x, mutable=['intermediates']) >>> print(state['intermediates']) {'h': Array([[3.]], dtype=float32)} Args: col: The name of the variable collection. name: The name of the variable. value: The value of the variable. 
reduce_fn: The function used to combine the existing value with the new value. The default is to append the value to a tuple. init_fn: For the first value stored, ``reduce_fn`` will be passed the result of ``init_fn`` together with the value to be stored. The default is an empty tuple. Returns: ``True`` if the value has been stored successfully, ``False`` otherwise. """ if self.scope is None: raise ValueError("Can't store variables on unbound modules") if not self.scope.is_mutable_collection(col): return False if self.scope.has_variable(col, name): xs = self.scope.get_variable(col, name) else: self.scope.reserve(name, col) self._state.children[name] = col xs = init_fn() xs = reduce_fn(xs, value) self.scope.put_variable(col, name, xs) return True def perturb( self, name: str, value: T, collection: str = 'perturbations' ) -> T: """Add an zero-value variable ('perturbation') to the intermediate value. The gradient of ``value`` would be the same as the gradient of this perturbation variable. Therefore, if you define your loss function with both params and perturbations as standalone arguments, you can get the intermediate gradients of ``value`` by running ``jax.grad`` on the perturbation argument. .. note:: This is an experimental API and may be tweaked later for better performance and usability. At its current stage, it creates extra dummy variables that occupies extra memory space. Use it only to debug gradients in training. Example:: >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... x = nn.Dense(3)(x) ... x = self.perturb('dense3', x) ... return nn.Dense(2)(x) >>> def loss(variables, inputs, targets): ... preds = model.apply(variables, inputs) ... 
return jnp.square(preds - targets).mean() >>> x = jnp.ones((2, 9)) >>> y = jnp.ones((2, 2)) >>> model = Foo() >>> variables = model.init(jax.random.key(0), x) >>> intm_grads = jax.grad(loss, argnums=0)(variables, x, y) >>> print(intm_grads['perturbations']['dense3']) [[-1.456924 -0.44332537 0.02422847] [-1.456924 -0.44332537 0.02422847]] If perturbations are not passed to ``apply``, ``perturb`` behaves like a no-op so you can easily disable the behavior when not needed:: >>> model.apply(variables, x) # works as expected Array([[-1.0980128 , -0.67961735], [-1.0980128 , -0.67961735]], dtype=float32) >>> model.apply({'params': variables['params']}, x) # behaves like a no-op Array([[-1.0980128 , -0.67961735], [-1.0980128 , -0.67961735]], dtype=float32) >>> intm_grads = jax.grad(loss, argnums=0)({'params': variables['params']}, x, y) >>> 'perturbations' not in intm_grads True """ if self.scope is None: raise ValueError("Can't store variables on unbound modules") if self.is_mutable_collection(collection): if not self.scope.has_variable(collection, name): self.scope.reserve(name, collection) self._state.children[name] = collection self.scope.put_variable(collection, name, jnp.zeros_like(value)) # type: ignore if collection in self.scope.root._variables: if self.scope.has_variable(collection, name): value += self.scope.get_variable(collection, name) # type: ignore else: raise ValueError(f"Perturbation collection {collection} present, but " f"missing perturbation variable {name}") return value def tabulate( self, rngs: Union[PRNGKey, RNGSequences], *args, depth: Optional[int] = None, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), console_kwargs: Optional[Mapping[str, Any]] = None, table_kwargs: Mapping[str, Any] = MappingProxyType({}), column_kwargs: Mapping[str, Any] = MappingProxyType({}), compute_flops: bool = False, compute_vjp_flops: bool = False, **kwargs, ) -> str: """Creates a summary of the Module represented as a table. 
This method has the same signature and internally calls ``Module.init``, but instead of returning the variables, it returns the string summarizing the Module in a table. ``tabulate`` uses ``jax.eval_shape`` to run the forward computation without consuming any FLOPs or allocating memory. Additional arguments can be passed into the ``console_kwargs`` argument, for example, ``{'width': 120}``. For a full list of ``console_kwargs`` arguments, see: https://rich.readthedocs.io/en/stable/reference/console.html#rich.console.Console Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> # print(Foo().tabulate( >>> # jax.random.key(0), x, compute_flops=True, compute_vjp_flops=True)) This gives the following output:: Foo Summary ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ ┃ path ┃ module ┃ inputs ┃ outputs ┃ flops ┃ vjp_flops ┃ params ┃ ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ │ │ Foo │ float32[16,9] │ float32[16,2] │ 1504 │ 4460 │ │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_0 │ Dense │ float32[16,9] │ float32[16,4] │ 1216 │ 3620 │ bias: │ │ │ │ │ │ │ │ float32[4] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[9,4] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 40 (160 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ Dense_1 │ Dense │ float32[16,4] │ float32[16,2] │ 288 │ 840 │ bias: │ │ │ │ │ │ │ │ float32[2] │ │ │ │ │ │ │ │ kernel: │ │ │ │ │ │ │ │ float32[4,2] │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 10 (40 B) │ ├─────────┼────────┼───────────────┼───────────────┼───────┼───────────┼─────────────────┤ │ │ │ │ │ │ Total │ 50 (200 B) │ └─────────┴────────┴───────────────┴───────────────┴───────┴───────────┴─────────────────┘ Total 
Parameters: 50 (200 B) **Note**: rows order in the table does not represent execution order, instead it aligns with the order of keys in ``variables`` which are sorted alphabetically. **Note**: ``vjp_flops`` returns ``0`` if the module is not differentiable. Args: rngs: The rngs for the variable collections as passed to ``Module.init``. *args: The arguments to the forward computation. depth: controls how many submodule deep the summary can go. By default, its ``None`` which means no limit. If a submodule is not shown because of the depth limit, its parameter count and bytes will be added to the row of its first shown ancestor such that the sum of all rows always adds up to the total number of parameters of the Module. show_repeated: If ``True``, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. Default is ``False``. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except 'intermediates' are mutable. console_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.console.Console`` when rendering the table. Default arguments are ``{'force_terminal': True, 'force_jupyter': False}``. table_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.table.Table`` constructor. column_kwargs: An optional dictionary with additional keyword arguments that are passed to ``rich.table.Table.add_column`` when adding columns to the table. compute_flops: whether to include a ``flops`` column in the table listing the estimated FLOPs cost of each module forward pass. Does incur actual on-device computation / compilation / memory allocation, but still introduces overhead for large modules (e.g. 
extra 20 seconds for a Stable Diffusion's UNet, whereas otherwise tabulation would finish in 5 seconds). compute_vjp_flops: whether to include a ``vjp_flops`` column in the table listing the estimated FLOPs cost of each module backward pass. Introduces a compute overhead of about 2-3X of ``compute_flops``. **kwargs: keyword arguments to pass to the forward computation. Returns: A string summarizing the Module. """ from flax.linen import summary tabulate_fn = summary.tabulate( self, rngs, depth=depth, show_repeated=show_repeated, mutable=mutable, console_kwargs=console_kwargs, table_kwargs=table_kwargs, column_kwargs=column_kwargs, compute_flops=compute_flops, compute_vjp_flops=compute_vjp_flops, ) return tabulate_fn(*args, **kwargs) def module_paths( self, rngs: Union[PRNGKey, RNGSequences], *args, show_repeated: bool = False, mutable: CollectionFilter = DenyList('intermediates'), **kwargs, ) -> dict[str, 'Module']: """Returns a dictionary mapping module paths to module instances. This method has the same signature and internally calls ``Module.init``, but instead of returning the variables, it returns a dictionary mapping module paths to unbounded copies of module instances that were used at runtime. ``module_paths`` uses ``jax.eval_shape`` to run the forward computation without consuming any FLOPs or allocating memory. Example:: >>> import flax.linen as nn >>> import jax, jax.numpy as jnp >>> class Foo(nn.Module): ... @nn.compact ... def __call__(self, x): ... h = nn.Dense(4)(x) ... return nn.Dense(2)(h) >>> x = jnp.ones((16, 9)) >>> modules = Foo().module_paths(jax.random.key(0), x) >>> print({ ... p: type(m).__name__ for p, m in modules.items() ... }) {'': 'Foo', 'Dense_0': 'Dense', 'Dense_1': 'Dense'} Args: rngs: The rngs for the variable collections as passed to ``Module.init``. *args: The arguments to the forward computation. show_repeated: If ``True``, repeated calls to the same module will be shown in the table, otherwise only the first call will be shown. 
Default is ``False``. mutable: Can be bool, str, or list. Specifies which collections should be treated as mutable: ``bool``: all/no collections are mutable. ``str``: The name of a single mutable collection. ``list``: A list of names of mutable collections. By default, all collections except 'intermediates' are mutable. **kwargs: keyword arguments to pass to the forward computation. Returns: A dict`ionary mapping module paths to module instances. """ from flax.linen import summary table = summary._get_module_table( module=self, depth=None, show_repeated=show_repeated, compute_flops=False, compute_vjp_flops=False, )(rngs, *args, **kwargs, mutable=mutable) return {'/'.join(row.path): row.module_copy for row in table} Array = Union[jax.Array, Any] PRNGKey = jax.Array Dtype = Union[jax.typing.DTypeLike, Any] The provided code snippet includes necessary dependencies for implementing the `dot_product_attention` function. Write a Python function `def dot_product_attention( query: Array, key: Array, value: Array, bias: Optional[Array] = None, mask: Optional[Array] = None, broadcast_dropout: bool = True, dropout_rng: Optional[PRNGKey] = None, dropout_rate: float = 0.0, deterministic: bool = False, dtype: Optional[Dtype] = None, precision: PrecisionLike = None, module: Optional[Module] = None, )` to solve the following problem: Computes dot-product attention given query, key, and value. This is the core function for applying attention based on https://arxiv.org/abs/1706.03762. It calculates the attention weights given query and key and combines the values using the attention weights. .. note:: ``query``, ``key``, ``value`` needn't have any batch dimensions. Args: query: queries for calculating attention with shape of ``[batch..., q_length, num_heads, qk_depth_per_head]``. key: keys for calculating attention with shape of ``[batch..., kv_length, num_heads, qk_depth_per_head]``. 
value: values to be used in attention with shape of ``[batch..., kv_length, num_heads, v_depth_per_head]``. bias: bias for the attention weights. This should be broadcastable to the shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for incorporating causal masks, padding masks, proximity bias, etc. mask: mask for the attention weights. This should be broadcastable to the shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for incorporating causal masks. Attention weights are masked out if their corresponding mask value is ``False``. broadcast_dropout: bool: use a broadcasted dropout along batch dims. dropout_rng: JAX PRNGKey: to be used for dropout dropout_rate: dropout rate deterministic: bool, deterministic or not (to apply dropout) dtype: the dtype of the computation (default: infer from inputs) precision: numerical precision of the computation see ``jax.lax.Precision` for details. module: the Module that will sow the attention weights into the 'intermediates' collection. Remember to mark 'intermediates' as mutable via ``mutable=['intermediates']`` in order to have that collection returned. If ``module`` is None, the attention weights will not be sowed. Returns: Output of shape ``[batch..., q_length, num_heads, v_depth_per_head]``. Here is the function: def dot_product_attention( query: Array, key: Array, value: Array, bias: Optional[Array] = None, mask: Optional[Array] = None, broadcast_dropout: bool = True, dropout_rng: Optional[PRNGKey] = None, dropout_rate: float = 0.0, deterministic: bool = False, dtype: Optional[Dtype] = None, precision: PrecisionLike = None, module: Optional[Module] = None, ): """Computes dot-product attention given query, key, and value. This is the core function for applying attention based on https://arxiv.org/abs/1706.03762. It calculates the attention weights given query and key and combines the values using the attention weights. .. 
note:: ``query``, ``key``, ``value`` needn't have any batch dimensions. Args: query: queries for calculating attention with shape of ``[batch..., q_length, num_heads, qk_depth_per_head]``. key: keys for calculating attention with shape of ``[batch..., kv_length, num_heads, qk_depth_per_head]``. value: values to be used in attention with shape of ``[batch..., kv_length, num_heads, v_depth_per_head]``. bias: bias for the attention weights. This should be broadcastable to the shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for incorporating causal masks, padding masks, proximity bias, etc. mask: mask for the attention weights. This should be broadcastable to the shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for incorporating causal masks. Attention weights are masked out if their corresponding mask value is ``False``. broadcast_dropout: bool: use a broadcasted dropout along batch dims. dropout_rng: JAX PRNGKey: to be used for dropout dropout_rate: dropout rate deterministic: bool, deterministic or not (to apply dropout) dtype: the dtype of the computation (default: infer from inputs) precision: numerical precision of the computation see ``jax.lax.Precision` for details. module: the Module that will sow the attention weights into the 'intermediates' collection. Remember to mark 'intermediates' as mutable via ``mutable=['intermediates']`` in order to have that collection returned. If ``module`` is None, the attention weights will not be sowed. Returns: Output of shape ``[batch..., q_length, num_heads, v_depth_per_head]``. """ query, key, value = promote_dtype(query, key, value, dtype=dtype) dtype = query.dtype assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.' assert ( query.shape[:-3] == key.shape[:-3] == value.shape[:-3] ), 'q, k, v batch dims must match.' assert ( query.shape[-2] == key.shape[-2] == value.shape[-2] ), 'q, k, v num_heads must match.' 
assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.' # compute attention weights attn_weights = dot_product_attention_weights( query, key, bias, mask, broadcast_dropout, dropout_rng, dropout_rate, deterministic, dtype, precision, module, ) # return weighted sum over values for each query position return jnp.einsum( '...hqk,...khd->...qhd', attn_weights, value, precision=precision )
Computes dot-product attention given query, key, and value. This is the core function for applying attention based on https://arxiv.org/abs/1706.03762. It calculates the attention weights given query and key and combines the values using the attention weights. .. note:: ``query``, ``key``, ``value`` needn't have any batch dimensions. Args: query: queries for calculating attention with shape of ``[batch..., q_length, num_heads, qk_depth_per_head]``. key: keys for calculating attention with shape of ``[batch..., kv_length, num_heads, qk_depth_per_head]``. value: values to be used in attention with shape of ``[batch..., kv_length, num_heads, v_depth_per_head]``. bias: bias for the attention weights. This should be broadcastable to the shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for incorporating causal masks, padding masks, proximity bias, etc. mask: mask for the attention weights. This should be broadcastable to the shape ``[batch..., num_heads, q_length, kv_length]``. This can be used for incorporating causal masks. Attention weights are masked out if their corresponding mask value is ``False``. broadcast_dropout: bool: use a broadcasted dropout along batch dims. dropout_rng: JAX PRNGKey: to be used for dropout dropout_rate: dropout rate deterministic: bool, deterministic or not (to apply dropout) dtype: the dtype of the computation (default: infer from inputs) precision: numerical precision of the computation see ``jax.lax.Precision` for details. module: the Module that will sow the attention weights into the 'intermediates' collection. Remember to mark 'intermediates' as mutable via ``mutable=['intermediates']`` in order to have that collection returned. If ``module`` is None, the attention weights will not be sowed. Returns: Output of shape ``[batch..., q_length, num_heads, v_depth_per_head]``.
22,621
import functools import warnings from typing import Any, Callable, Optional, Union, overload import jax import jax.numpy as jnp from jax import lax, random from flax.linen import initializers from flax.linen.dtypes import promote_dtype from flax.linen.linear import ( DenseGeneral, default_kernel_init, ) from flax.linen.module import Module, compact, merge_param from flax.linen.normalization import LayerNorm from flax.typing import ( Array, PRNGKey, Dtype, Shape as Shape, Initializer, PrecisionLike, DotGeneralT, ) def make_attention_mask( query_input: Array, key_input: Array, pairwise_fn: Callable[..., Any] = jnp.multiply, extra_batch_dims: int = 0, dtype: Dtype = jnp.float32, ): """Mask-making helper for attention weights. In case of 1d inputs (i.e., ``[batch..., len_q]``, ``[batch..., len_kv]``, the attention weights will be ``[batch..., heads, len_q, len_kv]`` and this function will produce ``[batch..., 1, len_q, len_kv]``. Args: query_input: a batched, flat input of query_length size key_input: a batched, flat input of key_length size pairwise_fn: broadcasting elementwise comparison function extra_batch_dims: number of extra batch dims to add singleton axes for, none by default dtype: mask return dtype Returns: A ``[batch..., 1, len_q, len_kv]`` shaped mask for 1d attention. """ mask = pairwise_fn( jnp.expand_dims(query_input, axis=-1), jnp.expand_dims(key_input, axis=-2) ) mask = jnp.expand_dims(mask, axis=-3) mask = jnp.expand_dims(mask, axis=tuple(range(extra_batch_dims))) return mask.astype(dtype) Array = Union[jax.Array, Any] Dtype = Union[jax.typing.DTypeLike, Any] The provided code snippet includes necessary dependencies for implementing the `make_causal_mask` function. Write a Python function `def make_causal_mask( x: Array, extra_batch_dims: int = 0, dtype: Dtype = jnp.float32 ) -> Array` to solve the following problem: Make a causal mask for self-attention. 
In case of 1d inputs (i.e., ``[batch..., len]``, the self-attention weights will be ``[batch..., heads, len, len]`` and this function will produce a causal mask of shape ``[batch..., 1, len, len]``. Args: x: input array of shape ``[batch..., len]`` extra_batch_dims: number of batch dims to add singleton axes for, none by default dtype: mask return dtype Returns: A ``[batch..., 1, len, len]`` shaped causal mask for 1d attention. Here is the function: def make_causal_mask( x: Array, extra_batch_dims: int = 0, dtype: Dtype = jnp.float32 ) -> Array: """Make a causal mask for self-attention. In case of 1d inputs (i.e., ``[batch..., len]``, the self-attention weights will be ``[batch..., heads, len, len]`` and this function will produce a causal mask of shape ``[batch..., 1, len, len]``. Args: x: input array of shape ``[batch..., len]`` extra_batch_dims: number of batch dims to add singleton axes for, none by default dtype: mask return dtype Returns: A ``[batch..., 1, len, len]`` shaped causal mask for 1d attention. """ idxs = jnp.broadcast_to(jnp.arange(x.shape[-1], dtype=jnp.int32), x.shape) return make_attention_mask( idxs, idxs, jnp.greater_equal, extra_batch_dims=extra_batch_dims, dtype=dtype, )
Make a causal mask for self-attention. In case of 1d inputs (i.e., ``[batch..., len]``, the self-attention weights will be ``[batch..., heads, len, len]`` and this function will produce a causal mask of shape ``[batch..., 1, len, len]``. Args: x: input array of shape ``[batch..., len]`` extra_batch_dims: number of batch dims to add singleton axes for, none by default dtype: mask return dtype Returns: A ``[batch..., 1, len, len]`` shaped causal mask for 1d attention.
22,622
import functools import warnings from typing import Any, Callable, Optional, Union, overload import jax import jax.numpy as jnp from jax import lax, random from flax.linen import initializers from flax.linen.dtypes import promote_dtype from flax.linen.linear import ( DenseGeneral, default_kernel_init, ) from flax.linen.module import Module, compact, merge_param from flax.linen.normalization import LayerNorm from flax.typing import ( Array, PRNGKey, Dtype, Shape as Shape, Initializer, PrecisionLike, DotGeneralT, ) Array = Union[jax.Array, Any] Dtype = Union[jax.typing.DTypeLike, Any] The provided code snippet includes necessary dependencies for implementing the `combine_masks` function. Write a Python function `def combine_masks( *masks: Optional[Array], dtype: Dtype = jnp.float32 ) -> Optional[Array]` to solve the following problem: Combine attention masks. Args: *masks: set of attention mask arguments to combine, some can be None. dtype: dtype for the returned mask. Returns: Combined mask, reduced by logical and, returns None if no masks given. Here is the function: def combine_masks( *masks: Optional[Array], dtype: Dtype = jnp.float32 ) -> Optional[Array]: """Combine attention masks. Args: *masks: set of attention mask arguments to combine, some can be None. dtype: dtype for the returned mask. Returns: Combined mask, reduced by logical and, returns None if no masks given. """ masks_list = [m for m in masks if m is not None] if not masks_list: return None assert all( map(lambda x: x.ndim == masks_list[0].ndim, masks_list) ), f'masks must have same rank: {tuple(map(lambda x: x.ndim, masks_list))}' mask, *other_masks = masks_list for other_mask in other_masks: mask = jnp.logical_and(mask, other_mask) return mask.astype(dtype)
Combine attention masks. Args: *masks: set of attention mask arguments to combine, some can be None. dtype: dtype for the returned mask. Returns: Combined mask, reduced by logical and, returns None if no masks given.
22,623
import dataclasses import functools from typing import Any, Iterable, Optional, Tuple import jax import jax.numpy as jnp from jax import lax from jax.nn import initializers from flax.linen import dtypes, module, transforms from flax.typing import ( Array, PRNGKey as PRNGKey, Dtype, Shape as Shape, Initializer, Axes, ) def _canonicalize_axes(rank: int, axes: Axes) -> Tuple[int, ...]: """Returns a tuple of deduplicated, sorted, and positive axes.""" if not isinstance(axes, Iterable): axes = (axes,) return tuple(set([rank + axis if axis < 0 else axis for axis in axes])) def _abs_sq(x): """Computes the elementwise square of the absolute value |x|^2.""" if jnp.iscomplexobj(x): return lax.square(lax.real(x)) + lax.square(lax.imag(x)) else: return lax.square(x) Array = Union[jax.Array, Any] The provided code snippet includes necessary dependencies for implementing the `_compute_stats` function. Write a Python function `def _compute_stats( x: Array, axes: Axes, dtype: Optional[Dtype], axis_name: Optional[str] = None, axis_index_groups: Any = None, use_mean: bool = True, use_fast_variance: bool = True, mask: Optional[Array] = None, )` to solve the following problem: Computes mean and variance statistics. This implementation takes care of a few important details: - Computes in float32 precision for stability in half precision training. - If `use_fast_variance` is `True`, mean and variance are computed using Var = E[|x|^2] - |E[x]|^2, instead of Var = E[|x - E[x]|^2]), in a single XLA fusion. - Clips negative variances to zero which can happen due to roundoff errors. This avoids downstream NaNs. - Supports averaging across a parallel axis and subgroups of a parallel axis with a single `lax.pmean` call to avoid latency. Arguments: x: Input array. axes: The axes in ``x`` to compute mean and variance statistics for. dtype: Optional dtype specifying the minimal precision. Statistics are always at least float32 for stability (default: dtype of x). 
axis_name: Optional name for the pmapped axis to compute mean over. Note, this is only used for pmap and shard map. For SPMD jit, you do not need to manually synchronize. Just make sure that the axes are correctly annotated and XLA:SPMD will insert the necessary collectives. axis_index_groups: Optional axis indices. use_mean: If true, calculate the mean from the input and use it when computing the variance. If false, set the mean to zero and compute the variance without subtracting the mean. use_fast_variance: If true, use a faster, but less numerically stable, calculation for the variance. mask: Binary array of shape broadcastable to `inputs` tensor, indicating the positions for which the mean and variance should be computed. Returns: A pair ``(mean, var)``. Here is the function: def _compute_stats( x: Array, axes: Axes, dtype: Optional[Dtype], axis_name: Optional[str] = None, axis_index_groups: Any = None, use_mean: bool = True, use_fast_variance: bool = True, mask: Optional[Array] = None, ): """Computes mean and variance statistics. This implementation takes care of a few important details: - Computes in float32 precision for stability in half precision training. - If `use_fast_variance` is `True`, mean and variance are computed using Var = E[|x|^2] - |E[x]|^2, instead of Var = E[|x - E[x]|^2]), in a single XLA fusion. - Clips negative variances to zero which can happen due to roundoff errors. This avoids downstream NaNs. - Supports averaging across a parallel axis and subgroups of a parallel axis with a single `lax.pmean` call to avoid latency. Arguments: x: Input array. axes: The axes in ``x`` to compute mean and variance statistics for. dtype: Optional dtype specifying the minimal precision. Statistics are always at least float32 for stability (default: dtype of x). axis_name: Optional name for the pmapped axis to compute mean over. Note, this is only used for pmap and shard map. For SPMD jit, you do not need to manually synchronize. 
Just make sure that the axes are correctly annotated and XLA:SPMD will insert the necessary collectives. axis_index_groups: Optional axis indices. use_mean: If true, calculate the mean from the input and use it when computing the variance. If false, set the mean to zero and compute the variance without subtracting the mean. use_fast_variance: If true, use a faster, but less numerically stable, calculation for the variance. mask: Binary array of shape broadcastable to `inputs` tensor, indicating the positions for which the mean and variance should be computed. Returns: A pair ``(mean, var)``. """ if dtype is None: dtype = jnp.result_type(x) # promote x to at least float32, this avoids half precision computation # but preserves double or complex floating points dtype = jnp.promote_types(dtype, jnp.float32) x = jnp.asarray(x, dtype) axes = _canonicalize_axes(x.ndim, axes) def maybe_distributed_mean(*xs, mask=None): mus = tuple(x.mean(axes, where=mask) for x in xs) if axis_name is None: return mus if len(xs) > 1 else mus[0] else: # In the distributed case we stack multiple arrays to speed comms. if len(xs) > 1: reduced_mus = lax.pmean( jnp.stack(mus, axis=0), axis_name, axis_index_groups=axis_index_groups, ) return tuple(reduced_mus[i] for i in range(len(xs))) else: return lax.pmean(mus[0], axis_name, axis_index_groups=axis_index_groups) if use_mean: if use_fast_variance: mu, mu2 = maybe_distributed_mean(x, _abs_sq(x), mask=mask) # mean2 - _abs_sq(mean) is not guaranteed to be non-negative due # to floating point round-off errors. var = jnp.maximum(0.0, mu2 - _abs_sq(mu)) else: mu = maybe_distributed_mean(x, mask=mask) var = maybe_distributed_mean( _abs_sq(x - jnp.expand_dims(mu, axes)), mask=mask ) else: var = maybe_distributed_mean(_abs_sq(x), mask=mask) mu = jnp.zeros_like(var) return mu, var
Computes mean and variance statistics. This implementation takes care of a few important details: - Computes in float32 precision for stability in half precision training. - If `use_fast_variance` is `True`, mean and variance are computed using Var = E[|x|^2] - |E[x]|^2, instead of Var = E[|x - E[x]|^2]), in a single XLA fusion. - Clips negative variances to zero which can happen due to roundoff errors. This avoids downstream NaNs. - Supports averaging across a parallel axis and subgroups of a parallel axis with a single `lax.pmean` call to avoid latency. Arguments: x: Input array. axes: The axes in ``x`` to compute mean and variance statistics for. dtype: Optional dtype specifying the minimal precision. Statistics are always at least float32 for stability (default: dtype of x). axis_name: Optional name for the pmapped axis to compute mean over. Note, this is only used for pmap and shard map. For SPMD jit, you do not need to manually synchronize. Just make sure that the axes are correctly annotated and XLA:SPMD will insert the necessary collectives. axis_index_groups: Optional axis indices. use_mean: If true, calculate the mean from the input and use it when computing the variance. If false, set the mean to zero and compute the variance without subtracting the mean. use_fast_variance: If true, use a faster, but less numerically stable, calculation for the variance. mask: Binary array of shape broadcastable to `inputs` tensor, indicating the positions for which the mean and variance should be computed. Returns: A pair ``(mean, var)``.