repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/rotary_embedding.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library with rotary embedding functions."""
import functools
import math
from typing import Optional, Tuple
import jax
from jax import numpy as jnp
from flaxformer.components import embedding
from flaxformer.types import Array
def rotate_half(x: Array) -> Array:
  """Rotates feature halves along the last axis: (a, b) -> (-b, a).

  Args:
    x: Array whose last dimension is even.

  Returns:
    Array of the same shape with the negated second half placed before the
    first half along the last axis.
  """
  first_half, second_half = jnp.split(x, 2, axis=-1)
  return jnp.concatenate([-second_half, first_half], axis=-1)
@functools.partial(jax.jit, static_argnums=(4,))
def apply_rotary_embedding(
    q: Array,
    k: Array,
    cos: Array,
    sin: Array,
    decode: bool = False,
    q_position_offset: Optional[Array] = None,
    rotary_index: Optional[Array] = None) -> Tuple[Array, Array]:
  """Helper function to apply Rotary Embeddings, supports Q position offset.

  Args:
    q: Queries of shape [batch, qlen, qheads, d].
    k: Keys of shape [batch, klen, kheads, d], or [batch, klen, d] for
      multi-query attention (a singleton head axis is inserted and removed).
    cos: Cosine table of shape [len, d].
    sin: Sine table of shape [len, d].
    decode: Whether we are in incremental decoding mode. Static under jit
      (static_argnums=(4,)) because it selects a Python-level branch.
    q_position_offset: Optional [batch] per-example start positions used to
      slice the cos/sin tables for the queries.
    rotary_index: Optional [batch] absolute position of the current decoding
      step; only consulted when decode=True and qlen == 1.

  Returns:
    The rotated (q, k) pair with the same shapes as the inputs.
  """
  if len(k.shape) == 3:
    # for multi query attention
    k = jnp.expand_dims(k, 2)
    multiquery = True
  else:
    multiquery = False
  batch, qlen, qheads, d = q.shape
  kbatch, klen, kheads, kd = k.shape
  assert batch == kbatch, f'{batch} != {kbatch}'
  assert d == kd, f'{d} != {kd}'
  # cos: [len, d]
  # sin: [len, d]
  # rotary_index: [batch]
  # q_position_offset: [batch]
  if decode and qlen == 1 and rotary_index is not None:
    # we check qlen == 1 so that we don't do this when initializing cache.
    # Gather each example's single-step rotation from its absolute position.
    qcos = cos[rotary_index, :]
    qsin = sin[rotary_index, :]
    # qcos, qsin: [batch, d]
    qcos = jax.lax.broadcast_in_dim(qcos, (batch, qlen, qheads, d), (0, 3))
    qsin = jax.lax.broadcast_in_dim(qsin, (batch, qlen, qheads, d), (0, 3))
    # qcos, qsin: [batch, qlen, qheads, d]
  else:
    if q_position_offset is None:
      qcos, qsin = cos[:qlen, :], sin[:qlen, :]
    else:
      # If q_position_offset is specified, we'll slice per-example after
      # broadcasting to batch size.
      qcos, qsin = cos, sin
    # qcos, qsin: [qlen, d]
    qcos = jax.lax.broadcast_in_dim(qcos, (batch, qcos.shape[0], qheads, d),
                                    (1, 3))
    qsin = jax.lax.broadcast_in_dim(qsin, (batch, qsin.shape[0], qheads, d),
                                    (1, 3))
    # qcos, qsin: [batch, qlen, qheads, d]
    if q_position_offset is not None:
      # Per-example dynamic slice of length qlen starting at each offset.
      qcos = jax.vmap(
          functools.partial(
              jax.lax.dynamic_slice_in_dim, slice_size=qlen,
              axis=0))(qcos, q_position_offset)
      qsin = jax.vmap(
          functools.partial(
              jax.lax.dynamic_slice_in_dim, slice_size=qlen,
              axis=0))(qsin, q_position_offset)
  # Keys always use absolute positions [0, klen).
  kcos, ksin = cos[:klen, :], sin[:klen, :]
  # kcos, ksin: [klen, d]
  kcos = jax.lax.broadcast_in_dim(kcos, (batch, klen, kheads, d), (1, 3))
  ksin = jax.lax.broadcast_in_dim(ksin, (batch, klen, kheads, d), (1, 3))
  # kcos, ksin: [batch, klen, kheads, d]
  out_q = (q * qcos) + (rotate_half(q) * qsin)
  out_k = (k * kcos) + (rotate_half(k) * ksin)
  if multiquery:
    # Restore the original 3-D multi-query key shape.
    out_k = jnp.squeeze(out_k, 2)
  return out_q, out_k
def apply_rotary_embedding_to_subset(
    query: Array,
    key: Array,
    max_timescale: float,
    fraction_to_rotate: float,
    decode: bool = False,
    query_position_offset: Optional[Array] = None,
    rotary_index: Optional[Array] = None) -> Tuple[Array, Array]:
  """Applies rotary embeddings to the leading fraction of feature channels.

  Args:
    query: Queries of shape [batch, q_len, heads, dim].
    key: Keys with matching feature dimension.
    max_timescale: Maximum timescale for the fixed position embedding table.
    fraction_to_rotate: Fraction of `dim` to rotate; must be in (0, 1].
    decode: Whether we are in incremental decoding mode.
    query_position_offset: Optional per-example query position offsets.
    rotary_index: Optional per-example decoding positions.

  Returns:
    The (query, key) pair with rotary embeddings applied to the first
    even-sized `fraction_to_rotate` slice of channels; the rest pass through
    unchanged.

  Raises:
    ValueError: If `fraction_to_rotate` is outside (0, 1].
  """
  if not 0.0 < fraction_to_rotate <= 1.0:
    raise ValueError(
        f'fraction_to_rotate must be in (0, 1], got {fraction_to_rotate}.')
  channels = query.shape[-1]
  # Round down to an even channel count so the rotation can split in half.
  num_rotated = math.floor(channels * fraction_to_rotate / 2.) * 2
  table_length = max(query.shape[1], key.shape[1])
  sin, cos = embedding.generate_fixed_pos_embedding(
      num_rotated, table_length, max_timescale=max_timescale)
  if num_rotated == channels:
    # Every channel rotates; no need to split and re-concatenate.
    return apply_rotary_embedding(
        query,
        key,
        cos,
        sin,
        decode=decode,
        rotary_index=rotary_index,
        q_position_offset=query_position_offset)
  rotated_q = query[..., :num_rotated]
  passthrough_q = query[..., num_rotated:]
  rotated_k = key[..., :num_rotated]
  passthrough_k = key[..., num_rotated:]
  rotated_q, rotated_k = apply_rotary_embedding(
      rotated_q,
      rotated_k,
      cos,
      sin,
      decode=decode,
      rotary_index=rotary_index,
      q_position_offset=query_position_offset)
  return (jnp.concatenate((rotated_q, passthrough_q), axis=-1),
          jnp.concatenate((rotated_k, passthrough_k), axis=-1))
| 5,084 | 31.388535 | 79 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/perceiver_ar_architecture.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perceiver AR Architecture implementation.
As described in:
"General-purpose, long-context autoregressive modeling with Perceiver AR"
https://arxiv.org/abs/2202.07765
"""
import dataclasses
from typing import List, Optional, Tuple
from flax import linen as nn
import jax.numpy as jnp
from flaxformer.architectures.perceiver_ar import attention
from flaxformer.architectures.perceiver_ar import slicing
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.types import Array
@dataclasses.dataclass(frozen=True)
class PerceiverARTransparentLayerSequence:
  """Perceiver AR version of TransparentLayerSequence that manages slicing.

  The decoder_mask is different for the first layer vs. the remaining layers.
  Similar for the logit mask and prefill lengths. It's better to do the change
  outside of the scan-over-layers so that it is done only once.

  Attributes:
    layers: List of nn.Modules, which should be owned by a parent Flax module.
    num_latents: Number of latents and outputs.
  """
  layers: List[nn.Module]
  num_latents: int

  def __call__(self,
               inputs: Array,
               encoded,
               decoder_mask=None,
               encoder_decoder_mask=None,
               *,
               logit_mask=None,
               enable_dropout: bool = True,
               decode: bool = False,
               max_decode_length: Optional[int] = None,
               prefill: bool = False,
               prefill_lengths: Optional[Array] = None,
               num_latents: Optional[int] = None,
               sequence_lengths: Optional[Array] = None) -> Array:
    """Applies all Transformer layers to the inputs sequentially.

    Args:
      inputs: Input data for decoder with shape [batch_size, decoder_seq_length,
        decoder_hidden_size].
      encoded: required to be None, block is Decoder only, only kept for
        __call__ signature uniformity.
      decoder_mask: decoder self-attention mask.
      encoder_decoder_mask: required to be None, block is Decoder only, only
        kept for __call__ signature uniformity.
      logit_mask: a mask (e.g., padding logit mask) to be applied to the
        attention logits.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.
      num_latents: Used to override the number of output Perceiver AR latents
        during decoding.
      sequence_lengths: Lengths of all target sequences. Required for Perceiver
        AR operation.

    Returns:
      The encoded inputs <float>[..., seq_len, hidden_size].
    """
    if num_latents and num_latents > self.num_latents:
      raise ValueError(
          f'Overridden num_latents ({num_latents}) must be <= self.num_latents '
          f'({self.num_latents}).')
    num_latents = num_latents or self.num_latents
    current_activations = inputs
    for i, layer in enumerate(self.layers):
      # Each mask is adapted per-layer: after the first (cross-attention-like)
      # layer, activations are narrowed to num_latents, so masks built for the
      # full input length must be sliced to match.
      layer_decoder_mask = decoder_mask
      if (layer_decoder_mask is not None and
          layer_decoder_mask.shape[-1] != current_activations.shape[-2]):
        # Only layers past the first can see narrowed activations.
        assert i > 0
        # If we're in the self-attention stack, then kv should also be sliced.
        # From: [batch, 1, num_latents, input_length]
        # To: [batch, 1, num_latents, num_latents]
        assert layer_decoder_mask.shape[-1] >= current_activations.shape[-2]
        layer_decoder_mask = slicing.slice_sequences_vmap(
            layer_decoder_mask,
            sequence_lengths,
            num_latents,
            axis_within_vmap=-1)
      layer_prefill_lengths = prefill_lengths
      if prefill:
        if layer_prefill_lengths is None:
          # Default to the full per-example target lengths.
          layer_prefill_lengths = sequence_lengths
        # Ensure prefill_lengths isn't longer than the input length.
        # For Perceiver AR, this can happen in the self-attention stack, which
        # is narrower than the actual sequence length.
        layer_prefill_lengths = jnp.minimum(current_activations.shape[-2],
                                            layer_prefill_lengths)
      layer_logit_mask = logit_mask
      if (layer_logit_mask is not None and
          layer_logit_mask.shape[-2] != current_activations.shape[-2]):
        assert layer_logit_mask.shape[-2] >= current_activations.shape[-2]
        layer_logit_mask = slicing.slice_sequences_vmap(
            layer_logit_mask,
            sequence_lengths,
            current_activations.shape[-2],
            axis_within_vmap=0)
      current_activations = layer(
          current_activations,
          encoded,
          layer_decoder_mask,
          encoder_decoder_mask,
          logit_mask=layer_logit_mask,
          enable_dropout=enable_dropout,
          decode=decode,
          max_decode_length=max_decode_length,
          prefill=prefill,
          prefill_lengths=layer_prefill_lengths,
          num_latents=num_latents,
          sequence_lengths=sequence_lengths)
    return current_activations
class Decoder(t5_architecture.Decoder):
  """Perceiver AR Decoder.

  Attributes:
    num_latents: Number of latents for queries and number of output latents.
  """
  # num_latents is actually required, but has to be marked as optional because
  # we don't yet require Python 3.10, which provides keyword-only dataclasses.
  num_latents: Optional[int] = None

  def setup(self):
    if self.num_latents is None:
      raise ValueError('num_latents must be specified.')
    super().setup()

  def _setup_layer_sequence(self):
    """Builds the layer stack wrapped in a PerceiverARTransparentLayerSequence.

    Returns:
      A PerceiverARTransparentLayerSequence over either a plain list of layers
      or (when scan_layers is set) a non-scanned first layer followed by a
      scanned stack of the remaining num_layers - 1 layers.
    """
    lyrf = lambda: self.layer_factory(  # pylint: disable=g-long-lambda
        shared_relative_position_bias=self.relpos_bias)
    lyrf = t5_architecture.maybe_remat(
        lyrf,
        self.layer_remat,
        self.scan_layers,
        static_argnums=(5, 6, 7, 8, 9, 10))
    if not self.scan_layers:
      self.layers = [lyrf() for _ in range(self.num_layers)]
      return PerceiverARTransparentLayerSequence(self.layers, self.num_latents)
    else:
      # Create a non-scanned version of lyrf to use for the first layer.
      # The first layer cross-attends from the latents to the full input, so
      # its activations have a different width than the scanned stack.
      lyrf_notscanned = lambda: self.layer_factory(  # pylint: disable=g-long-lambda  # pytype: disable=wrong-keyword-args
          shared_relative_position_bias=self.relpos_bias,
          scanned=False)
      lyrf_notscanned = t5_architecture.maybe_remat(
          lyrf_notscanned,
          self.layer_remat,
          self.scan_layers,
          static_argnums=(5, 6, 7, 8, 9, 10))
      self.layers = [
          lyrf_notscanned(),
          self._construct_scanned_decoder(
              lyrf, self.num_layers - 1, num_broadcast_args=11)
      ]
      return PerceiverARTransparentLayerSequence(self.layers, self.num_latents)

  def decode_from_continuous_inputs(self,
                                    embedded_inputs,
                                    encoder_outputs,
                                    decoder_positions=None,
                                    decoder_mask=None,
                                    encoder_decoder_mask=None,
                                    logit_mask=None,
                                    *,
                                    enable_dropout: bool = True,
                                    decode: bool = False,
                                    max_decode_length: Optional[int] = None,
                                    prefill: bool = False,
                                    prefill_lengths: Optional[Array] = None,
                                    num_latents: Optional[int] = None,
                                    sequence_lengths: Optional[Array] = None):
    """Applies the decoder on the continuous (embedded) inputs.

    Args:
      embedded_inputs: Embedded decoder inputs.
      encoder_outputs: Optional encoder outputs (None for decoder-only use).
      decoder_positions: Must be None; packing is not supported.
      decoder_mask: Decoder self-attention mask.
      encoder_decoder_mask: Encoder-decoder attention mask.
      logit_mask: Mask applied to the pre-logit activations.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: Optional maximum decoding length.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: Lengths of partial sequences being prefilled.
      num_latents: Optional override for the number of output latents.
      sequence_lengths: Lengths of all target sequences. Required.

    Returns:
      Logits over the output vocabulary for at most num_latents positions.

    Raises:
      NotImplementedError: If decoder_positions is provided (packing).
      ValueError: If sequence_lengths is missing or num_latents is too large.
    """
    if decoder_positions is not None:
      raise NotImplementedError('Perceiver AR does not yet support packing.')
    # sequence_lengths is required, but has to be defined as optional to
    # maintain API compatibility.
    if sequence_lengths is None:
      # Fixed typo in the error message ('fo' -> 'for').
      raise ValueError('sequence_lengths must be supplied for Perceiver AR.')
    if num_latents and num_latents > self.num_latents:
      raise ValueError(
          f'Overridden num_latents ({num_latents}) must be <= self.num_latents '
          f'({self.num_latents}).')
    num_latents = num_latents or self.num_latents
    # If encoded is not given, this block is decoder only and does not contain
    # attention from decoder to encoder.
    if encoder_outputs is not None:
      assert encoder_outputs.ndim == 3  # (batch, len, depth)
    # Apply the decoder layers, attending to the encoder outputs (if provided),
    # and attending to previous decoder inputs (by masking future inputs).
    decoder_outputs = self.decoder(
        embedded_inputs,
        encoder_outputs,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask,
        logit_mask=logit_mask,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
        num_latents=num_latents,
        sequence_lengths=sequence_lengths)
    if self.scan_layers:
      decoder_outputs = decoder_outputs[0]
    # Output length should always be <= the number of latents regardless of
    # input length or configured number of latents. During training it will be
    # the same. During fast decoding, it may just be 1.
    assert decoder_outputs.shape[-2] <= num_latents
    # Post-process final decoder layer outputs.
    decoder_outputs = self.decoder_norm(decoder_outputs)
    decoder_outputs = self.output_dropout(
        decoder_outputs, deterministic=not enable_dropout)
    # Slice logit_mask to match output positions.
    if logit_mask is not None:
      if logit_mask.shape[-2] != decoder_outputs.shape[-2]:
        assert logit_mask.shape[-2] >= decoder_outputs.shape[-2]
        logit_mask = slicing.slice_sequences_vmap(
            logit_mask,
            sequence_lengths,
            decoder_outputs.shape[-2],
            axis_within_vmap=-2)
      decoder_outputs = logit_mask * decoder_outputs
    if self.sow_intermediates:
      self.sow('intermediates', 'pre_logits_layer', decoder_outputs)
    # Decoded Logits
    if self.logits_dense is not None:
      logits = self.logits_dense(decoder_outputs)
    else:
      # Use the transpose of embedding matrix for logit transform.
      logits = self.embedder.embedders['token_ids'].attend(decoder_outputs)  # pytype: disable=attribute-error
      # Correctly normalize pre-softmax logits for this shared case.
      logits = logits / jnp.sqrt(decoder_outputs.shape[-1])
    if self.sow_intermediates:
      self.sow('intermediates', 'logits', logits)
    return logits
class DecoderOnly(t5_architecture.DecoderOnly):
  """Perceiver AR Decoder-only model."""

  # num_latents is actually required, but has to be marked as optional because
  # we don't yet require Python 3.10, which provides keyword-only dataclasses.
  num_latents: Optional[int] = None

  def setup(self):
    if self.num_latents is None:
      raise ValueError('num_latents must be specified.')
    super().setup()

  def __call__(self,
               decoder_input_tokens,
               decoder_target_tokens,
               decoder_segment_ids=None,
               decoder_positions=None,
               decoder_causal_attention=None,
               *,
               enable_dropout: bool = True,
               decode: bool = False,
               max_decode_length: Optional[int] = None,
               prefill: bool = False,
               prefill_lengths: Optional[Array] = None,
               num_latents: Optional[int] = None,
               **kwargs):
    """Applies Perceiver AR Decoder-only model on the inputs.

    This method requires both decoder_target_tokens and decoder_input_tokens,
    which is typically a shifted version of the former. Packing is not
    currently supported for Perceiver AR.

    Args:
      decoder_input_tokens: input token to the decoder.
      decoder_target_tokens: target token to the decoder.
      decoder_segment_ids: decoder segmentation info for packed examples.
      decoder_positions: decoder subsequence positions for packed examples.
      decoder_causal_attention: a binary mask indicating the "inputs" portion of
        the concatenated sequence for a prefix LM.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.
      num_latents: Used to override the number of output Perceiver AR latents
        during decoding.
      **kwargs: Additional keyword arguments to pass on to the decoder.

    Returns:
      logits array from LanguageModel.

    Raises:
      ValueError: If both `decode` and `prefill` are set, or if num_latents
        exceeds the configured self.num_latents.
      NotImplementedError: If packing arguments are provided.
    """
    if decode and prefill:
      raise ValueError('Only one of `decode` and `prefill` can be set. Use '
                       '`prefill` to pre-populate the cache for Prefix LMs '
                       'before using `decode`')
    # Perceiver AR operation does not support packing.
    if decoder_positions is not None:
      raise NotImplementedError(
          'decoder_positions is provided, but Perceiver AR does not yet '
          'support packing.')
    if decoder_segment_ids is not None:
      raise NotImplementedError(
          'decoder_segment_ids is provided, but Perceiver AR does not yet '
          'support packing.')
    if num_latents and num_latents > self.num_latents:
      raise ValueError(
          f'Overridden num_latents ({num_latents}) must be <= self.num_latents '
          f'({self.num_latents}).')
    num_latents = num_latents or self.num_latents
    # Calculate sequence lengths based on target tokens.
    sequence_lengths = slicing.get_sequence_lengths(
        decoder_target_tokens=decoder_target_tokens)
    if decode:
      # During incremental decoding the cache handles masking.
      decoder_mask = None
    else:
      decoder_mask = attention.make_decoder_mask(
          decoder_target_tokens=decoder_target_tokens,
          sequence_lengths=sequence_lengths,
          num_latents=num_latents,
          dtype=self.dtype,
          decoder_causal_attention=decoder_causal_attention)
    # We reuse Decoder class, which can optionally takes in encoded and
    # encoder_decoder_mask. These are used when Decoder is used in the context
    # of encoder-decoder model. For LM, we don't have an encoder. So set these
    # to None.
    return self.decoder(  # pytype: disable=attribute-error
        encoder_outputs=None,
        decoder_input_tokens=decoder_input_tokens,
        decoder_positions=decoder_positions,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=None,
        segment_ids=decoder_segment_ids,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
        num_latents=num_latents,
        sequence_lengths=sequence_lengths,
        **kwargs)
def create_residuals_and_queries(
    layer_input: Array, x: Array, logit_mask, *, num_latents: Optional[Array],
    sequence_lengths: Array) -> Tuple[Array, Array, Optional[Array], Array]:
  """Slices layer inputs down to per-example query windows.

  When the sequence axis is wider than the latent stack, keeps only a
  num_latents-wide window of each example (positioned by sequence_lengths);
  otherwise the inputs pass through unchanged.

  Args:
    layer_input: Residual-stream activations.
    x: Activations to be used as attention queries.
    logit_mask: Mask over sequence positions.
    num_latents: Number of latent query positions.
    sequence_lengths: Per-example sequence lengths.

  Returns:
    Tuple of (residuals, queries, query position offset or None, sliced
    logit mask).
  """
  residuals, queries, offset = layer_input, x, None
  if x.shape[-2] > num_latents:
    residuals = slicing.slice_sequences_xmap(  # pytype: disable=wrong-arg-types  # jax-ndarray
        layer_input, sequence_lengths, num_latents, axis_within_xmap=0)
    queries = slicing.slice_sequences_xmap(  # pytype: disable=wrong-arg-types  # jax-ndarray
        x, sequence_lengths, num_latents, axis_within_xmap=0)
    offset = slicing.sequence_slice_start(sequence_lengths, num_latents)  # pytype: disable=wrong-arg-types  # jax-ndarray
  if logit_mask.shape[-2] > num_latents:
    mask_queries = slicing.slice_sequences_vmap(  # pytype: disable=wrong-arg-types  # jax-ndarray
        logit_mask, sequence_lengths, num_latents, axis_within_vmap=0)
  else:
    mask_queries = logit_mask
  return residuals, queries, offset, mask_queries
| 17,399 | 40.232227 | 128 | py |
flaxformer | flaxformer-main/flaxformer/architectures/perceiver_ar/t5_models_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t5_models."""
import functools
from typing import Sequence
from unittest import mock
from absl.testing import absltest
import chex
from flax.training import common_utils
import jax
import jax.numpy as jnp
from t5x import losses
from flaxformer.architectures.perceiver_ar import t5_models
def _mock_randint_minval(key: chex.PRNGKey,
                         shape: Sequence[int],
                         minval: chex.Array,
                         maxval: chex.Array,
                         dtype: chex.ArrayDType = jnp.int_):
  """Mock for jax.random.randint that deterministically returns `minval`."""
  del key  # The mock is deterministic; the PRNG key is irrelevant.
  del maxval  # Always produce the lower end of the range.
  fill_value = minval
  return jnp.full(shape, fill_value, dtype)
def _mock_randint_maxval(key: chex.PRNGKey,
                         shape: Sequence[int],
                         minval: chex.Array,
                         maxval: chex.Array,
                         dtype: chex.ArrayDType = jnp.int_):
  """Mock for jax.random.randint that deterministically returns `maxval - 1`."""
  del key  # The mock is deterministic; the PRNG key is irrelevant.
  del minval  # Always produce the top end of the range.
  largest_inclusive = maxval - 1  # randint's maxval bound is exclusive.
  return jnp.full(shape, largest_inclusive, dtype)
class T5ModelsCroppingTest(absltest.TestCase):
  """Tests for t5_models.crop_train_batch.

  Each test patches jax.random.randint with a deterministic mock that returns
  either the minimum or (maximum - 1) of the requested range, so the crop
  window position is pinned to one end and the expected batch can be stated
  exactly.
  """

  def test_no_cropping(self):
    """CroppingMethod.NONE leaves the batch untouched."""
    batch = {
        'decoder_target_tokens': jnp.ones([8, 128], jnp.int32),
        'decoder_loss_weights': jnp.ones([8, 128], jnp.int32),
    }
    cropped_batch = t5_models.crop_train_batch(
        jax.random.PRNGKey(0),
        batch=batch.copy(),
        cropping_method=t5_models.CroppingMethod.NONE,
        num_latents=16)
    chex.assert_trees_all_close(batch, cropped_batch)

  def test_full_latents_cropping_min(self):
    """Earliest crop keeps the first num_latents tokens with full loss."""
    batch = {
        'decoder_target_tokens': jnp.ones([8, 128], jnp.int32),
        'decoder_loss_weights': jnp.ones([8, 128], jnp.int32),
    }
    expected_batch = {
        'decoder_target_tokens':
            jnp.concatenate(
                [jnp.ones([8, 16], jnp.int32),
                 jnp.zeros([8, 112], jnp.int32)],
                axis=1),
        'decoder_loss_weights':
            jnp.concatenate(
                [jnp.ones([8, 16], jnp.int32),
                 jnp.zeros([8, 112], jnp.int32)],
                axis=1),
    }
    with mock.patch.object(jax.random, 'randint', new=_mock_randint_minval):
      cropped_batch_full_latents = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS,
          num_latents=16)
      cropped_batch_full_latents_with_prefix = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS_WITH_PREFIX,
          num_latents=16)
    chex.assert_trees_all_close(expected_batch, cropped_batch_full_latents)
    chex.assert_trees_all_close(expected_batch,
                                cropped_batch_full_latents_with_prefix)

  def test_full_latents_cropping_max(self):
    """Latest crop keeps all tokens but loss only on the final latents."""
    batch = {
        'decoder_target_tokens': jnp.ones([8, 128], jnp.int32),
        'decoder_loss_weights': jnp.ones([8, 128], jnp.int32),
    }
    expected_batch = {
        'decoder_target_tokens':
            jnp.ones([8, 128], jnp.int32),
        'decoder_loss_weights':
            jnp.concatenate(
                [jnp.zeros([8, 112], jnp.int32),
                 jnp.ones([8, 16], jnp.int32)],
                axis=1),
    }
    with mock.patch.object(jax.random, 'randint', new=_mock_randint_maxval):
      cropped_batch_full_latents = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS,
          num_latents=16)
      cropped_batch_full_latents_with_prefix = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS_WITH_PREFIX,
          num_latents=16)
    chex.assert_trees_all_close(expected_batch, cropped_batch_full_latents)
    chex.assert_trees_all_close(expected_batch,
                                cropped_batch_full_latents_with_prefix)

  def test_prefix_seq_full_latents_cropping_min(self):
    """Earliest crop on a prefix-LM batch: loss starts after the prefix."""
    batch = {
        'decoder_target_tokens':
            jnp.ones([8, 128], jnp.int32),
        'decoder_loss_weights':
            jnp.concatenate(
                [jnp.zeros([8, 28], jnp.int32),
                 jnp.ones([8, 100], jnp.int32)],
                axis=1),
    }
    expected_batch = {
        'decoder_target_tokens':
            jnp.concatenate(
                [jnp.ones([8, 44], jnp.int32),
                 jnp.zeros([8, 84], jnp.int32)],
                axis=1),
        'decoder_loss_weights':
            jnp.concatenate([
                jnp.zeros([8, 28], jnp.int32),
                jnp.ones([8, 16], jnp.int32),
                jnp.zeros([8, 84], jnp.int32)
            ],
                            axis=1),
    }
    with mock.patch.object(jax.random, 'randint', new=_mock_randint_minval):
      cropped_batch = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS,
          num_latents=16)
    chex.assert_trees_all_close(expected_batch, cropped_batch)

  def test_prefix_seq_full_latents_with_prefix_cropping_min(self):
    """FULL_LATENTS_WITH_PREFIX earliest crop may keep fewer than num_latents loss positions."""
    batch = {
        'decoder_target_tokens':
            jnp.ones([8, 128], jnp.int32),
        'decoder_loss_weights':
            jnp.concatenate(
                [jnp.zeros([8, 28], jnp.int32),
                 jnp.ones([8, 100], jnp.int32)],
                axis=1),
    }
    expected_batch = {
        'decoder_target_tokens':
            jnp.concatenate(
                [jnp.ones([8, 29], jnp.int32),
                 jnp.zeros([8, 99], jnp.int32)],
                axis=1),
        'decoder_loss_weights':
            jnp.concatenate([
                jnp.zeros([8, 28], jnp.int32),
                jnp.ones([8, 1], jnp.int32),
                jnp.zeros([8, 99], jnp.int32)
            ],
                            axis=1),
    }
    with mock.patch.object(jax.random, 'randint', new=_mock_randint_minval):
      cropped_batch = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS_WITH_PREFIX,
          num_latents=16)
    chex.assert_trees_all_close(expected_batch, cropped_batch)

  def test_prefix_seq_full_latents_cropping_max(self):
    """Latest crop on a prefix-LM batch: loss on the final num_latents tokens."""
    batch = {
        'decoder_target_tokens':
            jnp.ones([8, 128], jnp.int32),
        'decoder_loss_weights':
            jnp.concatenate(
                [jnp.zeros([8, 28], jnp.int32),
                 jnp.ones([8, 100], jnp.int32)],
                axis=1),
    }
    expected_batch = {
        'decoder_target_tokens':
            jnp.ones([8, 128], jnp.int32),
        'decoder_loss_weights':
            jnp.concatenate(
                [jnp.zeros([8, 112], jnp.int32),
                 jnp.ones([8, 16], jnp.int32)],
                axis=1),
    }
    with mock.patch.object(jax.random, 'randint', new=_mock_randint_maxval):
      cropped_batch_full_latents = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS,
          num_latents=16)
      cropped_batch_full_latents_with_prefix = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS_WITH_PREFIX,
          num_latents=16)
    chex.assert_trees_all_close(expected_batch, cropped_batch_full_latents)
    chex.assert_trees_all_close(expected_batch,
                                cropped_batch_full_latents_with_prefix)

  def test_partial_seq_full_latents_cropping_min(self):
    """Earliest crop on a padded (shorter-than-max) sequence."""
    batch = {
        'decoder_target_tokens':
            jnp.concatenate(
                [jnp.ones([8, 100], jnp.int32),
                 jnp.zeros([8, 28], jnp.int32)],
                axis=1),
        'decoder_loss_weights':
            jnp.concatenate(
                [jnp.ones([8, 100], jnp.int32),
                 jnp.zeros([8, 28], jnp.int32)],
                axis=1),
    }
    expected_batch = {
        'decoder_target_tokens':
            jnp.concatenate(
                [jnp.ones([8, 16], jnp.int32),
                 jnp.zeros([8, 112], jnp.int32)],
                axis=1),
        'decoder_loss_weights':
            jnp.concatenate(
                [jnp.ones([8, 16], jnp.int32),
                 jnp.zeros([8, 112], jnp.int32)],
                axis=1),
    }
    with mock.patch.object(jax.random, 'randint', new=_mock_randint_minval):
      cropped_batch_full_latents = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS,
          num_latents=16)
      cropped_batch_full_latents_with_prefix = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS_WITH_PREFIX,
          num_latents=16)
    chex.assert_trees_all_close(expected_batch, cropped_batch_full_latents)
    chex.assert_trees_all_close(expected_batch,
                                cropped_batch_full_latents_with_prefix)

  def test_prefix_full_latents_cropping_max(self):
    """Latest crop on a padded sequence: loss window ends at the real length."""
    batch = {
        'decoder_target_tokens':
            jnp.concatenate(
                [jnp.ones([8, 100], jnp.int32),
                 jnp.zeros([8, 28], jnp.int32)],
                axis=1),
        'decoder_loss_weights':
            jnp.concatenate(
                [jnp.ones([8, 100], jnp.int32),
                 jnp.zeros([8, 28], jnp.int32)],
                axis=1),
    }
    expected_batch = {
        'decoder_target_tokens':
            jnp.concatenate(
                [jnp.ones([8, 100], jnp.int32),
                 jnp.zeros([8, 28], jnp.int32)],
                axis=1),
        'decoder_loss_weights':
            jnp.concatenate([
                jnp.zeros([8, 84], jnp.int32),
                jnp.ones([8, 16], jnp.int32),
                jnp.zeros([8, 28], jnp.int32)
            ],
                            axis=1),
    }
    with mock.patch.object(jax.random, 'randint', new=_mock_randint_maxval):
      cropped_batch_full_latents = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS,
          num_latents=16)
      cropped_batch_full_latents_with_prefix = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.FULL_LATENTS_WITH_PREFIX,
          num_latents=16)
    chex.assert_trees_all_close(expected_batch, cropped_batch_full_latents)
    chex.assert_trees_all_close(expected_batch,
                                cropped_batch_full_latents_with_prefix)

  def test_equal_position_likelihood_cropping_min(self):
    """EQUAL_POSITION_LIKELIHOOD earliest crop can shrink to a single token."""
    batch = {
        'decoder_target_tokens': jnp.ones([8, 128], jnp.int32),
        'decoder_loss_weights': jnp.ones([8, 128], jnp.int32),
    }
    expected_batch = {
        'decoder_target_tokens':
            jnp.concatenate(
                [jnp.ones([8, 1], jnp.int32),
                 jnp.zeros([8, 127], jnp.int32)],
                axis=1),
        'decoder_loss_weights':
            jnp.concatenate(
                [jnp.ones([8, 1], jnp.int32),
                 jnp.zeros([8, 127], jnp.int32)],
                axis=1),
    }
    with mock.patch.object(jax.random, 'randint', new=_mock_randint_minval):
      cropped_batch = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.EQUAL_POSITION_LIKELIHOOD,
          num_latents=16)
    chex.assert_trees_all_close(expected_batch, cropped_batch)

  def test_equal_position_likelihood_cropping_max(self):
    """EQUAL_POSITION_LIKELIHOOD latest crop: loss only on the final token."""
    batch = {
        'decoder_target_tokens': jnp.ones([8, 128], jnp.int32),
        'decoder_loss_weights': jnp.ones([8, 128], jnp.int32),
    }
    expected_batch = {
        'decoder_target_tokens':
            jnp.ones([8, 128], jnp.int32),
        'decoder_loss_weights':
            jnp.concatenate(
                [jnp.zeros([8, 127], jnp.int32),
                 jnp.ones([8, 1], jnp.int32)],
                axis=1),
    }
    with mock.patch.object(jax.random, 'randint', new=_mock_randint_maxval):
      cropped_batch = t5_models.crop_train_batch(
          jax.random.PRNGKey(0),
          batch={**batch},
          cropping_method=t5_models.CroppingMethod.EQUAL_POSITION_LIKELIHOOD,
          num_latents=16)
    chex.assert_trees_all_close(expected_batch, cropped_batch)
def _mock_compute_logits(params,
batch,
dropout_rng,
num_latents,
vocab_size,
mutable=False):
del params, dropout_rng, mutable
batch_size = batch['decoder_input_tokens'].shape[0]
logits = jnp.zeros((batch_size, num_latents, vocab_size), jnp.float32)
# Assuming input tokens are all 1s except the first position, summing will
# determine the input_length.
input_length = batch['decoder_input_tokens'].sum(axis=1) + 1
# Set vocab position 0 to be the input length * 10 for all sequence positions.
logits = jax.vmap(lambda x, l: x.at[:, 0].set(l * 10))(logits, input_length)
# If sequence length is shorter than latents, don't fill positions without
# inputs.
logits = jnp.where(
jnp.arange(logits.shape[1])[jnp.newaxis, :, jnp.newaxis] < input_length,
logits, 0)
return logits
def _get_token_scores(logits, target_tokens, weights):
  """Per-token log-likelihood scores, zeroed where `weights` is 0."""
  onehot_targets = common_utils.onehot(
      target_tokens, logits.shape[-1], on_value=1, off_value=0)
  xent, _ = losses.cross_entropy_with_logits(
      logits, onehot_targets, z_loss=0.0)
  return -xent * weights
class T5ModelsScoreBatchTest(absltest.TestCase):
  """Tests for score_batch.

  The goal of these tests is to ensure that the striding and logits combining
  is happening as intended. So we use _mock_compute_logits to return fake
  logits that are deterministic based on sequence position. By calculating an
  expected final score based on the logits created by an expected set of
  strides, we can ensure the process is completing as expected.
  """

  def _score_batch_with_mock_logits(self,
                                    batch,
                                    *,
                                    num_latents,
                                    vocab_size,
                                    decoding_latent_reset_fill=3):
    """Builds a PerceiverARModel with mocked logits and scores `batch`.

    Args:
      batch: Feature dict passed to `score_batch`.
      num_latents: Number of Perceiver AR latents.
      vocab_size: Output vocabulary size used by the mocked logits.
      decoding_latent_reset_fill: Latent reset fill used by the model.

    Returns:
      Sequence scores from `model.score_batch`.
    """
    model = t5_models.PerceiverARModel(
        module=None,
        vocabulary=None,
        optimizer_def=None,
        num_latents=num_latents,
        decoding_latent_reset_fill=decoding_latent_reset_fill)
    # Replace the real network with position-deterministic fake logits so the
    # test observes only the striding/combining logic.
    with mock.patch.object(
        model,
        '_compute_logits',
        new=functools.partial(
            _mock_compute_logits,
            num_latents=num_latents,
            vocab_size=vocab_size)):
      return model.score_batch(params=None, batch=batch)

  def _assert_scores_match_logits(self, expected_logits, batch,
                                  sequence_scores):
    """Asserts `sequence_scores` equal scores derived from `expected_logits`."""
    expected_token_scores = _get_token_scores(expected_logits,
                                              batch['decoder_target_tokens'],
                                              batch['decoder_loss_weights'])
    expected_sequence_scores = expected_token_scores.sum(-1)
    chex.assert_trees_all_close(expected_sequence_scores, sequence_scores)

  def test_score_batch(self):
    batch = {
        'decoder_target_tokens': jnp.ones([2, 8]),
        'decoder_input_tokens': jnp.ones([2, 8]).at[:, 0].set(0),
        'decoder_loss_weights': jnp.ones([2, 8]),
        'decoder_causal_attention': jnp.zeros([2, 8]),
    }
    sequence_scores = self._score_batch_with_mock_logits(
        batch, num_latents=4, vocab_size=2)
    expected_logits = jnp.array([[[40., 0.], [40., 0.], [40., 0.], [40., 0.],
                                  [60., 0.], [60., 0.], [80., 0.], [80., 0.]]])
    expected_logits = jnp.tile(expected_logits, (2, 1, 1))
    self._assert_scores_match_logits(expected_logits, batch, sequence_scores)

  def test_score_batch_with_remainder(self):
    batch = {
        'decoder_target_tokens': jnp.ones([2, 9]),
        'decoder_input_tokens': jnp.ones([2, 9]).at[:, 0].set(0),
        'decoder_loss_weights': jnp.ones([2, 9]),
        'decoder_causal_attention': jnp.zeros([2, 9]),
    }
    sequence_scores = self._score_batch_with_mock_logits(
        batch, num_latents=4, vocab_size=2)
    expected_logits = jnp.array([[[40., 0.], [40., 0.], [40., 0.], [40., 0.],
                                  [60., 0.], [60., 0.], [80., 0.], [80., 0.],
                                  [90., 0.]]])
    expected_logits = jnp.tile(expected_logits, (2, 1, 1))
    self._assert_scores_match_logits(expected_logits, batch, sequence_scores)

  def test_score_batch_inputs_match_latents(self):
    batch = {
        'decoder_target_tokens': jnp.ones([2, 4]),
        'decoder_input_tokens': jnp.ones([2, 4]).at[:, 0].set(0),
        'decoder_loss_weights': jnp.ones([2, 4]),
        'decoder_causal_attention': jnp.zeros([2, 4]),
    }
    sequence_scores = self._score_batch_with_mock_logits(
        batch, num_latents=4, vocab_size=2)
    # A single forward pass covers the whole sequence.
    expected_logits = jnp.array([[[40., 0.], [40., 0.], [40., 0.], [40., 0.]]])
    expected_logits = jnp.tile(expected_logits, (2, 1, 1))
    self._assert_scores_match_logits(expected_logits, batch, sequence_scores)

  def test_score_batch_short_sequence(self):
    batch = {
        'decoder_target_tokens':
            jnp.ones([2, 8]).at[:, 5:].set(0),
        'decoder_input_tokens':
            jnp.ones([2, 8]).at[:, 0].set(0).at[:, 5:].set(0),
        'decoder_loss_weights':
            jnp.ones([2, 8]).at[:, 5:].set(0),
        'decoder_causal_attention':
            jnp.zeros([2, 8]),
    }
    sequence_scores = self._score_batch_with_mock_logits(
        batch, num_latents=4, vocab_size=2)
    expected_logits = jnp.array([[[40., 0.], [40., 0.], [40., 0.], [40., 0.],
                                  [50., 0.], [0., 0.], [0., 0.], [0., 0.]]])
    expected_logits = jnp.tile(expected_logits, (2, 1, 1))
    self._assert_scores_match_logits(expected_logits, batch, sequence_scores)

  def test_score_batch_sequence_shorter_than_latents(self):
    batch = {
        'decoder_target_tokens':
            jnp.ones([2, 8]).at[:, 5:].set(0),
        'decoder_input_tokens':
            jnp.ones([2, 8]).at[:, 0].set(0).at[:, 5:].set(0),
        'decoder_loss_weights':
            jnp.ones([2, 8]).at[:, 5:].set(0),
        'decoder_causal_attention':
            jnp.zeros([2, 8]),
    }
    sequence_scores = self._score_batch_with_mock_logits(
        batch, num_latents=8, vocab_size=2)
    expected_logits = jnp.array([[[50., 0.], [50., 0.], [50., 0.], [50., 0.],
                                  [50., 0.], [0., 0.], [0., 0.], [0., 0.]]])
    expected_logits = jnp.tile(expected_logits, (2, 1, 1))
    self._assert_scores_match_logits(expected_logits, batch, sequence_scores)

  def test_score_batch_different_lengths(self):
    batch = {
        'decoder_target_tokens':
            jnp.ones([2, 8]).at[0, 5:].set(0),
        'decoder_input_tokens':
            jnp.ones([2, 8]).at[:, 0].set(0).at[0, 5:].set(0),
        'decoder_loss_weights':
            jnp.ones([2, 8]).at[0, 5:].set(0),
        'decoder_causal_attention':
            jnp.zeros([2, 8]),
    }
    sequence_scores = self._score_batch_with_mock_logits(
        batch, num_latents=4, vocab_size=2)
    # Each example gets its own effective length, so the per-example expected
    # logits differ.
    expected_logits = jnp.array([
        [[40., 0.], [40., 0.], [40., 0.], [40., 0.], [50., 0.], [0., 0.],
         [0., 0.], [0., 0.]],
        [[40., 0.], [40., 0.], [40., 0.], [40., 0.], [60., 0.], [60., 0.],
         [80., 0.], [80., 0.]],
    ])
    self._assert_scores_match_logits(expected_logits, batch, sequence_scores)

  def test_score_batch_different_lengths_with_input_prefix(self):
    batch = {
        'decoder_target_tokens':
            jnp.array([
                [1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            ]),
        'decoder_input_tokens':
            jnp.array([
                [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            ]),
        'decoder_loss_weights':
            jnp.array([
                [0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
            ]),
        'decoder_causal_attention':
            jnp.array([
                [1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
            ]),
    }
    sequence_scores = self._score_batch_with_mock_logits(
        batch, num_latents=4, vocab_size=2)
    expected_logits = jnp.array([
        [[0., 0.], [0., 0.], [60., 0.], [60., 0.], [60., 0.], [60., 0.],
         [70., 0.], [0., 0.], [0., 0.], [0., 0.]],
        [[0., 0.], [0., 0.], [60., 0.], [60., 0.], [60., 0.], [60., 0.],
         [80., 0.], [80., 0.], [100., 0.], [100., 0.]],
    ])
    self._assert_scores_match_logits(expected_logits, batch, sequence_scores)

  def test_score_batch_num_latents_equal_sequence_length(self):
    batch = {
        'decoder_target_tokens': jnp.ones([2, 8]),
        'decoder_input_tokens': jnp.ones([2, 8]).at[:, 0].set(0),
        'decoder_loss_weights': jnp.ones([2, 8]),
        'decoder_causal_attention': jnp.zeros([2, 8]),
    }
    sequence_scores = self._score_batch_with_mock_logits(
        batch, num_latents=8, vocab_size=2)
    expected_logits = jnp.array([[[80., 0.], [80., 0.], [80., 0.], [80., 0.],
                                  [80., 0.], [80., 0.], [80., 0.], [80., 0.]]])
    expected_logits = jnp.tile(expected_logits, (2, 1, 1))
    self._assert_scores_match_logits(expected_logits, batch, sequence_scores)

  def test_score_batch_refill_matches_latents(self):
    batch = {
        'decoder_target_tokens': jnp.ones([2, 8]),
        'decoder_input_tokens': jnp.ones([2, 8]).at[:, 0].set(0),
        'decoder_loss_weights': jnp.ones([2, 8]),
        'decoder_causal_attention': jnp.zeros([2, 8]),
    }
    sequence_scores = self._score_batch_with_mock_logits(
        batch, num_latents=4, vocab_size=2, decoding_latent_reset_fill=4)
    expected_logits = jnp.array([[[40., 0.], [40., 0.], [40., 0.], [40., 0.],
                                  [50., 0.], [60., 0.], [70., 0.], [80., 0.]]])
    expected_logits = jnp.tile(expected_logits, (2, 1, 1))
    self._assert_scores_match_logits(expected_logits, batch, sequence_scores)
class T5ModelsDecodingLatentResetFillTest(absltest.TestCase):
  """Tests for get_decoding_latent_reset_fill around num_latents boundaries."""

  def test_get_decoding_latent_reset_fill(self):
    model = t5_models.PerceiverARModel(
        module=None, vocabulary=None, optimizer_def=None, num_latents=2048)
    cases = [
        # Short sequence, use all the latents.
        (32, 2048),
        # Sequence length equal to num_latents, use them all.
        (2048, 2048),
        # Sequence length only 1 longer than num_latents, so use all the
        # latents because this still results in only 2 forward passes.
        (2049, 2048),
        # Sequence now 2 more than num_latents, so use num_latents-1 for
        # 2 passes.
        (2050, 2047),
        # Sequence is very long, use standard num_latents-128.
        (5000, 1920),
    ]
    for input_length, expected in cases:
      self.assertEqual(
          expected,
          model.get_decoding_latent_reset_fill(input_length=input_length))
    # Ensure we can decode with only 1 latent.
    model_single_latent = t5_models.PerceiverARModel(
        module=None, vocabulary=None, optimizer_def=None, num_latents=1)
    self.assertEqual(
        1,
        model_single_latent.get_decoding_latent_reset_fill(input_length=8192))

  def test_get_decoding_latent_reset_fill_when_configured(self):
    model = t5_models.PerceiverARModel(
        module=None,
        vocabulary=None,
        optimizer_def=None,
        num_latents=2048,
        decoding_latent_reset_fill=2000)
    cases = [
        # Short sequence, use all the latents.
        (32, 2048),
        # Sequence length equal to num_latents, use them all.
        (2048, 2048),
        # Sequence length only 1 longer than num_latents, so use all the
        # latents because this still results in only 2 forward passes.
        (2049, 2048),
        # Sequence now 2 more than num_latents, so use num_latents-1 for
        # 2 passes.
        (2050, 2047),
        # Sequence is very long, use configured value.
        (5000, 2000),
    ]
    for input_length, expected in cases:
      self.assertEqual(
          expected,
          model.get_decoding_latent_reset_fill(input_length=input_length))

  def test_get_decoding_latent_reset_fill_when_configured_max(self):
    model = t5_models.PerceiverARModel(
        module=None,
        vocabulary=None,
        optimizer_def=None,
        num_latents=2048,
        decoding_latent_reset_fill=2048)
    # With the reset fill configured to the maximum (num_latents), every
    # input length resolves to 2048 — including the length-2050 case where an
    # unconfigured model would drop to num_latents-1, since the configured
    # value is higher and wins.
    for input_length in (32, 2048, 2049, 2050, 5000):
      self.assertEqual(
          2048,
          model.get_decoding_latent_reset_fill(input_length=input_length))
# Standard absltest entry point so this file can be run directly.
if __name__ == '__main__':
  absltest.main()
| 31,728 | 36.109942 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/fido/fido_architecture.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules for FiDO architecture (see https://arxiv.org/abs/2212.08153).
Use Decoder in this file for layer-sparse cross-attention, or standard T5
decoder with DecoderLayerBlock for scanned layer-sparse cross-attention. See
example configs t5_base_lsa.gin and t5_base_lsa_scan.gin.
"""
import dataclasses
from typing import List, Optional
import flax.linen as nn
import jax
from flaxformer.architectures.common import param_remapping
from flaxformer.architectures.t5 import t5_architecture
@dataclasses.dataclass(frozen=True)
class TransparentDecoderLayerSequence:
  """Layer sequence to apply cross attention every k layers."""
  layers: List[nn.Module]
  encoder_decoder_attention_period: int = 1

  def __call__(self, inputs: jax.Array, *args, **kwargs) -> jax.Array:
    """Applies all Transformer layers to the inputs sequentially."""
    return self.apply_range_of_layers(0, None, inputs, *args, **kwargs)

  def apply_range_of_layers(
      self,
      start_idx: int,
      end_idx: Optional[int],
      inputs: jax.Array,
      *args,
      **kwargs,
  ) -> jax.Array:
    """Runs layers[start_idx:end_idx], passing `encoded` every k-th layer.

    The first positional argument in `args` is the encoder output; the
    remaining arguments are forwarded to every layer unchanged. A layer
    receives the encoder output only when its 1-based position *within the
    slice* is a multiple of `encoder_decoder_attention_period`; otherwise it
    gets None and skips cross-attention.
    """
    encoded, *passthrough = args
    activations = inputs
    period = self.encoder_decoder_attention_period
    for position, layer in enumerate(self.layers[start_idx:end_idx], start=1):
      maybe_encoded = encoded if position % period == 0 else None
      activations = layer(
          activations,
          maybe_encoded,
          *passthrough,
          **kwargs,
      )  # pytype: disable=not-callable
    return activations
class Decoder(t5_architecture.Decoder):
  """Decoder with cross-attention every k layers.

  Use this class instead of the T5 decoder for layer-sparse cross-attention
  without scanned layers.

  Attributes:
    encoder_decoder_attention_period: apply cross-attention every this many
      layers. For example, if there are 24 decoder layers and
      encoder_decoder_attention_period=6, then layers 5, 11, 17 and 23 have
      cross-attention.
  """
  encoder_decoder_attention_period: int = 1

  def _setup_layer_sequence(self):
    # Scanned layers are handled by the standard T5 decoder combined with
    # DecoderLayerBlock below; this class only supports unscanned decoding.
    if self.scan_layers:
      raise ValueError('Scan not supported for this decoder class.')

    def make_layer():
      return self.layer_factory(
          shared_relative_position_bias=self.relpos_bias)

    wrapped_factory = t5_architecture.maybe_remat(
        make_layer, self.layer_remat, self.scan_layers,
        static_argnums=(5, 6, 7, 8, 9))
    self.layers = [wrapped_factory() for _ in range(self.num_layers)]
    return TransparentDecoderLayerSequence(
        self.layers,
        encoder_decoder_attention_period=self.encoder_decoder_attention_period,
    )
class DecoderLayerBlock(nn.Module, param_remapping.ParameterRemappable):
  """Block of decoder layers with a single cross-attention layer.

  Use this class as a layer factory for the standard T5 decoder in order to
  employ layer-sparse cross-attention with scanned layers, with scanned equal
  to True in the decoder and this block, but not in the T5 DecoderLayer. Each
  block has a single cross-attention layer in the last layer of the block, so
  to build a decoder with 12 layers and sparsity 4, set num_layers=4 in this
  class and use 3 blocks in the T5 decoder.

  Attributes:
    num_layers: Number of decoder layers in block.
    layer_factory: T5 decoder layer factory.
    shared_relative_position_bias: Supply in case of shared relative position
      bias. Scanning normally prevents sharing relative position bias, but
      here we can optionally share within blocks.
    scanned: Whether the block is part of a scanned decoder. Normally true,
      otherwise no reason to use this module.
    layer_remat: Rematerialization strategy.
  """
  num_layers: int
  layer_factory: t5_architecture.MakeDecoderLayerFn
  shared_relative_position_bias: Optional[nn.Module] = None
  scanned: bool = True
  layer_remat: str = 'legacy'

  def setup(self):
    def make_layer():
      return self.layer_factory(
          shared_relative_position_bias=self.shared_relative_position_bias)

    wrapped_factory = t5_architecture.maybe_remat(
        make_layer, self.layer_remat, False, static_argnums=(5, 6, 7, 8, 9))
    self.layers = [wrapped_factory() for _ in range(self.num_layers)]
    # With the attention period equal to the block size, only the final layer
    # of the block receives the encoder output (cross-attention).
    self.block_layers = TransparentDecoderLayerSequence(
        self.layers,
        encoder_decoder_attention_period=self.num_layers,
    )

  def __call__(
      self,
      targets,
      encoded,
      decoder_mask=None,
      encoder_decoder_mask=None,
      *,
      logit_mask=None,
      enable_dropout: bool = True,
      decode: bool = False,
      max_decode_length: Optional[int] = None,
      prefill: bool = False,
      prefill_lengths: Optional[jax.Array] = None,
      **kwargs,
  ):
    """Applies the block; returns (output, None) when part of a scanned stack.

    NOTE(review): extra **kwargs are accepted but intentionally not forwarded
    to the inner layers, matching the original behavior.
    """
    output = self.block_layers(
        targets,
        encoded,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask,
        logit_mask=logit_mask,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
    )
    # Scanned bodies must return a (carry, scan-output) pair; None means no
    # per-layer scan output is collected.
    return (output, None) if self.scanned else output
| 5,930 | 33.888235 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/fido/fido_architecture_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for FiDO architecture."""
import functools
from absl.testing import absltest
from flax import linen as nn
from jax import random
import jax.numpy as jnp
import numpy as np
from flaxformer import testing_utils
from flaxformer.architectures.fido import fido_architecture
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.architectures.t5 import t5_common_layers
from flaxformer.components import embedding
from flaxformer.components import layer_norm
# Golden parameter-shape JSON files live next to this test in testdata/.
expected_files = testing_utils.ExpectedJsonFiles(
    'flaxformer/architectures/fido/testdata'
)
check_params = expected_files.check_params_shapes_only
# TODO mutate scanned test
# Small model dimensions shared by every decoder constructed in these tests.
BATCH_SIZE = 2
SRC_LEN = 6
TARGET_LEN = 4
NUM_HEADS = 8
EMBED_DIM = 13
MLP_DIM = 32
HEAD_DIM = 3
DTYPE = jnp.float32
NUM_LAYERS = 2
DROPOUT_RATE = 0.1
ACTIVATIONS = ('gelu', 'linear')
VOCAB_SIZE = 4
# Component factories reused by the decoder factories below.
dropout_factory = lambda: nn.Dropout(rate=DROPOUT_RATE, broadcast_dims=(-2,))
embedding_factory = lambda: embedding.Embed(VOCAB_SIZE, EMBED_DIM)
layer_norm_factory = layer_norm.T5LayerNorm
relative_position_bias_factory = functools.partial(
    t5_common_layers.relative_position_bias, num_heads=NUM_HEADS, dtype=DTYPE
)
def decoder_layer_factory(shared_relative_position_bias=None, scanned=False):
  """Builds a single T5 DecoderLayer with the module-level test dimensions."""

  def make_attention():
    # Self-attention and encoder-decoder attention must be distinct module
    # instances, so build a fresh one per call.
    return t5_common_layers.attention_layer(
        num_heads=NUM_HEADS,
        head_dim=HEAD_DIM,
        dropout_rate=DROPOUT_RATE,
        dtype=DTYPE,
    )

  mlp = t5_common_layers.mlp_block(
      mlp_dim=MLP_DIM,
      dropout_rate=DROPOUT_RATE,
      activations=ACTIVATIONS,
      dtype=DTYPE,
  )
  return t5_architecture.DecoderLayer(
      self_attention=make_attention(),
      encoder_decoder_attention=make_attention(),
      mlp=mlp,
      dropout_factory=dropout_factory,
      layer_norm_factory=layer_norm_factory,
      shared_relative_position_bias=shared_relative_position_bias,
      scanned=scanned,
  )
def fido_decoder_factory(encoder_decoder_attention_period=1):
  """Builds a FiDO decoder with the given cross-attention period."""
  decoder_kwargs = dict(
      layer_factory=decoder_layer_factory,
      dropout_factory=dropout_factory,
      layer_norm_factory=layer_norm_factory,
      num_layers=2,
      token_embedder_factory=embedding_factory,
      shared_relative_position_bias_factory=relative_position_bias_factory,
      dtype=DTYPE,
      encoder_decoder_attention_period=encoder_decoder_attention_period,
  )
  return fido_architecture.Decoder(**decoder_kwargs)  # pytype: disable=wrong-keyword-args
def t5_decoder_factory():
  """Builds a standard T5 decoder with the same configuration, for reference."""
  return t5_architecture.Decoder(
      num_layers=2,
      layer_factory=decoder_layer_factory,
      token_embedder_factory=embedding_factory,
      dropout_factory=dropout_factory,
      layer_norm_factory=layer_norm_factory,
      shared_relative_position_bias_factory=relative_position_bias_factory,
      dtype=DTYPE,
  )
def decoder_block_factory(num_layers, scanned=False, **kwargs):
  """Builds a DecoderLayerBlock of `num_layers` T5 decoder layers."""
  return fido_architecture.DecoderLayerBlock(
      num_layers=num_layers,
      scanned=scanned,
      layer_factory=functools.partial(decoder_layer_factory),
      **kwargs,
  )
def decoder_with_blocks_factory(block_size, scan_layers):
  """Builds a standard T5 decoder out of DecoderLayerBlocks of `block_size`."""
  block_factory = functools.partial(
      decoder_block_factory, num_layers=block_size, scanned=scan_layers
  )
  return t5_architecture.Decoder(
      layer_factory=block_factory,
      dropout_factory=dropout_factory,
      layer_norm_factory=layer_norm_factory,
      # 2 decoder layers total, split into blocks of `block_size`.
      num_layers=2 // block_size,
      token_embedder_factory=embedding_factory,
      dtype=DTYPE,
      scan_layers=scan_layers,
  )  # pytype: disable=wrong-keyword-args
class FiDODecoderTest(absltest.TestCase):
  """Tests for FiDO decoder."""

  def test_shape(self):
    # Period 1: cross-attention in every layer. Parameter shapes are compared
    # against the golden file and logits must have the expected shape.
    decoder_lsa1 = fido_decoder_factory(encoder_decoder_attention_period=1)
    encoder_outputs = np.ones(
        (BATCH_SIZE, SRC_LEN, EMBED_DIM), dtype=np.float32
    )
    decoder_input_tokens = np.ones((BATCH_SIZE, TARGET_LEN), dtype=np.int32)
    output_logits_lsa1, variables_lsa1 = decoder_lsa1.init_with_output(
        random.PRNGKey(0),
        encoder_outputs=encoder_outputs,
        decoder_input_tokens=decoder_input_tokens,
        enable_dropout=False,
    )
    check_params(variables_lsa1['params'], 'decoder_shapes_lsa1.json')
    self.assertEqual(
        output_logits_lsa1.shape, (BATCH_SIZE, TARGET_LEN, VOCAB_SIZE)
    )
    # Period 2: cross-attention only every second layer; the parameter tree
    # (golden file) differs accordingly, but the output shape is unchanged.
    decoder_lsa2 = fido_decoder_factory(encoder_decoder_attention_period=2)
    encoder_outputs = np.ones(
        (BATCH_SIZE, SRC_LEN, EMBED_DIM), dtype=np.float32
    )
    decoder_input_tokens = np.ones((BATCH_SIZE, TARGET_LEN), dtype=np.int32)
    output_logits_lsa2, variables_lsa2 = decoder_lsa2.init_with_output(
        random.PRNGKey(0),
        encoder_outputs=encoder_outputs,
        decoder_input_tokens=decoder_input_tokens,
        enable_dropout=False,
    )
    check_params(variables_lsa2['params'], 'decoder_shapes_lsa2.json')
    self.assertEqual(
        output_logits_lsa2.shape, (BATCH_SIZE, TARGET_LEN, VOCAB_SIZE)
    )

  def test_consistent_t5(self):
    # With period 1 the FiDO decoder applies cross-attention in every layer,
    # so (given the same RNG seed) its logits must match the standard T5
    # decoder exactly.
    encoder_outputs = np.ones(
        (BATCH_SIZE, SRC_LEN, EMBED_DIM), dtype=np.float32
    )
    decoder_input_tokens = np.ones((BATCH_SIZE, TARGET_LEN), dtype=np.int32)
    decoder_lsa1 = fido_decoder_factory(encoder_decoder_attention_period=1)
    output_logits_lsa1, _ = decoder_lsa1.init_with_output(
        random.PRNGKey(0),
        encoder_outputs=encoder_outputs,
        decoder_input_tokens=decoder_input_tokens,
        enable_dropout=False,
    )
    decoder_t5 = t5_decoder_factory()
    output_logits_t5, _ = decoder_t5.init_with_output(
        random.PRNGKey(0),
        encoder_outputs=encoder_outputs,
        decoder_input_tokens=decoder_input_tokens,
        enable_dropout=False,
    )
    np.testing.assert_allclose(output_logits_lsa1, output_logits_t5, rtol=1e-8)
class FiDOScanTest(absltest.TestCase):
  """Tests for scanned FiDO decoder."""

  def test_shape(self):
    # block_size=1 with scanning: every block is a single layer with
    # cross-attention (equivalent to period 1).
    decoder_lsa1 = decoder_with_blocks_factory(block_size=1, scan_layers=True)
    encoder_outputs = np.ones(
        (BATCH_SIZE, SRC_LEN, EMBED_DIM), dtype=np.float32
    )
    decoder_input_tokens = np.ones((BATCH_SIZE, TARGET_LEN), dtype=np.int32)
    output_logits_lsa1, variables_lsa1 = decoder_lsa1.init_with_output(
        random.PRNGKey(0),
        encoder_outputs=encoder_outputs,
        decoder_input_tokens=decoder_input_tokens,
        enable_dropout=False,
    )
    check_params(variables_lsa1['params'], 'decoder_shapes_blocklsa1.json')
    self.assertEqual(
        output_logits_lsa1.shape, (BATCH_SIZE, TARGET_LEN, VOCAB_SIZE)
    )
    # block_size=2 with scanning: cross-attention only in the last layer of
    # each 2-layer block.
    decoder_lsa2 = decoder_with_blocks_factory(block_size=2, scan_layers=True)
    output_logits_lsa2, variables_lsa2 = decoder_lsa2.init_with_output(
        random.PRNGKey(0),
        encoder_outputs=encoder_outputs,
        decoder_input_tokens=decoder_input_tokens,
        enable_dropout=False,
    )
    check_params(variables_lsa2['params'], 'decoder_shapes_blocklsa2.json')
    self.assertEqual(
        output_logits_lsa2.shape, (BATCH_SIZE, TARGET_LEN, VOCAB_SIZE)
    )
    # Same block structure without scanning; compared against its own golden
    # parameter layout.
    decoder_lsa2_noscan = decoder_with_blocks_factory(
        block_size=2, scan_layers=False
    )
    output_logits_lsa2_noscan, variables_lsa2_noscan = (
        decoder_lsa2_noscan.init_with_output(
            random.PRNGKey(0),
            encoder_outputs=encoder_outputs,
            decoder_input_tokens=decoder_input_tokens,
            enable_dropout=False,
        )
    )
    check_params(
        variables_lsa2_noscan['params'], 'decoder_shapes_blocklsa2_noscan.json'
    )
    self.assertEqual(
        output_logits_lsa2_noscan.shape, (BATCH_SIZE, TARGET_LEN, VOCAB_SIZE)
    )
# Standard absltest entry point so this file can be run directly.
if __name__ == '__main__':
  absltest.main()
| 8,296 | 32.321285 | 79 | py |
flaxformer | flaxformer-main/flaxformer/architectures/common/param_remapping_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for param_remapping."""
from typing import Any, List, Mapping, Tuple, Union
from absl.testing import absltest
from absl.testing import parameterized
import flax
from flax import linen as nn
import jax
from jax import numpy as jnp
from flaxformer import testing_utils
from flaxformer.architectures.common import param_remapping
class OldDense(nn.Module):
  """Minimal dense layer whose parameter uses the legacy name 'w'.

  Used to simulate an "old" checkpoint format in the remapping tests.
  """

  # Shape of the single weight parameter.
  shape: Tuple[int, ...]

  def setup(self):
    self.w = self.param('w', nn.initializers.normal(), self.shape)

  def __call__(self, x):
    return self.w @ x
class NewDense(nn.Module, param_remapping.ParameterRemappable):
  """Dense layer storing its parameter under the new name 'weights'.

  Checkpoints still use the legacy name 'w'; the _from_save_format and
  _to_save_format hooks translate between the two layouts.
  """

  # Shape of the single weight parameter.
  shape: Tuple[int, ...]

  def setup(self):
    self.weights = self.param('weights', nn.initializers.normal(), self.shape)

  def __call__(self, x):
    return self.weights @ x

  @nn.nowrap  # exempt from named call decorator.
  @flax.linen.module.wrap_method_once
  def _from_save_format(
      self, params: param_remapping.RecursiveDefaultDict) -> Mapping[str, Any]:
    # Rename the checkpoint's 'w' entry to this module's 'weights'.
    params.merge('weights', params.pop('w'))
    return params

  @nn.nowrap  # exempt from named call decorator.
  @flax.linen.module.wrap_method_once
  def _to_save_format(
      self, params: param_remapping.RecursiveDefaultDict) -> Mapping[str, Any]:
    # Rename 'weights' back to the checkpoint name 'w'.
    params.merge('w', params.pop('weights'))
    return params
class Mlp(nn.Module, param_remapping.ParameterRemappable):
  """Chains a list of dense layers (old- or new-style) as an MLP fixture."""

  layers: List[Union[OldDense, NewDense]]

  def __call__(self, x):
    for layer in self.layers:
      x = layer(x)
    return x
class MlpSetupInit(nn.Module, param_remapping.ParameterRemappable):
  """MLP fixture that constructs its NewDense layers inside setup().

  Exercises the remapping path when submodules are created in setup rather
  than passed as constructor attributes.
  """

  def setup(self):
    self.layers = [NewDense(shape=(3, 4)), NewDense(shape=(5, 3))]

  def __call__(self, x):
    for layer in self.layers:
      x = layer(x)
    return x
class NestedStructuresMlp(nn.Module, param_remapping.ParameterRemappable):
  """MLP fixture with a nested (list-of-lists) layer structure.

  Exercises remapping through nested Python containers, which Flax flattens
  into names like 'layers_0_1'.
  """

  layers: List[List[Union[OldDense, NewDense]]]

  def __call__(self, x):
    for inner_layers in self.layers:
      for layer in inner_layers:
        x = layer(x)
    return x
class ParameterRemappableTest(parameterized.TestCase):
  """Tests round-tripping parameters between save format and module format."""

  def _assert_param_shapes(self, variables, expected, label):
    """Asserts the parameter tree of `variables` has the `expected` shapes.

    Args:
      variables: Flax variables dict (with a 'params' collection).
      expected: Expected nested dict of parameter shapes.
      label: Variable name used in the failure message.
    """
    shapes = testing_utils.param_shapes(variables)
    self.assertSameStructure(
        shapes, expected,
        label + ' = ' + testing_utils.format_params_shapes(shapes))

  @parameterized.named_parameters([
      ('direct_init', Mlp([NewDense(shape=(3, 4)),
                           NewDense(shape=(5, 3))])),
      ('setup_init', MlpSetupInit()),
  ])
  def test_load_old_checkpoint(self, new_mlp):
    # Instantiate an old model and get its params in order to simulate the
    # existence of an old checkpoint.
    old_mlp = Mlp([OldDense(shape=(3, 4)), OldDense(shape=(5, 3))])
    old_mlp_vars = old_mlp.init(jax.random.PRNGKey(0), jnp.zeros([4]))
    self._assert_param_shapes(
        old_mlp_vars, {
            'params': {
                'layers_0': {'w': [3, 4]},
                'layers_1': {'w': [5, 3]},
            }
        }, 'old_mlp_vars')

    # Use the new model to remap the old parameters into the new format.
    new_mlp_init_vars = new_mlp.init(jax.random.PRNGKey(0), jnp.zeros([4]))
    new_mlp_remapped_vars = {
        'params':
            new_mlp.apply(
                new_mlp_init_vars,
                old_mlp_vars['params'],
                method=new_mlp.from_save_format)
    }
    self._assert_param_shapes(
        new_mlp_remapped_vars, {
            'params': {
                'layers_0': {'weights': [3, 4]},
                'layers_1': {'weights': [5, 3]},
            }
        }, 'new_mlp_remapped_vars')

    # Map the new model's parameters into the save format.
    new_mlp_saveformat_vars = {
        'params':
            new_mlp.apply(
                new_mlp_remapped_vars,
                new_mlp_remapped_vars['params'],
                method=new_mlp.to_save_format)
    }
    self._assert_param_shapes(
        new_mlp_saveformat_vars, {
            'params': {
                'layers_0': {'w': [3, 4]},
                'layers_1': {'w': [5, 3]},
            }
        }, 'new_mlp_saveformat_vars')

  def test_nested_structures(self):
    # Same round-trip as above, but with nested layer lists ('layers_i_j').
    old_mlp = NestedStructuresMlp(
        [[OldDense(shape=(3, 4)),
          OldDense(shape=(5, 3))],
         [OldDense(shape=(6, 5)),
          OldDense(shape=(7, 6))]])
    old_mlp_vars = old_mlp.init(jax.random.PRNGKey(0), jnp.zeros([4]))
    self._assert_param_shapes(
        old_mlp_vars, {
            'params': {
                'layers_0_0': {'w': [3, 4]},
                'layers_0_1': {'w': [5, 3]},
                'layers_1_0': {'w': [6, 5]},
                'layers_1_1': {'w': [7, 6]},
            }
        }, 'old_mlp_vars')

    new_mlp = NestedStructuresMlp(
        [[NewDense(shape=(3, 4)),
          NewDense(shape=(5, 3))],
         [NewDense(shape=(6, 5)),
          NewDense(shape=(7, 6))]])

    # Use the new model to remap the old parameters into the new format.
    new_mlp_init_vars = new_mlp.init(jax.random.PRNGKey(0), jnp.zeros([4]))
    new_mlp_remapped_vars = {
        'params':
            new_mlp.apply(
                new_mlp_init_vars,
                old_mlp_vars['params'],
                method=new_mlp.from_save_format)
    }
    self._assert_param_shapes(
        new_mlp_remapped_vars, {
            'params': {
                'layers_0_0': {'weights': [3, 4]},
                'layers_0_1': {'weights': [5, 3]},
                'layers_1_0': {'weights': [6, 5]},
                'layers_1_1': {'weights': [7, 6]},
            }
        }, 'new_mlp_remapped_vars')

    # Map the new model's parameters into the save format.
    new_mlp_saveformat_vars = {
        'params':
            new_mlp.apply(
                new_mlp_remapped_vars,
                new_mlp_remapped_vars['params'],
                method=new_mlp.to_save_format)
    }
    self._assert_param_shapes(
        new_mlp_saveformat_vars, {
            'params': {
                'layers_0_0': {'w': [3, 4]},
                'layers_0_1': {'w': [5, 3]},
                'layers_1_0': {'w': [6, 5]},
                'layers_1_1': {'w': [7, 6]},
            }
        }, 'new_mlp_saveformat_vars')
class RecursiveDefaultDictTest(absltest.TestCase):
  """Tests merge/pop semantics of param_remapping.RecursiveDefaultDict."""

  def test_merge_mapping(self):
    # Merging a mapping into an existing subtree unions the keys.
    d = param_remapping.RecursiveDefaultDict()
    d['a']['b']['c'] = 1
    d['a'].merge('b', {'d': 2})
    self.assertSameStructure(d.to_dict(), {'a': {'b': {'c': 1, 'd': 2}}})

  def test_merge_leaf(self):
    # Merging a leaf value under a new key just inserts it.
    d = param_remapping.RecursiveDefaultDict()
    d['a']['b']['c'] = 1
    d['a']['b'].merge('d', 2)
    self.assertSameStructure(d.to_dict(), {'a': {'b': {'c': 1, 'd': 2}}})

  def test_merge_overwrite_mapping_with_leaf_1(self):
    d = param_remapping.RecursiveDefaultDict()
    d['a']['b']['c'] = 1
    with self.assertRaisesRegex(
        ValueError, "Cannot merge a non-mapping into a mapping; key: 'b'.*"):
      d['a'].merge('b', 2)

  def test_merge_overwrite_mapping_with_leaf_2(self):
    # Same check, but the conflict occurs one level down inside the merged
    # mapping.
    d = param_remapping.RecursiveDefaultDict()
    d['a']['b']['c'] = 1
    with self.assertRaisesRegex(
        ValueError, "Cannot merge a non-mapping into a mapping; key: 'b'.*"):
      d.merge('a', {'b': 2})

  def test_merge_overwrite_leaf_with_mapping_1(self):
    # NOTE(review): `{'d', 2}` is a set literal — presumably `{'d': 2}` was
    # intended. The assertion holds either way since merge refuses to
    # overwrite an existing leaf; confirm intent before "fixing".
    d = param_remapping.RecursiveDefaultDict()
    d['a']['b']['c'] = 1
    with self.assertRaisesWithLiteralMatch(
        ValueError, "Cannot overwrite existing leaf key 'c' via merge"):
      d['a']['b'].merge('c', {'d', 2})

  def test_merge_overwrite_leaf_with_mapping_2(self):
    # NOTE(review): inner `{'d', 2}` is also a set literal; see note above in
    # spirit — the overwrite-leaf error fires regardless.
    d = param_remapping.RecursiveDefaultDict()
    d['a']['b']['c'] = 1
    with self.assertRaisesWithLiteralMatch(
        ValueError, "Cannot overwrite existing leaf key 'c' via merge"):
      d['a'].merge('b', {'c': {'d', 2}})

  def test_pop_and_merge(self):
    # pop removes the subtree (and any now-empty ancestors) from the source.
    d = param_remapping.RecursiveDefaultDict()
    d['a']['b']['c'] = 1
    self.assertSameStructure(d.to_dict(), {'a': {'b': {'c': 1}}})
    d2 = param_remapping.RecursiveDefaultDict()
    d2['a'].merge('b2', d['a'].pop('b'))
    self.assertSameStructure(d.to_dict(), {})
    self.assertSameStructure(d2.to_dict(), {'a': {'b2': {'c': 1}}})

  def test_pop_and_update(self):
    # Plain dict.update also works as the receiving operation after a pop.
    d = param_remapping.RecursiveDefaultDict()
    d['a']['b']['c'] = 1
    self.assertSameStructure(d.to_dict(), {'a': {'b': {'c': 1}}})
    d2 = param_remapping.RecursiveDefaultDict()
    d2['a']['b2'].update(d['a'].pop('b'))
    self.assertSameStructure(d.to_dict(), {})
    self.assertSameStructure(d2.to_dict(), {'a': {'b2': {'c': 1}}})
# Standard absltest entry point so the file can be run directly.
if __name__ == '__main__':
  absltest.main()
| 10,190 | 31.559105 | 79 | py |
flaxformer | flaxformer-main/flaxformer/architectures/common/param_remapping.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""APIs and code related to the automatic parameter-remapping strategy.
TODO: Move this out of `architectures` and into a common FF area?
TODO: Write a g3doc with examples once this is all worked out.
TODO: Expand docstrings and include examples.
TODO: Add unit tests that demonstrate the behavior.
"""
from __future__ import annotations
import abc
import collections
import dataclasses
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple
import flax
from flax import linen as nn
# Key under which each module's `save_format_metadata` dict is embedded in the
# parameter tree produced by `to_save_format` (stripped by `from_save_format`).
METADATA_KEY = '__save_format_metadata__'
# Key inside the metadata dict holding the integer save-format version.
VERSION_KEY = '__version__'
class ParameterRemappable(metaclass=abc.ABCMeta):
  """Interface for components that can load checkpoints from old versions.

  All modules should inherit from this class, even if they do not currently
  require parameter remapping, so that their submodules (and transitive
  submodules) will have their parameters remapped when a checkpoint is loaded.

  When a module's code is modified in a way that changes its parameter tree, the
  `_from_save_format` method should be implemented to handle the conversion from
  the parameter tree of the older version(s) to the new one. If it is desired
  that the checkpoint "save format" should reflect this change, then the value
  of the `save_format_metadata` property method should be updated. If, instead,
  it is desired that the checkpoint "save format" *not* change, then the
  `_to_save_format` method should be implemented to convert from the new
  parameter tree structure to the old structure.
  """

  @property
  def save_format_metadata(self) -> Dict[str, Any]:
    """Returns this module's current version."""
    return {VERSION_KEY: 0}

  @nn.nowrap  # Exempt from named call decorator.
  @flax.linen.module.wrap_method_once
  def from_save_format(self, params: Mapping[str, Any]) -> Dict[str, Any]:
    """Handles remapping `params`, including recursively calling submodules.

    This default implementation iterates through the class's fields to find
    submodules, and recursively calls `from_save_format` on them. This avoids
    a large amount of boilerplate code that would otherwise be needed in each
    module.

    To define additional (custom) logic, implement `_from_save_format`.

    Args:
      params: The parameter tree before remapping. Usually comes from a
        checkpoint (or legacy model). May contain a special key "__version__"
        whose value is the version number (i.e., `save_format_metadata`'s value)
        of the module corresponding to structure of the parameter tree layout.
        If a "__version__" key is missing, it is assumed to be `0`, indicating
        "the parameter structure before the `ParameterRemappable` was added to
        the module".

    Returns:
      The remapped parameter tree.
    """
    # Copy `params` into a more convenient dict type.
    params = RecursiveDefaultDict(params)
    # Apply any custom remapping logic.
    params = self._from_save_format(params)
    # Recursively call `from_save_format` for all submodules.
    params = self._submodules_from_save_format(params)
    # Metadata entries are save-format bookkeeping only; strip them from the
    # live parameter tree that is returned to the model.
    return filter_out_metadata(params.to_dict())

  @nn.nowrap  # Exempt from named call decorator.
  @flax.linen.module.wrap_method_once
  def to_save_format(self, params: Mapping[str, Any]) -> Dict[str, Any]:
    """Converts `params` to the format of `save_format_metadata`.

    This default implementation iterates through the class's fields to find
    submodules, and recursively calls `to_save_format` on them. This avoids
    a large amount of boilerplate code that would otherwise be needed in each
    module.

    To define additional (custom) logic implement `_to_save_format`.

    Args:
      params: The parameter tree before remapping.

    Returns:
      The remapped parameter tree, with each module's `save_format_metadata`
      value stored under the special key given by `METADATA_KEY`.
    """
    # Note: submodules are remapped *before* this module's custom logic here,
    # mirroring (in reverse) the order used by `from_save_format`.
    # Recursively call `to_save_format` for all submodules.
    params = self._submodules_to_save_format(params)
    # Copy `params` and convert to a more convenient dict type.
    params = RecursiveDefaultDict(params)
    # Apply any custom remapping logic.
    params = self._to_save_format(params)
    params[METADATA_KEY] = self.save_format_metadata
    return params.to_dict()

  @nn.nowrap  # Exempt from named call decorator.
  @flax.linen.module.wrap_method_once
  def _from_save_format(self,
                        params: RecursiveDefaultDict) -> Mapping[str, Any]:
    """Clients may override this method to add custom remapping logic."""
    return params

  @nn.nowrap  # Exempt from named call decorator.
  @flax.linen.module.wrap_method_once
  def _to_save_format(self, params: RecursiveDefaultDict) -> Mapping[str, Any]:
    """Clients may override this method to add custom remapping logic."""
    return params

  @nn.nowrap  # Exempt from named call decorator.
  @flax.linen.module.wrap_method_once
  def _submodules_from_save_format(
      self, params: Mapping[str, Any]) -> RecursiveDefaultDict:
    """Recursively calls `from_save_format` for all submodules."""
    # Copy `params` and convert to a more convenient dict type.
    result = RecursiveDefaultDict(params)
    for name, submodule in self._get_remappable_submodules():
      # Only remap entries the checkpoint actually contains for this module.
      if name in params:
        result[name] = submodule.from_save_format(params[name])
    return result

  @nn.nowrap  # Exempt from named call decorator.
  @flax.linen.module.wrap_method_once
  def _submodules_to_save_format(
      self, params: Mapping[str, Any]) -> RecursiveDefaultDict:
    """Recursively calls `to_save_format` for all submodules."""
    # Copy `params` and convert to a more convenient dict type.
    result = RecursiveDefaultDict(params)
    for name, submodule in self._get_remappable_submodules():
      if name in params:
        result[name] = submodule.to_save_format(params[name])
    return result

  @nn.nowrap  # Exempt from named call decorator.
  @flax.linen.module.wrap_method_once
  def _get_remappable_submodules(self) -> List[Tuple[str, ParameterRemappable]]:
    """Returns the parameter-remappable submodules."""
    # Each entry pairs the submodule's Flax name (its key in the parameter
    # tree) with the submodule itself.
    return [(e.name, e)
            for e in self._get_submodules()
            if isinstance(e, ParameterRemappable)]

  # TODO: There has got to be a library for doing this.
  @nn.nowrap  # Exempt from named call decorator.
  @flax.linen.module.wrap_method_once
  def _get_submodules(self) -> List[nn.Module]:
    """Returns a list of this object's submodules."""
    # Gather candidate attribute names: declared dataclass fields first, then
    # any remaining instance attributes.
    field_names: List[str] = []
    if dataclasses.is_dataclass(self):
      for field in dataclasses.fields(self):
        field_names.append(field.name)
    for field_name in vars(self):
      if field_name not in field_names:
        field_names.append(field_name)
    # `name` and `parent` are Flax module bookkeeping, not submodules.
    field_values: List[Any] = [
        getattr(self, field_name)
        for field_name in field_names
        if field_name not in ('name', 'parent')
    ]
    all_elements: List[ParameterRemappable] = []
    # Worklist search: descend into mappings and sequences so that nested
    # containers of submodules are also found.
    while field_values:
      field_value = field_values.pop(0)
      if isinstance(field_value, ParameterRemappable):
        all_elements.append(field_value)
      elif isinstance(field_value, (str, bytes)):
        # Explicitly skip strings since they are also instances of `Sequence`.
        continue
      elif isinstance(field_value, Mapping):
        field_values = list(field_value.values()) + field_values
      elif isinstance(field_value, Sequence):
        field_values = list(field_value) + field_values
    # Only keep actual Flax modules (which carry a `.name` usable as a key).
    return [e for e in all_elements if isinstance(e, nn.Module)]
class RecursiveDefaultDict(collections.defaultdict):
  """A `defaultdict` that allows recursively indexing to an arbitrary depth.

  For example:

    d = RecursiveDefaultDict()
    d['a']['b']['c'] = 1
    d.to_dict()  # {'a': {'b': {'c': 1}}}

    d2 = RecursiveDefaultDict()
    d2['a'].merge('b2', d['a'].pop('b'))
    d.to_dict()  # {}
    d2.to_dict()  # {'a': {'b2': {'c': 1}}}
  """

  def __init__(self, initial_content: Optional[Mapping[Any, Any]] = None):
    # Missing keys auto-create nested RecursiveDefaultDict instances, which is
    # what makes `d['a']['b']['c'] = 1` work on an empty dict.
    super().__init__(RecursiveDefaultDict)
    if initial_content is not None:
      self.update(initial_content)

  def merge(self, key: Any, value: Any) -> None:
    """Adds the key-value pair, but recursively if `value` is a Mapping."""
    if key not in self:
      # Fresh key: wrap mappings so they remain recursively mergeable.
      if isinstance(value, Mapping):
        value = RecursiveDefaultDict(value)
      self[key] = value
      return
    existing = self[key]
    if isinstance(existing, RecursiveDefaultDict):
      # Existing subtree: only another mapping may be merged into it.
      if not isinstance(value, Mapping):
        raise ValueError('Cannot merge a non-mapping into a mapping; '
                         f'key: {key!r}; new value: ({value!r})')
      existing.update(value)
      return
    # Existing leaf values can never be merged over.
    raise ValueError(f'Cannot overwrite existing leaf key {key!r} via merge')

  def update(self, other: Mapping[Any, Any]) -> None:
    """Calls `merge` for each key-value pair in `other`."""
    for key, value in other.items():
      self.merge(key, value)

  def pop(self, key: Any, default: Any = None) -> Any:
    # Popping a missing key yields an empty recursive dict unless a default
    # was supplied, so callers can keep chaining lookups on the result.
    fallback = RecursiveDefaultDict() if default is None else default
    return super().pop(key, fallback)

  def to_dict(self) -> Dict[Any, Any]:
    """Recursively converts this object to a regular `dict`.

    Returns:
      A 'dict' with this object's content. Note that entries with empty values
      are recursively filtered out.
    """
    plain = {}
    for key, value in self.items():
      if isinstance(value, RecursiveDefaultDict):
        value = value.to_dict()
      # Falsy entries (including subtrees that became empty) are dropped.
      if value:
        plain[key] = value
    return plain
def filter_out_metadata(params: Mapping[str, Any]) -> Dict[str, Any]:
  """Removes "__save_format_metadata__" entries from a parameter tree."""
  filtered = {}
  for key, value in params.items():
    if key == METADATA_KEY:
      # Drop the metadata entry itself at every level.
      continue
    if isinstance(value, Mapping):
      value = filter_out_metadata(value)
    # Falsy values (including subtrees emptied by the recursion) are dropped.
    if value:
      filtered[key] = value
  return filtered
| 10,677 | 37 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/moe/moe_parallel_fused_decoder_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for moe_parallel_fused_decoder."""
from absl.testing import absltest
import flax
from flax import linen as nn
import jax
from jax import numpy as jnp
from jax import random
import numpy as np
from flaxformer.architectures.moe import moe_layers
from flaxformer.architectures.moe import moe_parallel_fused_decoder
from flaxformer.architectures.moe import routing
from flaxformer.components import dense
from flaxformer.components import layer_norm
from flaxformer.components import relative_position_biases
from flaxformer.components.attention import dense_attention
# Short aliases for classes used repeatedly in the tests below.
FrozenDict = flax.core.FrozenDict
MoeLayer = moe_layers.MoeLayer
SparseParallelFusedDecoderLayer = (
    moe_parallel_fused_decoder.SparseParallelFusedDecoderLayer
)

# Initializers shared across the test fixture factories.
EMBEDDING_INIT = nn.initializers.normal(stddev=1.0)
RELPOS_BIAS_INIT = nn.initializers.variance_scaling(1.0, 'fan_avg', 'uniform')
ATTENTION_KERNEL_INIT = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'normal'
)
MLP_KERNEL_INIT = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'truncated_normal'
)
BIAS_INIT = nn.initializers.zeros
DTYPE = jnp.float32

# Factories for the per-layer submodules each test layer needs.
_make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
_make_layer_norm = layer_norm.T5LayerNorm
def _make_multi_query_attention(
    embed_dim: int, num_heads: int, head_dim
) -> dense_attention.MultiQueryDotProductAttention:
  """Test configuration for attention."""
  # Collect the keyword configuration first so construction reads as data.
  config = dict(
      num_heads=num_heads,
      dtype=DTYPE,
      head_dim=head_dim,
      kernel_init=ATTENTION_KERNEL_INIT,
      bias_init=BIAS_INIT,
      use_bias=False,
      broadcast_dropout=True,
      float32_logits=True,
      rescale_logits=True,
      split_head_kernel=True,
      # Must be specified for constructing fused O-Wo projection.
      out_features=embed_dim,
      use_rotary_embedding=True,
      dropout_rate=0.1,
  )
  return dense_attention.MultiQueryDotProductAttention(**config)
def _make_relative_position_bias() -> (
    relative_position_biases.RelativePositionBiases
):
  """Test configuration for the position bias."""
  # Keyword configuration gathered up front, then splatted into the module.
  bias_config = dict(
      num_buckets=32,
      max_distance=64,
      num_heads=8,
      dtype=DTYPE,
      embedding_init=RELPOS_BIAS_INIT,
  )
  return relative_position_biases.RelativePositionBiases(**bias_config)
def _make_dense_mlp(embed_dim: int) -> dense.MlpBlock:
  """Test configuration for the MLP.

  Args:
    embed_dim: Embedding dimension the block projects back to.

  Returns:
    An `MlpBlock` that only applies the activation functions; the fused
    projections built elsewhere supply the actual matrix multiplies.
  """
  return dense.MlpBlock(
      use_bias=False,
      out_dim=embed_dim,  # Project back to embedding dimension
      activations=('swish', 'linear'),
      # MLP block only applies the activation functions.
      precomputed_intermediates=True,
      fuse_kernels=False,
      kernel_init=MLP_KERNEL_INIT,
      bias_init=BIAS_INIT,
      dtype=DTYPE,
  )
def _make_q_wi_fused_projection(
    attention_module: dense_attention.MultiQueryDotProductAttention,
    mlp_module: dense.MlpBlock,
) -> MoeLayer:
  """Returns sparse Q-Wi projection.

  Args:
    attention_module: Attention module whose dims size the fused kernel.
    mlp_module: MLP module whose dims size the fused kernel.
  """
  # Each expert is a dense projection from 'embed' to the fused Q-Wi dims.
  expert = dense.DenseGeneral(
      axis=-1,
      features=moe_parallel_fused_decoder.compute_fused_q_wi_dims(
          attention_module, mlp_module
      ),
      use_bias=False,
      dtype=DTYPE,
      kernel_init=ATTENTION_KERNEL_INIT,
      bias_init=BIAS_INIT,
      reshape_kernel=False,
      kernel_axis_names=('embed', 'heads', 'q_wi_fused'),
  )
  router = routing.TokensChooseMaskedRouter(
      # Default router weights fine for Q-Wi projection which takes 3D inputs:
      # [batch_size, seq_length, hidden_dim].
      router_weights=routing.RouterWeights(),
      num_selected_experts=1,
      jitter_noise=0.0,
      batch_prioritized_routing=False,
      dtype=DTYPE,
      ignore_padding_tokens=False,
  )
  return MoeLayer(
      num_experts=4,
      num_expert_partitions=4,
      router=router,
      max_group_size=2,
      train_capacity_factor=1.0,
      eval_capacity_factor=1.0,
      expert=expert,
      num_model_partitions=1,
      input_hidden_dims_axes=('embed',),
      output_hidden_dims_axes=('heads', 'mlp'),
      dtype=DTYPE,
  )
def _make_o_wo_fused_projection(
    attention_module: dense_attention.MultiQueryDotProductAttention,
) -> MoeLayer:
  """Returns sparse O-Wo projection.

  Args:
    attention_module: Attention module whose dims size the fused kernel.
  """
  # Each expert projects ('heads', 'o_wo_fused') back to 'embed'.
  expert = dense.DenseGeneral(
      axis=(-2, -1),
      features=moe_parallel_fused_decoder.compute_fused_o_wo_dims(
          attention_module
      ),
      use_bias=False,
      dtype=DTYPE,
      kernel_init=ATTENTION_KERNEL_INIT,
      bias_init=BIAS_INIT,
      reshape_kernel=False,
      # o_wo_fused = mlp//heads + head_dim.
      kernel_axis_names=('heads', 'o_wo_fused', 'embed'),
  )
  router = routing.TokensChooseMaskedRouter(
      # Specialized router weights required for O-Wo projection which takes 4D
      # inputs: [batch_size, seq_length, heads, o_wo_fused].
      router_weights=routing.RouterWeights(
          axis=(-2, -1),  # ('heads', 'o_wo_fused') projection
          kernel_axis_names=('heads', 'o_wo_fused', 'unmodeled'),
          reshape_kernel=False,
      ),
      num_selected_experts=1,
      jitter_noise=0.0,
      batch_prioritized_routing=False,
      dtype=DTYPE,
      ignore_padding_tokens=False,
  )
  return MoeLayer(
      num_experts=4,
      num_expert_partitions=4,
      router=router,
      max_group_size=2,
      train_capacity_factor=1.0,
      eval_capacity_factor=1.0,
      expert=expert,
      num_model_partitions=1,
      input_hidden_dims_axes=('heads', 'mlp'),
      output_hidden_dims_axes=('embed',),
      dtype=DTYPE,
  )
def _make_kv_fused_projection(
    attention_module: dense_attention.MultiQueryDotProductAttention,
) -> dense.DenseGeneral:
  """Returns dense KV projection.

  Unlike Q-Wi and O-Wo, the KV projection is a plain dense layer (no MoE
  routing) shared across all tokens.

  Args:
    attention_module: Attention module whose dims size the fused kernel.
  """
  return dense.DenseGeneral(
      axis=-1,
      features=moe_parallel_fused_decoder.compute_fused_kv_dims(
          attention_module
      ),
      use_bias=False,
      dtype=DTYPE,
      kernel_init=ATTENTION_KERNEL_INIT,
      bias_init=BIAS_INIT,
      reshape_kernel=False,
      kernel_axis_names=('embed', 'multiquery_heads', 'kv_fused'),
  )
class ParallelFusedDecoderOnlyTest(absltest.TestCase):
  """Tests for `SparseParallelFusedDecoderLayer` and its dim helpers."""

  def test_layer(self):
    """Checks output shape and the parameter-tree shapes of a sparse layer."""
    batch_size = 3
    seq_length = 8
    embed_dim = 32
    num_heads = 8
    head_dim = 4
    attention_module = _make_multi_query_attention(
        embed_dim, num_heads, head_dim
    )
    mlp_module = _make_dense_mlp(embed_dim)
    decoder_layer = SparseParallelFusedDecoderLayer(
        self_attention=attention_module,
        mlp=mlp_module,
        # Attention and MLP modules are indirectly used to construct the fused
        # projections.
        q_wi_fused=_make_q_wi_fused_projection(attention_module, mlp_module),
        o_wo_fused=_make_o_wo_fused_projection(attention_module),
        kv_fused=_make_kv_fused_projection(attention_module),
        dropout_factory=_make_dropout,
        layer_norm_factory=_make_layer_norm,
        relative_position_bias_factory=_make_relative_position_bias,
    )
    # Inputs are pre-embedded activations, not token ids.
    decoder_target_tokens = np.zeros(
        (batch_size, seq_length, embed_dim), dtype=np.float32
    )
    output, variables = decoder_layer.init_with_output(
        {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(0)},
        targets=decoder_target_tokens,
        encoded=None,
        enable_dropout=True,
    )
    self.assertEqual(output.shape, (batch_size, seq_length, embed_dim))
    # Golden parameter shapes; comments give the symbolic dimensions.
    self.assertEqual(
        jax.tree_util.tree_map(jnp.shape, variables['params']),
        FrozenDict({
            'kv_fused': {
                # [embed_dim, 1, 2 * head_dim]
                'kernel': (32, 1, 8),
            },
            'layer_norm': {
                'scale': (32,),
            },
            'o_wo_fused': {
                'expert': {
                    # [num_experts, heads, mlp//heads + head_dim, embed_dim]
                    'kernel': (4, 8, 260, 32),
                },
                'router': {
                    'router_weights': {
                        'w': {
                            'bias': (4,),
                            # [heads, mlp//heads + head_dim, num_experts]
                            'kernel': (8, 260, 4),
                        },
                    },
                },
            },
            'q_wi_fused': {
                'expert': {
                    # [num_experts, embed_dim, num_heads,
                    # mlp//heads * n_act + head_dim]
                    'kernel': (4, 32, 8, 516),
                },
                'router': {
                    'router_weights': {
                        'w': {
                            'bias': (4,),
                            # [embed_dim, num_experts]
                            'kernel': (32, 4),
                        },
                    },
                },
            },
            'relpos_bias': {
                'rel_embedding': (8, 32),
            },
        }),
    )

  def test_projection_dims(self):
    """Checks the helpers that compute fused projection dimensions."""
    embed_dim = 8
    num_heads = 8
    head_dim = 2
    attention_module = _make_multi_query_attention(
        embed_dim, num_heads, head_dim
    )
    mlp_module = _make_dense_mlp(embed_dim)
    with self.subTest(name='fused_o_wo_dims'):
      # O-Wo output dim equals the attention module's out_features.
      self.assertEqual(
          attention_module.out_features,
          moe_parallel_fused_decoder.compute_fused_o_wo_dims(attention_module),
      )
    with self.subTest(name='fused_kv_dims'):
      # Multi-query attention: a single KV head of size 2 * head_dim.
      self.assertEqual(
          (1, 2 * head_dim),
          moe_parallel_fused_decoder.compute_fused_kv_dims(attention_module),
      )
    with self.subTest(name='fused_q_wi_dims'):
      self.assertEqual(
          (num_heads, 514),
          moe_parallel_fused_decoder.compute_fused_q_wi_dims(
              attention_module, mlp_module
          ),
      )
# Standard absltest entry point so the file can be run directly.
if __name__ == '__main__':
  absltest.main()
| 10,187 | 29.872727 | 79 | py |
flaxformer | flaxformer-main/flaxformer/architectures/moe/moe_architecture_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for moe_architecture."""
import functools
from typing import Optional, Sequence
from absl.testing import absltest
from absl.testing import parameterized
import flax
from flax import linen as nn
import jax
from jax import numpy as jnp
from jax import random
import numpy as np
from flaxformer.architectures.moe import moe_architecture
from flaxformer.architectures.moe import moe_enums
from flaxformer.architectures.moe import moe_layers
from flaxformer.architectures.moe import routing
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.components import dense
from flaxformer.components import embedding
from flaxformer.components import layer_norm
from flaxformer.components import relative_position_biases
from flaxformer.components.attention import dense_attention
# Short aliases for the layer classes exercised by these tests.
DecoderLayer = t5_architecture.DecoderLayer
EncoderLayer = t5_architecture.EncoderLayer
SparseDecoderLayer = moe_architecture.SparseDecoderLayer
SparseEncoderLayer = moe_architecture.SparseEncoderLayer
LayerLayout = moe_enums.LayerLayout
MoeLayer = moe_layers.MoeLayer

# Initializers shared by the fixture factories below.
EMBEDDING_INIT = nn.initializers.normal(stddev=1.0)
RELPOS_BIAS_INIT = nn.initializers.variance_scaling(1.0, 'fan_avg', 'uniform')
ATTENTION_KERNEL_INIT = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'normal')
MLP_KERNEL_INIT = nn.initializers.variance_scaling(1.0, 'fan_in',
                                                   'truncated_normal')
BIAS_INIT = nn.initializers.normal(stddev=1e-6)
DTYPE = jnp.float32

# Names of the diagnostic metrics emitted by MoE layers.
MOE_METRICS = ('auxiliary_loss', 'router_z_loss', 'fraction_tokens_left_behind',
               'expert_usage', 'router_confidence')

# Factories for the per-layer submodules each test layer needs.
_make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
_make_layer_norm = layer_norm.T5LayerNorm
def _make_token_emb(num_embeddings: int) -> embedding.Embed:
  """Test configuration for token embeddings.

  Args:
    num_embeddings: Vocabulary size of the embedding table.
  """
  return embedding.Embed(
      num_embeddings=num_embeddings,
      features=13,
      cast_input_dtype=jnp.int32,
      dtype=DTYPE,
      attend_dtype=jnp.float32,  # for logit training stability
      embedding_init=EMBEDDING_INIT,
      name='token_embedder')
def _make_multi_query_attention(
) -> dense_attention.MultiQueryDotProductAttention:
  """Test configuration for attention."""
  return dense_attention.MultiQueryDotProductAttention(
      num_heads=8,
      dtype=DTYPE,
      qkv_features=16,
      # head_dim=None: presumably derived from qkv_features / num_heads by the
      # attention module — confirm against its implementation.
      head_dim=None,
      kernel_init=ATTENTION_KERNEL_INIT,
      bias_init=BIAS_INIT,
      use_bias=False,
      broadcast_dropout=True,
      dropout_rate=0.1)
def _make_dense_mlp() -> dense.MlpBlock:
  """Test configuration for the (dense) MLP block."""
  return dense.MlpBlock(
      use_bias=False,
      intermediate_dim=8,
      activations=('relu',),
      kernel_init=MLP_KERNEL_INIT,
      bias_init=BIAS_INIT,
      intermediate_dropout_rate=0.1,
      final_dropout_rate=0.1,
      dtype=DTYPE)
def _make_sparse_mlp(ignore_padding_tokens: bool = False) -> MoeLayer:
  """Test configuration for the sparse MLP.

  Args:
    ignore_padding_tokens: Whether the router should ignore padding tokens
      when computing routing decisions.
  """
  # Each expert is itself a small dense MLP block.
  expert = dense.MlpBlock(
      use_bias=True,
      activations=('gelu',),
      intermediate_dim=16,
      intermediate_dropout_rate=0.1)
  router = routing.TokensChooseMaskedRouter(
      router_weights=routing.RouterWeights(),
      num_selected_experts=1,
      jitter_noise=0.,
      batch_prioritized_routing=True,
      dtype=jnp.float32,
      ignore_padding_tokens=ignore_padding_tokens)
  return MoeLayer(
      num_experts=4,
      num_expert_partitions=4,
      router=router,
      max_group_size=2,
      train_capacity_factor=1.,
      eval_capacity_factor=1.,
      expert=expert,
      num_model_partitions=1,
      dtype=jnp.float32)
def _make_relative_position_bias(
) -> relative_position_biases.RelativePositionBiases:
  """Test configuration for the position bias."""
  return relative_position_biases.RelativePositionBiases(
      num_buckets=32,
      max_distance=64,
      num_heads=8,
      dtype=DTYPE,
      embedding_init=RELPOS_BIAS_INIT)
def _make_sparse_encoder_layer(
    extra_mlp: bool = False,
    shared_relative_position_bias: Optional[nn.Module] = None,
    scanned: bool = False,
    ignore_padding_tokens: bool = False) -> SparseEncoderLayer:
  """Test configuration for sparse MLP-attention encoder blocks.

  Args:
    extra_mlp: Whether to attach an additional dense MLP to the sparse layer.
    shared_relative_position_bias: Optional position-bias module shared across
      layers; when None, a per-layer bias is built from the factory.
    scanned: Flag forwarded to the layer; presumably enables `nn.scan`-style
      layer stacking — confirm against SparseEncoderLayer.
    ignore_padding_tokens: Whether the MoE router ignores padding tokens.
  """
  return SparseEncoderLayer(
      attention=_make_multi_query_attention(),
      mlp=_make_sparse_mlp(ignore_padding_tokens=ignore_padding_tokens),
      extra_mlp=_make_dense_mlp() if extra_mlp else None,
      dropout_factory=_make_dropout,
      layer_norm_factory=_make_layer_norm,
      relative_position_bias_factory=_make_relative_position_bias,
      shared_relative_position_bias=shared_relative_position_bias,
      scanned=scanned)
def _make_dense_encoder_layer(shared_relative_position_bias: Optional[
    nn.Module] = None,
                              scanned: bool = False) -> EncoderLayer:
  """Test configuration for dense MLP-attention encoder blocks.

  Args:
    shared_relative_position_bias: Optional position-bias module shared across
      layers; when None, a per-layer bias is built from the factory.
    scanned: Flag forwarded to the layer (see `_make_sparse_encoder_layer`).
  """
  return EncoderLayer(
      attention=_make_multi_query_attention(),
      mlp=_make_dense_mlp(),
      dropout_factory=_make_dropout,
      layer_norm_factory=_make_layer_norm,
      relative_position_bias_factory=_make_relative_position_bias,
      shared_relative_position_bias=shared_relative_position_bias,
      scanned=scanned)
def _make_sparse_decoder_layer(
    extra_mlp: bool = False,
    shared_relative_position_bias: Optional[nn.Module] = None,
    scanned: bool = False,
    ignore_padding_tokens: bool = False) -> SparseDecoderLayer:
  """Test configuration for sparse MLP-self-attention decoder blocks.

  Args:
    extra_mlp: Whether to attach an additional dense MLP to the sparse layer.
    shared_relative_position_bias: Optional position-bias module shared across
      layers; when None, a per-layer bias is built from the factory.
    scanned: Flag forwarded to the layer (see `_make_sparse_encoder_layer`).
    ignore_padding_tokens: Whether the MoE router ignores padding tokens.
  """
  return SparseDecoderLayer(
      self_attention=_make_multi_query_attention(),
      encoder_decoder_attention=_make_multi_query_attention(),
      mlp=_make_sparse_mlp(ignore_padding_tokens=ignore_padding_tokens),
      extra_mlp=_make_dense_mlp() if extra_mlp else None,
      dropout_factory=_make_dropout,
      layer_norm_factory=_make_layer_norm,
      relative_position_bias_factory=_make_relative_position_bias,
      shared_relative_position_bias=shared_relative_position_bias,
      scanned=scanned)
def _make_dense_decoder_layer(shared_relative_position_bias: Optional[
    nn.Module] = None,
                              scanned: bool = False) -> DecoderLayer:
  """Test configuration for dense MLP-self-self_attention decoder blocks.

  Args:
    shared_relative_position_bias: Optional position-bias module shared across
      layers; when None, a per-layer bias is built from the factory.
    scanned: Flag forwarded to the layer (see `_make_sparse_encoder_layer`).
  """
  return DecoderLayer(
      self_attention=_make_multi_query_attention(),
      encoder_decoder_attention=_make_multi_query_attention(),
      mlp=_make_dense_mlp(),
      dropout_factory=_make_dropout,
      layer_norm_factory=_make_layer_norm,
      relative_position_bias_factory=_make_relative_position_bias,
      shared_relative_position_bias=shared_relative_position_bias,
      scanned=scanned)
class MoeArchitectureTest(parameterized.TestCase):
@parameterized.named_parameters(
dict(testcase_name='default', extra_mlp=False),
dict(testcase_name='extra_mlp', extra_mlp=True))
def test_moe_architecture(self, extra_mlp):
batch_size = 2
seq_length = 4
num_embeddings = 64
encoder_factory = functools.partial(
moe_architecture.SparseEncoder,
num_layers=2,
num_sparse_layers=1,
sparse_layout=LayerLayout.MIXED,
sparse_layer_factory=functools.partial(
_make_sparse_encoder_layer, extra_mlp=extra_mlp),
layer_factory=_make_dense_encoder_layer,
input_dropout_factory=_make_dropout,
output_dropout_factory=_make_dropout,
layer_norm_factory=_make_layer_norm,
)
decoder_factory = functools.partial(
moe_architecture.SparseDecoder,
num_layers=2,
num_sparse_layers=1,
sparse_layout=LayerLayout.MIDDLE,
sparse_layer_factory=functools.partial(
_make_sparse_decoder_layer, extra_mlp=extra_mlp),
layer_factory=_make_dense_decoder_layer,
dropout_factory=_make_dropout,
layer_norm_factory=_make_layer_norm,
)
transformer = t5_architecture.EncoderDecoder(
shared_token_embedder_factory=functools.partial(
_make_token_emb, num_embeddings=num_embeddings),
encoder_factory=encoder_factory,
decoder_factory=decoder_factory,
)
encoder_input_tokens = np.zeros((batch_size, seq_length), dtype=np.float32)
decoder_input_tokens = np.zeros((batch_size, seq_length), dtype=np.float32)
decoder_target_tokens = np.zeros((batch_size, seq_length), dtype=np.float32)
output, variables = transformer.init_with_output(
{
'params': random.PRNGKey(0),
'dropout': random.PRNGKey(0)
},
encoder_input_tokens=encoder_input_tokens,
decoder_input_tokens=decoder_input_tokens,
decoder_target_tokens=decoder_target_tokens,
enable_dropout=False,
)
self.assertEqual(output.shape, (batch_size, seq_length, num_embeddings))
# For readability, we only verify the weight names to check that the layouts
# are correct.
encoder_layer_0 = variables['params']['encoder']['layers_0']
# First encoder layer should have regular self-attention.
self.assertIn('attention', encoder_layer_0)
# And it should contain a dense MLP (because there is only one sparse MLP
# layer at the top for MIXED layout).
self.assertIn('wi', encoder_layer_0['mlp']) # Dense
self.assertIn('wo', encoder_layer_0['mlp']) # Dense
self.assertNotIn('expert', encoder_layer_0['mlp']) # Sparse
encoder_layer_1 = variables['params']['encoder']['layers_1']
# Second encoder layer should have regular self-attention.
self.assertIn('attention', encoder_layer_1)
# And it should contain a sparse MoeLayer (from sparse MIXED layout).
self.assertIn('expert', encoder_layer_1['mlp']) # Sparse
if extra_mlp:
# Check that the additional mlp blocks are added to the sparse layers.
self.assertIn('extra_mlp', encoder_layer_1)
decoder_layer_0 = variables['params']['decoder']['layers_0']
# First decoder layer should have regular encoder-decoder attention.
self.assertIn('query', decoder_layer_0['encoder_decoder_attention'])
# It should contain regular self-attention.
self.assertIn('self_attention', decoder_layer_0)
# And it should contain a regular dense MLP (sparse layout is MIDDLE, so
# first layer will be dense MLP).
self.assertIn('wi', decoder_layer_0['mlp']) # Dense
self.assertIn('wo', decoder_layer_0['mlp']) # Dense
self.assertNotIn('expert', decoder_layer_0['mlp']) # Sparse
decoder_layer_1 = variables['params']['decoder']['layers_1']
# Second decoder layer should have regular encoder-decoder attention.
self.assertIn('query', decoder_layer_1['encoder_decoder_attention'])
# It should contain regular self-attention.
self.assertIn('self_attention', decoder_layer_1)
# And it should contain a sparse MLP (sparse layout is MIDDLE, which for a
# 2 layer decoder results in top layer being sparse).
self.assertIn('expert', decoder_layer_1['mlp']) # Sparse
if extra_mlp:
# Check that the additional mlp blocks are added to the sparse layers.
self.assertIn('extra_mlp', decoder_layer_1)
def test_moe_architecture_logit_mask_propagates(self):
batch_size = 2
seq_length = 4
num_embeddings = 5
encoder_factory = functools.partial(
moe_architecture.SparseEncoder,
num_layers=1,
num_sparse_layers=1,
sparse_layout=LayerLayout.MIXED,
sparse_layer_factory=functools.partial(
_make_sparse_encoder_layer, ignore_padding_tokens=True),
layer_factory=_make_dense_encoder_layer,
input_dropout_factory=_make_dropout,
output_dropout_factory=_make_dropout,
layer_norm_factory=_make_layer_norm,
)
decoder_factory = functools.partial(
moe_architecture.SparseDecoder,
num_layers=1,
num_sparse_layers=1,
sparse_layout=LayerLayout.MIDDLE,
sparse_layer_factory=functools.partial(
_make_sparse_decoder_layer, ignore_padding_tokens=True),
layer_factory=_make_dense_decoder_layer,
dropout_factory=_make_dropout,
layer_norm_factory=_make_layer_norm,
)
transformer = t5_architecture.EncoderDecoder(
shared_token_embedder_factory=functools.partial(
_make_token_emb, num_embeddings=num_embeddings),
encoder_factory=encoder_factory,
decoder_factory=decoder_factory,
)
encoder_input_tokens = jax.random.randint(
jax.random.PRNGKey(0), (batch_size, seq_length), minval=0, maxval=4)
decoder_input_tokens = jax.random.randint(
jax.random.PRNGKey(1), (batch_size, seq_length), minval=0, maxval=4)
decoder_target_tokens = jax.random.randint(
jax.random.PRNGKey(2), (batch_size, seq_length), minval=0, maxval=4)
output, _ = transformer.init_with_output(
{
'params': random.PRNGKey(0),
'dropout': random.PRNGKey(0)
},
encoder_input_tokens=encoder_input_tokens,
decoder_input_tokens=decoder_input_tokens,
decoder_target_tokens=decoder_target_tokens,
enable_dropout=False,
)
# To identify padding vs non-padding tokens in MoE models, we rely on how
# the Flaxformer T5 architectures constructs and applies the logit mask.
# See
# https://github.com/google/flaxformer/blob/9712a16/flaxformer/architectures/t5/t5_architecture.py#L315
# and
# https://github.com/google/flaxformer/blob/9712a16/flaxformer/architectures/t5/t5_architecture.py#L603.
# So here, we test the specific outputs to catch any changes to the
# underlying T5 architecture logic.
np.testing.assert_allclose(
output, [
[
[1.0098116, -0.07620703, 2.2726512, 1.5326656, -0.56177557],
[-0.94259655, 1.7607443, 2.3942919, 0.72459084, -1.0425543],
[0.02322888, 0.65254104, 4.597586, 1.4008591, -0.45962948],
[-0.94259655, 1.7607443, 2.3942919, 0.72459084, -1.0425543],
],
[
[0.01904473, 0.6017947, 4.61797, 1.3143052, -0.33733633],
[0.04458817, 0.12540922, 2.4217446, 3.548073, 0.06769713],
[0.01904476, 0.6017947, 4.61797, 1.3143052, -0.33733624],
[0., 0., 0., 0., 0.],
],
],
rtol=1e-6,
atol=1e-6)
  def test_degenerate_architecture(self):
    """Degenerate configs (all-dense encoder, all-sparse decoder) still work.

    The encoder requests 0 sparse layers out of 2, so every encoder layer
    should be dense; the decoder requests 2 sparse layers out of 2, so every
    decoder layer should carry expert (MoE) MLPs.
    """
    batch_size = 2
    seq_length = 4
    num_embeddings = 64
    # num_sparse_layers=0: the "sparse" encoder degenerates to a dense stack.
    encoder_factory = functools.partial(
        moe_architecture.SparseEncoder,
        num_layers=2,
        num_sparse_layers=0,
        sparse_layout=LayerLayout.MIXED,
        sparse_layer_factory=_make_sparse_encoder_layer,
        layer_factory=_make_dense_encoder_layer,
        input_dropout_factory=_make_dropout,
        output_dropout_factory=_make_dropout,
        layer_norm_factory=_make_layer_norm,
    )
    # num_sparse_layers == num_layers: the decoder is sparse everywhere.
    decoder_factory = functools.partial(
        moe_architecture.SparseDecoder,
        num_layers=2,
        num_sparse_layers=2,
        sparse_layout=LayerLayout.MIDDLE,
        sparse_layer_factory=_make_sparse_decoder_layer,
        layer_factory=_make_dense_decoder_layer,
        dropout_factory=_make_dropout,
        layer_norm_factory=_make_layer_norm,
    )
    transformer = t5_architecture.EncoderDecoder(
        shared_token_embedder_factory=functools.partial(
            _make_token_emb, num_embeddings=num_embeddings),
        encoder_factory=encoder_factory,
        decoder_factory=decoder_factory,
    )
    encoder_input_tokens = np.zeros((batch_size, seq_length), dtype=np.float32)
    decoder_input_tokens = np.zeros((batch_size, seq_length), dtype=np.float32)
    decoder_target_tokens = np.zeros((batch_size, seq_length), dtype=np.float32)
    output, variables = transformer.init_with_output(
        {
            'params': random.PRNGKey(0),
            'dropout': random.PRNGKey(0)
        },
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
    )
    self.assertEqual(output.shape, (batch_size, seq_length, num_embeddings))
    # For degenerate cases, we only have one underlying Encoder.
    encoder = variables['params']['encoder']
    for layer in ['layers_0', 'layers_1']:
      encoder_layer = encoder[layer]
      # All encoder layers should have regular attention.
      self.assertIn('attention', encoder_layer)
      # And dense MLPs.
      self.assertIn('wi', encoder_layer['mlp']) # Dense
      self.assertIn('wo', encoder_layer['mlp']) # Dense
      self.assertNotIn('expert', encoder_layer['mlp']) # Sparse
    # For degenerate cases, we only have one underlying Decoder.
    decoder = variables['params']['decoder']
    for layer in ['layers_0', 'layers_1']:
      decoder_layer = decoder[layer]
      # All decoder layers should have self-attention.
      self.assertIn('self_attention', decoder_layer)
      # Regular cross encoder-decoder attention.
      self.assertIn('encoder_decoder_attention', decoder_layer)
      # And sparse MLPs.
      self.assertIn('expert', decoder_layer['mlp']) # Sparse
      self.assertNotIn('wi', decoder_layer['mlp']) # Dense
      self.assertNotIn('wo', decoder_layer['mlp']) # Dense
  def test_scan_architecture(self):
    """Checks output shape, scanned parameter shapes, and sown MoE metrics.

    With scan, dense and sparse layers are grouped into repeated blocks
    ("subblock_0", "subblock_1", ...) and each parameter gains an extra scan
    axis (size = number of scanned blocks; 2 for the encoder, 3 for the
    decoder here).
    """
    batch_size = 2
    seq_length = 4
    num_embeddings = 9
    encoder_factory = functools.partial(
        moe_architecture.SparseEncoder,
        num_layers=4,
        num_sparse_layers=2,
        sparse_layout=LayerLayout.MIXED,
        # Individual layers in MoE models are never scanned; only blocks.
        sparse_layer_factory=functools.partial(
            _make_sparse_encoder_layer, scanned=False),
        layer_factory=functools.partial(
            _make_dense_encoder_layer, scanned=False),
        input_dropout_factory=_make_dropout,
        output_dropout_factory=_make_dropout,
        layer_norm_factory=_make_layer_norm,
    )
    decoder_factory = functools.partial(
        moe_architecture.SparseDecoder,
        num_layers=3,
        num_sparse_layers=3,
        sparse_layout=LayerLayout.MIXED,
        # Individual layers in MoE models are never scanned; only blocks.
        sparse_layer_factory=functools.partial(
            _make_sparse_decoder_layer, scanned=False),
        layer_factory=functools.partial(
            _make_dense_decoder_layer, scanned=False),
        dropout_factory=_make_dropout,
        layer_norm_factory=_make_layer_norm,
    )
    scanned_transformer = t5_architecture.EncoderDecoder(
        scan_layers=True,
        shared_token_embedder_factory=functools.partial(
            _make_token_emb, num_embeddings=num_embeddings),
        encoder_factory=functools.partial(encoder_factory, scan_layers=True),
        decoder_factory=functools.partial(decoder_factory, scan_layers=True),
    )
    encoder_input_tokens = np.zeros((batch_size, seq_length), dtype=np.float32)
    decoder_input_tokens = np.zeros((batch_size, seq_length), dtype=np.float32)
    decoder_target_tokens = np.zeros((batch_size, seq_length), dtype=np.float32)
    output, variables = scanned_transformer.init_with_output(
        {
            'params': random.PRNGKey(0),
            'dropout': random.PRNGKey(0)
        },
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False)
    with self.subTest(name='init_with_output'):
      self.assertEqual(output.shape, (batch_size, seq_length, num_embeddings))
      # Verify model keys and param shapes for scan.
      self.assertEqual(
          jax.tree_map(jnp.shape, variables['params']['encoder']),
          flax.core.FrozenDict({
              'encoder': {
                  'subblock_0': {
                      'attention': {
                          'key': {
                              # (emb_dim, scan_dim, qkv_features // num_heads)
                              'kernel': (13, 2, 2),
                          },
                          'out': {
                              # (qkv_features, scan_dim, emb_dim)
                              'kernel': (16, 2, 13),
                          },
                          'query': {
                              'kernel': (13, 2, 16),
                          },
                          'value': {
                              'kernel': (13, 2, 2),
                          },
                      },
                      'mlp': {
                          'wi': {
                              # (emb_dim, scan_dim, dense MLP dim)
                              'kernel': (13, 2, 8),
                          },
                          'wo': {
                              'kernel': (8, 2, 13),
                          },
                      },
                      'pre_attention_layer_norm': {
                          'scale': (13, 2), # (emb_dim, scan_dim)
                      },
                      'pre_mlp_layer_norm': {
                          'scale': (13, 2),
                      },
                      'relpos_bias': {
                          'rel_embedding': (8, 2, 32),
                      },
                  },
                  'subblock_1': {
                      'attention': {
                          'key': {
                              'kernel': (13, 2, 2),
                          },
                          'out': {
                              'kernel': (16, 2, 13),
                          },
                          'query': {
                              'kernel': (13, 2, 16),
                          },
                          'value': {
                              'kernel': (13, 2, 2),
                          },
                      },
                      'mlp': {
                          'expert': {
                              'wi': {
                                  'bias': (4, 2, 16),
                                  # (num_experts, scan_dim, emb_dim, MoE MLP)
                                  'kernel': (4, 2, 13, 16),
                              },
                              'wo': {
                                  'bias': (4, 2, 13),
                                  'kernel': (4, 2, 16, 13),
                              },
                          },
                          'router': {
                              'router_weights': {
                                  'w': {
                                      'bias': (4, 2),
                                      # (emb_dim, scan_dim, num_experts)
                                      'kernel': (13, 2, 4),
                                  },
                              },
                          },
                      },
                      'pre_attention_layer_norm': {
                          'scale': (13, 2),
                      },
                      'pre_mlp_layer_norm': {
                          'scale': (13, 2),
                      },
                      'relpos_bias': {
                          'rel_embedding': (8, 2, 32),
                      },
                  },
              },
              'encoder_norm': {
                  'scale': (13,),
              },
          }))
      self.assertEqual(
          jax.tree_map(jnp.shape, variables['params']['decoder']),
          flax.core.FrozenDict({
              'decoder': {
                  'subblock_0': {
                      'encoder_decoder_attention': {
                          'key': {
                              # (emb_dim, scan_dim, qkv_features // num_heads)
                              'kernel': (13, 3, 2),
                          },
                          'out': {
                              # (qkv_features, scan_dim, emb_dim)
                              'kernel': (16, 3, 13),
                          },
                          'query': {
                              'kernel': (13, 3, 16),
                          },
                          'value': {
                              'kernel': (13, 3, 2),
                          },
                      },
                      'mlp': {
                          'expert': {
                              'wi': {
                                  'bias': (4, 3, 16),
                                  # (num_experts, scan_dim, emb_dim, MoE MLP)
                                  'kernel': (4, 3, 13, 16),
                              },
                              'wo': {
                                  'bias': (4, 3, 13),
                                  'kernel': (4, 3, 16, 13),
                              },
                          },
                          'router': {
                              'router_weights': {
                                  'w': {
                                      'bias': (4, 3),
                                      # (emb_dim, scan_dim, num_experts)
                                      'kernel': (13, 3, 4),
                                  },
                              },
                          },
                      },
                      'pre_cross_attention_layer_norm': {
                          # (emb_dim, scan_dim)
                          'scale': (13, 3),
                      },
                      'pre_mlp_layer_norm': {
                          'scale': (13, 3),
                      },
                      'pre_self_attention_layer_norm': {
                          'scale': (13, 3),
                      },
                      'relpos_bias': {
                          'rel_embedding': (8, 3, 32),
                      },
                      'self_attention': {
                          'key': {
                              'kernel': (13, 3, 2),
                          },
                          'out': {
                              # (qkv_features, scan_dim, emb_dim)
                              'kernel': (16, 3, 13),
                          },
                          'query': {
                              'kernel': (13, 3, 16),
                          },
                          'value': {
                              'kernel': (13, 3, 2),
                          },
                      },
                  },
              },
              'decoder_norm': {
                  'scale': (13,),
              },
          }))
    with self.subTest(name='sow_intermediates'):
      _, modified_variables = scanned_transformer.apply(
          {'params': variables['params']},
          encoder_input_tokens=encoder_input_tokens,
          decoder_input_tokens=decoder_input_tokens,
          decoder_target_tokens=decoder_target_tokens,
          enable_dropout=False,
          mutable=['intermediates'])
      # Expert metrics are sown under the sparse sublayer of each scan block.
      intermediates = modified_variables['intermediates']
      for metric in MOE_METRICS:
        self.assertIn(metric,
                      intermediates['encoder']['encoder']['subblock_1']['mlp'])
        self.assertIn(metric,
                      intermediates['decoder']['decoder']['subblock_0']['mlp'])
  @parameterized.named_parameters(
      dict(
          testcase_name=LayerLayout.BOTTOM.name,
          sparse_layout=LayerLayout.BOTTOM,
          num_sparse_layers=4,
          expected_sparse_layers=[0, 1, 2, 3]),
      dict(
          testcase_name=LayerLayout.MIDDLE.name,
          sparse_layout=LayerLayout.MIDDLE,
          num_sparse_layers=2,
          expected_sparse_layers=[5, 6]),
      dict(
          testcase_name=LayerLayout.MIXED.name,
          sparse_layout=LayerLayout.MIXED,
          num_sparse_layers=3,
          expected_sparse_layers=[3, 7, 11]),
      dict(
          testcase_name=LayerLayout.TOP.name,
          sparse_layout=LayerLayout.TOP,
          num_sparse_layers=1,
          expected_sparse_layers=[11]))
  def test_sparse_layouts(self, sparse_layout: LayerLayout,
                          num_sparse_layers: int,
                          expected_sparse_layers: Sequence[int]):
    """Checks _is_sparse_layer against hand-computed positions per layout."""
    num_layers = 12
    # Every layer not listed as sparse must be classified as dense.
    expected_dense_layers = set(range(num_layers)) - set(expected_sparse_layers)
    for layer in expected_sparse_layers:
      self.assertTrue(
          moe_architecture._is_sparse_layer(layer, num_layers,
                                            num_sparse_layers, sparse_layout))
    for layer in expected_dense_layers:
      self.assertFalse(
          moe_architecture._is_sparse_layer(layer, num_layers,
                                            num_sparse_layers, sparse_layout))
def test_scan_block_factory(self):
scan_block_factory = functools.partial(
moe_architecture._scan_block_factory,
dense_layer_factory=_make_dense_encoder_layer,
sparse_layer_factory=_make_sparse_encoder_layer)
with self.subTest(name='degenerate_dense'):
degenerate_dense_block = scan_block_factory(
num_layers=12, num_sparse_layers=0, sparse_layout=LayerLayout.MIXED)
self.assertLen(degenerate_dense_block, 1)
self.assertIsInstance(degenerate_dense_block[0], EncoderLayer)
with self.subTest(name='degenerate_sparse'):
degenerate_sparse_block = scan_block_factory(
num_layers=12, num_sparse_layers=12, sparse_layout=LayerLayout.TOP)
self.assertLen(degenerate_sparse_block, 1)
self.assertIsInstance(degenerate_sparse_block[0], SparseEncoderLayer)
with self.subTest(name='unsupported_layouts'):
for layer_layout in [
LayerLayout.BOTTOM, LayerLayout.MIDDLE, LayerLayout.TOP
]:
with self.assertRaisesRegex(ValueError,
'Scan is only supported for MIXED'):
scan_block_factory(
num_layers=12, num_sparse_layers=6, sparse_layout=layer_layout)
with self.subTest(name='mixed_layouts'):
block = scan_block_factory(
num_layers=12, num_sparse_layers=3, sparse_layout=LayerLayout.MIXED)
self.assertLen(block, 4)
for sublock in range(3):
self.assertIsInstance(block[sublock], EncoderLayer)
self.assertIsInstance(block[-1], SparseEncoderLayer)
def test_num_scan_blocks(self):
self.assertEqual(
moe_architecture._num_scan_blocks(
num_layers=24, num_sparse_layers=3,
sparse_layout=LayerLayout.MIXED), 3)
if __name__ == '__main__':
  # Run the test suite when this file is executed directly.
  absltest.main()
| 30,979 | 38.414758 | 108 | py |
flaxformer | flaxformer-main/flaxformer/architectures/moe/routing_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for routing."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import numpy as jnp
import numpy as np
from flaxformer.architectures.moe import routing
class RoutingTest(parameterized.TestCase):
def test_load_balancing_loss(self):
num_tokens = 5
num_experts = 2
num_selected_experts = 1
rng = jax.random.PRNGKey(0)
router_probs = jax.random.uniform(
rng, (num_tokens, num_experts), minval=0, maxval=1)
expert_indices = jax.random.randint(
rng, (num_tokens, num_selected_experts), minval=0, maxval=2)
self.assertEqual(
routing._load_balancing_loss(router_probs, expert_indices), 0.8741045)
  def test_tokens_choose_one_expert_scatter_router(self):
    """Top-1 (Switch) scatter routing with batch prioritized routing."""
    num_groups = 2
    tokens_per_group = 4
    hidden_dim = 4
    expert_capacity = 2
    num_experts = 4
    num_selected_experts = 1 # Switch routing case
    rng = jax.random.PRNGKey(0)
    token_inputs = jax.random.uniform(
        rng, (num_groups, tokens_per_group, hidden_dim), minval=0, maxval=1)
    router_indices, _ = routing.TokensChooseScatterRouter(
        router_weights=routing.RouterWeights(name='router_weights'),
        num_selected_experts=num_selected_experts,
        jitter_noise=0.01,
        batch_prioritized_routing=True,
        ignore_padding_tokens=False,
        dtype=jnp.float32).init_with_output(
            {
                'params': jax.random.PRNGKey(0),
                'jitter': jax.random.PRNGKey(0)
            }, token_inputs, num_experts, expert_capacity)
    # Each innermost pair is (expert index, position in that expert's buffer).
    expected_indices = jnp.array([
        [
            [[2, 1]],
            [[3, 0]],
            [[2, 0]],
            [[2, 2]],
        ],
        [
            [[3, 0]],
            [[2, 1]],
            [[3, 1]],
            [[2, 0]],
        ],
    ],
                                 dtype=jnp.int32)
    np.testing.assert_allclose(router_indices.dispatch_indices,
                               expected_indices)
    expected_weights = jnp.array([
        [[0.25390625], [0.2578125], [0.25585938], [0.]],
        [[0.2578125], [0.25390625], [0.25585938], [0.25585938]],
    ],
                                 dtype=jnp.float32)
    np.testing.assert_allclose(router_indices.combine_weights, expected_weights)
    self.assertEqual(router_indices.auxiliary_loss, 1.0166016)
    self.assertEqual(router_indices.router_z_loss, 1.8994141)
  def test_tokens_choose_one_expert_scatter_router_no_bpr(self):
    """Top-1 scatter routing without batch prioritized routing (BPR).

    Same setup as the BPR variant above; only the dispatch buffer positions
    differ, while combine weights and losses are identical.
    """
    num_groups = 2
    tokens_per_group = 4
    hidden_dim = 4
    expert_capacity = 2
    num_experts = 4
    num_selected_experts = 1 # Switch routing case
    rng = jax.random.PRNGKey(0)
    token_inputs = jax.random.uniform(
        rng, (num_groups, tokens_per_group, hidden_dim), minval=0, maxval=1)
    router_indices, _ = routing.TokensChooseScatterRouter(
        router_weights=routing.RouterWeights(name='router_weights'),
        num_selected_experts=num_selected_experts,
        jitter_noise=0.01,
        batch_prioritized_routing=False,
        ignore_padding_tokens=False,
        dtype=jnp.float32).init_with_output(
            {
                'params': jax.random.PRNGKey(0),
                'jitter': jax.random.PRNGKey(0)
            }, token_inputs, num_experts, expert_capacity)
    expected_indices = jnp.array([
        [
            [[2, 0]],
            [[3, 0]],
            [[2, 1]],
            [[2, 2]],
        ],
        [
            [[3, 0]],
            [[2, 0]],
            [[3, 1]],
            [[2, 1]],
        ],
    ],
                                 dtype=jnp.int32)
    np.testing.assert_allclose(router_indices.dispatch_indices,
                               expected_indices)
    expected_weights = jnp.array([
        [[0.25390625], [0.2578125], [0.25585938], [0.]],
        [[0.2578125], [0.25390625], [0.25585938], [0.25585938]],
    ],
                                 dtype=jnp.float32)
    np.testing.assert_allclose(router_indices.combine_weights, expected_weights)
    self.assertEqual(router_indices.auxiliary_loss, 1.0166016)
    self.assertEqual(router_indices.router_z_loss, 1.8994141)
  def test_tokens_choose_multiple_experts_scatter_router(self):
    """Top-2 scatter routing with batch prioritized routing."""
    num_groups = 2
    tokens_per_group = 4
    hidden_dim = 4
    expert_capacity = 2
    num_experts = 4
    num_selected_experts = 2
    rng = jax.random.PRNGKey(0)
    token_inputs = jax.random.uniform(
        rng, (num_groups, tokens_per_group, hidden_dim), minval=0, maxval=1)
    router_indices, _ = routing.TokensChooseScatterRouter(
        router_weights=routing.RouterWeights(name='router_weights'),
        num_selected_experts=num_selected_experts,
        jitter_noise=0.01,
        batch_prioritized_routing=True,
        ignore_padding_tokens=False,
        dtype=jnp.float32).init_with_output(
            {
                'params': jax.random.PRNGKey(0),
                'jitter': jax.random.PRNGKey(0)
            }, token_inputs, num_experts, expert_capacity)
    # Two (expert, buffer position) pairs per token (one per selected expert).
    expected_indices = jnp.array([
        [
            [[2, 1], [3, 2]],
            [[3, 0], [2, 3]],
            [[2, 0], [3, 1]],
            [[2, 2], [0, 0]],
        ],
        [
            [[3, 0], [2, 2]],
            [[2, 1], [3, 3]],
            [[3, 1], [2, 3]],
            [[2, 0], [3, 2]],
        ],
    ],
                                 dtype=jnp.int32)
    np.testing.assert_allclose(router_indices.dispatch_indices,
                               expected_indices)
    expected_weights = jnp.array([
        [
            [0.25390625, 0.],
            [0.2578125, 0.],
            [0.25585938, 0.25390625],
            [0., 0.25],
        ],
        [
            [0.2578125, 0.],
            [0.25390625, 0.],
            [0.25585938, 0.],
            [0.25585938, 0.],
        ],
    ],
                                 dtype=jnp.float32)
    np.testing.assert_allclose(router_indices.combine_weights, expected_weights)
    self.assertEqual(router_indices.auxiliary_loss, 2.0289307)
    self.assertEqual(router_indices.router_z_loss, 1.8994141)
  def test_tokens_choose_one_expert_mask_router(self):
    """Top-1 masked routing; over-capacity tokens get an all-False mask."""
    num_groups = 2
    tokens_per_group = 3
    hidden_dim = 4
    num_experts = 2
    num_selected_experts = 1 # Switch routing case
    expert_capacity = 1 # Total capacity = 2*2*1 = 4 < num_tokens
    rng = jax.random.PRNGKey(0)
    token_inputs = jax.random.uniform(
        rng, (num_groups, tokens_per_group, hidden_dim), minval=0, maxval=1)
    router_mask, _ = routing.TokensChooseMaskedRouter(
        router_weights=routing.RouterWeights(name='router_weights'),
        num_selected_experts=num_selected_experts,
        jitter_noise=0.,
        batch_prioritized_routing=True,
        ignore_padding_tokens=False,
        dtype=jnp.float32).init_with_output(
            jax.random.PRNGKey(0), token_inputs, num_experts, expert_capacity)
    # The third token of each group is dropped (capacity exhausted).
    expected_mask = jnp.array([
        [
            [[True], [False]],
            [[False], [True]],
            [[False], [False]],
        ],
        [
            [[True], [False]],
            [[False], [True]],
            [[False], [False]],
        ],
    ],
                              dtype=jnp.bool_)
    np.testing.assert_allclose(router_mask.dispatch_mask, expected_mask)
    expected_weights = jnp.array([
        [
            [[0.5078125], [0.]],
            [[0.], [0.50390625]],
            [[0.], [0.]],
        ],
        [
            [[0.50390625], [0.]],
            [[0.], [0.5078125]],
            [[0.], [0.]],
        ],
    ],
                                 dtype=jnp.float32)
    np.testing.assert_allclose(router_mask.combine_array, expected_weights)
    self.assertEqual(router_mask.auxiliary_loss, 1.0006511)
    self.assertEqual(router_mask.router_z_loss, 0.47721353)
  def test_routers_ignore_padding(self):
    """Routers with ignore_padding_tokens=True give padded tokens no weight.

    Inputs are zeroed out with a random binary padding mask; all three router
    variants should then dispatch only non-padding tokens and assign zero
    combine weight everywhere else.
    """
    num_groups = 2
    tokens_per_group = 6
    hidden_dim = 2
    num_experts = 2
    num_selected_experts = 2
    expert_capacity = 1 # Total capacity = 2*2*1 = 4 < num_tokens
    rng = jax.random.PRNGKey(0)
    token_inputs = jax.random.uniform(
        rng, (num_groups, tokens_per_group, hidden_dim), minval=0, maxval=1)
    # Simulate masked inputs.
    padding_mask = jax.random.randint(
        rng, (num_groups, tokens_per_group, 1), minval=0, maxval=2)
    token_inputs *= padding_mask
    router_weights = routing.RouterWeights(name='router_weights')
    with self.subTest(name='tokens_choose_masked_router'):
      router_mask, _ = routing.TokensChooseMaskedRouter(
          router_weights=router_weights,
          num_selected_experts=num_selected_experts,
          jitter_noise=0.,
          batch_prioritized_routing=True,
          ignore_padding_tokens=True,
          dtype=jnp.float32).init_with_output(
              jax.random.PRNGKey(0), token_inputs, num_experts, expert_capacity)
      expected_mask = jnp.array([
          [
              [[False], [False]],
              [[True], [True]],
              [[False], [False]],
              [[False], [False]],
              [[False], [False]],
              [[False], [False]],
          ],
          [
              [[False], [False]],
              [[True], [True]],
              [[False], [False]],
              [[False], [False]],
              [[False], [False]],
              [[False], [False]],
          ],
      ],
                                dtype=jnp.bool_)
      np.testing.assert_allclose(router_mask.dispatch_mask, expected_mask)
      expected_weights = jnp.array([
          [
              [[0.], [0.]],
              [[0.50390625], [0.49804688]],
              [[0.0], [0.]],
              [[0.0], [0.]],
              [[0.0], [0.]],
              [[0.0], [0.]],
          ],
          [
              [[0.], [0.]],
              [[0.50390625], [0.49414062]],
              [[0.], [0.]],
              [[0.], [0.]],
              [[0.], [0.]],
              [[0.], [0.]],
          ],
      ],
                                   dtype=jnp.float32)
      np.testing.assert_allclose(router_mask.combine_array, expected_weights)
      self.assertEqual(router_mask.auxiliary_loss, 0.6951497)
      self.assertEqual(router_mask.router_z_loss, 0.48541257)
    with self.subTest(name='tokens_choose_scatter_router'):
      router_indices, _ = routing.TokensChooseScatterRouter(
          router_weights=router_weights,
          num_selected_experts=num_selected_experts,
          jitter_noise=0.,
          batch_prioritized_routing=True,
          ignore_padding_tokens=True,
          dtype=jnp.float32).init_with_output(
              jax.random.PRNGKey(0), token_inputs, num_experts, expert_capacity)
      expected_indices = jnp.array([
          [
              [[0, 4], [1, 4]],
              [[0, 0], [1, 0]],
              [[0, 1], [1, 1]],
              [[0, 5], [1, 5]],
              [[0, 2], [1, 2]],
              [[0, 3], [1, 3]],
          ],
          [
              [[0, 3], [1, 3]],
              [[0, 0], [1, 0]],
              [[0, 1], [1, 1]],
              [[0, 4], [1, 4]],
              [[0, 2], [1, 2]],
              [[0, 5], [1, 5]],
          ],
      ],
                                   dtype=jnp.int32)
      np.testing.assert_allclose(router_indices.dispatch_indices,
                                 expected_indices)
      expected_weights = jnp.array([
          [
              [0., 0.],
              [0.50390625, 0.49804688],
              [0., 0.],
              [0., 0.],
              [0., 0.],
              [0., 0.],
          ],
          [
              [0., 0.],
              [0.50390625, 0.49414062],
              [0., 0.],
              [0., 0.],
              [0., 0.],
              [0., 0.],
          ],
      ],
                                   dtype=jnp.float32)
      np.testing.assert_allclose(router_indices.combine_weights,
                                 expected_weights)
      self.assertEqual(router_indices.auxiliary_loss, 1.1676432)
      self.assertEqual(router_indices.router_z_loss, 0.48541257)
    with self.subTest(name='experts_choose_masked_router'):
      router_mask, _ = routing.ExpertsChooseMaskedRouter(
          router_weights=router_weights,
          jitter_noise=0.,
          ignore_padding_tokens=True,
          dtype=jnp.float32).init_with_output(
              jax.random.PRNGKey(0), token_inputs, num_experts, expert_capacity)
      expected_mask = jnp.array([
          [
              [[0], [0]],
              [[1], [1]],
              [[0], [0]],
              [[0], [0]],
              [[0], [0]],
              [[0], [0]],
          ],
          [
              [[0], [0]],
              [[1], [0]],
              [[0], [1]],
              [[0], [0]],
              [[0], [0]],
              [[0], [0]],
          ],
      ],
                                dtype=jnp.bool_)
      np.testing.assert_allclose(router_mask.dispatch_mask, expected_mask)
      expected_weights = jnp.array([
          [
              [[0.], [0.]],
              [[0.50390625], [0.49804688]],
              [[0.0], [0.]],
              [[0.0], [0.]],
              [[0.0], [0.]],
              [[0.0], [0.]],
          ],
          [
              [[0.], [0.]],
              [[0.50390625], [0.]],
              [[0.], [0.49804688]],
              [[0.], [0.]],
              [[0.], [0.]],
              [[0.], [0.]],
          ],
      ],
                                   dtype=jnp.float32)
      np.testing.assert_allclose(router_mask.combine_array, expected_weights)
      # Experts-choose routing has no token-side balancing loss.
      self.assertEqual(router_mask.auxiliary_loss, 0.)
      self.assertEqual(router_mask.router_z_loss, 0.48541257)
  def test_tokens_choose_one_expert_mask_router_no_bpr(self):
    """Top-1 masked routing without batch prioritized routing.

    Same inputs as the BPR variant; for this configuration the resulting
    dispatch mask, weights, and losses coincide with the BPR case.
    """
    num_groups = 2
    tokens_per_group = 3
    hidden_dim = 4
    num_experts = 2
    num_selected_experts = 1 # Switch routing case
    expert_capacity = 1 # Total capacity = 2*2*1 = 4 < num_tokens
    rng = jax.random.PRNGKey(0)
    token_inputs = jax.random.uniform(
        rng, (num_groups, tokens_per_group, hidden_dim), minval=0, maxval=1)
    router_mask, _ = routing.TokensChooseMaskedRouter(
        router_weights=routing.RouterWeights(name='router_weights'),
        num_selected_experts=num_selected_experts,
        jitter_noise=0.,
        batch_prioritized_routing=False,
        ignore_padding_tokens=False,
        dtype=jnp.float32).init_with_output(
            jax.random.PRNGKey(0), token_inputs, num_experts, expert_capacity)
    expected_mask = jnp.array([
        [
            [[True], [False]],
            [[False], [True]],
            [[False], [False]],
        ],
        [
            [[True], [False]],
            [[False], [True]],
            [[False], [False]],
        ],
    ],
                              dtype=jnp.bool_)
    np.testing.assert_allclose(router_mask.dispatch_mask, expected_mask)
    expected_weights = jnp.array([
        [
            [[0.5078125], [0.]],
            [[0.], [0.50390625]],
            [[0.], [0.]],
        ],
        [
            [[0.50390625], [0.]],
            [[0.], [0.5078125]],
            [[0.], [0.]],
        ],
    ],
                                 dtype=jnp.float32)
    np.testing.assert_allclose(router_mask.combine_array, expected_weights)
    self.assertEqual(router_mask.auxiliary_loss, 1.0006511)
    self.assertEqual(router_mask.router_z_loss, 0.47721353)
  def test_tokens_choose_multiple_experts_mask_router(self):
    """Top-2 masked routing with per-expert capacity of 1."""
    num_groups = 2
    tokens_per_group = 4
    hidden_dim = 3
    num_experts = 3
    num_selected_experts = 2
    expert_capacity = 1
    rng = jax.random.PRNGKey(0)
    token_inputs = jax.random.uniform(
        rng, (num_groups, tokens_per_group, hidden_dim), minval=0, maxval=1)
    router_mask, _ = routing.TokensChooseMaskedRouter(
        router_weights=routing.RouterWeights(name='router_weights'),
        num_selected_experts=num_selected_experts,
        jitter_noise=0.01,
        batch_prioritized_routing=True,
        ignore_padding_tokens=False,
        dtype=jnp.float32).init_with_output(
            {
                'params': jax.random.PRNGKey(0),
                'jitter': jax.random.PRNGKey(0)
            }, token_inputs, num_experts, expert_capacity)
    # Only the earliest tokens fit; later tokens are dropped entirely.
    expected_mask = jnp.array([
        [
            [[True], [False], [True]],
            [[False], [True], [False]],
            [[False], [False], [False]],
            [[False], [False], [False]],
        ],
        [
            [[True], [True], [False]],
            [[False], [False], [True]],
            [[False], [False], [False]],
            [[False], [False], [False]],
        ],
    ],
                              dtype=jnp.bool_)
    np.testing.assert_allclose(router_mask.dispatch_mask, expected_mask)
    expected_weights = jnp.array([
        [
            [[0.33203125], [0.], [0.3359375]],
            [[0.], [0.3359375], [0.]],
            [[0.], [0.], [0.]],
            [[0.], [0.], [0.]],
        ],
        [
            [[0.33007812], [0.34179688], [0.]],
            [[0.], [0.], [0.3359375]],
            [[0.], [0.], [0.]],
            [[0.], [0.], [0.]],
        ],
    ],
                                 dtype=jnp.float32)
    np.testing.assert_allclose(router_mask.combine_array, expected_weights)
    self.assertEqual(router_mask.auxiliary_loss, 2.001709)
    self.assertEqual(router_mask.router_z_loss, 1.2714844)
  def test_experts_choose_mask_router(self):
    """Experts-choose-tokens masked routing; auxiliary loss is always zero."""
    num_groups = 2
    tokens_per_group = 4
    hidden_dim = 3
    num_experts = 2
    expert_capacity = 2
    rng = jax.random.PRNGKey(0)
    token_inputs = jax.random.uniform(
        rng, (num_groups, tokens_per_group, hidden_dim), minval=0, maxval=1)
    router_mask, _ = routing.ExpertsChooseMaskedRouter(
        router_weights=routing.RouterWeights(name='router_weights'),
        jitter_noise=0.,
        dtype=jnp.float32,
        ignore_padding_tokens=False).init_with_output(
            jax.random.PRNGKey(0), token_inputs, num_experts, expert_capacity)
    expected_mask = jnp.array([
        [
            [[0, 1], [1, 0]],
            [[0, 0], [0, 1]],
            [[1, 0], [0, 0]],
            [[0, 0], [0, 0]],
        ],
        [
            [[1, 0], [0, 0]],
            [[0, 1], [0, 0]],
            [[0, 0], [1, 0]],
            [[0, 0], [0, 1]],
        ],
    ],
                              dtype=jnp.int32)
    np.testing.assert_allclose(router_mask.dispatch_mask, expected_mask)
    expected_weights = jnp.array([
        [
            [[0., 0.49609375], [0.50390625, 0.]],
            [[0., 0.], [0., 0.50390625]],
            [[0.49804688, 0.], [0., 0.]],
            [[0., 0.], [0., 0.]],
        ],
        [
            [[0.49804688, 0.], [0., 0.]],
            [[0., 0.49414062], [0., 0.]],
            [[0., 0.], [0.5078125, 0.]],
            [[0., 0.], [0., 0.5078125]],
        ],
    ],
                                 dtype=jnp.float32)
    np.testing.assert_allclose(router_mask.combine_array, expected_weights)
    # Auxiliary loss is always 0. for experts choose tokens routing.
    self.assertEqual(router_mask.auxiliary_loss, 0.)
    self.assertEqual(router_mask.router_z_loss, 0.5041504)
  def test_scatter_and_mask_dispatch_equal(self):
    """Masked and scatter token-choose routers must dispatch identically.

    Both routers share the same weights and PRNG keys; after reformatting
    their outputs into a common representation, dispatch decisions, combine
    weights, and losses should match exactly.
    """
    num_groups = 2
    tokens_per_group = 4
    hidden_dim = 3
    num_experts = 3
    num_selected_experts = 1
    expert_capacity = 2
    rng = jax.random.PRNGKey(0)
    router_weights = routing.RouterWeights(name='router_weights')
    token_inputs = jax.random.uniform(
        rng, (num_groups, tokens_per_group, hidden_dim), minval=0, maxval=1)
    router_mask, _ = routing.TokensChooseMaskedRouter(
        router_weights,
        num_selected_experts=num_selected_experts,
        jitter_noise=0.,
        batch_prioritized_routing=True,
        dtype=jnp.float32,
        ignore_padding_tokens=False).init_with_output(
            jax.random.PRNGKey(0), token_inputs, num_experts, expert_capacity)
    # Manipulate masked router dispatch and combine arrays to match format of
    # scatter router output.
    # Ignore capacity. Shape: [NUM_GROUPS, TOKENS_PER_GROUP, NUM_EXPERTS]
    masked_router_says_dispatched = jnp.max(router_mask.dispatch_mask, axis=-1)
    # Ignore particular expert and capacity for combine array.
    # Shape: [NUM_GROUPS, TOKENS_PER_GROUP]
    masked_router_combine_array = jnp.max(
        router_mask.combine_array, axis=(-1, -2))
    router_indices, _ = routing.TokensChooseScatterRouter(
        router_weights,
        num_selected_experts=num_selected_experts,
        jitter_noise=0.,
        batch_prioritized_routing=True,
        dtype=jnp.float32,
        ignore_padding_tokens=False).init_with_output(
            jax.random.PRNGKey(0), token_inputs, num_experts, expert_capacity)
    # Manipulate scatter router dispatch and combine indices to match format of
    # masked router output.
    # Shape: [NUM_GROUPS, TOKENS_PER_GROUP, NUM_SELECTED_EXPERTS]
    successfully_routed = router_indices.dispatch_indices[...,
                                                          1] < expert_capacity
    # Shape: [NUM_GROUPS, TOKENS_PER_GROUP, NUM_EXPERTS]
    scatter_router_says_dispatched = successfully_routed * jax.nn.one_hot(
        router_indices.dispatch_indices[..., 0].squeeze(axis=-1), num_experts)
    # Remove trivial selected expert axis.
    # Shape: [NUM_GROUPS, TOKENS_PER_GROUP].
    scatter_router_combine_array = router_indices.combine_weights.squeeze(
        axis=-1)
    np.testing.assert_allclose(masked_router_says_dispatched,
                               scatter_router_says_dispatched)
    np.testing.assert_allclose(masked_router_combine_array,
                               scatter_router_combine_array)
    np.testing.assert_allclose(router_mask.auxiliary_loss,
                               router_indices.auxiliary_loss)
    np.testing.assert_allclose(router_mask.router_z_loss,
                               router_indices.router_z_loss)
def test_router_z_loss(self):
num_groups = 2
num_tokens = 6
num_experts = 4
rng = jax.random.PRNGKey(0)
router_logits = jax.random.uniform(
rng, (num_groups, num_tokens, num_experts), minval=-5, maxval=5)
self.assertEqual(routing._router_z_loss(router_logits), 13.786719)
if __name__ == '__main__':
  # Run the test suite when this file is executed directly.
  absltest.main()
| 22,824 | 31.889049 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/moe/moe_parallel_fused_decoder.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sparse parallel Transformer decoder layer with fused parameters."""
from typing import Callable, Optional, Tuple
from flax import linen as nn
from flax.linen import partitioning as flax_partitioning
from jax import lax
import jax.numpy as jnp
from flaxformer.architectures.common import param_remapping
from flaxformer.components import dense
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
# pylint: disable=not-callable
# pytype: disable=not-callable
class SparseParallelFusedDecoderLayer(
nn.Module, param_remapping.ParameterRemappable
):
"""Sparse parallel Transformer decoder layer with fused parameters.
The projection matrices from the self-attention and MLP models are fused into
three kernels: Q-Wi, KV, and O-Wo, which are applied using `q_wi_fused`,
`kv_fused`, and `o_wo_fused`, respectively. Any of these projections can be
either sparse (MoE) or dense. The attention dot product and MLP activations
are applied outside these projections.
Note that, as for the "regular" SparseDecoderLayer, individual
SparseParallelFusedDecoderLayer(s) cannot be scanned over. Only blocks of MoE
layers are ever scanned; see also moe_architecture.SparseDecoder.
Attributes:
self_attention: An instance of a self-attention module. The projections of
this module are applied indirectly through the fused projections.
mlp: The MLP module. The projections of this module are applied indirectly
through the fused projections.
q_wi_fused: Projection sublayer applying fused attention-MLP Q-Wi kernel.
o_wo_fused: Projection sublayer applying fused attention-MLP O-Wo kernel.
kv_fused: Projection applying fused KV attention kernel.
dropout_factory: A callable that returns a new dropout instance. This is
applied after the attention module.
layer_norm_factory: A callable that returns a new layer norm. This is
applied before the attention module and before the MLP.
relative_position_bias_factory: A callable that returns relative position
bias instances. This should only be used for per-layer relative position
biases; please use `shared_relative_position_bias` if they are shared
among layers.
shared_relative_position_bias: An instance of a shared relative position
bias module, usually owned by the Decoder.
sow_intermediates: Whether to track intermediates using Module.sow.
"""
self_attention: nn.Module
mlp: nn.Module
q_wi_fused: nn.Module
o_wo_fused: nn.Module
kv_fused: nn.Module
dropout_factory: Callable[[], nn.Module]
layer_norm_factory: Callable[[], nn.Module]
relative_position_bias_factory: Optional[Callable[[], nn.Module]] = None
shared_relative_position_bias: Optional[nn.Module] = None
sow_intermediates: bool = False
def setup(self):
if (
self.relative_position_bias_factory is not None
and self.shared_relative_position_bias is not None
):
raise ValueError(
'Please set at most one of `relative_position_bias_factory` and '
'`shared_relative_position_bias`. (They can both be None however, '
'e.g. for absolute position embeddings.)'
)
self.relpos_bias = (
self.relative_position_bias_factory()
if self.relative_position_bias_factory is not None
else self.shared_relative_position_bias
)
self.layer_norm = self.layer_norm_factory()
self.dropout = self.dropout_factory()
@nn.compact
def __call__(
    self,
    targets,
    encoded,
    decoder_mask=None,
    encoder_decoder_mask=None,
    *,
    logit_mask=None,
    enable_dropout: bool = True,
    decode: bool = False,
    max_decode_length: Optional[int] = None,
    prefill: bool = False,
    prefill_lengths: Optional[Array] = None,
) -> Array:
  """Applies SparseParallelFusedDecoder1DBlock module.

  Computes self-attention and the MLP in parallel on the same layer-normed
  input, with their input projections fused into `q_wi_fused`/`kv_fused`
  and their output projections fused into `o_wo_fused`.

  Args:
    targets: Input data for decoder with shape [batch_size,
      decoder_seq_length, decoder_hidden_size].
    encoded: Must be None, as this block is for Decoder-only models. Only kept
      for __call__ signature uniformity.
    decoder_mask: Decoder self-attention mask.
    encoder_decoder_mask: Must be None, as this block is for Decoder-only
      models. Only kept for __call__ signature uniformity.
    logit_mask: A mask (e.g., padding logit mask) to be applied to the
      attention logits. Unused in this implementation (deleted below).
    enable_dropout: Enables dropout if set to True.
    decode: Whether to prepare and use an autoregressive cache.
    max_decode_length: An optional integer specifying the maximum decoding
      length. Note that this is only used for defining the relative position
      embedding parameters.
    prefill: Whether to run a partial sequence to prefill the cache.
    prefill_lengths: The length of each partial sequence we are filling in the
      cache, lengths are inferred from the mask if not provided.

  Returns:
    Output after Transformer decoder block.
  """
  assert encoded is None, 'Only pure decoder layer is supported.'
  assert encoder_decoder_mask is None, 'Only pure decoder layer is supported.'
  layer_input = targets
  del targets
  # Shared relative position embedding attention biases.
  if self.relpos_bias:
    if decode and max_decode_length:
      decoder_bias = self.relpos_bias(
          max_decode_length, max_decode_length, False
      )
    else:
      decoder_bias = self.relpos_bias(
          layer_input.shape[-2], layer_input.shape[-2], False
      )
  else:
    decoder_bias = None
  assert layer_input.ndim == 3
  layer_input = flax_partitioning.with_sharding_constraint(
      layer_input, logical_axis_resources=('batch', 'length', 'embed')
  )
  if prefill and prefill_lengths is None:
    # Figure out how far each element in the batch fills the cache based
    # on the mask. We index each element in the batch, the first head
    # dim (because this is always set to one), and the first query
    # vector. If there is any prefix at all, the first element in the
    # prefix would be part of it.
    prefill_lengths = jnp.sum(decoder_mask[:, 0, 0, :], axis=-1).astype(
        jnp.int32
    )
  # Single pre-norm shared by the parallel attention and MLP branches.
  x = self.layer_norm(
      layer_input,
      decode=decode,
      prefill=prefill,
      prefill_lengths=prefill_lengths,
  )
  x = flax_partitioning.with_sharding_constraint(
      x, logical_axis_resources=('batch', 'length', 'embed')
  )
  num_heads = self.self_attention.num_heads
  if self.self_attention.head_dim is not None:
    head_dim = self.self_attention.head_dim
  else:
    head_dim = self.self_attention.qkv_features // num_heads
  n_activations = len(self.mlp.activations)
  mlp_intermediate_dim = self.mlp.intermediate_dim
  del logit_mask
  # Use local fused Q + W_i to calculate fused results.
  # [batch, length, embed], [heads, mlp//heads * n_act + head_dim] ->
  # Unpack to [batch, length, heads, mlp//heads * n_act + head_dim].
  q_wi = self.q_wi_fused(x)
  # Slice out query: first `head_dim` features of the fused output.
  query = lax.dynamic_slice_in_dim(q_wi, 0, head_dim, -1)
  # Slice out MLP inputs.
  int_size = mlp_intermediate_dim // num_heads
  # wi[i]: [batch, length, heads, mlp//heads] — one slice per activation fn.
  wi = [
      lax.dynamic_slice_in_dim(q_wi, head_dim + i * int_size, int_size, -1)
      for i in range(n_activations)
  ]
  # Use local fused K + V to calculate fused results.
  kv = self.kv_fused(x)
  kv = flax_partitioning.with_sharding_constraint(
      kv, ('batch', 'length', 'embed', 'heads')
  )
  # Slice out key; squeeze drops the singleton multi-query head axis.
  key = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 0, head_dim, -1), -2)
  # Slice out value.
  value = jnp.squeeze(
      lax.dynamic_slice_in_dim(kv, head_dim, head_dim, -1), -2
  )
  precomputed_qkv = (query, key, value)
  # y_att: [batch, length, heads, head_dim]
  y_att = self.self_attention(
      x,
      x,
      mask=decoder_mask,
      bias=decoder_bias,
      precomputed_qkv=precomputed_qkv,
      enable_dropout=enable_dropout,
      decode=decode,
      prefill=prefill,
      prefill_lengths=prefill_lengths,
  )
  # y_mlp: [batch, length, heads, mlp//heads]
  y_mlp = self.mlp(
      wi,
      decode=decode,
      prefill=prefill,
      prefill_lengths=prefill_lengths,
      enable_dropout=enable_dropout,
  )
  # Concatenate the parallel branches so a single fused O-Wo projection can
  # map them back to the embedding dimension.
  # y_fused: [batch, length, heads, mlp//heads + head_dim]
  y_fused = jnp.concatenate([y_att, y_mlp], axis=-1)
  y_out = self.o_wo_fused(y_fused)
  # NOTE(review): a 2**-0.5 rescaling of the output (`y *= 2**-0.5`) is left
  # disabled here — confirm this is intentional.
  z = layer_input + self.dropout(y_out, deterministic=not enable_dropout)
  z = flax_partitioning.with_sharding_constraint(
      z, logical_axis_resources=('batch', 'length', 'embed')
  )
  if self.sow_intermediates:
    self.sow('intermediates', 'activations', z)
  return z
def compute_fused_o_wo_dims(
    attention_module: dense_attention.MultiQueryDotProductAttention,
) -> int:
  """Returns the output dimension of the fused O-Wo projection.

  Args:
    attention_module: Self-attention module used in the fused layer.

  Returns:
    Fused O-Wo projection dimension.

  Raises:
    ValueError: If `out_features` is not specified on the attention module.
  """
  if attention_module.out_features is None:
    # Bug fix: the adjacent string literals previously concatenated without a
    # space, producing "...self-attentionwith manually specified...".
    raise ValueError(
        'SparseParallelFusedDecoderLayer requires self-attention '
        'with manually specified `out_features`.'
    )
  return attention_module.out_features
def compute_fused_kv_dims(
    attention_module: dense_attention.MultiQueryDotProductAttention,
) -> Tuple[int, int]:
  """Computes the output dimensions for the fused KV projection.

  The head axis of the fused KV kernel is fixed at 1, and the feature axis
  packs the key and value projections (each of size `head_dim`) together.

  Args:
    attention_module: Self-attention module used in the fused layer.

  Returns:
    Fused KV dimension, `(1, 2 * head_dim)`.
  """
  return 1, 2 * _compute_head_dim(attention_module)
def compute_fused_q_wi_dims(
    attention_module: dense_attention.MultiQueryDotProductAttention,
    mlp: dense.MlpBlock,
) -> Tuple[int, int]:
  """Computes the output dimensions for the Q-Wi fused projection.

  Per attention head, the fused kernel packs one MLP input slice for each
  activation function, followed by the query features.

  Args:
    attention_module: Self-attention module used in the fused layer.
    mlp: MLP module used in the fused layer.

  Returns:
    Q-Wi fused projection dimension,
    `(num_heads, (mlp_dim // num_heads) * num_activations + head_dim)`.

  Raises:
    ValueError: If the number of attention heads does not divide the MLP
      intermediate dimension.
  """
  num_heads = attention_module.num_heads
  head_dim = _compute_head_dim(attention_module)
  num_activations = len(mlp.activations)
  intermediate_dim = mlp.intermediate_dim
  if intermediate_dim % num_heads != 0:
    raise ValueError(
        'Number of attention heads does not divide MLP intermediate dimension'
    )
  per_head_mlp_dim = intermediate_dim // num_heads
  return num_heads, per_head_mlp_dim * num_activations + head_dim
def _compute_head_dim(
    attention_module: dense_attention.MultiQueryDotProductAttention,
) -> int:
  """Returns the per-head feature dimension of the attention module.

  Uses the explicitly configured `head_dim` when present; otherwise derives
  it from `qkv_features // num_heads`.
  """
  explicit_head_dim = attention_module.head_dim
  if explicit_head_dim is not None:
    return explicit_head_dim
  return attention_module.qkv_features // attention_module.num_heads
| 11,725 | 33.589971 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/moe/moe_layers_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for moe_layers."""
import functools
from typing import Any, Dict, Mapping
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import flax
import jax
from jax import numpy as jnp
import numpy as np
from flaxformer.architectures.moe import moe_layers
from flaxformer.architectures.moe import routing
from flaxformer.components import dense
# Type stubs / aliases used by the tests below.
FrozenDict = flax.core.frozen_dict.FrozenDict
MoeLayer = moe_layers.MoeLayer
PRNGKey = Any  # JAX PRNG keys are opaque arrays; typed loosely here.
NUM_CLASSES = 2  # NOTE(review): appears unused in this module — confirm.
def init_layer_variables(
    key: PRNGKey, module: MoeLayer, init_batch: Mapping[str, jnp.ndarray]
) -> Dict[str, Any]:
  """Initializes and returns the module's variable collections.

  Splits `key` into the three RNG streams ('params', 'dropout', 'jitter')
  that an MoE layer consumes during initialization.
  """
  params_key, dropout_key, jitter_key = jax.random.split(key, num=3)
  rngs = {'params': params_key, 'dropout': dropout_key, 'jitter': jitter_key}
  return module.init(rngs, **init_batch)
class MoeLayerTest(parameterized.TestCase):
  """Tests for MoeLayer dispatch, grouping, padding and replica helpers."""

  @parameterized.parameters(
      # num_experts = max_seq_length.
      dict(dispatch='scatter', num_experts=4),
      # num_experts = max_seq_length.
      dict(dispatch='mask', num_experts=4),
      # num_experts > num_tokens.
      dict(dispatch='scatter', num_experts=16),
      # num_experts > num_tokens.
      dict(dispatch='mask', num_experts=32),
  )
  def test_moe_layer_runs(self, dispatch: str, num_experts: int):
    # Smoke test: the layer runs end-to-end under both dispatch mechanisms
    # and records all diagnostic metrics in the 'intermediates' collection.
    batch_size = 3
    max_seq_length = 4
    num_tokens = batch_size * max_seq_length
    hidden_dim = 2
    rng = jax.random.PRNGKey(0)
    if dispatch == 'mask':
      router = routing.TokensChooseMaskedRouter(
          router_weights=routing.RouterWeights(name='router_weights'),
          jitter_noise=0.0,
          num_selected_experts=2,
          batch_prioritized_routing=True,
          ignore_padding_tokens=True,
          dtype=jnp.float32,
      )
    else:
      router = routing.TokensChooseScatterRouter(
          router_weights=routing.RouterWeights(name='router_weights'),
          jitter_noise=0.0,
          num_selected_experts=2,
          batch_prioritized_routing=True,
          ignore_padding_tokens=True,
          dtype=jnp.float32,
      )
    expert = dense.MlpBlock(
        use_bias=False,
        intermediate_dim=2,
        activations=('gelu',),
        intermediate_dropout_rate=0.1,
    )
    moe_layer = moe_layers.MoeLayer(
        num_experts=num_experts,
        num_expert_partitions=num_experts,
        max_group_size=num_tokens,
        router=router,
        train_capacity_factor=1.5,
        eval_capacity_factor=1.5,
        expert=expert,
        num_model_partitions=1,
        split_params=False,
    )  # Ensures all experts start with same params
    init_batch = {
        'inputs': jnp.ones(
            (batch_size, max_seq_length, hidden_dim), jnp.float32
        )
    }
    params = init_layer_variables(rng, moe_layer, init_batch)['params']
    expected_keys = {'router', 'expert'}
    self.assertEqual(params.keys(), expected_keys)
    dropout_rng, jitter_rng, init_rng = jax.random.split(rng, num=3)
    inputs = jax.random.uniform(
        init_rng,
        (batch_size, max_seq_length, hidden_dim),
        minval=-10,
        maxval=10,
    )
    actual_outputs, state = moe_layer.apply(
        {'params': params},
        rngs={'dropout': dropout_rng, 'jitter': jitter_rng},
        mutable=['intermediates'],
        inputs=inputs,
    )
    self.assertEqual(
        actual_outputs.shape, (batch_size, max_seq_length, hidden_dim)
    )
    # All routing diagnostics should have been sown as intermediates.
    for metric in [
        'auxiliary_loss',
        'router_z_loss',
        'fraction_tokens_left_behind',
        'expert_usage',
        'router_confidence',
    ]:
      self.assertIn(metric, state['intermediates'])

  def test_dense_general_expert(self):
    # Experts need not be MlpBlocks: a DenseGeneral with 2D hidden/output
    # features also works, given a matching 2D router weight configuration.
    batch_size = 3
    max_seq_length = 8
    num_tokens = batch_size * max_seq_length
    hidden_dims = (2, 3)  # 2D hidden_dims
    num_experts = 4
    rng = jax.random.PRNGKey(0)
    output_features = (3, 12)
    expert = dense.DenseGeneral(
        axis=(-2, -1),  # Expects 2D hidden_dims
        features=output_features,
        use_bias=False,
        reshape_kernel=False,
        kernel_axis_names=('heads', 'or', 'tails', '?'),
    )
    router = routing.TokensChooseMaskedRouter(
        # Specialized router weights for 2D hidden/output features of expert.
        router_weights=routing.RouterWeights(
            axis=(-2, -1),
            kernel_axis_names=('heads', 'fused', 'unmodeled'),
            reshape_kernel=False,
        ),
        num_selected_experts=1,
        dtype=jnp.float32,
        jitter_noise=0.0,
        batch_prioritized_routing=False,
        ignore_padding_tokens=False,
    )
    moe_layer = moe_layers.MoeLayer(
        num_experts=num_experts,
        num_expert_partitions=num_experts,
        max_group_size=num_tokens,
        router=router,
        train_capacity_factor=1.0,
        eval_capacity_factor=1.0,
        expert=expert,
        num_model_partitions=1,
        split_params=False,
    )  # Ensures all experts start with same params
    init_batch = {
        'inputs': jnp.ones(
            (batch_size, max_seq_length, *hidden_dims), jnp.float32
        )
    }
    params = init_layer_variables(rng, moe_layer, init_batch)['params']
    expected_keys = {'router', 'expert'}
    self.assertEqual(params.keys(), expected_keys)
    dropout_rng, jitter_rng, init_rng = jax.random.split(rng, num=3)
    inputs = jax.random.uniform(
        init_rng,
        (batch_size, max_seq_length, *hidden_dims),
        minval=-10,
        maxval=10,
    )
    actual_outputs = moe_layer.apply(
        {'params': params},
        rngs={'dropout': dropout_rng, 'jitter': jitter_rng},
        inputs=inputs,
    )
    self.assertEqual(
        actual_outputs.shape, (batch_size, max_seq_length, *output_features)
    )

  def test_scatter_mask_dispatch_equal(self):
    # With identical (mocked) router logits, scatter and mask dispatch must
    # produce numerically identical outputs.
    batch_size = 4
    max_seq_length = 4
    hidden_dim = 2
    num_experts = 2
    tokens_per_group = 8
    num_groups = batch_size * max_seq_length // tokens_per_group
    rng = jax.random.PRNGKey(0)
    expert = dense.MlpBlock(
        use_bias=True,
        intermediate_dropout_rate=0.0,
        final_dropout_rate=0.0,
        intermediate_dim=2,
        name='feed_forward_expert',
    )
    moe_layer_factory = functools.partial(
        moe_layers.MoeLayer,
        num_experts=num_experts,
        num_expert_partitions=num_experts,
        dropout_rate=0.0,
        max_group_size=tokens_per_group,
        train_capacity_factor=1.0,
        eval_capacity_factor=1.0,
        expert=expert,
        num_model_partitions=1,
        split_params=False,
    )  # Ensures all experts start with same params
    router_weights = routing.RouterWeights(name='router_weights')
    masked_router = routing.TokensChooseMaskedRouter(
        router_weights=router_weights,
        jitter_noise=0.0,
        num_selected_experts=2,
        batch_prioritized_routing=True,
        dtype=jnp.float32,
        ignore_padding_tokens=False,
    )
    masked_moe_layer = moe_layer_factory(router=masked_router)
    scatter_router = routing.TokensChooseScatterRouter(
        router_weights=router_weights,
        jitter_noise=0.0,
        num_selected_experts=2,
        batch_prioritized_routing=True,
        dtype=jnp.float32,
        ignore_padding_tokens=False,
    )
    scatter_moe_layer = moe_layer_factory(router=scatter_router)
    inputs = jax.random.uniform(
        rng, (batch_size, max_seq_length, hidden_dim), minval=-10, maxval=10
    )
    # Mock the router weights to ensure both layers compute with the same
    # logits.
    mock_router_logits = jax.random.uniform(
        rng, (num_groups, tokens_per_group, num_experts), minval=-1, maxval=1
    )
    with mock.patch.object(
        masked_router, 'router_weights', return_value=mock_router_logits
    ):
      masked_outputs, _ = masked_moe_layer.init_with_output(
          rng, inputs, enable_dropout=False
      )
    with mock.patch.object(
        scatter_router, 'router_weights', return_value=mock_router_logits
    ):
      scatter_outputs, _ = scatter_moe_layer.init_with_output(
          rng, inputs, enable_dropout=False
      )
    # Golden values regression-checked for both dispatch mechanisms.
    expected_outputs = jnp.array(
        [
            [
                [-5.4286949e-07, -9.7972497e-07],
                [-2.8485384e00, -2.0157995e00],
                [-3.3071041e00, -2.3111360e00],
                [0.0000000e00, 0.0000000e00],
            ],
            [
                [-4.8432793e-07, -8.7407409e-07],
                [-5.3204980e-07, -9.6019858e-07],
                [-7.4125074e-07, -1.3377468e-06],
                [1.3946553e00, 1.0524274e00],
            ],
            [
                [-1.5633354e00, -1.0680735e00],
                [-6.9210348e00, -4.8458524e00],
                [4.6841961e-01, 3.9556113e-01],
                [-4.6012778e-07, -8.3039981e-07],
            ],
            [
                [0.0000000e00, 0.0000000e00],
                [8.1172025e-01, 5.8894420e-01],
                [-1.7119075e00, -1.1470584e00],
                [-5.5547832e-07, -1.0024803e-06],
            ],
        ],
        dtype=jnp.float32,
    )
    np.testing.assert_allclose(
        masked_outputs, expected_outputs, rtol=1e-6, atol=1e-6
    )
    np.testing.assert_allclose(
        scatter_outputs, expected_outputs, rtol=1e-6, atol=1e-6
    )

  @parameterized.parameters(
      dict(
          max_group_size=8,
          num_tokens=32,
          num_experts=2,
          num_expert_replicas=1,
          expected_num_groups=4,
      ),
      dict(
          max_group_size=9,
          num_tokens=32,
          num_experts=2,
          num_expert_replicas=1,
          expected_num_groups=4,
      ),
      dict(
          max_group_size=16,
          num_tokens=32,
          num_experts=4,
          num_expert_replicas=2,
          expected_num_groups=8,
      ),
      dict(
          max_group_size=32,
          num_tokens=32,
          num_experts=2,
          num_expert_replicas=1,
          expected_num_groups=2,
      ),
      dict(
          max_group_size=64,
          num_tokens=32,
          num_experts=2,
          num_expert_replicas=1,
          expected_num_groups=2,
      ),
  )
  def test_num_groups(
      self,
      max_group_size: int,
      num_tokens: int,
      num_experts: int,
      num_expert_replicas: int,
      expected_num_groups: int,
  ):
    # Non-strict mode may shrink group size to satisfy divisibility.
    self.assertEqual(
        moe_layers._num_groups(
            num_tokens,
            max_group_size,
            num_experts,
            num_expert_replicas,
            strict_group_size=False,
        ),
        expected_num_groups,
    )

  def test_strict_group_size(self):
    # Strict mode raises rather than silently shrinking the group size.
    with self.assertRaisesRegex(
        ValueError, 'Selected group_size=8 is less than the max_group_size=16.'
    ):
      moe_layers._num_groups(
          num_tokens=16,
          max_group_size=16,
          num_experts=2,
          num_expert_replicas=1,
          strict_group_size=True,
      )

  @parameterized.parameters(
      dict(
          num_expert_partitions=1,
          num_model_partitions=1,
          expected_num_replicas=4,
      ),
      dict(
          num_expert_partitions=2,
          num_model_partitions=1,
          expected_num_replicas=2,
      ),
      dict(
          num_expert_partitions=2,
          num_model_partitions=2,
          expected_num_replicas=1,
      ),
      dict(
          num_expert_partitions=4,
          num_model_partitions=1,
          expected_num_replicas=1,
      ),
  )
  @mock.patch('jax.device_count')
  def test_num_expert_replicas(
      self,
      device_count: int,
      num_expert_partitions: int,
      num_model_partitions: int,
      expected_num_replicas: int,
  ):
    # Fix the device count so replica math is deterministic in tests.
    device_count.return_value = 4
    self.assertEqual(
        moe_layers._num_expert_replicas(
            num_expert_partitions, num_model_partitions
        ),
        expected_num_replicas,
    )

  @parameterized.parameters(
      dict(
          # num_tokens % num_experts * num_expert_replicas = 0.
          # No padding required.
          num_experts=2,
          num_expert_replicas=1,
          expected_batch_padding=0,
          expected_seq_padding=0,
      ),
      dict(
          # num_tokens % num_experts * num_expert_replicas != 0.
          num_experts=16,
          num_expert_replicas=2,
          expected_batch_padding=1,
          expected_seq_padding=0,
      ),
      dict(
          # num_tokens % num_experts * num_expert_replicas != 0.
          num_experts=32,
          num_expert_replicas=1,
          expected_batch_padding=1,
          expected_seq_padding=0,
      ),
      dict(
          # num_tokens % num_experts * num_expert_replicas != 0.
          num_experts=7,
          num_expert_replicas=1,
          expected_batch_padding=0,
          expected_seq_padding=6,
      ),
      dict(
          # num_tokens % num_experts * num_expert_replicas != 0.
          num_experts=9,
          num_expert_replicas=1,
          expected_batch_padding=0,
          expected_seq_padding=1,
      ),
  )
  def test_maybe_pad(
      self,
      num_experts: int,
      num_expert_replicas: int,
      expected_batch_padding: int,
      expected_seq_padding: int,
  ):
    original_seq_length = 8
    original_batch_size = 3
    inputs = jnp.ones(
        (original_batch_size, original_seq_length, 2), jnp.float32
    )
    padded_inputs = moe_layers._maybe_pad(
        inputs,
        num_experts,
        num_expert_replicas,
    )
    self.assertEqual(
        padded_inputs.shape,
        (
            original_batch_size + expected_batch_padding,
            original_seq_length + expected_seq_padding,
            2,
        ),
    )
    # Any padding appended must be all-zero.
    if expected_batch_padding > 0 or expected_seq_padding > 0:
      self.assertAlmostEqual(
          jnp.sum(
              abs(padded_inputs[original_batch_size:, original_seq_length:])
          ),
          0.0,
      )
if __name__ == '__main__':
  # Run all tests when this module is executed directly.
  absltest.main()
| 14,536 | 28.132265 | 79 | py |
flaxformer | flaxformer-main/flaxformer/architectures/moe/scatter_utils_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for scatter_utils."""
from absl.testing import absltest
from jax import numpy as jnp
import numpy as np
from flaxformer.architectures.moe import scatter_utils
class ScatterNdTest(absltest.TestCase):
  """Unit tests for `scatter_utils.scatter_nd`."""

  def test_scatter_nd_simple(self):
    # A single (batch, row) index receives the single update vector.
    result = scatter_utils.scatter_nd(
        jnp.array([[0, 1]]),
        jnp.array([[1, -2, 3]], dtype=jnp.float32),
        shape=(1, 2, 3))
    np.testing.assert_allclose(
        result, jnp.array([[[0, 0, 0], [1, -2, 3]]], dtype=jnp.float32))

  def test_scatter_nd_3d_update(self):
    # Multi-dimensional updates are placed according to their inner indices.
    result = scatter_utils.scatter_nd(
        jnp.array([[[0, 1], [1, 0], [1, 1]]]),
        jnp.array([[[1, -1], [2, -2], [3, -3]]], dtype=jnp.int32),
        shape=(2, 2, 2))
    np.testing.assert_allclose(
        result,
        jnp.array([[[0, 0], [1, -1]], [[2, -2], [3, -3]]], dtype=jnp.int32))

  def test_scatter_nd_ignore_outside_indices(self):
    # Indices outside the output shape are quietly dropped.
    result = scatter_utils.scatter_nd(
        jnp.array([[0, 0], [1, 2], [2, 0]]),
        jnp.array([1., 2., 3.]),
        shape=(3, 2))
    np.testing.assert_allclose(
        result, jnp.array([[1., 0.], [0., 0], [3., 0.]]))

  def test_scatter_nd_cumulative_updates(self):
    # Updates that target the same position are accumulated.
    result = scatter_utils.scatter_nd(
        jnp.array([[1, 1], [1, 1], [1, 1]]),
        jnp.array([1., 2., 3.]),
        shape=(3, 2))
    np.testing.assert_allclose(
        result, jnp.array([[0., 0.], [0., 6.], [0., 0.]]))
if __name__ == '__main__':
  # Run all tests when this module is executed directly.
  absltest.main()
| 2,302 | 36.145161 | 79 | py |
flaxformer | flaxformer-main/flaxformer/architectures/moe/scatter_utils.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extensions to Jax/Flax core functions for Mixture of Experts training."""
from typing import Sequence
import jax.numpy as jnp
from flaxformer.types import Array
def scatter_nd(indices: Array, updates: Array, shape: Sequence[int]) -> Array:
  """JAX implementation of tf.scatter_nd.

  See https://www.tensorflow.org/api_docs/python/tf/scatter_nd, and
  https://github.com/google/jax/discussions/3658.

  Notes:
    - Updates that target the same output position are accumulated (summed).
    - Indices falling outside of the created array are quietly ignored.

  Args:
    indices: [num_items, n_dims] array of indices to update.
    updates: [num_items, ...] array of new data points.
    shape: Dimensions of the output array.

  Returns:
    An array of shape `shape` and the same type as `updates`, with updated
    values at given indices.
  """
  # Split the trailing index dimension into one index array per output axis —
  # the equivalent of `tf.unstack(indices, axis=-1)` — then scatter-add the
  # updates into a zero-initialized output.
  per_axis_indices = tuple(jnp.moveaxis(indices, -1, 0))
  target = jnp.zeros(shape, updates.dtype)
  return target.at[per_axis_indices].add(updates)
| 1,810 | 35.22 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/moe/moe_architecture.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides encoders/decoders with Mixture of Experts support."""
import functools
from typing import Callable, Optional, Sequence, Tuple, TypeVar, Union
from flax import linen as nn
from flaxformer import activation_partitioning
from flaxformer import transformer_common as common
from flaxformer.architectures.common import param_remapping
from flaxformer.architectures.moe import moe_enums
from flaxformer.architectures.moe import moe_layers
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.components import embedding
from flaxformer.types import Array
T = TypeVar('T')  # Generic type

# Short aliases for commonly used flaxformer types and factory signatures.
DecoderLayer = t5_architecture.DecoderLayer
EncoderLayer = t5_architecture.EncoderLayer
LayerLayout = moe_enums.LayerLayout
MakeDecoderLayerFn = t5_architecture.MakeDecoderLayerFn
MakeEncoderLayerFn = t5_architecture.MakeEncoderLayerFn
MoeLayer = moe_layers.MoeLayer
class SparseEncoderLayer(EncoderLayer):
  """Sparse Transformer encoder layer, with optional ST-MoE support.

  Dense/sparse encoder layers can be constructed by specifying `mlp` (parent
  class attribute) with a dense/sparse MoeLayer/MlpBlock. The `extra_mlp`
  allows for inserting an extra MLP dense module after the traditional encoder
  block, as in ST-MoE (https://arxiv.org/abs/2202.08906).

  Attributes:
    extra_mlp: Additional MLP module, applied after `mlp` module. If None, the
      layer behaves exactly like the parent EncoderLayer.
  """
  extra_mlp: Optional[nn.Module] = None

  def setup(self):
    # Individual layers must not be scanned; scanning happens one level up
    # over whole blocks of layers (see MoeEncoderScanBlock).
    if self.scanned:
      raise ValueError(
          'Individual SparseEncoderLayer(s) are never scanned over. Only '
          'blocks of MoE layers are ever scanned. Please leave '
          'SparseEncoderLayer.scanned = False.')
    super().setup()
    # Sublayers for the optional post-block MLP residual branch. Note that
    # these are created even when `extra_mlp` is None (unused in that case).
    self.pre_extra_mlp_layer_norm = self.layer_norm_factory()
    self.post_extra_mlp_dropout = self.dropout_factory()

  def __call__(self,
               inputs: Array,
               encoder_mask: Optional[Array] = None,
               *,
               logit_mask: Optional[Array] = None,
               enable_dropout: bool = True) -> Array:
    """Applies a SparseEncoderLayer.

    Args:
      inputs: Input data with shape [batch, length, emb_dim].
      encoder_mask: Encoder self-attention mask.
      logit_mask: Encoder logits mask.
      enable_dropout: Enables dropout if set to True.

    Returns:
      Output after Transformer encoder block.
    """
    # Regular (attention + MLP) encoder block from the parent class.
    y = super().__call__(
        inputs,
        encoder_mask,
        logit_mask=logit_mask,
        enable_dropout=enable_dropout)
    if self.extra_mlp is None:
      return y  # pytype: disable=bad-return-type  # jax-ndarray
    # Pre-norm residual branch through the extra MLP (ST-MoE style).
    z = self.pre_extra_mlp_layer_norm(y)
    z = activation_partitioning.with_sharding_migration(
        z,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    if logit_mask is not None:
      z = logit_mask * z
    z = self.extra_mlp(z, enable_dropout=enable_dropout)  # pylint: disable=not-callable
    # Residual connection around the extra MLP.
    z = y + self.post_extra_mlp_dropout(z, deterministic=not enable_dropout)
    z = activation_partitioning.with_sharding_migration(
        z,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    if self.sow_intermediates:
      self.sow('intermediates', 'extra_mlp_activations', z)
    return z  # pytype: disable=bad-return-type  # jax-ndarray
class SparseDecoderLayer(DecoderLayer):
  """Sparse Transformer encoder-decoder layer, with optional ST-MoE support.

  Dense/sparse encoder layers can be constructed by specifying `mlp` (parent
  class attribute) with a dense/sparse MoeLayer/MlpBlock. The `extra_mlp`
  allows for inserting an extra MLP dense module after the traditional decoder
  block, as in ST-MoE (https://arxiv.org/abs/2202.08906).

  Attributes:
    extra_mlp: Additional MLP module, applied after `mlp` module. If None, the
      layer behaves exactly like the parent DecoderLayer.
  """
  extra_mlp: Optional[nn.Module] = None

  def setup(self):
    # Individual layers must not be scanned; scanning happens one level up
    # over whole blocks of layers (see MoeDecoderScanBlock).
    if self.scanned:
      raise ValueError(
          'Individual SparseDecoderLayer(s) are never scanned over. Only '
          'blocks of MoE layers are ever scanned. Please leave '
          'SparseDecoderLayer.scanned = False.')
    super().setup()
    # Sublayers for the optional post-block MLP residual branch. Note that
    # these are created even when `extra_mlp` is None (unused in that case).
    self.pre_extra_mlp_layer_norm = self.layer_norm_factory()
    self.post_extra_mlp_dropout = self.dropout_factory()

  def __call__(self,
               targets: Array,
               encoded: Array,
               decoder_mask: Optional[Array] = None,
               encoder_decoder_mask: Optional[Array] = None,
               *,
               logit_mask: Optional[Array] = None,
               enable_dropout: bool = True,
               decode: bool = False,
               max_decode_length: Optional[int] = None,
               prefill: bool = False,
               prefill_lengths: Optional[Array] = None) -> Array:
    """Applies SparseDecoderLayer module.

    Args:
      targets: Input data for decoder with shape [batch_size,
        decoder_seq_length, decoder_hidden_size].
      encoded: Input data from encoder with shape [batch_size,
        encoder_seq_length, decoder_hidden_size]. If None, block is Decoder
        only.
      decoder_mask: Decoder self-attention mask.
      encoder_decoder_mask: Encoder-decoder attention mask with shape [
        batch_size, 1, decoder_seq_length, encoder_seq_length].
      logit_mask: Mask (e.g., padding logit mask) to be applied to the
        attention logits.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in
        the cache, lengths are inferred from the mask if not provided.

    Returns:
      Output after Transformer encoder-decoder block.
    """
    # Regular (attention + cross-attention + MLP) decoder block from parent.
    y = super().__call__(
        targets,
        encoded,
        decoder_mask,
        encoder_decoder_mask,
        logit_mask=logit_mask,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length,
        prefill=prefill,
        prefill_lengths=prefill_lengths)
    if self.extra_mlp is None:
      return y  # pytype: disable=bad-return-type  # always-use-return-annotations
    # Pre-norm residual branch through the extra MLP (ST-MoE style).
    z = self.pre_extra_mlp_layer_norm(
        y, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths)
    z = activation_partitioning.with_sharding_migration(
        z,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    if logit_mask is not None:
      z = logit_mask * z
    z = self.extra_mlp(  # pylint: disable=not-callable
        z,
        decode=decode,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
        enable_dropout=enable_dropout)
    # Residual connection around the extra MLP.
    z = y + self.post_extra_mlp_dropout(z, deterministic=not enable_dropout)
    z = activation_partitioning.with_sharding_migration(
        z,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    if self.sow_intermediates:
      self.sow('intermediates', 'extra_mlp_activations', z)
    return z  # pytype: disable=bad-return-type  # always-use-return-annotations
class MoeEncoderScanBlock(nn.Module, param_remapping.ParameterRemappable):
  """Repeatable block of encoder layers that can be scanned over.

  Attributes:
    dense_layer_factory: A callable that returns a EncoderLayer containing a
      dense MLP sublayer and attention sublayers.
    sparse_layer_factory: A callable that returns a EncoderLayer containing a
      sparse MLP sublayer and attention sublayers.
    num_layers: Total number of layers (dense and sparse) in encoder.
    num_sparse_layers: Total number of sparse sublayers in encoder.
    sparse_layout: Placement of sparse modules within encoder. All other MLP
      sublayers are filled with dense MLP sublayers.
  """
  dense_layer_factory: MakeEncoderLayerFn
  sparse_layer_factory: MakeEncoderLayerFn
  num_layers: int
  num_sparse_layers: int
  sparse_layout: LayerLayout

  def setup(self) -> None:
    # Build the dense/sparse sub-sequence of layers that this block repeats.
    self.subblock: Sequence[EncoderLayer] = _scan_block_factory(  # pytype: disable=wrong-arg-types  # re-none
        self.dense_layer_factory, self.sparse_layer_factory, self.num_layers,
        self.num_sparse_layers, self.sparse_layout)

  def __call__(self,
               inputs: Array,
               encoder_mask: Optional[Array] = None,
               *,
               logit_mask: Optional[Array] = None,
               enable_dropout: bool = True) -> Tuple[Array, Optional[Array]]:
    """Applies a MoeEncoderScanBlock.

    Args:
      inputs: Input data with shape [batch, length, emb_dim].
      encoder_mask: Encoder self-attention mask.
      logit_mask: Encoder logits mask.
      enable_dropout: Enables dropout if set to True.

    Returns:
      Output after MoE encoder block.
    """
    hidden_state = inputs
    # Apply the layers of the sub-block sequentially.
    for layer in self.subblock:
      hidden_state = layer(
          hidden_state,
          encoder_mask,
          logit_mask=logit_mask,
          enable_dropout=enable_dropout)
    # scan expects functions to have a signature: fn(carry, in) --> carry, out
    return hidden_state, None  # pytype: disable=bad-return-type  # jax-ndarray
class MoeDecoderScanBlock(nn.Module, param_remapping.ParameterRemappable):
  """Repeatable block of decoder layers that can be scanned over.

  Attributes:
    dense_layer_factory: A callable that returns a DecoderLayer containing a
      dense MLP sublayer and attention sublayers.
    sparse_layer_factory: A callable that returns a DecoderLayer containing a
      sparse MLP sublayer and attention sublayers.
    num_layers: Total number of layers (dense and sparse) in decoder.
    num_sparse_layers: Total number of sparse sublayers in decoder.
    sparse_layout: Placement of sparse modules within decoder. All other MLP
      sublayers are filled with dense MLP sublayers.
  """
  dense_layer_factory: MakeDecoderLayerFn
  sparse_layer_factory: MakeDecoderLayerFn
  num_layers: int
  num_sparse_layers: int
  sparse_layout: LayerLayout

  def setup(self) -> None:
    # Build the dense/sparse sub-sequence of layers that this block repeats.
    self.subblock: Sequence[DecoderLayer] = _scan_block_factory(  # pytype: disable=wrong-arg-types  # re-none
        self.dense_layer_factory, self.sparse_layer_factory, self.num_layers,
        self.num_sparse_layers, self.sparse_layout)

  def __call__(
      self,
      targets: Array,
      encoded: Array,
      decoder_mask: Optional[Array] = None,
      encoder_decoder_mask: Optional[Array] = None,
      *,
      logit_mask: Optional[Array] = None,
      enable_dropout: bool = True,
      decode: bool = False,
      max_decode_length: Optional[int] = None,
      prefill: bool = False,
      prefill_lengths: Optional[Array] = None) -> Tuple[Array, Optional[Array]]:
    """Applies MoeDecoderScanBlock module.

    Args:
      targets: Input data for decoder with shape [batch_size,
        decoder_seq_length, decoder_hidden_size].
      encoded: Input data from encoder with shape [batch_size,
        encoder_seq_length, decoder_hidden_size]. If None, block is Decoder
        only.
      decoder_mask: Decoder self-attention mask.
      encoder_decoder_mask: Encoder-decoder attention mask with shape [
        batch_size, 1, decoder_seq_length, encoder_seq_length].
      logit_mask: Mask (e.g., padding logit mask) to be applied to the
        attention logits.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in
        the cache, lengths are inferred from the mask if not provided.

    Returns:
      Output after MoE encoder-decoder block.
    """
    hidden_state = targets
    # Apply the layers of the sub-block sequentially.
    for layer in self.subblock:
      hidden_state = layer(
          hidden_state,
          encoded,
          decoder_mask,
          encoder_decoder_mask,
          logit_mask=logit_mask,
          enable_dropout=enable_dropout,
          decode=decode,
          max_decode_length=max_decode_length,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
    # scan expects functions to have a signature: fn(carry, in) --> carry, out
    return hidden_state, None  # pytype: disable=bad-return-type  # always-use-return-annotations
class SparseEncoder(t5_architecture.Encoder):
  """A stack of encoder layers with configurable dense and sparse MLP modules.

  Although some attributes below default to None, they MUST be specified by the
  user. We are forced to use defaults here as the parent Encoder class contains
  attributes with default values.

  Attributes:
    sparse_layer_factory: A callable that returns a EncoderLayer containing a
      sparse MLP sublayer and an attention sublayer. The "dense" variant of
      this factory is named `layer_factory` and is inherited from the super
      class.
    num_sparse_layers: Total number of sparse sublayers in encoder.
    sparse_layout: Placement of sparse modules within encoder. All other MLP
      sublayers are filled with dense MLP sublayers.
  """
  sparse_layer_factory: Optional[MakeEncoderLayerFn] = None
  num_sparse_layers: Optional[int] = None
  sparse_layout: LayerLayout = LayerLayout.MIXED

  def setup(self):
    # Fail fast if the user left the None "defaults" above unset.
    _validate_module_construction(self.sparse_layer_factory,
                                  self.num_sparse_layers)
    # Narrowing re-annotation: after validation the factory is never None.
    self.sparse_layer_factory: MakeEncoderLayerFn

    # Exactly one token-embedding source must be configured: either a factory
    # owned by this encoder, or an embedder shared with the decoder.
    if (self.token_embedder_factory,
        self.shared_token_embedder).count(None) != 1:
      raise ValueError(
          'Please set exactly one of token_embedder_factory or '
          'shared_token_embedder. token_embedder_factory was %s, and '
          'shared_token_embedder was %s.' %
          (self.token_embedder_factory, self.shared_token_embedder))
    if self.shared_token_embedder is not None:
      embedders = {'token_ids': self.shared_token_embedder}
    else:
      self.token_embedder_factory: Callable[[], embedding.Embed]
      self.token_embedder = self.token_embedder_factory()
      embedders = {'token_ids': self.token_embedder}
    if self.position_embedder_factory is not None:
      self.position_embedder_factory: Callable[[], embedding.Embed]
      self.position_embedder = self.position_embedder_factory()
      embedders['position_ids'] = self.position_embedder
    self.embedder = embedding.MultiEmbed(
        embedders,
        sow_intermediates=self.sow_intermediates,
        capture_gradients=self.capture_gradients)
    self.input_dropout = self.input_dropout_factory()

    # Optionally share one relative-position bias across all layers; when not
    # shared, the per-layer factories receive None and create their own.
    self.relpos_bias = (
        self.shared_relative_position_bias_factory()
        if self.shared_relative_position_bias_factory is not None else None)

    # `layer_factory` is the "dense" layer factory.
    dense_layer_factory = lambda: self.layer_factory( # pylint:disable=g-long-lambda
        shared_relative_position_bias=self.relpos_bias)
    sparse_layer_factory = lambda: self.sparse_layer_factory( # pylint:disable=g-long-lambda
        shared_relative_position_bias=self.relpos_bias)

    if not self.scan_layers:
      # Unrolled path: materialize all num_layers layers, choosing dense vs
      # sparse per layer index according to sparse_layout.
      layer_factory = functools.partial(
          _layer_factory,
          dense_layer_factory=dense_layer_factory,
          sparse_layer_factory=sparse_layer_factory,
          num_layers=self.num_layers,
          num_sparse_layers=self.num_sparse_layers,
          sparse_layout=self.sparse_layout)
      self.layers = [layer_factory(layer) for layer in range(self.num_layers)]
      self.encoder = common.TransparentLayerSequence(self.layers)
    else:
      # Scan path: build a single repeatable block and scan it
      # _num_scan_blocks(...) times.
      # Convert to factory to conform with Flaxformer API.
      block_factory = lambda: MoeEncoderScanBlock( # pylint:disable=g-long-lambda
          dense_layer_factory=dense_layer_factory,
          sparse_layer_factory=sparse_layer_factory,
          num_layers=self.num_layers,
          num_sparse_layers=self.num_sparse_layers,
          sparse_layout=self.sparse_layout)
      block_factory = t5_architecture.maybe_remat(
          block_factory,
          self.layer_remat,
          self.scan_layers,
          static_argnums=(3,))
      self.encoder = self._construct_scanned_encoder(
          block_factory,
          num_layers=_num_scan_blocks(self.num_layers, self.num_sparse_layers,
                                      self.sparse_layout))

    self.encoder_norm = self.layer_norm_factory()
    self.output_dropout = self.output_dropout_factory()
class SparseDecoder(t5_architecture.Decoder):
  """A stack of decoder layers with configurable dense and sparse MLP modules.

  Although some attributes below default to None, they MUST be specified by the
  user. We are forced to use defaults here as the parent Decoder class contains
  attributes with default values.

  Attributes:
    sparse_layer_factory: A callable that returns a DecoderLayer containing a
      sparse MLP sublayer and attention sublayers. The "dense" variant of this
      factory is named `layer_factory` and is inherited from the super class.
    num_sparse_layers: Total number of sparse sublayers in decoder.
    sparse_layout: Placement of sparse modules within decoder. All other MLP
      sublayers are filled with dense MLP sublayers.
  """
  sparse_layer_factory: Optional[MakeDecoderLayerFn] = None
  num_sparse_layers: Optional[int] = None
  sparse_layout: LayerLayout = LayerLayout.MIXED

  def setup(self):
    # Fail fast if the user left the None "defaults" above unset.
    _validate_module_construction(self.sparse_layer_factory,
                                  self.num_sparse_layers)
    # Narrowing re-annotation: after validation the factory is never None.
    self.sparse_layer_factory: MakeDecoderLayerFn

    # Exactly one token-embedding source must be configured: either a factory
    # owned by this decoder, or an embedder shared with the encoder.
    if (self.token_embedder_factory,
        self.shared_token_embedder).count(None) != 1:
      raise ValueError(
          'Please set exactly one of token_embedder_factory or '
          'shared_token_embedder. token_embedder_factory was %s, and '
          'shared_token_embedder was %s.' %
          (self.token_embedder_factory, self.shared_token_embedder))
    if self.shared_token_embedder is not None:
      embedders = {'token_ids': self.shared_token_embedder}
    else:
      self.token_embedder_factory: Callable[[], embedding.Embed]
      self.token_embedder = self.token_embedder_factory()
      embedders = {'token_ids': self.token_embedder}
    if self.position_embedder_factory is not None:
      self.position_embedder_factory: Callable[[], embedding.Embed]
      self.position_embedder = self.position_embedder_factory()
      embedders['position_ids'] = self.position_embedder
    self.embedder = embedding.MultiEmbed(
        embedders,
        sow_intermediates=self.sow_intermediates,
        capture_gradients=self.capture_gradients)
    self.input_dropout = self.dropout_factory()

    # Optionally share one relative-position bias across all layers; when not
    # shared, the per-layer factories receive None and create their own.
    self.relpos_bias = (
        self.shared_relative_position_bias_factory()
        if self.shared_relative_position_bias_factory is not None else None)

    # `layer_factory` is the "dense" layer factory.
    dense_layer_factory = lambda: self.layer_factory( # pylint:disable=g-long-lambda
        shared_relative_position_bias=self.relpos_bias)
    sparse_layer_factory = lambda: self.sparse_layer_factory( # pylint:disable=g-long-lambda
        shared_relative_position_bias=self.relpos_bias)

    if not self.scan_layers:
      # Unrolled path: materialize all num_layers layers, choosing dense vs
      # sparse per layer index according to sparse_layout.
      layer_factory = functools.partial(
          _layer_factory,
          dense_layer_factory=dense_layer_factory,
          sparse_layer_factory=sparse_layer_factory,
          num_layers=self.num_layers,
          num_sparse_layers=self.num_sparse_layers,
          sparse_layout=self.sparse_layout)
      self.layers = [layer_factory(layer) for layer in range(self.num_layers)]
      self.decoder = common.TransparentLayerSequence(self.layers)
    else:
      # Scan path: build a single repeatable block and scan it
      # _num_scan_blocks(...) times.
      # Convert to factory to conform with Flaxformer API.
      block_factory = lambda: MoeDecoderScanBlock( # pylint:disable=g-long-lambda
          dense_layer_factory=dense_layer_factory,
          sparse_layer_factory=sparse_layer_factory,
          num_layers=self.num_layers,
          num_sparse_layers=self.num_sparse_layers,
          sparse_layout=self.sparse_layout)
      block_factory = t5_architecture.maybe_remat(
          block_factory,
          self.layer_remat,
          self.scan_layers,
          static_argnums=(5, 6, 7, 8, 9))
      self.decoder = self._construct_scanned_decoder(
          block_factory,
          num_layers=_num_scan_blocks(self.num_layers, self.num_sparse_layers,
                                      self.sparse_layout),
          num_broadcast_args=9)

    self.decoder_norm = self.layer_norm_factory()
    self.output_dropout = self.dropout_factory()
    self.setup_output_logits()
def _validate_module_construction(
    sparse_layer_factory: Union[Optional[MakeEncoderLayerFn],
                                Optional[MakeDecoderLayerFn]],
    num_sparse_layers: Optional[int]):
  """Validates that sparse layer attributes are correctly specified.

  Raises:
    ValueError: If either required attribute was left as None.
  """
  required = (
      ('sparse_layer_factory', sparse_layer_factory),
      ('num_sparse_layers', num_sparse_layers),
  )
  for attribute_name, attribute_value in required:
    if attribute_value is None:
      raise ValueError(
          f'{attribute_name} must be specified but was left as None.')
def _is_sparse_layer(layer: int, num_layers: int, num_sparse_layers: int,
                     sparse_layout: LayerLayout) -> bool:
  """Returns True iff the layer at index `layer` should be a sparse layer."""
  if sparse_layout == LayerLayout.BOTTOM:
    # The first num_sparse_layers layers are sparse.
    return layer < num_sparse_layers

  if sparse_layout == LayerLayout.TOP:
    # The last num_sparse_layers layers are sparse.
    return num_layers - num_sparse_layers <= layer

  if sparse_layout == LayerLayout.MIDDLE:
    # Sparse layers are centered in the stack: 2 * layer lies within
    # num_layers +/- num_sparse_layers.
    return (num_layers - num_sparse_layers <= 2 * layer <
            num_layers + num_sparse_layers)

  if sparse_layout == LayerLayout.MIXED and num_sparse_layers > 0:
    if num_layers % num_sparse_layers != 0:
      raise ValueError(
          'For MIXED sparse (MoE) layer layouts, the number of '
          'sparse layers must divide evenly into the total number of '
          f'encoder/decoder layers, but num_layers={num_layers} while '
          f'num_sparse_layers={num_sparse_layers}')
    # Every period-th layer is sparse (i.e. layer % period == period - 1).
    period = num_layers // num_sparse_layers
    return (layer + 1) % period == 0

  return False
def _layer_factory(layer: int, dense_layer_factory: Callable[[], T],
                   sparse_layer_factory: Callable[[], T], num_layers: int,
                   num_sparse_layers: int,
                   sparse_layout: LayerLayout) -> T:
  """Constructs a sparse or dense layer depending on the model configuration.

  Args:
    layer: Index of the layer being constructed.
    dense_layer_factory: Zero-argument factory producing a dense layer.
    sparse_layer_factory: Zero-argument factory producing a sparse (MoE) layer.
    num_layers: Total number of layers (dense and sparse) in the stack.
    num_sparse_layers: Total number of sparse sublayers in the stack.
    sparse_layout: Placement of sparse modules within the stack.

  Returns:
    A newly constructed layer from the appropriate factory.
  """
  # Bug fix: `sparse_layout` was previously annotated as Callable[[], T], but
  # it is a LayerLayout enum (as in _is_sparse_layer and _scan_block_factory).
  if _is_sparse_layer(layer, num_layers, num_sparse_layers, sparse_layout):
    return sparse_layer_factory()
  else:
    return dense_layer_factory()
def _scan_block_factory(dense_layer_factory: Callable[[], T],
                        sparse_layer_factory: Callable[[], T], num_layers: int,
                        num_sparse_layers: int,
                        sparse_layout: LayerLayout) -> Sequence[T]:
  """Constructs a repeatable block of layers that can be Scanned.

  Raises:
    ValueError: For non-MIXED layouts with a strict subset of sparse layers,
      or if num_sparse_layers does not evenly divide num_layers.
  """
  # Degenerate cases: an all-dense or all-sparse stack repeats one layer.
  if num_sparse_layers == 0:
    return [dense_layer_factory()]
  if num_sparse_layers == num_layers:
    return [sparse_layer_factory()]

  if sparse_layout in [LayerLayout.BOTTOM, LayerLayout.MIDDLE, LayerLayout.TOP]:
    raise ValueError(
        'Scan is only supported for MIXED sparse (MoE) layer layouts. '
        f'Received: sparse_layout={sparse_layout}.')

  if sparse_layout == LayerLayout.MIXED:
    if num_layers % num_sparse_layers != 0:
      raise ValueError(
          'For MIXED sparse (MoE) layer layouts, the number of '
          'sparse layers must divide evenly into the total number of '
          f'encoder/decoder layers, but num_layers={num_layers} while '
          f'num_sparse_layers={num_sparse_layers}')
    # Every period-th layer is sparse: (period - 1) dense layers then 1 sparse.
    period = num_layers // num_sparse_layers
    block = [dense_layer_factory() for _ in range(period - 1)]
    block.append(sparse_layer_factory())
    return block

  raise ValueError('Unrecognized sparse layer layout: %s' % sparse_layout)
def _num_scan_blocks(num_layers: int, num_sparse_layers: int,
                     sparse_layout: LayerLayout) -> int:
  """Returns number of repeated MoE blocks that can be scanned over."""
  # Only the length of the repeatable block matters here, so probe
  # _scan_block_factory with trivial placeholder factories.
  placeholder = lambda: None
  block_length = len(
      _scan_block_factory(
          dense_layer_factory=placeholder,
          sparse_layer_factory=placeholder,
          num_layers=num_layers,
          num_sparse_layers=num_sparse_layers,
          sparse_layout=sparse_layout))
  return num_layers // block_length
| 25,449 | 40.517129 | 110 | py |
flaxformer | flaxformer-main/flaxformer/architectures/moe/moe_layers.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture of Experts layer.
"""
import functools
from typing import Optional, Tuple, Union
from absl import logging
from flax import linen as nn
from flax.linen import partitioning as flax_partitioning
import jax
import jax.numpy as jnp
from flaxformer.architectures.moe import routing
from flaxformer.architectures.moe import scatter_utils
from flaxformer.components import dense
from flaxformer.types import Array
from flaxformer.types import DType
class MoeLayer(nn.Module):
"""Sparse MoE SPMD layer with per-token routing.
Attributes:
num_experts: Number of available experts (feed-forward modules) in this
layer.
max_group_size: The total number of tokens (across the global batch) is
subdivided into groups of this size, on each device. Router computations
are then performed on a per-group basis. A larger group size will result
in slower but more accurate top-k and sorting computations, whereas a
smaller group size will result in faster but more approximate (and
potentially less stable) routing choices. Note that actual group size may
be smaller than max_group_size for consistency with the number of experts
and tokens; see also `strict_group_size` attribute. In practice, we find
that imperfect routing choices are tolerable and recommend choosing a
group size on the order of 4096 tokens, although this number will vary
based on model configuration and size.
train_capacity_factor: Scaling factor to increase the expert token capacity
during training. This factor plays an analogous, but slightly different,
role depending on the routing assignment algorithm: For "tokens choose"
routing, the capacity factor only affects the maximum number of tokens
that an expert will process. It does not affect how many experts a given
token is routed to; see the num_selected_experts attributes of "tokens
choose" routers. For "experts choose" routing, because experts always fill
their buffer, increasing the capacity factor will increase the number of
tokens that an expert will process AND will indirectly increase the number
of experts that a given token is routed to.
eval_capacity_factor: As above, but used during evaluation.
expert: The actual expert. Only MlpBlock and DenseGeneral are currently
supported.
router: Token dispatch router. The router determines which tokens are
dispatched to which expert, and how the expert outputs are combined.
num_expert_partitions: Specifies the upper bound for size of the expert
parallel submesh. This must be <= the number of experts.
num_model_partitions: Size of the model parallel submesh. Model parallelism
is used if num_model_partitions > 1.
min_expert_capacity: Minimum token processing capacity for each expert.
dropout_rate: Dropout rate for each expert.
input_hidden_dims_axes: Logical axis names to use for sharding constraints
applied to hidden dimensions of inputs (before experts are called).
output_hidden_dims_axes: Logical axis names to use for sharding constraints
applied to hidden dimensions of outputs (after experts are called).
dtype: The numeric type (default: bfloat16). We recommend a truncated float
type (e.g. bfloat16) to reduce all-to-all communication overhead. This
numeric type is used for all computations, except the router, which always
uses float32 for stability.
split_params: Whether or not to initialize each expert's parameters
independently.
precision: XLA precision for array computations.
strict_group_size: If True, fail if unable to set the token group size equal
to max_group_size. If False (default), the actual group size may be
smaller than max_group_size for consistency with the number of experts and
tokens.
"""
num_experts: int
max_group_size: int
# TODO: Switch to a single `capacity_factor` once we are using
# Fiddle to build different train vs eval model variants.
train_capacity_factor: float
eval_capacity_factor: float
expert: Union[dense.MlpBlock, dense.DenseGeneral]
router: routing.Router
num_expert_partitions: int
num_model_partitions: int
min_expert_capacity: int = 4
dropout_rate: float = 0.1
input_hidden_dims_axes: Tuple[str, ...] = ('embed',)
output_hidden_dims_axes: Tuple[str, ...] = ('embed',)
dtype: DType = jnp.bfloat16
split_params: bool = True
precision: jax.lax.Precision = jax.lax.Precision.DEFAULT
strict_group_size: bool = False
  def setup(self):
    """Verifies that the MoeLayer is correctly configured."""
    # The expert-parallel submesh cannot be larger than the expert set it
    # partitions.
    if self.num_expert_partitions > self.num_experts:
      raise ValueError(
          f'The number of expert partitions ({self.num_expert_partitions}) '
          f'cannot be greater than the number of experts ({self.num_experts}).'
      )
    # Number of replicas of the expert set; computed by the module-level
    # helper _num_expert_replicas (defined elsewhere in this file) from the
    # expert/model partition sizes — presumably devices not used for expert or
    # model parallelism replicate the experts. TODO confirm against helper.
    self.num_expert_replicas = _num_expert_replicas(
        self.num_expert_partitions, self.num_model_partitions
    )
  @nn.compact
  def __call__(
      self,
      inputs: Array,
      decode: bool = False,
      prefill: bool = False,
      prefill_lengths: Optional[Array] = None,
      *,
      enable_dropout: bool = True,
  ) -> Array:
    """Applies MoeLayer module.

    If the 'intermediates' collection is marked as mutable, this method will sow
    diversity metrics.

    Args:
      inputs: Batch of input embeddings of shape <float>[batch_size, seq_length,
        hidden_dims].
      decode: Whether to prepare and use an autoregressive cache.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache.
      enable_dropout: Enables dropout if set to True.

    Returns:
      Transformed inputs with same shape as inputs:
      <float>[batch_size, seq_length, hidden_dims].

    Raises:
      ValueError if an unrecognized dispatch algorithm is given.
    """
    original_batch_size, original_seq_length, *hidden_dims = inputs.shape

    # NOTE(review): _maybe_pad is defined elsewhere in this module; presumably
    # it pads the batch/sequence dims so tokens divide evenly across expert
    # groups — confirm against its definition. Padding is sliced off below.
    padded_inputs = _maybe_pad(
        inputs, self.num_experts, self.num_expert_replicas
    )
    padded_batch_size, padded_seq_length, *_ = padded_inputs.shape

    num_tokens = padded_batch_size * padded_seq_length
    # Routing is performed per group of approximately max_group_size tokens.
    num_groups = _num_groups(
        num_tokens,
        self.max_group_size,
        self.num_experts,
        self.num_expert_replicas,
        self.strict_group_size,
    )
    tokens_per_group = num_tokens // num_groups

    if enable_dropout:  # Training
      capacity_factor = self.train_capacity_factor
    else:  # Eval
      capacity_factor = self.eval_capacity_factor
    # Each group will send expert_capacity tokens to each expert.
    expert_capacity = int(
        round(capacity_factor * tokens_per_group / self.num_experts)
    )
    expert_capacity = max(expert_capacity, self.min_expert_capacity)

    # Reshape batch and sequence/token dimensions for expert routing.
    grouped_inputs = jnp.reshape(
        padded_inputs, (num_groups, tokens_per_group, *hidden_dims)
    )
    grouped_inputs = flax_partitioning.with_sharding_constraint(
        grouped_inputs, ('batch', 'length', *self.input_hidden_dims_axes)
    )

    # Dispatch algorithm depends on the concrete router type: scatter routers
    # use index-based dispatch; masked routers use einsum-based dispatch.
    if isinstance(self.router, routing.ScatterRouter):
      outputs = self._scatter_to_experts(
          grouped_inputs,
          enable_dropout,
          expert_capacity,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths,
      )
    elif isinstance(self.router, routing.MaskedRouter):
      outputs = self._mask_and_dispatch_to_experts(
          grouped_inputs,
          enable_dropout,
          expert_capacity,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths,
      )
    else:
      raise ValueError(f'Unrecognized router type: {self.router}')

    # Return to batched shape.
    result = outputs.reshape(
        (padded_batch_size, padded_seq_length, *outputs.shape[2:])
    )
    if (
        padded_seq_length - original_seq_length > 0
        or padded_batch_size - original_batch_size > 0
    ):
      # Inputs were padded in MoE layer. Slice out non-padding tokens.
      result = result[:original_batch_size, :original_seq_length]
    result = flax_partitioning.with_sharding_constraint(
        result, ('batch', 'length', *self.output_hidden_dims_axes)
    )
    return result
  def _scatter_to_experts(
      self,
      token_inputs: Array,
      enable_dropout: bool,
      expert_capacity: int,
      **kwargs,
  ) -> Array:
    """Wraps expert scatter routing and dispatching algorithm.

    This algorithm takes the following steps:
    (1) Compute expert dispatch indices and combine weights using self.router.
    (2) Scatter inputs to experts based on dispatch indices.
    (3) Recombine individual expert outputs using combine weights.

    Args:
      token_inputs: <float>[num_groups, tokens_per_group, hidden_dims] inputs to
        send to experts.
      enable_dropout: If true, apply jitter noise during routing and dropout
        during expert computation.
      expert_capacity: Each group will send this many tokens to each expert.
      **kwargs: Optional keyword arguments to pass to experts.

    Returns:
      <float>[num_groups, tokens_per_group, hidden_dims] outputs from experts.
    """
    num_groups, tokens_per_group, *hidden_dims = token_inputs.shape

    router_indices = self.router(
        token_inputs,
        self.num_experts,
        expert_capacity,
        apply_jitter=enable_dropout,
    )
    num_selected_experts = self.router.num_selected_experts

    # We need num_selected_experts copies of inputs for dispatching. This is a
    # no-op if num_selected_experts = 1.
    token_inputs = jnp.repeat(token_inputs, num_selected_experts, axis=1)

    # Mask out inputs that should not be routed.
    # A dispatch index pair is valid only if both the expert index (slot 0) and
    # the capacity-buffer position (slot 1) are within bounds.
    # Shape: [num_groups, tokens_per_group, num_selected_experts].
    successfully_routed = jnp.logical_and(
        router_indices.dispatch_indices[..., 0] < self.num_experts,
        router_indices.dispatch_indices[..., 1] < expert_capacity,
    )
    successfully_routed = successfully_routed.reshape((num_groups, -1))
    # Zero out embeddings of tokens that failed to route.
    # Shape: [num_groups, tokens_per_group * num_selected_experts, hidden_dims].
    masked_inputs = jnp.einsum(
        'gt...,gt->gt...',
        token_inputs,
        successfully_routed,
        precision=self.precision,
    )

    # Combine tokens_per_group and num_selected_experts axes.
    flattened_dispatch_indices = router_indices.dispatch_indices.reshape(
        num_groups, -1, 2
    )

    # Scatter masked inputs.
    shape = (self.num_experts, expert_capacity, *hidden_dims)
    # Note: scatter_nd can be slow under pjit on TPUs, presumably due to
    # suboptimal SPMD compilations. On TPUs, the recommendation is to use
    # MaskedRouter(s) instead.
    # Shape: [num_groups, num_experts, expert_capacity, hidden_dims].
    expert_inputs = jax.vmap(
        lambda i, x: scatter_utils.scatter_nd(i, x, shape)
    )(flattened_dispatch_indices, masked_inputs)

    expert_outputs = self._call_experts(expert_inputs, enable_dropout, **kwargs)

    # Gather outputs.
    # Shape: [num_groups, tokens_per_group * num_selected_experts, hidden_dims].
    expert_outputs = jax.vmap(lambda i, x: x[i[:, 0], i[:, 1]])(
        flattened_dispatch_indices, expert_outputs
    )
    # Separate out num_selected_experts dimension.
    # Shape: [num_groups, tokens_per_group, num_selected_experts, hidden_dims].
    expert_outputs = expert_outputs.reshape(
        (num_groups, tokens_per_group, num_selected_experts, *hidden_dims)
    )

    # Shape: [num_groups, tokens_per_group, num_selected_experts, hidden_dims].
    # Weighted sum of the outputs from the different experts.
    combined_outputs = jnp.einsum(
        'gtk...,gtk->gt...',
        expert_outputs,
        router_indices.combine_weights,
        precision=self.precision,
    )

    # Gather and sow expert metrics.
    successfully_routed = successfully_routed.reshape(
        (num_groups, tokens_per_group, num_selected_experts)
    )
    # Number of tokens that were dispatched to at least one expert.
    num_tokens_dispatched_somewhere = jnp.max(
        successfully_routed, axis=-1
    ).sum()
    if self.router.ignore_padding_tokens:
      # Only count non-padding tokens.
      # To identify non-padding tokens, we rely on the fact that padding tokens
      # in the inputs have already been zeroed out in the default T5
      # architecture. See
      # https://github.com/google/flaxformer/blob/9712a16/flaxformer/architectures/t5/t5_architecture.py#L315
      # and
      # https://github.com/google/flaxformer/blob/9712a16/flaxformer/architectures/t5/t5_architecture.py#L603.
      num_tokens = jnp.array(
          (jnp.sum(jnp.abs(token_inputs), axis=-1) > 0),
          dtype=token_inputs.dtype,
      ).sum()
    else:
      num_tokens = float(num_groups * tokens_per_group)
    fraction_tokens_left_behind = (
        1.0 - num_tokens_dispatched_somewhere / num_tokens
    )
    # Total number of tokens that were dispatched (one token could be dispatched
    # to multiple experts).
    num_tokens_dispatched = successfully_routed.sum()
    total_expert_capacity = self.num_experts * expert_capacity * num_groups
    expert_usage = num_tokens_dispatched / total_expert_capacity
    # Of the tokens dispatched, how confident was the router in its routing.
    router_confidence = (
        router_indices.combine_weights.sum() / num_tokens_dispatched
    )

    self._sow_expert_metrics(  # pytype: disable=wrong-arg-types  # jax-types
        router_indices.auxiliary_loss,
        router_indices.router_z_loss,
        fraction_tokens_left_behind,
        router_confidence,
        expert_usage,
    )

    return combined_outputs
  def _mask_and_dispatch_to_experts(
      self,
      token_inputs: Array,
      enable_dropout: bool,
      expert_capacity: int,
      **kwargs,
  ) -> Array:
    """Wraps expert masked routing and dispatching algorithm.

    This algorithm takes the following steps:
    (1) Compute dispatch mask and combine array using self.router.
    (2) Dispatch inputs to experts based on dispatch mask.
    (3) Recombine individual expert outputs using combine array.

    Args:
      token_inputs: <float>[num_groups, tokens_per_group, hidden_dims] inputs to
        send to experts.
      enable_dropout: If true, apply jitter noise during routing and dropout
        during expert computation.
      expert_capacity: Each group will send this many tokens to each expert.
      **kwargs: Optional keyword arguments to pass to experts.

    Returns:
      <float>[num_groups, tokens_per_group, hidden_dims] outputs from experts.
    """
    num_groups, tokens_per_group = token_inputs.shape[:2]

    router_mask = self.router(
        token_inputs,
        self.num_experts,
        expert_capacity,
        apply_jitter=enable_dropout,
    )

    # Dispatch via einsum with the (one-hot-like) dispatch mask.
    # Shape: [num_groups, num_experts, expert_capacity, hidden_dims].
    expert_inputs = jnp.einsum(
        'gt...,gtec->gec...',
        token_inputs,
        router_mask.dispatch_mask,
        precision=self.precision,
    )

    expert_outputs = self._call_experts(expert_inputs, enable_dropout, **kwargs)

    # Recombine expert outputs, weighted by the router's combine array.
    # Shape: [num_groups, tokens_per_group, hidden_dims]
    combined_outputs = jnp.einsum(
        'gec...,gtec->gt...',
        expert_outputs,
        router_mask.combine_array,
        precision=self.precision,
    )

    # Gather and sow expert metrics.
    # Number of tokens that were dispatched to at least one expert.
    num_tokens_dispatched_somewhere = jnp.max(
        router_mask.dispatch_mask, axis=(-1, -2)
    ).sum()
    if self.router.ignore_padding_tokens:
      # Only count non-padding tokens.
      # To identify non-padding tokens, we rely on the fact that padding tokens
      # in the inputs have already been zeroed out in the default T5
      # architecture. See
      # https://github.com/google/flaxformer/blob/9712a16/flaxformer/architectures/t5/t5_architecture.py#L315
      # and
      # https://github.com/google/flaxformer/blob/9712a16/flaxformer/architectures/t5/t5_architecture.py#L603.
      num_tokens = jnp.array(
          (jnp.sum(jnp.abs(token_inputs), axis=-1) > 0),
          dtype=token_inputs.dtype,
      ).sum()
    else:
      num_tokens = float(num_groups * tokens_per_group)
    fraction_tokens_left_behind = (
        1.0 - num_tokens_dispatched_somewhere / num_tokens
    )
    # Total number of tokens that were dispatched (one token could be
    # dispatched to multiple experts).
    num_tokens_dispatched = router_mask.dispatch_mask.sum()
    # Of the tokens dispatched, how confident was the router in its routing?
    router_confidence = router_mask.combine_array.sum() / num_tokens_dispatched

    if isinstance(self.router, routing.ExpertsChooseMaskedRouter):
      expert_usage = 1.0  # Experts fully utilized when "expert choose tokens"
    else:
      total_expert_capacity = self.num_experts * expert_capacity * num_groups
      expert_usage = num_tokens_dispatched / total_expert_capacity

    self._sow_expert_metrics(
        router_mask.auxiliary_loss,
        router_mask.router_z_loss,
        fraction_tokens_left_behind,
        router_confidence,
        expert_usage,
    )

    return combined_outputs
  def _call_experts(
      self, inputs: Array, enable_dropout: bool, **kwargs
  ) -> Array:
    """Sends and receives inputs to experts using pjit induced all-to-all calls.

    Assumes training is distributed using jax.experimental.pjit and induces
    all-to-all calls using reshapes and sharding constraints. We use Flax's
    lifted vmap to apply the expert transformation.

    Input data is ideally partitioned as:
      G_ed ** H_m,
    where G (num groups) is partitioned along the e (expert) and d (data)
    axes, and H (hidden dims) is partitioned along the m (model) axis. "**"
    denotes fully replicated axes. By partitioning H along the model parallel
    axis, we avoid duplicate information transfer in the all-to-alls between
    devices replicating data.

    The entire computation is performed using self.dtype. We recommend a
    truncated float type (e.g. bfloat16) to reduce all-to-all communication
    overhead.

    Args:
      inputs: <float>[num_groups, num_experts, expert_capacity, hidden_dims]
        inputs to be dispatched to experts. Each slice across the first
        dimension is passed to a different expert.
      enable_dropout: Whether or not experts should apply dropout.
      **kwargs: Optional keyword arguments to pass to experts.

    Returns:
      <float>[num_groups, num_experts, expert_capacity, hidden_dims] outputs
      from expert computation.
    """
    num_groups, num_experts, capacity, *hidden_dims = inputs.shape
    inputs_dtype = inputs.dtype
    # Compute in self.dtype (typically bfloat16) to cut all-to-all volume; the
    # original dtype is restored before returning.
    inputs = jax.lax.convert_element_type(inputs, self.dtype)

    # Send examples to their target devices.
    # Note that the ordering of the logical mesh axes in these sharding
    # constraints should map consistently to the same underlying mesh axes; i.e.
    # 'batch' --> ('expert', 'data') and
    # ('expert', 'expert_replica') --> ('expert', 'data').
    inputs = flax_partitioning.with_sharding_constraint(
        inputs, ('batch', 'unmodeled', 'length', *self.input_hidden_dims_axes)
    )

    if self.num_expert_partitions != num_experts:
      # Explicitly extract dimension of size self.num_expert_partitions, along
      # which to partition experts.
      inputs = inputs.reshape(
          self.num_expert_partitions,
          num_groups // num_experts,
          num_experts // self.num_expert_partitions,
          num_experts,
          capacity,
          *hidden_dims,
      )
      inputs = jnp.swapaxes(inputs, 1, 2)

    # Induce all-to-alls:
    # E_ed ** H_m --> E_ed ** H_m,
    # where E is the number of experts and H is the hidden dimension. e, d, and
    # m denote the expert, data and model axes, respectively.
    inputs = inputs.reshape(
        num_experts,
        num_groups // num_experts,
        num_experts,
        capacity,
        *hidden_dims,
    )
    inputs = flax_partitioning.with_sharding_constraint(
        inputs,
        (
            'expert',
            'expert_replicas',
            'unmodeled',
            'length',
            *self.input_hidden_dims_axes,
        ),
    )
    # Swap the "source expert-shard" and "target expert" axes; under the
    # surrounding sharding constraints this is what triggers the all-to-all.
    inputs = jnp.swapaxes(inputs, 0, 2)
    inputs = flax_partitioning.with_sharding_constraint(
        inputs,
        (
            'expert',
            'expert_replicas',
            'unmodeled',
            'length',
            *self.input_hidden_dims_axes,
        ),
    )
    inputs = inputs.reshape(num_experts, num_groups * capacity, *hidden_dims)

    # Perform all-gather here along hidden dimension (H) axis:
    # E_ed ** H_m --> E_ed ** H.
    inputs = flax_partitioning.with_sharding_constraint(
        inputs, ('expert', 'expert_replicas', 'unmodeled')
    )

    # Apply expert transformation.

    # Vectorize over the 'expert' axis of `inputs`. We use Flax's Lifted vmap
    # to introduce parameters along the mapped `expert` axis.
    # The vmapped MLP operation essentially performs:
    # E_ed ** H --> E_ed ** F_m --> E_ed ** H,
    # where F is the feed-forward dimension.
    @functools.partial(
        flax_partitioning.vmap_with_axes,
        in_axes=(0,),
        variable_axes={'params': 0},  # Each expert has its own parameters
        # Any mapped sharding constraints should be applied along 'expert' axis.
        spmd_axis_name='expert',
        split_rngs={
            # Whether to initialize each expert's params independently.
            'params': self.split_params,
            'dropout': True,  # Always use different dropout key for each expert
        },
        partitioning_axis_names={'params': 'expert'},
    )
    def layer_fn(mapped_expert: nn.Module, expert_inputs: Array) -> Array:
      return self._filter_inputs(
          mapped_expert, expert_inputs, enable_dropout=enable_dropout, **kwargs
      )

    outputs = layer_fn(self.expert, inputs)

    # Send examples back to their original devices, mirroring the dispatch
    # reshapes/swaps above in reverse.
    output_dims = outputs.shape[2:]
    outputs = outputs.reshape(num_experts, num_groups, capacity, *output_dims)
    # Reshard along hidden dimension (H) axis:
    # E_ed ** H --> E_ed ** H_m,
    # before performing all-to-alls.
    outputs = flax_partitioning.with_sharding_constraint(
        outputs,
        ('expert', 'expert_replicas', 'length', *self.output_hidden_dims_axes),
    )
    outputs = outputs.reshape(
        num_experts,
        num_groups // num_experts,
        num_experts,
        capacity,
        *output_dims,
    )
    outputs = flax_partitioning.with_sharding_constraint(
        outputs,
        (
            'expert',
            'expert_replicas',
            'unmodeled',
            'length',
            *self.output_hidden_dims_axes,
        ),
    )
    outputs = jnp.swapaxes(outputs, 0, 2)
    outputs = flax_partitioning.with_sharding_constraint(
        outputs,
        (
            'expert',
            'expert_replicas',
            'unmodeled',
            'length',
            *self.output_hidden_dims_axes,
        ),
    )

    if self.num_expert_partitions != num_experts:
      # Explicitly extract dimension of size self.num_expert_partitions, along
      # which to partition experts.
      outputs = outputs.reshape(
          self.num_expert_partitions,
          num_experts // self.num_expert_partitions,
          num_groups // num_experts,
          num_experts,
          capacity,
          *output_dims,
      )
      outputs = jnp.swapaxes(outputs, 1, 2)

    outputs = outputs.reshape(num_groups, num_experts, capacity, *output_dims)
    outputs = flax_partitioning.with_sharding_constraint(
        outputs, ('batch', 'unmodeled', 'length', *self.output_hidden_dims_axes)
    )
    # Restore the caller's dtype.
    return jax.lax.convert_element_type(outputs, inputs_dtype)
def _filter_inputs(
    self,
    mapped_expert: nn.Module,
    expert_inputs: Array,
    enable_dropout: bool = True,
    **kwargs,
) -> Array:
  """Invokes `mapped_expert` with the arguments its call API expects.

  MLP experts (dense.MlpBlock) accept a dropout flag and extra keyword
  arguments, whereas plain dense experts (dense.DenseGeneral) only take the
  inputs.

  Args:
    mapped_expert: Expert function that is vmapped.
    expert_inputs: Prepared inputs that are mapped over. Shape:
      <float>[num_experts, num_groups // num_experts, num_experts, capacity,
      hidden_dims]
    enable_dropout: Enables dropout if set to True. Only used for MLP experts.
    **kwargs: Optional keyword arguments to pass to experts. Only passed to
      MLP experts.

  Returns:
    Outputs from expert computation.

  Raises:
    ValueError for unsupported expert classes.
  """
  # TODO: Cleaner way of handling different expert call APIs?
  expert = self.expert
  if isinstance(expert, dense.DenseGeneral):
    return mapped_expert(expert_inputs)
  if isinstance(expert, dense.MlpBlock):
    return mapped_expert(expert_inputs, enable_dropout=enable_dropout, **kwargs)
  raise ValueError(f'Unsupported expert class: {self.expert}.')
def _sow_expert_metrics(
self,
auxiliary_loss: float,
router_z_loss: float,
fraction_tokens_left_behind: float,
router_confidence: float,
expert_usage: float,
) -> None:
"""Sows metrics to analyze expert routing.
Args:
auxiliary_loss: Load balancing loss.
router_z_loss: Loss to encourage smaller router logits.
fraction_tokens_left_behind: Fraction of tokens NOT routed to any expert.
router_confidence: Normalized sum of combine weights of those tokens which
were routed to experts.
expert_usage: Fraction of total expert capacity used to process tokens.
NOTE: We wrap scalar metric values in into a 2D array to play nicely with
the Flaxformer T5 architecture's scan invocation; see
https://github.com/google/flaxformer/blob/9712a16/flaxformer/architectures/t5/t5_architecture.py#L742
and
https://github.com/google/flaxformer/blob/9712a16/flaxformer/architectures/t5/t5_architecture.py#L973.
"""
for metric, value in [
('auxiliary_loss', auxiliary_loss),
('router_z_loss', router_z_loss),
('fraction_tokens_left_behind', fraction_tokens_left_behind),
('router_confidence', router_confidence),
('expert_usage', expert_usage),
]:
wrapped_metric_value = jnp.asarray(value).reshape((1, 1))
self.sow('intermediates', metric, wrapped_metric_value)
def _num_groups(
num_tokens: int,
max_group_size: int,
num_experts: int,
num_expert_replicas: int,
strict_group_size: bool = False,
) -> int:
"""Returns the number of token routing groups.
Note: For pjit-based training, all quantities are global.
We select the smallest num_groups such that:
- num_groups >= num_tokens / max_group_size (ensuring the group size is no
larger than max_group_size),
- num_tokens % num_groups = 0 (ensuring that the group size evenly divides
into the num_tokens),
- num_groups % (num_expert_replicas * num_experts) = 0 (ensuring that number
of groups can be split across the total number of experts).
Args:
num_tokens: Number of tokens from input batch.
max_group_size: Maximum size of each token routing group. Actual group size
may end up being smaller.
num_experts: Total number of unique experts.
num_expert_replicas: Number of copies of each expert.
strict_group_size: If True, fail if unable to set the token group size equal
to max_group_size.
Returns:
Number of token routing groups.
Raises:
ValueError if we cannot find a group_size satisfying the above requirements.
"""
# For pjit-based partitioning, we manipulated arrays globally. The number of
# experts must evenly divide the number of (global) groups.
min_num_groups = num_tokens // max_group_size
min_num_groups = max(min_num_groups, num_expert_replicas * num_experts)
def viable(n):
"""Returns true iff n is a viable number of groups."""
return num_tokens % n == 0 and n % (num_expert_replicas * num_experts) == 0
# Increase the number of groups (and decrease the group size) until we have
# a viable number of groups.
num_groups = min_num_groups
while num_groups < num_tokens and not viable(num_groups):
num_groups += 1
if num_tokens % num_groups > 0:
raise ValueError(
'Group size and the number of experts must divide evenly into the '
f'global number of tokens, but num_tokens={num_tokens}, while '
f'num_groups={num_groups} for max_group_size={max_group_size} '
f'and num_experts={num_experts}, each with {num_expert_replicas} '
'replicas. Consider increasing the number of tokens (by increasing the '
'batch size, sequence length, or beam size), and/or decreasing the '
'number of expert copies (by increasing the expert parallelism or '
'decreasing the number of experts).'
)
group_size = num_tokens // num_groups
logging.info(
(
'Selected group_size=%d and num_groups=%d for input num_tokens=%d, '
'max_group_size=%d, num_experts=%d and num_expert_replicas=%d'
),
group_size,
num_groups,
num_tokens,
max_group_size,
num_experts,
num_expert_replicas,
)
if strict_group_size and group_size != max_group_size:
raise ValueError(
f'Selected group_size={group_size} is less than the '
f'max_group_size={max_group_size}. Exiting because strict mode is '
'active (strict_group_size=True)'
)
return num_groups
def _num_expert_replicas(
num_expert_partitions: int, num_model_partitions: int
) -> int:
"""Infer the number of expert replicas.
This computation assumes that the underlying mesh has shape ['data', 'expert',
'model']. We assume that experts are replicated along the 'data' axis, whose
dimension is inversely proportional to the size of the expert and model
parallel submeshes.
Args:
num_expert_partitions: Size of expert parallel submesh.
num_model_partitions: Size of model parallel submesh.
Returns:
Number of replicas per expert.
"""
return max(
1, jax.device_count() // (num_expert_partitions * num_model_partitions)
)
def _maybe_pad(
inputs: Array, num_experts: int, num_expert_replicas: int
) -> Array:
"""Pads input array if number of tokens < number of experts.
This function pads the input sequence to ensure that the number of tokens is
divisble by the total number of experts.
Args:
inputs: Batch of input embeddings of shape <float>[batch_size, seq_length,
hidden_dims].
num_experts: Number of unique experts.
num_expert_replicas: Number of copies of each expert.
Returns:
Input embeddings of shape <float>[batch_size + min_batch_padding, seq_length
+ min_seq_padding, hidden_dims]. Only one of min_batch_padding or
min_seq_padding can be nonzero; both will be zero if no padding is required.
"""
batch_size, seq_length, *_ = inputs.shape
num_tokens = batch_size * seq_length
total_num_experts = num_expert_replicas * num_experts
if num_tokens % total_num_experts != 0:
# Let's see how much padding is required if we pad the batch dimension.
min_batch_padding = 1
num_padding_tokens = seq_length
while (num_tokens + num_padding_tokens) % total_num_experts != 0:
# This loop will always yield
# num_padding_tokens <= abs(total_num_experts * seq_length - num_tokens)
# or, equivalently,
# min_batch_padding <= abs(total_num_experts - batch_size).
min_batch_padding += 1
num_padding_tokens += seq_length
# Alternatively, we could pad along the sequence dimension.
min_seq_padding = 1
num_padding_tokens = batch_size
while (num_tokens + num_padding_tokens) % total_num_experts != 0:
# This loop will always yield
# num_padding_tokens <= abs(total_num_experts * batch_size - num_tokens)
# or, equivalently,
# min_seq_padding <= abs(total_num_experts - seq_length).
min_seq_padding += 1
num_padding_tokens += batch_size
# Use the dimension which requires the least padding.
if min_seq_padding * batch_size > min_batch_padding * seq_length:
min_seq_padding = 0
else:
min_batch_padding = 0
# TODO: Rather than relying on one of the dimensions, we
# should select the minimal amount of padding along a mixture of both of
# the sequence and batch dimensions.
result = jnp.pad(
inputs,
((0, min_batch_padding), (0, min_seq_padding), (0, 0)),
'constant',
constant_values=0,
)
logging.warning(
(
'Efficiency warning: Batch size / sequence length temporarily'
' padded by %d tokens in MoE layer to ensure that the total number'
' of tokens is divisible by the total number of experts (%d). For'
' improved efficiency, consider increasing the number of tokens (by'
' increasing the batch size or beam size), and/or decreasing the'
' number of expert copies (by increasing the expert parallelism or'
' decreasing the number of experts).'
),
min_batch_padding * seq_length + min_seq_padding * batch_size,
total_num_experts,
)
return result
else:
return inputs
| 34,130 | 36.839246 | 110 | py |
flaxformer | flaxformer-main/flaxformer/architectures/moe/routing.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture of Experts routing mechanisms."""
from typing import Any, Iterable, Optional, Sequence, Tuple, Union
import flax
from flax import linen as nn
from flax.linen import partitioning as flax_partitioning
import jax
import jax.numpy as jnp
from flaxformer.components import dense
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
RouterOutput = Any
# Switch Transformer (https://arxiv.org/abs/2101.03961) suggests using
# nn.initializers.variance_scaling(0.1, "fan_in", "truncated_normal")
# scaling throughout MoE models, but we find slightly better results adopting
# typical normally-distributed scaling for the router specifically.
default_kernel_init = nn.initializers.normal(stddev=2e-2)
default_bias_init = nn.initializers.zeros
@flax.struct.dataclass
class RouterIndices:
  """Dispatch indices and combine weights for scatter/gather-based routing.

  Attributes:
    dispatch_indices: <int32>[num_groups, tokens_per_group,
      num_selected_experts, 2] dispatch indices indicating, for each token, its
      preferred expert and its priority in that expert's buffer.
    combine_weights: <float>[num_groups, tokens_per_group, num_selected_experts]
      combine weights used for scaling expert outputs with the router's dispatch
      probability/confidence.
    auxiliary_loss: Load balancing loss for router.
    router_z_loss: Router z-loss. Encourages router logits to remain small in an
      effort to improve stability.
  """
  dispatch_indices: Array
  combine_weights: Array
  auxiliary_loss: float
  # Defaults to 0 so routers that do not compute a z-loss can omit it; it is
  # typically filled in after construction via `.replace(router_z_loss=...)`.
  router_z_loss: float = 0.
@flax.struct.dataclass
class RouterMask:
  """Dispatch and combine arrays for expert routing with masked matmuls.

  Attributes:
    dispatch_mask: <bool>[num_groups, tokens_per_group, num_experts,
      expert_capacity] dispatch array that is 1 if the token gets routed to the
      corresponding expert, and 0 otherwise.
    combine_array: <float>[num_groups, tokens_per_group, num_experts,
      expert_capacity] combine array used for combining expert outputs and
      scaling with router probability.
    auxiliary_loss: Load balancing loss for router.
    router_z_loss: Router z-loss. Encourages router logits to remain small in an
      effort to improve stability.
  """
  dispatch_mask: Array
  combine_array: Array
  auxiliary_loss: float
  # Defaults to 0 so routers that do not compute a z-loss can omit it; it is
  # typically filled in after construction via `.replace(router_z_loss=...)`.
  router_z_loss: float = 0.
def _favor_one_hot_slices() -> bool:
"""Returns true iff running on TPUs."""
return jax.default_backend() == 'tpu' or jax.devices()[0].platform == 'tpu'
def _take_along_axis(array: Array, indices: Array, axis: int) -> Array:
  """Takes values from the input array by matching 1D index and data slices.

  Functionally equivalent to jax.numpy.take_along_axis, except that on TPUs
  the selection is performed with one-hot matrix multiplications:
  (1) On TPUs, one-hot matmuls are particularly helpful for avoiding erroneous
      all-gather ops when running under pjit.
  (2) On other platforms, we fall back to jax.numpy.take_along_axis.

  Notes:
    - To simplify matters in case (1), we only support slices along the second
      or last dimensions.
    - We may wish to revisit (1) for very large arrays.

  Args:
    array: Source array.
    indices: Indices to take along each 1D slice of array.
    axis: Axis along which to take 1D slices.

  Returns:
    The indexed result.
  """
  if array.ndim != indices.ndim:
    raise ValueError(
        'indices and array must have the same number of dimensions; '
        f'{indices.ndim} vs. {array.ndim}.')

  slices_last = axis == -1 or axis == array.ndim - 1
  slices_second = axis == 1 or axis == -array.ndim + 1
  if not (slices_last or slices_second):
    raise ValueError(
        'Only slices along the second or last dimension are supported; '
        f'array.ndim = {array.ndim}, while axis = {axis}.')

  if not _favor_one_hot_slices():
    return jnp.take_along_axis(array, indices, axis=axis)

  one_hot_indices = jax.nn.one_hot(indices, array.shape[axis], axis=axis)
  if axis == -1 or array.ndim == 1:
    # Take i elements from the last dimension (s).
    einsum_spec = '...s,...is->...i'
  else:
    # Take i elements from the second dimension (s). We assume here that we
    # always want to slice along the second dimension.
    einsum_spec = 'ns...,nis...->ni...'
  # We must use HIGHEST precision to accurately reproduce indexing operations
  # with matrix multiplications.
  result = jnp.einsum(
      einsum_spec, array, one_hot_indices, precision=jax.lax.Precision.HIGHEST)
  return jax.lax.convert_element_type(result, array.dtype)
def _top_k(array: Array, k: int) -> Tuple[Array, Array]:
  """Returns top k values and their indices along the last axis of the array.

  Functionally equivalent to jax.lax.top_k, but in a more XLA friendly manner
  for TPUs:
  (1) On TPUs, the top k values are re-gathered with one-hot matrix
      multiplications (via `_take_along_axis`). This convoluted way of
      obtaining the top k values is generally faster on TPUs and, for pjit in
      particular, avoids adding extra all-gather ops during backpropagation.
  (2) On other platforms, we fall back to jax.lax.top_k (and its underlying
      scatter op).

  Args:
    array: Source array.
    k: Number of top values to select.

  Returns:
    - Top k values
    - Associated top k indices.
  """
  if not _favor_one_hot_slices():
    return jax.lax.top_k(array, k)
  # Compute only the indices with top_k, then gather the corresponding values
  # using one-hot matmuls.
  top_indices = jax.lax.top_k(array, k)[-1]
  top_values = _take_along_axis(array, top_indices, axis=-1)
  return top_values, top_indices
class RouterWeights(nn.Module):
  """Router module converting token inputs to router logits.

  Attributes:
    use_bias: Whether or not to use the bias term in computing the logits.
    dtype: Numerical float type for router logit computation.
    kernel_init: Initialization scheme for kernel.
    bias_init: Initialization scheme for bias.
    precision: XLA precision for array computations.
    axis: Axes along which to apply the dense router weights transformation.
      Defaults to final axis (typically the "hidden dimension").
    kernel_axis_names: Logical axis names to use for kernel sharding.
    reshape_kernel: Whether to reshape the kernel parameter to 2D for Adafactor.
  """
  use_bias: bool = True
  dtype: DType = jnp.bfloat16
  kernel_init: Initializer = default_kernel_init # pytype: disable=annotation-type-mismatch # jax-types
  bias_init: Initializer = default_bias_init
  precision: jax.lax.Precision = jax.lax.Precision.DEFAULT
  axis: Union[Iterable[int], int] = -1
  kernel_axis_names: Sequence[str] = ('embed', 'unmodeled')
  reshape_kernel: bool = True

  @nn.compact
  def __call__(self, token_inputs: Array, num_experts: int) -> Array:
    """Projects token representations onto router logits.

    Args:
      token_inputs: Flattened batch of tokens with shape <float>[num_groups,
        group_size, hidden_dim].
      num_experts: Number of experts.

    Returns:
      Router logits with shape <float>[num_groups, group_size, num_experts].
    """
    # Single dense projection; named 'w' to keep the parameter tree stable.
    router_projection = dense.DenseGeneral(
        features=num_experts,
        axis=self.axis,
        use_bias=self.use_bias,
        dtype=self.dtype,
        kernel_init=self.kernel_init,
        bias_init=self.bias_init,
        precision=self.precision,
        kernel_axis_names=self.kernel_axis_names,
        reshape_kernel=self.reshape_kernel,
        name='w')
    return router_projection(token_inputs)
class Router(nn.Module):
  """Abstract base router class, defining router API and inner workings.

  Attributes:
    router_weights: Configurable module used to compute router logits from token
      inputs.
    jitter_noise: Amplitude of jitter noise applied to router logits.
    dtype: Numeric float type for returned combine array. All actual
      computations are performed in float32 of the input for stability.
    ignore_padding_tokens: Whether to ignore padding tokens during routing. Note
      that some routers (e.g. TokensChooseMaskedRouter) will completely ignore
      padding tokens, while others (e.g. TokensChooseScatterRouter and
      ExpertsChooseMaskedRouter) will simply down-weight the probability of
      selecting padding tokens.
  """
  router_weights: RouterWeights
  jitter_noise: float
  dtype: jnp.dtype
  ignore_padding_tokens: bool

  def __call__(self,
               token_inputs: Array,
               num_experts: int,
               expert_capacity: int,
               apply_jitter: bool = True) -> RouterOutput:
    """Computes dispatch and combine arrays for routing to experts.

    Args:
      token_inputs: <float>[num_groups, tokens_per_group, hidden_dim] inputs to
        send to experts.
      num_experts: Number of experts.
      expert_capacity: Each group will send this many tokens to each expert.
      apply_jitter: If true, apply jitter noise during routing.

    Returns:
      Router indices or mask arrays (depending on router type).
    """
    # Logical axis annotations guide pjit's partitioning of the router
    # computation.
    token_inputs = flax_partitioning.with_sharding_constraint(
        token_inputs, ('batch', 'length', 'embed'))
    router_probs, router_logits = self._compute_router_probabilities(
        token_inputs, num_experts, apply_jitter)
    router_probs = flax_partitioning.with_sharding_constraint(
        router_probs, ('batch', 'length', 'unmodeled'))
    router_logits = flax_partitioning.with_sharding_constraint(
        router_logits, ('batch', 'length', 'unmodeled'))

    if self.ignore_padding_tokens:
      # To identify non-padding tokens, we rely on the fact that padding tokens
      # in the inputs have already been masked in the default T5 architecture.
      # See
      # https://github.com/google/flaxformer/blob/9712a16/flaxformer/architectures/t5/t5_architecture.py#L315
      # and
      # https://github.com/google/flaxformer/blob/9712a16/flaxformer/architectures/t5/t5_architecture.py#L603.
      padding_mask = jnp.array((jnp.sum(jnp.abs(token_inputs), axis=-1) > 0),
                               dtype=token_inputs.dtype)
      # Zero out router logits corresponding to padding tokens.
      router_logits *= jnp.expand_dims(padding_mask, axis=-1)
    else:
      padding_mask = None

    instructions = self._compute_routing_instructions(router_probs,
                                                      padding_mask,
                                                      expert_capacity)

    # Attach the router z-loss, computed from the raw (pre-softmax) logits.
    return instructions.replace(router_z_loss=_router_z_loss(router_logits))

  def _compute_router_probabilities(self, token_inputs: Array, num_experts: int,
                                    apply_jitter: bool) -> Tuple[Array, Array]:
    """Computes router probabilities from input tokens.

    Args:
      token_inputs: <float>[num_groups, tokens_per_group, hidden_dim] from which
        router probabilities are computed.
      num_experts: Number of experts.
      apply_jitter: If true, apply jitter noise.

    Returns:
      - <float32>[num_groups, tokens_per_group, num_experts] probabilities for
        each token and expert. Used for routing tokens to experts.
      - <float>[num_groups, tokens_per_group, num_experts] raw router logits.
        Used for computing router z-loss.
    """
    # For remainder of routing computation we use float32 to ensure stability.
    # See the discussion of "selective precision" in
    # https://arxiv.org/abs/2101.03961.
    token_inputs = jax.lax.convert_element_type(token_inputs, jnp.float32)

    if apply_jitter and self.jitter_noise > 0:
      # Multiplicative jitter drawn uniformly from
      # [1 - jitter_noise, 1 + jitter_noise]. Requires a 'jitter' RNG stream.
      token_inputs *= jax.random.uniform(
          self.make_rng('jitter'),
          token_inputs.shape,
          token_inputs.dtype,
          minval=1.0 - self.jitter_noise,
          maxval=1.0 + self.jitter_noise)

    # Shape: [num_groups, tokens_per_group, num_experts]
    router_logits = self.router_weights(token_inputs, num_experts)

    router_probabilities = jax.nn.softmax(router_logits, axis=-1)

    return router_probabilities, router_logits

  def _compute_routing_instructions(self, router_probs: Array,
                                    padding_mask: Optional[Array],
                                    expert_capacity: int) -> RouterOutput:
    """Computes instructions for routing inputs to experts."""
    raise NotImplementedError(
        'Router is an abstract class that should be subclassed.')
class ScatterRouter(Router):
  """Abstract base router class for scatter dispatch routers.

  ScatterRouter(s) return RouterIndices containing dispatch indices and combine
  weights for sending token inputs (via scatter) and receiving outputs (via
  gather) to and from experts.

  Scatter-based routing is generally faster than masked matmul routing on CPUs
  and GPUs.
  """

  def _compute_routing_instructions(self, router_probs: Array,
                                    padding_mask: Optional[Array],
                                    expert_capacity: int) -> RouterIndices:
    """Computes instructions for routing inputs to experts.

    Subclasses (e.g. TokensChooseScatterRouter) must override this method.

    Args:
      router_probs: <float32>[num_groups, tokens_per_group, num_experts]
        probabilities used to determine the routing of tokens to the experts.
      padding_mask: <float32>[num_groups, tokens_per_group] padding logit mask
        used to identify padding tokens that should be ignored by the router.
      expert_capacity: Each group will send this many tokens to each expert.

    Returns:
      Router indices containing dispatch indices and combine weights.

    Raises:
      NotImplementedError: Always; this base class only defines the interface.
    """
    raise NotImplementedError(
        'ScatterRouter is an abstract class that should be subclassed.')
class MaskedRouter(Router):
  """Abstract base router class for masked matmul dispatch routers.

  MaskedRouter(s) return RouterMask(s) containing a dispatch mask and combine
  array for sending and receiving (via masked matmuls) inputs and outputs to and
  from experts.

  Routing using masked matmuls is generally faster than scatter-based routing on
  TPUs.
  """

  def _compute_routing_instructions(self, router_probs: Array,
                                    padding_mask: Optional[Array],
                                    expert_capacity: int) -> RouterMask:
    """Computes masks for the top-k experts per token.

    Subclasses (e.g. TokensChooseMaskedRouter) must override this method.

    Args:
      router_probs: <float32>[num_groups, tokens_per_group, num_experts]
        probabilities used to determine the routing of tokens to the experts.
      padding_mask: <float32>[num_groups, tokens_per_group] padding logit mask
        used to identify padding tokens that should be ignored by the router.
      expert_capacity: Each group will send this many tokens to each expert.

    Returns:
      Router mask arrays.

    Raises:
      NotImplementedError: Always; this base class only defines the interface.
    """
    raise NotImplementedError(
        'MaskedRouter is an abstract class that should be subclassed.')
class TokensChooseScatterRouter(ScatterRouter):
  """Scatter router using tokens choose top-k experts assignment.

  This router uses the same mechanism as in Switch Transformer
  (https://arxiv.org/abs/2101.03961) and V-MoE
  (https://arxiv.org/abs/2106.05974): tokens choose their top experts. Items are
  sorted by router_probs and then routed to their choice of expert until the
  expert's expert_capacity is reached. There is no guarantee that each token is
  processed by an expert, or that each expert receives at least one token.

  Attributes:
    num_selected_experts: Maximum number of experts to which each token is
      routed. Tokens may be routed to fewer experts if particular experts are
      oversubscribed / reach capacity.
    batch_prioritized_routing: Whether or not to use Batch Prioritized Routing
      (BPR), originally introduced in V-MoE (https://arxiv.org/abs/2106.05974).
      With BPR, we prioritize routing those top-k tokens with the highest
      router probability, rather than simply using each token's left-to-right
      ordering in the batch. This prioritization is important because the
      experts have limited capacity.
  """
  num_selected_experts: int
  batch_prioritized_routing: bool

  def _compute_routing_instructions(self, router_probs: Array,
                                    padding_mask: Optional[Array],
                                    expert_capacity: int) -> RouterIndices:
    """Computes dispatch indices and combine weights for the top-k experts.

    Args:
      router_probs: <float32>[num_groups, tokens_per_group, num_experts]
        probabilities used to determine the routing of tokens to the experts.
      padding_mask: <float32>[num_groups, tokens_per_group] padding logit mask
        used to identify padding tokens that should be down-weighted by the
        router.
      expert_capacity: Each group will send this many tokens to each expert.

    Returns:
      Dispatch indices and combine weights for scatter/gather-based routing.
    """
    num_groups, tokens_per_group, num_experts = router_probs.shape

    if padding_mask is not None:
      # Because `expert_indices` are directly used for scatter-based routing, we
      # mask probabilities corresponding to tokens before the top-k operation.
      # Note that, unlike for mask-based tokens-choose routing, the
      # (down-weighted) padding tokens may still be selected.
      router_probs *= jnp.expand_dims(padding_mask, axis=-1)

    # Top-k router probability and corresponding expert indices for each token.
    # Shape: [num_groups, tokens_per_group, num_selected_experts].
    combine_weights, expert_indices = _top_k(
        router_probs, k=self.num_selected_experts)

    auxiliary_loss = _load_balancing_loss(router_probs, expert_indices)

    if self.batch_prioritized_routing:
      # Sort tokens according to their routing probability per token group, so
      # that the highest probability tokens are routed first.
      token_ordering = jnp.argsort(-combine_weights[..., 0], axis=-1)
      expert_indices = _take_along_axis(
          expert_indices, jnp.expand_dims(token_ordering, axis=-1), axis=-2)

    # Identify each token's preferred expert.
    # Make num_selected_experts the leading axis to ensure that top-1 choices
    # have priority over top-2 choices, which have priority over top-3
    # choices...
    preferred_experts = jnp.swapaxes(expert_indices, 1, 2)
    # Shape: [num_groups, num_selected_experts * tokens_per_group]
    preferred_experts = preferred_experts.reshape(num_groups, -1)

    # Shape: [num_groups, tokens_per_group * num_selected_experts, num_experts].
    expert_mask = jax.nn.one_hot(
        preferred_experts, num_experts, dtype=jnp.int32)

    # Experts have a fixed capacity that we cannot exceed. A token's priority
    # within the expert's buffer is given by the masked, cumulative capacity of
    # its target expert.
    # The cumsum counts how many earlier (higher-priority) choices target the
    # same expert; subtracting 1.0 makes buffer slots start at 0 and leaves -1
    # (float) in all non-selected positions.
    # Shape: [num_groups, tokens_per_group * num_selected_experts, num_experts].
    token_priority = jnp.cumsum(expert_mask, axis=1) * expert_mask - 1.0
    # Shape: [num_groups, num_selected_experts, tokens_per_group, num_experts].
    token_priority = token_priority.reshape(
        (num_groups, self.num_selected_experts, -1, num_experts))
    # Shape: [num_groups, tokens_per_group, num_selected_experts, num_experts].
    token_priority = jnp.swapaxes(token_priority, 1, 2)
    # For each token, across all experts, select the only non-negative
    # (unmasked) priority. Shape: [num_groups, tokens_per_group,
    # num_selected_experts].
    token_priority = jnp.max(token_priority, axis=-1)

    # Return to original index shape.
    preferred_experts = preferred_experts.reshape(num_groups,
                                                  self.num_selected_experts,
                                                  tokens_per_group)
    # Shape: [num_groups, tokens_per_group, num_selected_experts]
    preferred_experts = jnp.swapaxes(preferred_experts, 1, 2)

    if self.batch_prioritized_routing:
      # Place tokens in their original ordering.
      inverse_token_ordering = jnp.argsort(token_ordering, axis=-1)
      preferred_experts = _take_along_axis(
          preferred_experts,
          jnp.expand_dims(inverse_token_ordering, axis=-1),
          axis=-2)
      token_priority = _take_along_axis(
          token_priority,
          jnp.expand_dims(inverse_token_ordering, axis=-1),
          axis=-2)

    # Mask out tokens that overflow the maximum expert capacities.
    # Shape: [num_groups, tokens_per_group, num_selected_experts].
    combine_weights *= token_priority < expert_capacity

    # Expert index and priority within the expert capacity buffer.
    # Shape: [num_groups, tokens_per_group, num_selected_experts, 2].
    dispatch_indices = jnp.stack([preferred_experts, token_priority], axis=-1)

    # Return to default dtype now that router computation is complete.
    combine_weights = jax.lax.convert_element_type(combine_weights, self.dtype)
    # token_priority is float here (from the -1.0 above); convert back to int32
    # for use as scatter indices.
    dispatch_indices = jax.lax.convert_element_type(dispatch_indices, jnp.int32)

    return RouterIndices(dispatch_indices, combine_weights, auxiliary_loss)
class TokensChooseMaskedRouter(MaskedRouter):
  """Masked matmul router using tokens choose top-k experts assignment.

  This router uses the same mechanism as in Switch Transformer
  (https://arxiv.org/abs/2101.03961) and V-MoE
  (https://arxiv.org/abs/2106.05974): tokens choose their top experts. Items
  are sorted by router_probs and then routed to their choice of expert until
  the expert's expert_capacity is reached. There is no guarantee that each
  token is processed by an expert, or that each expert receives at least one
  token.

  Attributes:
    num_selected_experts: Maximum number of experts to which each token is
      routed. Tokens may be routed to fewer experts if particular experts are
      oversubscribed / reach capacity.
    batch_prioritized_routing: Whether or not to use Batch Prioritized Routing
      (BPR), originally introduced in V-MoE (https://arxiv.org/abs/2106.05974).
      With BPR, we prioritize routing those top-k tokens with the highest
      router probability, rather than simply using each token's left-to-right
      ordering in the batch. This prioritization is important because the
      experts have limited capacity.
  """
  num_selected_experts: int
  batch_prioritized_routing: bool

  def _compute_routing_instructions(self, router_probs: Array,
                                    padding_mask: Optional[Array],
                                    expert_capacity: int) -> RouterMask:
    """Computes masks for the top-k experts per token.

    Args:
      router_probs: <float32>[num_groups, tokens_per_group, num_experts]
        probabilities used to determine the routing of tokens to the experts.
      padding_mask: <float32>[num_groups, tokens_per_group] padding logit mask
        used to identify padding tokens that should be ignored by the router.
      expert_capacity: Each group will send this many tokens to each expert.

    Returns:
      Dispatch and combine arrays for routing with masked matmuls.
    """
    num_groups, _, num_experts = router_probs.shape
    # Top-k router probability and corresponding expert indices for each token.
    # Shape: [num_groups, tokens_per_group, num_selected_experts].
    expert_gate, expert_index = _top_k(
        router_probs, k=self.num_selected_experts)
    if padding_mask is not None:
      # Mask applied to gate. Exclude choices corresponding to padding tokens.
      gate_mask = jnp.expand_dims(padding_mask, axis=-1)
      expert_gate *= gate_mask
      # Set `expert_index` elements corresponding to padding to negative
      # numbers. Negative `expert_index` elements will ultimately be dropped in
      # the one_hot conversion to the `expert_mask`.
      # First convert nonzero padding elements to negative values.
      expert_index *= 2 * gate_mask - 1.
      # Handle zero padding elements by negatively shifting all padding.
      expert_index += jnp.repeat(
          gate_mask - 1., self.num_selected_experts, axis=-1)
      # To correctly compute load balancing loss, we also mask out probs.
      router_probs *= gate_mask
    auxiliary_loss = _load_balancing_loss(router_probs, expert_index)
    if self.batch_prioritized_routing:
      # Sort tokens according to their routing probability per group, so that
      # the highest probability tokens are routed first. The sort key is each
      # token's top-1 gate value.
      permutation = jnp.argsort(-expert_gate[..., 0], axis=-1)
      # Shape: [num_groups, tokens_per_group, num_selected_experts]
      expert_index = _take_along_axis(
          expert_index, jnp.expand_dims(permutation, axis=-1), axis=-2)
    # Make num_selected_experts the leading axis to ensure that top-1 choices
    # have priority over top-2 choices, which have priority over top-3 choices,
    # etc.
    expert_index = jnp.swapaxes(expert_index, 1, 2)
    # Shape: [num_groups, num_selected_experts * tokens_per_group]
    expert_index = expert_index.reshape(num_groups, -1)
    # Create mask out of indices.
    # Shape: [num_groups, tokens_per_group * num_selected_experts, num_experts].
    expert_mask = jax.nn.one_hot(expert_index, num_experts, dtype=jnp.int32)
    # Experts have a fixed capacity that we cannot exceed. A token's priority
    # within the expert's buffer is given by the masked, cumulative capacity of
    # its target expert. The `- 1.0` makes priorities zero-based and marks
    # non-selected (expert, token) pairs with -1.
    # Shape: [num_groups, tokens_per_group * num_selected_experts, num_experts].
    token_priority = jnp.cumsum(expert_mask, axis=1) * expert_mask - 1.0
    # Shape: [num_groups, num_selected_experts, tokens_per_group, num_experts].
    token_priority = token_priority.reshape(
        (num_groups, self.num_selected_experts, -1, num_experts))
    # Shape: [num_groups, tokens_per_group, num_selected_experts, num_experts].
    token_priority = jnp.swapaxes(token_priority, 1, 2)
    # For each token, across all selected experts, select the only non-negative
    # (unmasked) priority. Now, for group G routing to expert E, token T has
    # non-negative priority (i.e. token_priority[G,T,E] >= 0) if and only if E
    # is its targeted expert.
    # Shape: [num_groups, tokens_per_group, num_experts].
    token_priority = jnp.max(token_priority, axis=2)
    if self.batch_prioritized_routing:
      # Place token priorities in original ordering of tokens.
      inv_permutation = jnp.argsort(permutation, axis=-1)
      token_priority = _take_along_axis(
          token_priority, jnp.expand_dims(inv_permutation, axis=-1), axis=-2)
    # Token T can only be routed to expert E if its priority is non-negative
    # and less than the expert capacity. One-hot matrix will ignore indices
    # outside the range [0, expert_capacity).
    # Shape: [num_groups, tokens_per_group, num_experts, expert_capacity].
    dispatch_mask = jax.nn.one_hot(
        token_priority, expert_capacity, dtype=jnp.bool_)
    # The combine array will be used for combining expert outputs, scaled by
    # the router probabilities. Shape: [num_groups, tokens_per_group,
    # num_experts, expert_capacity].
    combine_array = jnp.einsum(
        '...te,...tec->...tec',
        router_probs,
        dispatch_mask,
        precision=jax.lax.Precision.DEFAULT)
    # Return to default dtype now that router computation is complete.
    combine_array = jax.lax.convert_element_type(combine_array, self.dtype)
    return RouterMask(dispatch_mask, combine_array, auxiliary_loss)
class ExpertsChooseMaskedRouter(MaskedRouter):
  """Masked matmul router using experts choose tokens assignment.

  This router uses the same mechanism as in Mixture-of-Experts with Expert
  Choice (https://arxiv.org/abs/2202.09368): each expert selects its top
  expert_capacity tokens. An individual token may be processed by multiple
  experts or none at all.

  Note: "experts choose routing" should not be used in decoder blocks because
  it breaks the autoregressive behavior -- the model will learn to cheat by
  using future token information to improve current token predictions.
  """

  def _compute_routing_instructions(self, router_probs: Array,
                                    padding_mask: Optional[Array],
                                    expert_capacity: int) -> RouterMask:
    """Computes masks for the highest probability token per expert.

    Args:
      router_probs: <float32>[num_groups, tokens_per_group, num_experts]
        probabilities used to determine the routing of tokens to the experts.
      padding_mask: <float32>[num_groups, tokens_per_group] padding logit mask
        used to identify padding tokens that should be down-weighted by the
        router.
      expert_capacity: Each group will send this many tokens to each expert.

    Returns:
      Dispatch and combine arrays for routing with masked matmuls.
    """
    tokens_per_group = router_probs.shape[1]
    if padding_mask is not None:
      # Down-weight padding probabilities *before* the top-k selection. Unlike
      # mask-based tokens-choose routing, the experts here may still select
      # the (down-weighted) padding tokens.
      router_probs *= jnp.expand_dims(padding_mask, axis=-1)
    # Per-group transpose so experts index the leading routing axis.
    # Shape: [num_groups, num_experts, tokens_per_group].
    probs_per_expert = jax.vmap(jnp.transpose)(router_probs)
    # Each expert picks its expert_capacity highest probability tokens.
    # Shapes: [num_groups, num_experts, expert_capacity].
    expert_gate, token_index = _top_k(probs_per_expert, k=expert_capacity)
    # One-hot over token positions for each expert's selections.
    # Shape: [num_groups, num_experts, expert_capacity, tokens_per_group].
    dispatch_mask = jax.nn.one_hot(
        token_index, tokens_per_group, dtype=jnp.int32)
    # Rearrange to the [num_groups, tokens_per_group, num_experts,
    # expert_capacity] layout expected by the MoeLayer API.
    dispatch_mask = jnp.moveaxis(dispatch_mask, 3, 1)
    # Scale dispatch decisions by the router probabilities, yielding the array
    # used to combine expert outputs.
    combine_array = jnp.einsum(
        '...ec,...tec->...tec',
        expert_gate,
        dispatch_mask,
        precision=jax.lax.Precision.DEFAULT)
    # Router computation is done; return to the module's default dtype.
    combine_array = jax.lax.convert_element_type(combine_array, self.dtype)
    # Each expert fills its buffer by construction, so no auxiliary load
    # balancing loss is needed for expert choice routing.
    return RouterMask(dispatch_mask, combine_array, 0.0)
def _load_balancing_loss(router_probs: Array, expert_indices: Array) -> float:
"""Computes auxiliary load balancing loss as in Switch Transformer.
See Switch Transformer (https://arxiv.org/abs/2101.03961). This function
implements the loss function presented in equations (4) - (6). It aims to
penalize those cases where the routing between experts is unbalanced.
Args:
router_probs: Probability assigned to each expert per token. Shape:
<float32>[num_groups, tokens_per_group, num_experts].
expert_indices: <int>[num_groups, tokens_per_group, num_selected_experts]
indices identifying the top num_selected_experts for a given token.
Returns:
The auxiliary loss.
"""
num_experts = router_probs.shape[-1]
# Shape: [num_groups, tokens_per_group, num_selected_experts, num_experts].
expert_mask = jax.nn.one_hot(expert_indices, num_experts, dtype=jnp.int32)
# For a given token, determine if it was routed to a given expert.
# Shape: [num_groups, tokens_per_group, num_experts]
expert_mask = jnp.max(expert_mask, axis=-2)
tokens_per_group_and_expert = jnp.mean(
expert_mask, dtype=jnp.float32, axis=-2)
router_prob_per_group_and_expert = jnp.mean(
router_probs, dtype=jnp.float32, axis=-2)
return jnp.mean(
tokens_per_group_and_expert * router_prob_per_group_and_expert,
dtype=jnp.float32) * num_experts**2
def _router_z_loss(router_logits: Array) -> float:
"""Compute router z-loss.
The router z-loss was introduced in Designing Effective Sparse Expert Models
(https://arxiv.org/abs/2202.08906). It encourages router logits to remain
small in an effort to improve stability.
Args:
router_logits: <float>[num_groups, tokens_per_group, num_experts] router
logits.
Returns:
Scalar router z-loss.
"""
num_groups, tokens_per_group, _ = router_logits.shape
log_z = jax.nn.logsumexp(router_logits, axis=-1)
z_loss = log_z**2
return jnp.sum(z_loss, dtype=jnp.float32) / (num_groups * tokens_per_group)
| 33,403 | 42.325551 | 110 | py |
flaxformer | flaxformer-main/flaxformer/architectures/bert/bert.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains a BERT model implementation including its layers."""
from __future__ import annotations
from typing import Optional, Tuple
import chex
from flax import linen as nn
import jax.numpy as jnp
from flaxformer import transformer_common as common
from flaxformer.architectures.bert import heads
from flaxformer.components import dense
from flaxformer.components import embedding
from flaxformer.components import initializers
from flaxformer.components.attention import dense_attention
from flaxformer.types import Activation
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
# Default layer-norm epsilon used throughout this module.
_DEFAULT_LAYER_NORM = 1e-12
# Default stddev for the truncated-normal kernel initializer.
_DEFAULT_INIT_RANGE = 0.02
class FullEncoder(nn.Module):
  """Embeds token inputs and runs them through a stack of encoder layers.

  The submodules are responsible for their own dropout and layer norm.

  Attributes:
    embedder_block: Converts input IDs into (normalized) embeddings.
    encoder_block: The stack of transformer encoder layers.
  """
  embedder_block: EmbedderBlock
  encoder_block: EncoderBlock

  def __call__(self,
               input_ids: Array,
               *,
               input_mask: Array,
               position_ids: Optional[Array] = None,
               segment_ids: Optional[Array] = None,
               enable_dropout: bool = True) -> Array:
    """Embeds the inputs and then encodes those representations.

    Args:
      input_ids: <int>[batch..., seq_len]. The first position in the sequence
        should correspond to a "beginning of input" symbol, generally `[CLS]`.
      input_mask: <int>[batch..., seq_len]. Indicates which positions in
        `input_ids` are non-padding (0 for padding, 1 otherwise).
      position_ids: <int>[batch..., seq_len]. Position of each input ID within
        its sequence; typically just `range(seq_len)` per sequence, but custom
        values are possible, e.g. when multiple examples are packed into one
        sequence.
      segment_ids: <int>[batch..., seq_len]. The "type" of each input
        position. A traditional two-segment BERT model uses values in {0, 1}.
      enable_dropout: Enables dropout when set to True.

    Returns:
      <float>[batch..., seq_len, hidden_size].
    """
    # Validate shapes/dtypes and record the leading (batch) dimensions.
    chex.assert_shape(input_ids, (..., None))
    chex.assert_type(input_ids, int)
    *leading_dims, sequence_length = input_ids.shape
    chex.assert_shape(input_mask, (*leading_dims, sequence_length))
    if position_ids is not None:
      chex.assert_shape(position_ids, (*leading_dims, sequence_length))
      chex.assert_type(position_ids, int)
    if segment_ids is not None:
      chex.assert_shape(segment_ids, (*leading_dims, sequence_length))
      chex.assert_type(segment_ids, int)

    embedded = self.embedder_block(
        input_ids=input_ids,
        position_ids=position_ids,
        segment_ids=segment_ids,
        enable_dropout=enable_dropout)
    chex.assert_shape(embedded, (*leading_dims, sequence_length, None))

    # Positions may attend wherever `input_mask` marks a real (non-pad) token.
    attention_mask = dense_attention.make_attention_mask(input_mask, input_mask)
    chex.assert_shape(
        attention_mask, (*leading_dims, 1, sequence_length, sequence_length))

    encoded = self.encoder_block(
        embedded,
        attention_mask=attention_mask,
        enable_dropout=enable_dropout)
    chex.assert_shape(encoded, (*leading_dims, sequence_length, None))
    return encoded
class EmbedderBlock(nn.Module):
  """Embeds the inputs, then applies layer norm followed by dropout.

  Attributes:
    embedder: Combines token/position/segment embeddings.
    dropout: Dropout applied after layer norm.
    layer_norm: Layer norm applied to the combined embeddings.
  """
  embedder: embedding.MultiEmbed
  dropout: nn.Dropout
  layer_norm: nn.LayerNorm

  def __call__(self,
               input_ids: Array,
               *,
               position_ids: Optional[Array] = None,
               segment_ids: Optional[Array] = None,
               enable_dropout: bool = True) -> Array:
    """Embeds the inputs, then applies layer norm and dropout.

    Args:
      input_ids: <int>[batch..., seq_len]. The first position in the sequence
        should correspond to a "beginning of input" symbol, generally `[CLS]`.
      position_ids: <int>[batch..., seq_len]. Position of each input ID within
        its sequence; typically just `range(seq_len)` per sequence, but custom
        values are possible, e.g. when multiple examples are packed into one
        sequence.
      segment_ids: <int>[batch..., seq_len]. The "type" of each input
        position. A traditional two-segment BERT model uses values in {0, 1}.
      enable_dropout: Enables dropout when set to True.

    Returns:
      <float>[batch..., seq_len, hidden_size].
    """
    # Validate shapes/dtypes and record the leading (batch) dimensions.
    chex.assert_shape(input_ids, (..., None))
    chex.assert_type(input_ids, int)
    *leading_dims, sequence_length = input_ids.shape
    if position_ids is not None:
      chex.assert_shape(position_ids, (*leading_dims, sequence_length))
      chex.assert_type(position_ids, int)
    if segment_ids is not None:
      chex.assert_shape(segment_ids, (*leading_dims, sequence_length))
      chex.assert_type(segment_ids, int)
    embedded = self.embedder(  # pytype: disable=wrong-arg-types  # jax-ndarray
        input_ids=input_ids,
        position_ids=position_ids,
        segment_ids=segment_ids,
    )
    chex.assert_shape(embedded, (*leading_dims, sequence_length, None))
    # Layer norm first, then dropout.
    embedded = self.layer_norm(embedded)
    embedded = self.dropout(embedded, deterministic=not enable_dropout)
    chex.assert_shape(embedded, (*leading_dims, sequence_length, None))
    return embedded
class EncoderBlock(nn.Module):
  """A BERT encoder block: a sequence of transformer layers.

  Runs the inputs through the full stack of encoder layers. Dropout and layer
  norm are handled within (and at the end of) each individual layer.

  Attributes:
    layer_sequence: The ordered collection of encoder layers.
  """
  layer_sequence: common.LayerSequence

  def __call__(self,
               inputs: Array,
               *,
               attention_mask: Array,
               enable_dropout: bool = True) -> Array:
    """Runs every layer in sequence over the inputs.

    Args:
      inputs: The inputs, <float>[..., seq_len, hidden_size].
      attention_mask: The mask over input positions, <bool>[..., num_heads,
        seq_len, seq_len].
      enable_dropout: Enables dropout when set to True.

    Returns:
      The encoded inputs, <float>[..., seq_len, hidden_size].
    """
    encoded = self.layer_sequence(
        inputs, attention_mask=attention_mask, enable_dropout=enable_dropout)
    return encoded
class EncoderLayer(nn.Module):
  """A single BERT transformer encoder layer.

  Runs an attention block (which ends with dropout and layer norm) followed by
  an MLP block (which also ends with dropout and layer norm).

  Attributes:
    attention_block: The attention sub-block.
    mlp_block: The feed-forward sub-block.
  """
  attention_block: AttentionBlock
  mlp_block: MlpBlock

  def __call__(self,
               inputs: Array,
               *,
               attention_targets: Optional[Array] = None,
               attention_mask: Optional[Array] = None,
               attention_bias: Optional[Array] = None,
               enable_dropout: bool = True) -> Array:
    """Applies one attention block and one MLP block.

    Args:
      inputs: <float>[batch..., seq_len, features]. Sequences of inputs. These
        may attend to values in `attention_targets`.
      attention_targets: <float>[batch..., target_seq_len, target_features]
        values that the `inputs` positions may attend to. When None, the layer
        self-attends over `inputs`.
      attention_mask: <bool>[batch..., num_heads, seq_len, target_seq_len].
        True where a position in `inputs` may attend to a target position.
      attention_bias: <float>[batch..., num_heads, seq_len, target_seq_len]
        bias for the attention weights; useful for causal masks, padding
        masks, proximity bias, etc.
      enable_dropout: Enables dropout if set to True.

    Returns:
      <float>[batch..., seq_len, features]
    """
    # Default to self-attention when no explicit targets are supplied.
    targets = inputs if attention_targets is None else attention_targets
    attended = self.attention_block(
        inputs=inputs,
        attention_targets=targets,
        mask=attention_mask,
        bias=attention_bias,
        enable_dropout=enable_dropout)
    chex.assert_equal_shape((inputs, attended))
    output = self.mlp_block(attended, enable_dropout=enable_dropout)
    chex.assert_equal_shape((inputs, output))
    return output
class AttentionBlock(nn.Module):
  """A single transformer attention block.

  Performs the following:
  1. Attention.
  2. Dense projection back to the input shape.
  3. Dropout.
  4. Residual connection.
  5. LayerNorm.

  Attributes:
    attention_layer: The attention layer.
    dense_layer: The dense layer for projecting the attention layer's output
      back to the shape of `inputs`.
    dropout: Performs dropout.
    layer_norm: Performs layer normalization.
  """
  attention_layer: dense_attention.DenseAttention
  dense_layer: dense.DenseGeneral
  dropout: nn.Dropout
  layer_norm: nn.LayerNorm

  def __call__(self,
               inputs: Array,
               attention_targets: Array,
               *,
               mask: Optional[Array] = None,
               bias: Optional[Array] = None,
               enable_dropout: bool = True) -> Array:
    """Applies a single transformer attention block.

    Args:
      inputs: <float>[batch..., seq_len, features]. Sequences of inputs. These
        may attend to values in `attention_targets`.
      attention_targets: <float>[batch..., target_seq_len, target_features]:
        Sequence of values that the `inputs` positions may attend to.
      mask: <bool>[batch..., num_heads, seq_len, target_seq_len]. Attention
        mask where True indicates that the position in `inputs` may attend to
        the position in `attention_targets`.
      bias: <float>[batch..., num_heads, seq_len, target_seq_len]. Bias for
        the attention weights. This can be used for incorporating causal
        masks, padding masks, proximity bias, etc.
      enable_dropout: Enables dropout if set to True.

    Returns:
      <float>[batch..., seq_len, features]
    """
    # Validate inputs and create variables for dimension sizes.
    chex.assert_shape(inputs, (..., None, self.dense_layer.features))
    *batch_sizes, seq_len, features = inputs.shape
    chex.assert_shape(attention_targets, (*batch_sizes, None, None))
    target_seq_len = attention_targets.shape[-2]
    # First pass: the heads dimension can be anything (num_heads isn't known
    # until the attention layer runs); it is re-checked more tightly below.
    if mask is not None:
      chex.assert_shape(mask, (*batch_sizes, None, seq_len, target_seq_len))
    if bias is not None:
      chex.assert_shape(bias, (*batch_sizes, None, seq_len, target_seq_len))
    # TODO: Do we want the dropout that's in the attention layer?
    # [batch..., seq_len, num_heads, head_dim]
    attention_output = self.attention_layer(
        inputs,
        attention_targets,
        mask=mask,
        bias=bias,
        enable_dropout=enable_dropout)
    chex.assert_shape(attention_output, (*batch_sizes, seq_len, None, None))
    num_heads = attention_output.shape[-2]
    chex.assert_is_divisible(features, num_heads)
    # Second pass: now that num_heads is known, require the heads dimension of
    # mask/bias to be either num_heads or 1 (broadcastable).
    if mask is not None:
      chex.assert_shape(mask,
                        (*batch_sizes, {num_heads, 1}, seq_len, target_seq_len))
    if bias is not None:
      chex.assert_shape(bias,
                        (*batch_sizes, {num_heads, 1}, seq_len, target_seq_len))
    # Project back to input shape.
    # [batch..., seq_len, features]
    result = self.dense_layer(attention_output)
    chex.assert_equal_shape((inputs, result))
    # Dropout, residual connection, then layer norm (post-LN ordering).
    result = self.dropout(result, deterministic=not enable_dropout)
    result = result + inputs
    result = self.layer_norm(result)
    return result
class MlpBlock(nn.Module):
  """A single transformer MLP block.

  Applies, in order: the MLP, a dense projection back to the input width,
  dropout, a residual connection, and layer norm.

  Attributes:
    mlp: The MLP layer.
    dense_layer: Projects the MLP output back to the shape of the inputs.
    dropout: Performs dropout.
    layer_norm: Performs layer normalization.
  """
  mlp: Mlp
  dense_layer: dense.DenseGeneral
  dropout: nn.Dropout
  layer_norm: nn.LayerNorm

  def __call__(self, inputs: Array, *, enable_dropout: bool = True) -> Array:
    """Applies a single transformer MLP block.

    Args:
      inputs: <float>[batch..., seq_len, hidden_size]. Sequences of inputs.
      enable_dropout: Enables dropout if set to True.

    Returns:
      <float>[batch..., seq_len, hidden_size]
    """
    chex.assert_shape(inputs, (..., None, self.dense_layer.features))
    # Expand to the intermediate dimension.
    # [batch..., seq_len, intermediate_dim]
    hidden = self.mlp(inputs)
    # Project back down to the input width.
    # [batch..., seq_len, hidden_size]
    projected = self.dense_layer(hidden)
    chex.assert_equal_shape((inputs, projected))
    projected = self.dropout(projected, deterministic=not enable_dropout)
    # Residual connection followed by layer norm.
    return self.layer_norm(projected + inputs)
class Mlp(nn.Module):
  """A dense projection followed by an activation function.

  Attributes:
    dense_layer: The projection applied to the inputs.
    activation: The nonlinearity applied to the projection's output.
  """
  dense_layer: dense.DenseGeneral
  activation: Activation

  def __call__(self, inputs: Array) -> Array:
    """Projects `inputs` and applies the activation."""
    return self.activation(self.dense_layer(inputs))
def make_full_encoder(
    hidden_size: int,
    intermediate_dim: int,
    vocab_size: int,
    max_length: int,
    num_segments: int,
    num_hidden_layers: int,
    num_attention_heads: int,
    dropout_rate: float = 0.0,
    dtype: jnp.dtype = jnp.float32,
    kernel_init: Initializer = initializers.truncated_normal(
        stddev=_DEFAULT_INIT_RANGE),
    bias_init: Initializer = nn.initializers.zeros,
    layer_norm_epsilon: float = _DEFAULT_LAYER_NORM,
) -> FullEncoder:
  """Returns a newly constructed FullEncoder.

  Args:
    hidden_size: The size of the embeddings and the BERT layers.
    intermediate_dim: Size of the feed-forward layer in the TransformerLayer's
      MLP block. Corresponds to `dff` in the transformer paper.
    vocab_size: The vocabulary size.
    max_length: The number of supported token positions.
    num_segments: The number of segments (token types).
    num_hidden_layers: Total number of hidden BertTransformer layers. 12 in
      the BERT-base model, 24 in the BERT-large model.
    num_attention_heads: Total number of self-attention heads. 12 in the
      BERT-base model, 16 in the BERT-large model.
    dropout_rate: Dropout probability used across all the model layers.
    dtype: The dtype of the computation (float16/float32/float64).
    kernel_init: Initializer method for attention and mlp layers kernels.
    bias_init: Initializer method for attention and mlp layers biases.
    layer_norm_epsilon: The layer norm epsilon parameter.
  """

  def _embed_table(num_embeddings: int) -> embedding.Embed:
    # All three embedding tables share the feature size and initializer.
    return embedding.Embed(
        num_embeddings=num_embeddings,
        features=hidden_size,
        embedding_init=kernel_init)

  embedder_block = EmbedderBlock(
      embedder=embedding.MultiEmbed({
          'input_ids': _embed_table(vocab_size),
          'position_ids': _embed_table(max_length),
          'segment_ids': _embed_table(num_segments),
      }),
      layer_norm=nn.LayerNorm(epsilon=layer_norm_epsilon),
      dropout=nn.Dropout(rate=dropout_rate))
  encoder_block = make_encoder_block(
      hidden_size=hidden_size,
      intermediate_dim=intermediate_dim,
      num_layers=num_hidden_layers,
      num_attention_heads=num_attention_heads,
      dropout_rate=dropout_rate,
      dtype=dtype,
      kernel_init=kernel_init,
      bias_init=bias_init,
      layer_norm_epsilon=layer_norm_epsilon)
  return FullEncoder(
      embedder_block=embedder_block, encoder_block=encoder_block)
def make_encoder_block(
    *,
    hidden_size: int,
    intermediate_dim: int,
    num_layers: int,
    num_attention_heads: int,
    dropout_rate: float = 0.0,
    dtype: jnp.dtype = jnp.float32,
    kernel_init: Initializer = initializers.truncated_normal(
        stddev=_DEFAULT_INIT_RANGE
    ),
    bias_init: Initializer = nn.initializers.zeros,
    layer_norm_epsilon: float = _DEFAULT_LAYER_NORM,
    sow_attention_intermediates: bool = False,
) -> EncoderBlock:
  """Returns a newly constructed EncoderBlock.

  Args:
    hidden_size: The size of the embeddings and the BERT layers.
    intermediate_dim: Size of the feed-forward layer in the BertLayer
      MlpBlock. Corresponds to `dff` in the transformer paper.
    num_layers: Total number of hidden BertTransformer layers. 12 in the
      BERT-base model, 24 in the BERT-large model.
    num_attention_heads: Total number of self-attention heads. 12 in the
      BERT-base model, 16 in the BERT-large model.
    dropout_rate: Dropout probability used across all the model layers.
    dtype: The dtype of the computation (float16/float32/float64).
    kernel_init: Initializer method for attention and mlp layers kernels.
    bias_init: Initializer method for attention and mlp layers biases.
    layer_norm_epsilon: The layer norm epsilon parameter.
    sow_attention_intermediates: Whether to track attention intermediates
      using Module.sow.
  """

  def _build_layer() -> EncoderLayer:
    # Each encoder layer gets its own attention module instance.
    attention = make_attention_layer(
        num_heads=num_attention_heads,
        dropout_rate=dropout_rate,
        kernel_init=kernel_init,
        bias_init=bias_init,
        dtype=dtype,
        sow_intermediates=sow_attention_intermediates,
    )
    return make_encoder_layer(
        attention,
        hidden_size=hidden_size,
        intermediate_dim=intermediate_dim,
        dtype=dtype,
        dropout_rate=dropout_rate,
        kernel_init=kernel_init,
        bias_init=bias_init,
        layer_norm_epsilon=layer_norm_epsilon,
    )

  layers = common.LayerSequence(num_layers=num_layers, make_layer=_build_layer)  # pytype: disable=wrong-keyword-args
  return EncoderBlock(layers)
class BertEncoder(nn.Module):
  """A BERT encoder model that embeds inputs and encodes them with BERT layers.

  Note that dropout and layer norm are performed within and at the end of each
  encoder layer.

  Attributes:
    hidden_size: The size of the embeddings and the BERT layers.
    intermediate_dim: Size of the feed-forward layer in the BertLayer
      MlpBlock. Corresponds to `dff` in the transformer paper.
    vocab_size: The vocabulary size.
    max_length: The number of supported token positions.
    num_segments: The number of segments (token types).
    num_hidden_layers: Total number of hidden BertTransformer layers. 12 in
      the BERT-base model, 24 in the BERT-large model.
    num_attention_heads: Total number of self-attention heads. 12 in the
      BERT-base model, 16 in the BERT-large model.
    dropout_rate: Dropout probability used across all the model layers.
    dtype: The dtype of the computation (float16/float32/float64).
    kernel_init: Initializer method for attention and mlp layers kernels.
    bias_init: Initializer method for attention and mlp layers biases.
    layer_norm_epsilon: The layer norm epsilon parameter.
    enable_dropout: Enables dropout when set to True. May be left None and
      supplied per-call instead (the two are merged via
      `nn.module.merge_param`).
  """
  hidden_size: int
  intermediate_dim: int
  vocab_size: int
  max_length: int
  num_segments: int
  num_hidden_layers: int
  num_attention_heads: int
  dropout_rate: float = 0.0
  dtype: jnp.dtype = jnp.float32
  kernel_init: Initializer = initializers.truncated_normal(
      stddev=_DEFAULT_INIT_RANGE)
  bias_init: Initializer = nn.initializers.zeros
  layer_norm_epsilon: float = _DEFAULT_LAYER_NORM
  enable_dropout: Optional[bool] = None

  def setup(self):
    """Builds the embedder, embedding layer norm/dropout, and encoder stack."""
    self.embedder = embedding.MultiEmbed({
        'token_ids':
            embedding.Embed(
                num_embeddings=self.vocab_size,
                features=self.hidden_size,
                embedding_init=self.kernel_init),
        'position_ids':
            embedding.Embed(
                num_embeddings=self.max_length,
                features=self.hidden_size,
                embedding_init=self.kernel_init),
        'segment_ids':
            embedding.Embed(
                num_embeddings=self.num_segments,
                features=self.hidden_size,
                embedding_init=self.kernel_init)
    })
    self.layer_norm = nn.LayerNorm(epsilon=self.layer_norm_epsilon)
    self.embeddings_dropout = nn.Dropout(rate=self.dropout_rate)
    self.encoder_block = make_encoder_block(
        hidden_size=self.hidden_size,
        intermediate_dim=self.intermediate_dim,
        num_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads,
        dropout_rate=self.dropout_rate,
        dtype=self.dtype,
        kernel_init=self.kernel_init,
        bias_init=self.bias_init,
        layer_norm_epsilon=self.layer_norm_epsilon)

  def __call__(self,
               token_ids: Array,
               position_ids: Array,
               segment_ids: Array,
               input_mask: Array,
               enable_dropout: Optional[bool] = None) -> Array:
    """Embeds the inputs and encodes them with BERT layers.

    Args:
      token_ids: The token IDs, <int>[..., seq_len].
      position_ids: The position IDs, <int>[..., seq_len]. Should broadcast
        over token_ids, or have the same shape.
      segment_ids: The segment (token type) IDs, <int>[..., seq_len].
      input_mask: The mask over token IDs, <bool>[..., seq_len].
      enable_dropout: Enables dropout when set to True.

    Returns:
      The encoded inputs.
    """
    embedded_inputs = (
        self.embed_and_combine_inputs(token_ids, position_ids, segment_ids))
    return self.encode_from_embedded(
        embedded_inputs, input_mask, enable_dropout=enable_dropout)

  def embed_and_combine_inputs(self, token_ids: Array, position_ids: Array,
                               segment_ids: Array) -> Array:
    """Embeds the inputs and combines them for further processing."""
    embedded_inputs = self.embedder(  # pytype: disable=wrong-arg-types  # jax-ndarray
        token_ids=token_ids, position_ids=position_ids, segment_ids=segment_ids
    )
    return embedded_inputs

  def finalize_embeddings(self,
                          embedded_inputs: Array,
                          *,
                          enable_dropout: Optional[bool] = None) -> Array:
    """Finalize embedded inputs to be sent to the first transformer layer.

    Applies layer norm and dropout. `enable_dropout` may come either from the
    module attribute or from this call; `merge_param` selects whichever one
    was provided.
    """
    enable_dropout = nn.module.merge_param('enable_dropout',
                                           self.enable_dropout, enable_dropout)
    embedded_inputs = self.layer_norm(embedded_inputs)
    # When enable_dropout is unresolved (None), pass deterministic=None so the
    # Dropout module falls back to its own configuration.
    if enable_dropout is not None:
      deterministic = not enable_dropout
    else:
      deterministic = None
    embedded_inputs = self.embeddings_dropout(embedded_inputs, deterministic)
    return embedded_inputs

  def encode_from_embedded(self,
                           embedded_inputs: Array,
                           input_mask: Array,
                           *,
                           enable_dropout: Optional[bool] = None) -> Array:
    """Runs the encoder on embedded inputs."""
    embedded_inputs = self.finalize_embeddings(
        embedded_inputs, enable_dropout=enable_dropout)
    # Positions may attend wherever `input_mask` marks a real (non-pad) token.
    attention_mask = dense_attention.make_attention_mask(input_mask, input_mask)
    return self.encoder_block(
        embedded_inputs,
        attention_mask=attention_mask,
        enable_dropout=enable_dropout)
class BertMlmNsp(nn.Module):
  """A BERT encoder with a pooler, MLM head and NSP head.

  Attributes:
    encoder: An encoder that returns a sequence of input representations.
    pooler: A sequence pooler. In the original BERT model the pooler is
      parameterized: it selects the first position (the CLS token) and applies
      a dense layer with activation.
    mlm_head: A masked language modeling head.
    nsp_head: A next sentence prediction head.
  """
  encoder: BertEncoder
  pooler: heads.BertPooler
  mlm_head: heads.MLMHead
  nsp_head: heads.NSPHead

  def __call__(self,
               token_ids: Array,
               *,
               position_ids: Array,
               segment_ids: Array,
               input_mask: Array,
               masked_positions: Optional[Array] = None,
               enable_dropout: bool = True) -> Tuple[Array, Array]:
    """Encodes the inputs, then applies the MLM and NSP heads.

    Args:
      token_ids: The token IDs, <int>[..., seq_len].
      position_ids: The position IDs, <int>[..., seq_len].
      segment_ids: The segment (token type) IDs, <int>[..., seq_len].
      input_mask: The mask over token IDs, <bool>[..., seq_len].
      masked_positions: Positions for which MLM predictions are requested.
      enable_dropout: Enables dropout when set to True.

    Returns:
      A (MLM head output, NSP head output) tuple.
    """
    encoded = self.encoder(
        token_ids,
        position_ids=position_ids,
        segment_ids=segment_ids,
        input_mask=input_mask,
        enable_dropout=enable_dropout)
    mlm_output = self.mlm_head(encoded, masked_positions=masked_positions)
    nsp_output = self.nsp_head(encoded)
    return mlm_output, nsp_output
class BertClassifier(nn.Module):
  """A BERT encoder with a pooler and classification head.

  Attributes:
    encoder: An encoder that returns a sequence of input representations.
    pooler: A sequence pooler. In the original BERT model the pooler is
      parameterized: it selects the first position (the CLS token) and applies
      a dense layer with activation. Note that this module is not actually
      used by `BertClassifier`, but some heads might depend on it. For
      example, the default `heads.ClassifierHead` does have its own pooler
      params and does not need the params provided in `pooler`.
    classifier_head: A classification head. Operates on pooled encodings.
  """
  encoder: BertEncoder
  # TODO: Reduce redundancy of `pooler` and `heads.ClassifierHead`.
  pooler: heads.BertPooler
  classifier_head: heads.ClassifierHead

  def encode(self,
             token_ids,
             *,
             position_ids,
             segment_ids,
             input_mask,
             enable_dropout: bool = True):
    """Runs the encoder over the inputs."""
    return self.encoder(
        token_ids,
        position_ids=position_ids,
        segment_ids=segment_ids,
        input_mask=input_mask,
        enable_dropout=enable_dropout)

  def classify(self, encoded_inputs, enable_dropout: bool = True):
    """Applies the classification head to the encoded inputs."""
    return self.classifier_head(encoded_inputs, enable_dropout=enable_dropout)

  def __call__(self,
               token_ids: Array,
               *,
               position_ids: Array,
               segment_ids: Array,
               input_mask: Array,
               enable_dropout: bool = True) -> Array:
    """Encodes the inputs, then classifies the encoded representation."""
    encoded = self.encode(
        token_ids,
        position_ids=position_ids,
        segment_ids=segment_ids,
        input_mask=input_mask,
        enable_dropout=enable_dropout)
    return self.classify(encoded, enable_dropout=enable_dropout)
def make_encoder_layer(
    attention_layer: dense_attention.DenseAttention,
    *,
    hidden_size: int,
    intermediate_dim: int,
    dtype: DType = jnp.float32,
    dropout_rate: float = 0.0,
    kernel_init: Initializer = initializers.truncated_normal(
        stddev=_DEFAULT_INIT_RANGE),
    bias_init: Initializer = nn.initializers.zeros,
    layer_norm_epsilon: float = _DEFAULT_LAYER_NORM,
    name: Optional[str] = None,
) -> EncoderLayer:
  """Returns a Bert-style transformer layer.

  Args:
    attention_layer: The attention layer wrapped by the attention block.
    hidden_size: Model hidden dimension, shared by both sub-blocks.
    intermediate_dim: MLP intermediate (expansion) dimension.
    dtype: Computation dtype for the sub-layers.
    dropout_rate: Dropout probability used in both sub-blocks.
    kernel_init: Initializer for dense kernels.
    bias_init: Initializer for dense biases.
    layer_norm_epsilon: Epsilon for the layer norms.
    name: Optional module name.

  Returns:
    An `EncoderLayer` composed of an attention block and an MLP block.
  """
  # Both sub-blocks share the same sizing/init/regularization settings.
  shared_kwargs = dict(
      hidden_size=hidden_size,
      dtype=dtype,
      dropout_rate=dropout_rate,
      kernel_init=kernel_init,
      bias_init=bias_init,
      layer_norm_epsilon=layer_norm_epsilon)
  attention_block = make_attention_block(
      attention_layer=attention_layer, **shared_kwargs)
  mlp_block = make_mlp_block(intermediate_dim=intermediate_dim, **shared_kwargs)
  return EncoderLayer(
      attention_block=attention_block, mlp_block=mlp_block, name=name)
def make_attention_block(
    attention_layer: dense_attention.DenseAttention,
    *,
    hidden_size: int,
    dtype: DType = jnp.float32,
    dropout_rate: float = 0.0,
    kernel_init: Initializer = initializers.truncated_normal(
        stddev=_DEFAULT_INIT_RANGE),
    bias_init: Initializer = nn.initializers.zeros,
    layer_norm_epsilon: float = _DEFAULT_LAYER_NORM,
    name: Optional[str] = None,
) -> AttentionBlock:
  """Returns a Bert-style transformer attention block.

  Args:
    attention_layer: The attention layer wrapped by the block.
    hidden_size: Output feature dimension of the projection layer.
    dtype: Computation dtype for the sub-layers.
    dropout_rate: Dropout probability applied inside the block.
    kernel_init: Initializer for the projection kernel.
    bias_init: Initializer for the projection bias.
    layer_norm_epsilon: Epsilon for the block's layer norm.
    name: Optional module name.

  Returns:
    An `AttentionBlock` combining attention, an output projection,
    dropout, and layer normalization.
  """
  # Projects the per-head outputs (heads, kv) back to the embed dimension.
  output_projection = dense.DenseGeneral(
      features=hidden_size,
      axis=(-2, -1),
      kernel_init=kernel_init,
      bias_init=bias_init,
      use_bias=True,
      kernel_axis_names=('heads', 'kv', 'embed'),
      dtype=dtype)
  # We chose to not broadcast dropout (compared to T5),
  # because of a lack of evidence that it was used by BERT.
  return AttentionBlock(
      attention_layer=attention_layer,
      dense_layer=output_projection,
      dropout=nn.Dropout(rate=dropout_rate),
      layer_norm=nn.LayerNorm(epsilon=layer_norm_epsilon, dtype=dtype),
      name=name)
def make_attention_layer(
    *,
    num_heads: int,
    dropout_rate: float = 0.0,
    kernel_init: Initializer = initializers.truncated_normal(
        stddev=_DEFAULT_INIT_RANGE
    ),
    bias_init: Initializer = nn.initializers.zeros,
    dtype: DType = jnp.float32,
    sow_intermediates: bool = False,
    name: Optional[str] = None,
) -> dense_attention.DenseAttention:
  """Returns a Bert-style attention layer.

  Args:
    num_heads: Number of attention heads.
    dropout_rate: Attention dropout probability.
    kernel_init: Initializer for the Q/K/V kernels.
    bias_init: Initializer for the Q/K/V biases.
    dtype: Computation dtype.
    sow_intermediates: Whether the layer sows intermediate values.
    name: Optional module name.

  Returns:
    A `MultiHeadDotProductAttention` configured for BERT.
  """
  attention_kwargs = dict(
      num_heads=num_heads,
      dtype=dtype,
      # No broadcasting of dropout, matching the original BERT setup.
      broadcast_dropout=False,
      dropout_rate=dropout_rate,
      kernel_init=kernel_init,
      bias_init=bias_init,
      use_bias=True,
      rescale_logits=True,
      # The output projection lives in the surrounding attention block.
      output_projection=False,
      sow_intermediates=sow_intermediates,
  )
  return dense_attention.MultiHeadDotProductAttention(
      name=name, **attention_kwargs)
def make_mlp_block(
    *,
    hidden_size: int,
    intermediate_dim: int,
    dtype: DType = jnp.float32,
    dropout_rate: float = 0.0,
    kernel_init: Initializer = initializers.truncated_normal(
        stddev=_DEFAULT_INIT_RANGE),
    bias_init: Initializer = nn.initializers.zeros,
    layer_norm_epsilon: float = _DEFAULT_LAYER_NORM,
    name: Optional[str] = None,
) -> MlpBlock:
  """Returns a Bert-style transformer MLP block.

  Args:
    hidden_size: Output (model) dimension of the block.
    intermediate_dim: Expansion dimension of the inner dense layer.
    dtype: Computation dtype for the sub-layers.
    dropout_rate: Dropout probability applied inside the block.
    kernel_init: Initializer for dense kernels.
    bias_init: Initializer for dense biases.
    layer_norm_epsilon: Epsilon for the block's layer norm.
    name: Optional module name.

  Returns:
    An `MlpBlock` with a gelu-activated expansion, a contraction back to
    `hidden_size`, dropout, and layer normalization.
  """
  # Expansion: embed -> mlp (gelu applied by the wrapping Mlp).
  expand = dense.DenseGeneral(
      features=intermediate_dim,
      kernel_axis_names=('embed', 'mlp'),
      use_bias=True,
      dtype=dtype,
      kernel_init=kernel_init,
      bias_init=bias_init)
  # Contraction: mlp -> embed.
  contract = dense.DenseGeneral(
      features=hidden_size,
      use_bias=True,
      dtype=dtype,
      kernel_init=kernel_init,
      kernel_axis_names=('mlp', 'embed'),
      bias_init=bias_init)
  return MlpBlock(
      mlp=Mlp(dense_layer=expand, activation=nn.gelu),
      dense_layer=contract,
      dropout=nn.Dropout(
          rate=dropout_rate,
          broadcast_dims=(-2,),  # Broadcast along sequence length.
      ),
      layer_norm=nn.LayerNorm(
          epsilon=layer_norm_epsilon,
          dtype=dtype,
      ),
      name=name)
| 31,705 | 35.824623 | 111 | py |
flaxformer | flaxformer-main/flaxformer/architectures/bert/bert_checkpoint_converter.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT TF checkpoints to Flaxformer's Encoder format."""
from typing import Any, Dict, Tuple
import flax
from flax.core import frozen_dict
import tensorflow as tf
from flaxformer import param_conversion_util
def load_params_from_tf_checkpoint(
    checkpoint_path: str) -> Tuple[flax.core.FrozenDict, flax.core.FrozenDict]:
  """Loads Flax BERT Encoder params from a TF BERT checkpoint.

  TF checkpoints expose weights as a flat mapping from slash-separated
  variable names ("outer_layer_name/..middle_layer_name/...inner_tensor")
  to tensors, whereas Flax expects a nested parameter tree. This function
  reads every checkpoint variable into a flat dict and then rebuilds the
  nested Flax structure from the known TF->Flax name mapping, starting
  from the outermost layer.

  Args:
    checkpoint_path: Path to the TF checkpoint.

  Returns:
    A tuple of frozen dicts that are the parameters for the bert.BertEncoder
    and bert.heads.BertPooler.
  """
  reader = tf.train.load_checkpoint(checkpoint_path)
  flat_tf_params = {
      name: reader.get_tensor(name)
      for name in reader.get_variable_to_dtype_map()
  }
  encoder_params = convert_bert_encoder_params(flat_tf_params, 'bert/')
  pooler_name_map = {
      ('dense', 'bias'): 'bias',
      ('dense', 'kernel'): 'kernel',
  }
  pooler_params = param_conversion_util.convert_tf_params(
      flat_tf_params, pooler_name_map, 'bert/pooler/dense/')
  # TODO: Update this when BertEncoder is deleted.
  return frozen_dict.freeze(encoder_params), frozen_dict.freeze(pooler_params)
# TODO: Delete this when BertEncoder is deleted.
def convert_bert_encoder_params(tf_params: Dict[str, tf.Tensor],
                                prefix: str) -> Dict[str, Any]:
  """Loads all Flax BertEncoder parameters from the TF params."""
  embedder_params = param_conversion_util.convert_tf_params(
      tf_params, {
          ('embedders_position_ids', 'embedding'): 'position_embeddings',
          ('embedders_segment_ids', 'embedding'): 'token_type_embeddings',
          ('embedders_token_ids', 'embedding'): 'word_embeddings',
      }, f'{prefix}embeddings/')
  return {
      'embedder': embedder_params,
      'layer_norm': convert_layer_norm_params(
          tf_params, f'{prefix}embeddings/LayerNorm/'),
      'encoder_block': convert_encoder_block_params(
          tf_params, f'{prefix}encoder/'),
  }
def convert_full_encoder_params(tf_params: Dict[str, tf.Tensor],
                                prefix: str) -> Dict[str, Any]:
  """Loads all Flax Encoder parameters from the TF params."""
  embedder_block = convert_embedder_block_params(
      tf_params, f'{prefix}embeddings/')
  encoder_block = convert_encoder_block_params(tf_params, f'{prefix}encoder/')
  return {
      'embedder_block': embedder_block,
      'encoder_block': encoder_block,
  }
def convert_embedder_block_params(tf_params: Dict[str, tf.Tensor],
                                  prefix: str) -> Dict[str, Any]:
  """Loads all Flax EmbedderBlock parameters from the TF params."""
  embedder = convert_embedder_params(tf_params, prefix)
  layer_norm = convert_layer_norm_params(tf_params, f'{prefix}LayerNorm/')
  return {'embedder': embedder, 'layer_norm': layer_norm}
def convert_embedder_params(tf_params: Dict[str, tf.Tensor],
                            prefix: str) -> Dict[str, Any]:
  """Loads all Flax Embedder parameters from the TF params."""
  name_map = {
      ('embedders_input_ids', 'embedding'): 'word_embeddings',
      ('embedders_position_ids', 'embedding'): 'position_embeddings',
      ('embedders_segment_ids', 'embedding'): 'token_type_embeddings',
  }
  return param_conversion_util.convert_tf_params(tf_params, name_map, prefix)
def convert_encoder_block_params(tf_params: Dict[str, tf.Tensor],
                                 prefix: str) -> Dict[str, Any]:
  """Loads all Flax EncoderBlock parameters from the TF params."""
  layer_indices = param_conversion_util.get_int_regex_matches(
      f'{prefix}layer_(\\d+)/', tf_params)
  if not layer_indices:
    raise ValueError(f'No layers found with prefix {prefix!r}')
  # One Flax sub-tree per TF `layer_<i>` scope.
  layer_sequence = {
      f'layers_{i}': convert_encoder_layer_params(tf_params,
                                                  f'{prefix}layer_{i}/')
      for i in layer_indices
  }
  return {'layer_sequence': layer_sequence}
def convert_encoder_layer_params(tf_params: Dict[str, tf.Tensor],
                                 prefix: str) -> Dict[str, Any]:
  """Loads all Flax EncoderLayer parameters from the TF params."""
  attention = convert_self_attention_block_params(
      tf_params, f'{prefix}attention/')
  mlp = convert_mlp_block_params(tf_params, prefix)
  return {'attention_block': attention, 'mlp_block': mlp}
def convert_self_attention_block_params(tf_params: Dict[str, tf.Tensor],
                                        prefix: str) -> Dict[str, Any]:
  """Loads all Flax AttentionBlock parameters from the TF params."""
  return {
      'attention_layer': convert_attention_layer_params(
          tf_params, f'{prefix}self/'),
      'dense_layer': convert_dense_layer_params(
          tf_params, f'{prefix}output/dense/'),
      'layer_norm': convert_layer_norm_params(
          tf_params, f'{prefix}output/LayerNorm/'),
  }
def convert_mlp_block_params(tf_params: Dict[str, tf.Tensor],
                             prefix: str) -> Dict[str, Any]:
  """Loads all Flax MlpBlock parameters from the TF params."""
  intermediate = convert_dense_layer_params(
      tf_params, f'{prefix}intermediate/dense/')
  return {
      'mlp': {'dense_layer': intermediate},
      'dense_layer': convert_dense_layer_params(
          tf_params, f'{prefix}output/dense/'),
      'layer_norm': convert_layer_norm_params(
          tf_params, f'{prefix}output/LayerNorm/'),
  }
def convert_attention_layer_params(tf_params: Dict[str, tf.Tensor],
                                   prefix: str) -> Dict[str, Any]:
  """Loads all Flax MultiHeadDotProductAttention parameters from the TF params."""
  # Q/K/V projections all follow the plain dense-layer layout.
  return {
      name: convert_dense_layer_params(tf_params, f'{prefix}{name}/')
      for name in ('query', 'key', 'value')
  }
def convert_dense_layer_params(tf_params: Dict[str, tf.Tensor],
                               prefix: str) -> Dict[str, Any]:
  """Loads all Flax DenseGeneral parameters from the TF params."""
  name_map = {('kernel',): 'kernel', ('bias',): 'bias'}
  return param_conversion_util.convert_tf_params(tf_params, name_map, prefix)
def convert_layer_norm_params(tf_params: Dict[str, tf.Tensor],
                              prefix: str) -> Dict[str, Any]:
  """Loads all Flax LayerNorm parameters from the TF params."""
  # TF names the scale/offset pair `gamma`/`beta`.
  name_map = {('scale',): 'gamma', ('bias',): 'beta'}
  return param_conversion_util.convert_tf_params(tf_params, name_map, prefix)
| 7,555 | 37.55102 | 82 | py |
flaxformer | flaxformer-main/flaxformer/architectures/bert/bert_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flaxformer.architecture.bert.bert."""
import dataclasses
import json
import pathlib
from absl.testing import absltest
from flax.core import unfreeze
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
from flaxformer import testing_utils
from flaxformer.architectures.bert import bert
from flaxformer.architectures.bert import configs as bert_configs
from flaxformer.architectures.bert import heads
from flaxformer.components.attention import dense_attention
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
def _testdata_dir() -> pathlib.Path:
  """Returns the directory holding the golden test-data files."""
  srcdir = pathlib.Path(absltest.get_default_test_srcdir())
  return srcdir / 'flaxformer/architectures/bert/testdata'
def make_bert_inputs():
  """Returns (token_ids, position_ids, segment_ids, mask) for a BERT encoder."""
  token_ids = np.array([[5, 6, 7], [4, 2, 9]], dtype=np.int32)
  # A single row of positions 0..2; broadcast across the batch by the model.
  position_ids = np.arange(3, dtype=np.int32)[None, :]
  segment_ids = np.array([[0, 1, 1], [0, 0, 1]], dtype=np.int32)
  mask = np.ones((2, 3))
  return token_ids, position_ids, segment_ids, mask
class BertEncoderTest(absltest.TestCase):
  """Unit tests for `bert.BertEncoder`."""

  def test_output_shape(self):
    """Tests that BertEncoder outputs are of the correct shape."""
    token_ids, position_ids, segment_ids, mask = make_bert_inputs()
    hidden_size = 4
    model = bert.BertEncoder(
        hidden_size=hidden_size,
        intermediate_dim=5,
        vocab_size=10,
        max_length=11,
        num_segments=2,
        num_hidden_layers=3,
        num_attention_heads=2)
    output, _ = model.init_with_output(
        jax.random.PRNGKey(0),
        token_ids,
        position_ids,
        segment_ids,
        mask,
        enable_dropout=False)
    # One hidden vector of size `hidden_size` per input token.
    self.assertEqual(token_ids.shape + (hidden_size,), output.shape)

  def test_param_shapes(self):
    """Checks the parameter tree against the golden shapes file."""
    token_ids, position_ids, segment_ids, mask = make_bert_inputs()
    hidden_size = 4
    model = bert.BertEncoder(
        hidden_size=hidden_size,
        intermediate_dim=5,
        vocab_size=10,
        max_length=11,
        num_segments=2,
        num_hidden_layers=2,
        num_attention_heads=2)
    params = model.init(
        random.PRNGKey(0),
        token_ids,
        position_ids,
        segment_ids,
        mask,
        enable_dropout=False)['params']
    # Use a context manager so the golden-file handle is closed promptly
    # instead of leaking until garbage collection.
    with open(_testdata_dir() / 'model_param_shapes.json') as f:
      expected_shapes = json.load(f)
    self.assertSameStructure(
        testing_utils.param_shapes(params),
        expected_shapes,
        'Full params = ' +
        testing_utils.format_params_shapes(testing_utils.param_shapes(params)))

  def test_bert_from_bert_base_config(self):
    """Tests that BertEncoder can be constructed from a config object."""
    token_ids, position_ids, segment_ids, mask = make_bert_inputs()
    # A BERT Base config but smaller.
    config = bert_configs.BertBaseConfig(
        hidden_size=4,
        intermediate_dim=8,
        num_hidden_layers=3,
        num_attention_heads=2,
        vocab_size=11,
        max_length=13,
        num_segments=3)
    model = bert.BertEncoder(**dataclasses.asdict(config))
    output, _ = model.init_with_output(
        jax.random.PRNGKey(0),
        token_ids,
        position_ids,
        segment_ids,
        mask,
        enable_dropout=False)
    self.assertEqual(token_ids.shape + (config.hidden_size,), output.shape)
class BertMlmNspTest(absltest.TestCase):
  """Unit tests for `bert.BertMlmNsp`."""

  def _make_model(self):
    """Builds the small BertMlmNsp used by all tests; returns (model, vocab)."""
    hidden_size = 4
    vocab_size = 10
    encoder = bert.BertEncoder(
        hidden_size=hidden_size,
        intermediate_dim=5,
        vocab_size=vocab_size,
        max_length=11,
        num_segments=2,
        num_hidden_layers=2,
        num_attention_heads=2)
    pooler = heads.BertPooler()
    mlm_head = heads.MLMHead(
        encoder=encoder, hidden_size=hidden_size, vocab_size=vocab_size)
    nsp_head = heads.NSPHead(pooler=pooler)
    model = bert.BertMlmNsp(
        encoder=encoder, pooler=pooler, mlm_head=mlm_head, nsp_head=nsp_head)
    return model, vocab_size

  def test_output_shapes(self):
    """MLM logits cover the vocab; NSP logits have two classes."""
    model, vocab_size = self._make_model()
    token_ids, position_ids, segment_ids, mask = make_bert_inputs()
    output, _ = model.init_with_output(
        jax.random.PRNGKey(0),
        token_ids,
        position_ids=position_ids,
        segment_ids=segment_ids,
        input_mask=mask,
        enable_dropout=False)
    mlm_output, nsp_output = output
    self.assertEqual(token_ids.shape + (vocab_size,), mlm_output.shape)
    self.assertEqual(token_ids.shape[:1] + (2,), nsp_output.shape)

  def test_params(self):
    """Parameter tree matches the golden encoder shapes plus head shapes."""
    model, _ = self._make_model()
    token_ids, position_ids, segment_ids, mask = make_bert_inputs()
    params = model.init(
        jax.random.PRNGKey(0),
        token_ids,
        position_ids=position_ids,
        segment_ids=segment_ids,
        input_mask=mask,
        enable_dropout=False)['params']
    param_shapes = testing_utils.param_shapes(params)
    # Close the golden file deterministically instead of leaking the handle.
    with open(_testdata_dir() / 'model_param_shapes.json') as f:
      expected_encoder_param_shapes = json.load(f)
    expected_param_shapes = {
        'encoder': expected_encoder_param_shapes,
        'mlm_head': {
            'bias': (10,),
            'dense': {
                'bias': (4,),
                'kernel': (4, 4)
            },
            'layer_norm': {
                'bias': (4,),
                'scale': (4,)
            }
        },
        'nsp_head': {
            'dense': {
                'bias': (2,),
                'kernel': (4, 2)
            }
        },
        'pooler': {
            'dense': {
                'bias': (4,),
                'kernel': (4, 4)
            }
        }
    }
    self.assertSameStructure(param_shapes, expected_param_shapes)
class BertClassifierTest(absltest.TestCase):
  """Unit tests for `bert.BertClassifier`."""

  def _make_model(self, use_bias: bool = True):
    """Builds the small BertClassifier used by all tests.

    Args:
      use_bias: Whether the classifier head's dense layer uses a bias
        (True is also the `heads.ClassifierHead` default).

    Returns:
      A (model, num_classes) tuple.
    """
    num_classes = 3
    encoder = bert.BertEncoder(
        hidden_size=4,
        intermediate_dim=5,
        vocab_size=10,
        max_length=11,
        num_segments=2,
        num_hidden_layers=2,
        num_attention_heads=2)
    pooler = heads.BertPooler()
    classifier_head = heads.ClassifierHead(
        pooler=pooler, num_classes=num_classes, use_bias=use_bias)
    model = bert.BertClassifier(
        encoder=encoder, pooler=pooler, classifier_head=classifier_head)
    return model, num_classes

  def test_output_shapes(self):
    """Output logits have shape [batch, num_classes]."""
    model, num_classes = self._make_model()
    token_ids, position_ids, segment_ids, mask = make_bert_inputs()
    output, _ = model.init_with_output(
        jax.random.PRNGKey(0),
        token_ids,
        position_ids=position_ids,
        segment_ids=segment_ids,
        input_mask=mask,
        enable_dropout=False)
    self.assertEqual((token_ids.shape[0], num_classes), output.shape)

  def test_params(self):
    """Parameter tree matches the golden encoder shapes plus head shapes."""
    model, _ = self._make_model(use_bias=True)
    token_ids, position_ids, segment_ids, mask = make_bert_inputs()
    params = model.init(
        jax.random.PRNGKey(0),
        token_ids,
        position_ids=position_ids,
        segment_ids=segment_ids,
        input_mask=mask,
        enable_dropout=False)['params']
    param_shapes = testing_utils.param_shapes(params)
    # Close the golden file deterministically instead of leaking the handle.
    with open(_testdata_dir() / 'model_param_shapes.json') as f:
      expected_encoder_param_shapes = json.load(f)
    expected_param_shapes = {
        'encoder': expected_encoder_param_shapes,
        'classifier_head': {
            'dense': {
                'bias': (3,),
                'kernel': (4, 3)
            },
        },
        'pooler': {
            'dense': {
                'bias': (4,),
                'kernel': (4, 4)
            }
        }
    }
    self.assertSameStructure(param_shapes, expected_param_shapes)
class EncoderLayerTest(absltest.TestCase):
  """Unit tests for the layer built by `bert.make_encoder_layer`."""

  def test_wrong_inputs_dimension(self):
    """A rank-1 `inputs` array must trigger the layer's shape assertion."""
    rngs = dict(zip(('params', 'dropout'), random.split(random.PRNGKey(0), 2)))
    bad_inputs = random.uniform(random.PRNGKey(0), (4,), dtype=jnp.float32)
    encoder_layer = bert.make_encoder_layer(
        bert.make_attention_layer(num_heads=2),
        hidden_size=4,
        intermediate_dim=14)
    with self.assertRaisesRegex(AssertionError, r'.+ shape \(4,\).*'):
      encoder_layer.init_with_output(
          rngs, inputs=bad_inputs, attention_targets=bad_inputs)
class AttentionBlockTest(absltest.TestCase):
  """Unit tests for the block built by `bert.make_attention_block`."""

  def test_output_shape(self):
    """Tests that BERT attention block's output is of correct shape."""
    params_key, dropout_key = random.split(random.PRNGKey(0), 2)
    input_shape = (2, 3, 4)
    inputs = random.uniform(random.PRNGKey(0), input_shape, dtype=jnp.float32)
    # Self-attention: the same tensor serves as queries and targets below.
    layer = bert.make_attention_block(
        bert.make_attention_layer(num_heads=2), hidden_size=4)
    result, variables = layer.init_with_output(
        {
            'params': params_key,
            'dropout': dropout_key
        },
        inputs=inputs,
        attention_targets=inputs,
        enable_dropout=False)
    self.assertEqual(input_shape, result.shape)
    # Note: The layernorm has no axes annotations.
    params = unfreeze(variables['params'])
    del params['layer_norm']
    # Expected dtype/shape/logical-axis triple for each remaining param.
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(params,
                                               variables['params_axes']),
        {
            'attention_layer': {
                'key': {
                    'kernel': ['float32', 'embed=4', 'joined_kv=4'],
                    'bias': ['float32', 'joined_kv=4']
                },
                'query': {
                    'kernel': ['float32', 'embed=4', 'joined_kv=4'],
                    'bias': ['float32', 'joined_kv=4']
                },
                'value': {
                    'kernel': ['float32', 'embed=4', 'joined_kv=4'],
                    'bias': ['float32', 'joined_kv=4']
                },
            },
            'dense_layer': {
                'kernel': ['float32', 'joined_kv=4', 'embed=4'],
                'bias': ['float32', 'embed=4']
            },
        })

  def test_output_shape_with_mask(self):
    """Tests that attention block's output is of correct shape with a mask."""
    params_key, dropout_key = random.split(random.PRNGKey(0), 2)
    batch_size, max_seq_len, emb_dims = (2, 3, 4)
    input_shape = (batch_size, max_seq_len, emb_dims)
    # 1 marks a valid position, 0 a padded one, per batch row.
    inputs_mask = jnp.array([[1, 0, 0], [1, 1, 0]])
    inputs = random.uniform(random.PRNGKey(0), input_shape, dtype=jnp.float32)
    mask = dense_attention.make_attention_mask(inputs_mask, inputs_mask)
    layer = bert.make_attention_block(
        bert.make_attention_layer(num_heads=2), hidden_size=4)
    result, _ = layer.init_with_output(
        {
            'params': params_key,
            'dropout': dropout_key
        },
        inputs=inputs,
        attention_targets=inputs,
        mask=mask,
        enable_dropout=False)
    self.assertEqual(input_shape, result.shape)

  def test_wrong_head_count(self):
    """Tests that exception is raised for wrong head count."""
    params_key, dropout_key = random.split(random.PRNGKey(0), 2)
    input_shape = (2, 3, 4)
    inputs = random.uniform(random.PRNGKey(0), input_shape, dtype=jnp.float32)
    # hidden_size=4 cannot be split evenly across 5 heads.
    layer = bert.make_attention_block(
        bert.make_attention_layer(num_heads=5), hidden_size=4)
    with self.assertRaisesRegex(AssertionError, '.* 4 is not divisible by 5'):
      layer.init_with_output({
          'params': params_key,
          'dropout': dropout_key
      },
                             inputs=inputs,
                             attention_targets=inputs,
                             enable_dropout=False)
class MlpBlockTest(absltest.TestCase):
  """Unit tests for the block built by `bert.make_mlp_block`."""

  def test_output_shape(self):
    """Tests that BERT mlp block's output is of correct shape."""
    params_key, dropout_key = random.split(random.PRNGKey(0), 2)
    input_shape = (2, 3, 4)
    inputs = random.uniform(random.PRNGKey(0), input_shape, dtype=jnp.float32)
    layer = bert.make_mlp_block(hidden_size=4, intermediate_dim=14)
    result, variables = layer.init_with_output(
        {
            'params': params_key,
            'dropout': dropout_key
        },
        inputs,
        enable_dropout=False)
    self.assertEqual(input_shape, result.shape)
    # Note: The layernorm has no axes annotations.
    params = unfreeze(variables['params'])
    del params['layer_norm']
    # Expansion embed=4 -> mlp=14, then contraction mlp=14 -> embed=4.
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(params,
                                               variables['params_axes']),
        {
            'mlp': {
                'dense_layer': {
                    'kernel': ['float32', 'embed=4', 'mlp=14'],
                    'bias': ['float32', 'mlp=14']
                },
            },
            'dense_layer': {
                'kernel': ['float32', 'mlp=14', 'embed=4'],
                'bias': ['float32', 'embed=4']
            },
        })
# Standard absltest entry point so the file can be run directly.
if __name__ == '__main__':
  absltest.main()
| 14,782 | 32.751142 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/bert/heads.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains various BERT heads."""
from typing import Optional
from typing import Sequence
from flax import linen as nn
from flax.linen.initializers import zeros
import jax.numpy as jnp
from flaxformer.components import dense
from flaxformer.components import initializers
from flaxformer.types import Activation
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
_DEFAULT_LAYER_NORM = 1e-12
_DEFAULT_INIT_RANGE = 0.02
class BertPooler(nn.Module):
  """Pools the CLS embedding and passes it through a dense layer plus tanh.

  Attributes:
    kernel_init: Initializer for the dense layer kernel.
    dtype: The dtype of the computation (default: float32).
  """
  kernel_init: Initializer = initializers.truncated_normal(
      stddev=_DEFAULT_INIT_RANGE)
  dtype: DType = jnp.float32

  def _extract_cls_embedding(self, encoded_inputs: Array) -> Array:
    """Returns the position-0 (CLS token) embedding of each example."""
    # Slicing position 0 along the sequence axis yields a
    # [batch_size, hidden_size] array of CLS embeddings.
    return encoded_inputs[:, 0]

  @nn.compact
  def __call__(self, encoded_inputs: Array, **unused_kwargs):
    """Pools the CLS embedding and applies the dense+tanh transform.

    Args:
      encoded_inputs: Final-layer encoder outputs,
        <float32>[batch_size, seq_length, hidden_size].
      **unused_kwargs: unused.

    Returns:
      An array of logits <float32>[batch_size, hidden_size].
    """
    cls_embedding = self._extract_cls_embedding(encoded_inputs)
    projection = dense.DenseGeneral(
        features=cls_embedding.shape[-1],
        use_bias=True,
        dtype=self.dtype,
        kernel_init=self.kernel_init,
        kernel_axis_names=('embed', 'mlp'),
        name='dense')
    return nn.tanh(projection(cls_embedding))
class ClassifierHead(nn.Module):
  """Projects pooled CLS encodings onto class logits.

  Attributes:
    pooler: An instance of the BertPooler class.
    num_classes: The output layer size, which is the number of classes.
    kernel_init: Initializer for the classifier dense layer kernel.
    dropout_rate: Dropout probability used across all the model layers.
    dtype: The dtype of the computation (default: float32).
    enable_dropout: Enables dropout when set to True.
    use_bias: Use bias or not in the dense layer.
  """
  pooler: BertPooler
  num_classes: int
  kernel_init: Initializer = initializers.truncated_normal(
      stddev=_DEFAULT_INIT_RANGE)
  dropout_rate: float = 0.
  dtype: DType = jnp.float32
  enable_dropout: Optional[bool] = None
  use_bias: bool = True

  def setup(self):
    # A None module-level setting defers the dropout decision to call time.
    deterministic = (
        None if self.enable_dropout is None else not self.enable_dropout)
    self.dropout_layer = nn.Dropout(
        rate=self.dropout_rate, deterministic=deterministic)
    self.dense = dense.DenseGeneral(
        features=self.num_classes,
        use_bias=self.use_bias,
        dtype=self.dtype,
        kernel_init=self.kernel_init,
        kernel_axis_names=('embed', 'mlp'),
        name='dense')

  def __call__(self,
               encoded_inputs: Array,
               *,
               enable_dropout: Optional[bool] = None,
               **unused_kwargs) -> Array:
    """Pools the CLS embedding, applies dropout, and projects to logits.

    Args:
      encoded_inputs: Final-layer encoder outputs,
        <float32>[batch_size, seq_length, hidden_size].
      enable_dropout: Enables dropout when set to True; None defers to the
        value configured at construction time.
      **unused_kwargs: unused.

    Returns:
      An array of logits <float32>[batch_size, num_classes].
    """
    deterministic = None if enable_dropout is None else not enable_dropout
    pooled = self.pooler(encoded_inputs)
    pooled = self.dropout_layer(pooled, deterministic)
    return self.dense(pooled)
class NSPHead(nn.Module):
  """Next sentence prediction head.

  Attributes:
    pooler: An instance of the BertPooler class.
    kernel_init: Initializer for the classifier dense layer kernel.
    dtype: The dtype of the computation (default: float32).
  """
  pooler: BertPooler
  kernel_init: Initializer = initializers.truncated_normal(
      stddev=_DEFAULT_INIT_RANGE)
  dtype: DType = jnp.float32

  def setup(self):
    # Two output units: one per next-sentence label.
    self.mlp = dense.DenseGeneral(
        features=2,
        use_bias=True,
        dtype=self.dtype,
        kernel_init=self.kernel_init,
        kernel_axis_names=('embed', 'mlp'),
        name='dense')

  def __call__(self, encoded_inputs: Array, **unused_kwargs) -> Array:
    """Pools the CLS embedding and projects it into 2 logits.

    Args:
      encoded_inputs: Final-layer encoder outputs,
        <float32>[batch_size, seq_length, hidden_size].
      **unused_kwargs: unused.

    Returns:
      An array of logits <float32>[batch_size, 2].
    """
    return self.mlp(self.pooler(encoded_inputs))
def gather_indices(inputs: Array, indices: Array) -> Array:
"""Gathers the vectors at the specific indices over a minibatch.
Example:
inputs = [[[0], [1], [2]],
[[3], [4], [5]],
[[6], [7], [8]]]
indices = [[0, 1],
[1, 2],
[0, 2]]
gather_indices(inputs, indices) = [[[0], [1]],
[[4], [5]],
[[6], [8]]]
Args:
inputs: A 3-D input array shaped [batch_size, seq_length, features].
indices: A 2-D indices array with the positions that need to be selected,
with shape <int>[batch_size, indices_seq_length].
Returns:
The inputs, but only those positions (on axis 1) that are given by the
indices.
"""
# We can't index a 3D array with a 2D array, so we have to flatten the inputs.
# This way, we are indexing a 2D input array with a 1D indices array, and then
# we reshape it back.
batch_size, seq_length, features = inputs.shape
flat_offsets = (jnp.arange(batch_size) * seq_length).reshape([-1, 1])
flat_indices = (indices + flat_offsets).reshape([-1])
flat_inputs = inputs.reshape([batch_size * seq_length, features])
gathered_inputs = jnp.take(flat_inputs, flat_indices, axis=0, mode='clip')
# Reshape back into [batch_size, indices_seq_length, features].
return gathered_inputs.reshape([batch_size, -1, features])
class MLMHead(nn.Module):
  """Masked Language Model head.

  Attributes:
    encoder: The encoder module; its `embedder.embedders['token_ids']` table
      is reused (tied weights) to decode hidden states into vocab logits.
    hidden_size: Size of the hidden transform applied before decoding.
    vocab_size: The vocabulary size.
    kernel_init: Initializer for the hidden dense layer kernel.
    dropout_rate: Dropout probability used across all the model layers.
    activation: Activation function applied after the hidden transform.
    dtype: The dtype of the computation (default: float32).
  """
  encoder: nn.Module
  hidden_size: int
  vocab_size: int
  kernel_init: Initializer = initializers.truncated_normal(
      stddev=_DEFAULT_INIT_RANGE)
  dropout_rate: float = 0.
  activation: Activation = nn.gelu
  dtype: DType = jnp.float32

  def setup(self):
    self.mlm_hidden_layer = dense.DenseGeneral(
        features=self.hidden_size,
        kernel_init=self.kernel_init,
        dtype=self.dtype,
        use_bias=True,
        kernel_axis_names=('embed', 'mlp'),
        name='dense')
    self.layer_norm = nn.LayerNorm(
        epsilon=_DEFAULT_LAYER_NORM, name='layer_norm')
    # Per-vocab-entry output bias added on top of the tied-embedding decode.
    self.bias = self.param('bias', zeros, (self.vocab_size,))

  def __call__(self, encoded_inputs: Array, *,
               masked_positions: Optional[Array]) -> Array:
    """Transforms the encodings and computes logits for each token.

    Args:
      encoded_inputs: Final-layer encoder outputs,
        <float32>[batch_size, seq_length, hidden_size].
      masked_positions: The positions on which to apply the MLM head.
        Typically only 15% of the positions are masked out, so restricting
        prediction to them saves computation. May contain padding values and
        may be shorter than `encoded_inputs`. If None, predicts everywhere.

    Returns:
      Predicted logits across vocab for each selected position,
      <float32>[batch_size, seq_length, vocab_size].
    """
    selected = (
        encoded_inputs if masked_positions is None
        else gather_indices(encoded_inputs, masked_positions))
    hidden = self.activation(self.mlm_hidden_layer(selected))
    normalized = self.layer_norm(hidden)
    # Decode via the (tied) token embedding table, then add the output bias.
    token_embedder = self.encoder.embedder.embedders['token_ids']
    return token_embedder.attend(normalized) + self.bias
class MLP(nn.Module):
  """Multi-layer perceptron.

  A stack of dense layers of configurable widths. Every layer except the last
  is followed by an activation and dropout; the final projection's output is
  returned unchanged.

  Attributes:
    features: Sequence of model layer sizes.
    kernel_init: Initializer for the classifier dense layer kernel.
    dropout_rate: Dropout probability used across all the model layers.
    activations: Activation functions applied after each intermediate hidden
      layer (never after the last one), so the length must be
      `len(features) - 1`. If None, gelu is used for all intermediate layers.
    enable_dropout: Enables dropout when set to True.
    dtype: The dtype of the computation (default: float32).
    use_bias: Use bias or not in the dense layer.
  """
  features: Sequence[int]
  kernel_init: Initializer = initializers.truncated_normal(
      stddev=_DEFAULT_INIT_RANGE)
  dropout_rate: float = 0.
  activations: Optional[Sequence[Activation]] = None
  enable_dropout: Optional[bool] = None
  dtype: DType = jnp.float32
  use_bias: bool = True

  @nn.compact
  def __call__(self,
               inputs: Array,
               *,
               enable_dropout: Optional[bool] = None,
               **unused_kwargs) -> Array:
    """Applies the MLP to the inputs.

    Args:
      inputs: The model inputs. <float32>[batch_size, seq_length, hidden_size].
      enable_dropout: Enables dropout when set to True; overrides the module
        attribute of the same name when not None.
      **unused_kwargs: unused.

    Returns:
      The output of the model, of size <float32>[batch_size, seq_length,
      num_classes].
    """
    # The call-time flag wins over the module attribute; when neither is set,
    # dropout stays disabled.
    effective_dropout = (
        enable_dropout if enable_dropout is not None else self.enable_dropout)
    deterministic = True if effective_dropout is None else (
        not effective_dropout)

    num_hidden = len(self.features) - 1
    if self.activations is None:
      layer_activations = [nn.gelu] * num_hidden
    else:
      if len(self.activations) != num_hidden:
        raise ValueError('`activations` must be of length `len(features) - 1`. '
                         f'Got {len(self.activations)}, expected '
                         f'{len(self.features) - 1}.')
      layer_activations = self.activations

    outputs = inputs
    for idx, width in enumerate(self.features):
      outputs = dense.DenseGeneral(
          features=width,
          kernel_init=self.kernel_init,
          dtype=self.dtype,
          use_bias=self.use_bias,
          kernel_axis_names=('embed', 'mlp'),
          name=f'dense_{idx}')(
              outputs)
      # Activation and dropout are applied only between layers, never after
      # the final projection.
      if idx < num_hidden:
        outputs = layer_activations[idx](outputs)
        outputs = nn.Dropout(
            rate=self.dropout_rate, deterministic=deterministic)(outputs)
    return outputs
class TokenClassifierHead(nn.Module):
  """Token classification head.

  Produces per-token class logits on top of encoder outputs, for
  sequence-labeling tasks such as POS tagging, NER, and BIO tagging. All
  computation is delegated to an `MLP` submodule named 'mlp'.

  Attributes:
    features: Sequence of MLP layer sizes.
    kernel_init: Initializer for the classifier dense layer kernel.
    dropout_rate: Dropout probability used across all the model layers.
    activations: Activation functions to be used after the MLP hidden layers,
      except for the final layer.
    enable_dropout: Enables dropout when set to True.
    dtype: The dtype of the computation (default: float32).
    use_bias: Use bias or not in the dense layer.
  """
  features: Sequence[int]
  kernel_init: Initializer = initializers.truncated_normal(
      stddev=_DEFAULT_INIT_RANGE)
  dropout_rate: float = 0.
  activations: Optional[Sequence[Activation]] = None
  enable_dropout: Optional[bool] = None
  dtype: DType = jnp.float32
  use_bias: bool = True

  @nn.compact
  def __call__(self,
               encoded_inputs: Array,
               *,
               enable_dropout: Optional[bool] = None,
               **unused_kwargs) -> Array:
    """Transforms the encodings and computes logits for each token.

    Args:
      encoded_inputs: The inputs (e.g., token representations) that come from
        the final layer of the BERT encoder. <float32>[batch_size, seq_length,
        hidden_size].
      enable_dropout: Enables dropout when set to True.
      **unused_kwargs: unused.

    Returns:
      Predicted logits across classes for each token in the sentence
      <float32>[batch_size, seq_length, num_classes].
    """
    # Keyword arguments make the mapping onto the MLP's attributes explicit.
    classifier_mlp = MLP(
        features=self.features,
        kernel_init=self.kernel_init,
        dropout_rate=self.dropout_rate,
        activations=self.activations,
        enable_dropout=self.enable_dropout,
        dtype=self.dtype,
        use_bias=self.use_bias,
        name='mlp')
    return classifier_mlp(inputs=encoded_inputs, enable_dropout=enable_dropout)
| 14,583 | 34.312349 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/bert/heads_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flaxformer.architectures.bert.heads."""
import json
import pathlib
from typing import Optional
from absl.testing import absltest
from absl.testing import parameterized
import flax
from flax import linen as nn
import jax
import numpy as np
from flaxformer import testing_utils
from flaxformer.architectures.bert import bert
from flaxformer.architectures.bert import heads
from flaxformer.types import Array
# Parse absl flags test_srcdir and test_tmpdir so the test can locate its
# data files under the source tree.
jax.config.parse_flags_with_absl()
def _testdata_dir() -> pathlib.Path:
  """Returns the directory holding the BERT test data fixtures."""
  srcdir = pathlib.Path(absltest.get_default_test_srcdir())
  return srcdir / 'flaxformer/architectures/bert/testdata'
class BertHeadsTest(parameterized.TestCase):
  """Unit tests for the BERT heads (pooler, classifiers, NSP, MLM)."""

  def setUp(self):
    super().setUp()
    # Small fixed inputs: batch_size=2, seq_length=3.
    self.token_ids = np.array([[5, 6, 7], [4, 2, 9]], dtype=np.int32)
    self.position_ids = np.array([[0, 1, 2]], dtype=np.int32)
    self.segment_ids = np.array([[0, 1, 1], [0, 0, 1]], dtype=np.int32)
    self.mask = np.ones((2, 3))
    self.vocab_size = 10
    self.hidden_size = 4
    self.model = bert.BertEncoder(
        hidden_size=self.hidden_size,
        intermediate_dim=5,
        num_hidden_layers=2,
        num_attention_heads=2,
        vocab_size=self.vocab_size,
        max_length=11,
        num_segments=2)

  def test_bert_pooler(self):
    """Test whether Bert Pooler returns correct shape."""
    bert_output, _ = self.model.init_with_output(
        jax.random.PRNGKey(0),
        self.token_ids,
        self.position_ids,
        self.segment_ids,
        self.mask,
        enable_dropout=False)
    pooler = heads.BertPooler()
    output, variables = pooler.init_with_output(
        jax.random.PRNGKey(0), bert_output)
    self.assertEqual((2, self.hidden_size), output.shape)
    params = variables['params']
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(params,
                                               variables['params_axes']),
        {
            'dense': {
                'bias': ['float32', 'mlp=4'],
                'kernel': ['float32', 'embed=4', 'mlp=4']
            }
        })

  def test_bert_classifier(self):
    """Tests whether Classifier returns correct shape."""
    num_classes = 10
    bert_output, _ = self.model.init_with_output(
        jax.random.PRNGKey(0),
        self.token_ids,
        self.position_ids,
        self.segment_ids,
        self.mask,
        enable_dropout=False)
    pooler = heads.BertPooler()
    classifier_head = heads.ClassifierHead(
        pooler, num_classes, enable_dropout=False)
    output, variables = classifier_head.init_with_output(
        jax.random.PRNGKey(0), bert_output)
    self.assertEqual((2, num_classes), output.shape)
    params = variables['params']
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(params,
                                               variables['params_axes']),
        {
            'dense': {
                'bias': ['float32', 'mlp=10'],
                'kernel': ['float32', 'embed=4', 'mlp=10']
            },
            'pooler': {
                'dense': {
                    'bias': ['float32', 'mlp=4'],
                    'kernel': ['float32', 'embed=4', 'mlp=4']
                }
            }
        })

  def test_bert_nsp(self):
    """Tests whether NSP returns correct shape."""
    bert_output, _ = self.model.init_with_output(
        jax.random.PRNGKey(0),
        self.token_ids,
        self.position_ids,
        self.segment_ids,
        self.mask,
        enable_dropout=False)
    pooler = heads.BertPooler()
    nsp_head = heads.NSPHead(pooler)
    output, variables = nsp_head.init_with_output(
        jax.random.PRNGKey(0), bert_output)
    self.assertEqual((2, 2), output.shape)
    params = variables['params']
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(params,
                                               variables['params_axes']),
        {
            'dense': {
                'bias': ['float32', 'mlp=2'],
                'kernel': ['float32', 'embed=4', 'mlp=2']
            },
            'pooler': {
                'dense': {
                    'bias': ['float32', 'mlp=4'],
                    'kernel': ['float32', 'embed=4', 'mlp=4']
                }
            }
        })

  def test_gather_indices(self):
    """Tests that gather indices selects the right items in a batch."""
    inputs = np.array([[[0], [1], [2]], [[3], [4], [5]], [[6], [7], [8]]],
                      dtype=np.float32)
    indices = np.array([[0, 1], [1, 2], [0, 2]], dtype=np.int32)
    expected = np.array([[[0], [1]], [[4], [5]], [[6], [8]]], dtype=np.float32)
    result = heads.gather_indices(inputs, indices)  # pytype: disable=wrong-arg-types  # jax-ndarray
    np.testing.assert_array_equal(result, expected)

  @parameterized.named_parameters(
      ('without_masked_positions', None),
      ('with_masked_positions', np.array([[1, 2], [0, 1]], dtype=np.int32)))
  def test_bert_mlm(self, masked_positions):
    """Tests whether MLM returns correct shape."""

    class BertMlm(nn.Module):
      encoder: bert.BertEncoder
      mlm_head: heads.MLMHead

      def __call__(self,
                   token_ids: Array,
                   *,
                   position_ids: Array,
                   segment_ids: Array,
                   input_mask: Array,
                   masked_positions: Optional[Array] = None,
                   enable_dropout: bool = True) -> Array:
        bert_output = self.encoder(
            token_ids,
            position_ids=position_ids,
            segment_ids=segment_ids,
            input_mask=input_mask,
            enable_dropout=enable_dropout)
        return self.mlm_head(bert_output, masked_positions=masked_positions)

    mlm_head = heads.MLMHead(self.model, self.hidden_size, self.vocab_size)
    encoder_with_mlm = BertMlm(self.model, mlm_head)
    output, variables = encoder_with_mlm.init_with_output(
        jax.random.PRNGKey(0),
        self.token_ids,
        position_ids=self.position_ids,
        segment_ids=self.segment_ids,
        input_mask=self.mask,
        masked_positions=masked_positions,
        enable_dropout=False)
    params = variables['params']
    param_shapes = testing_utils.param_shapes(params)
    # Use a context manager so the fixture file is closed deterministically
    # instead of leaking the handle until garbage collection.
    with open(_testdata_dir() / 'model_param_shapes.json') as f:
      expected_encoder_param_shapes = json.load(f)
    expected_param_shapes = {
        'encoder': expected_encoder_param_shapes,
        'mlm_head': {
            'bias': [10],
            'dense': {
                'bias': [4],
                'kernel': [4, 4]
            },
            'layer_norm': {
                'bias': [4],
                'scale': [4]
            }
        }
    }
    batch_size, seq_length = self.token_ids.shape
    if masked_positions is not None:
      # Predictions are only made for the masked positions.
      seq_length = masked_positions.shape[1]
    self.assertSameStructure(param_shapes, expected_param_shapes)
    self.assertEqual((batch_size, seq_length, self.vocab_size), output.shape)
    params = flax.core.unfreeze(variables['params'])['mlm_head']
    del params['layer_norm']
    del params['bias']
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(
            params, variables['params_axes']['mlm_head']), {
                'dense': {
                    'bias': ['float32', 'mlp=4'],
                    'kernel': ['float32', 'embed=4', 'mlp=4']
                }
            })

  def test_MLP(self):
    """Tests whether Token Classifier returns correct shape."""
    num_classes = 10
    bert_output, _ = self.model.init_with_output(
        jax.random.PRNGKey(0),
        self.token_ids,
        self.position_ids,
        self.segment_ids,
        self.mask,
        enable_dropout=False)
    mlp = heads.MLP(features=[self.hidden_size, num_classes])
    output, variables = mlp.init_with_output(
        jax.random.PRNGKey(0), bert_output, enable_dropout=False)
    # We have batch_size=2 and seq_length=3
    self.assertEqual((2, 3, num_classes), output.shape)
    params = variables['params']
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(params,
                                               variables['params_axes']),
        {
            'dense_0': {
                'bias': ['float32', 'mlp=4'],
                'kernel': ['float32', 'embed=4', 'mlp=4']
            },
            'dense_1': {
                'bias': ['float32', 'mlp=10'],
                'kernel': ['float32', 'embed=4', 'mlp=10']
            }
        })

  def test_bert_token_classifier(self):
    """Tests whether Token Classifier returns correct shape."""
    num_classes = 10
    bert_output, _ = self.model.init_with_output(
        jax.random.PRNGKey(0),
        self.token_ids,
        self.position_ids,
        self.segment_ids,
        self.mask,
        enable_dropout=False)
    token_classifier_head = heads.TokenClassifierHead(
        features=[self.hidden_size, num_classes])
    output, variables = token_classifier_head.init_with_output(
        jax.random.PRNGKey(0), bert_output, enable_dropout=False)
    # We have batch_size=2 and seq_length=3
    self.assertEqual((2, 3, num_classes), output.shape)
    params = variables['params']
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(params,
                                               variables['params_axes']),
        {
            'mlp': {
                'dense_0': {
                    'bias': ['float32', 'mlp=4'],
                    'kernel': ['float32', 'embed=4', 'mlp=4']
                },
                'dense_1': {
                    'bias': ['float32', 'mlp=10'],
                    'kernel': ['float32', 'embed=4', 'mlp=10']
                }
            }
        })
# Run the test suite when executed as a script.
if __name__ == '__main__':
  absltest.main()
| 10,381 | 33.491694 | 100 | py |
flaxformer | flaxformer-main/flaxformer/architectures/bert/configs.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Base configuration."""
import abc
import dataclasses
from flax import linen as nn
from jax import numpy as jnp
from flaxformer.components import initializers
from flaxformer.types import Initializer # pylint: disable=g-multiple-import
@dataclasses.dataclass
class BertConfig(abc.ABC):
  """BERT configuration base dataclass.

  Concrete sizes (Base, Large) subclass this and fill in default values.
  """
  # The size of embeddings/hidden layers, and the size of MLP intermediates.
  hidden_size: int
  intermediate_dim: int
  # The total number of layers and the number of attention heads in each layer.
  num_hidden_layers: int
  num_attention_heads: int
  # The size of the input/output vocabulary, the maximum supported length, and
  # the number of segments (token type embeddings).
  vocab_size: int
  max_length: int  # `max_position_embeddings` in legacy BertConfig.
  num_segments: int  # `type_vocab_size` in legacy BertConfig.
  # Initializers, activations and dtypes for all the layers.
  # Legacy BertConfig has `initializer_range` which can be matched using
  # initializers.truncated_normal(stddev=initializer_range).
  bias_init: Initializer
  kernel_init: Initializer
  layer_norm_epsilon: float
  dtype: jnp.dtype
  # TODO: Support a `hidden_activation` config for the MLP.
  # `hidden_act` in legacy BertConfig.
  dropout_rate: float
  # TODO: Support a `attention_probs_dropout_rate` config.
@dataclasses.dataclass
class BertBaseConfig(BertConfig):
  """BERT Base configuration (defaults for all `BertConfig` fields)."""
  hidden_size: int = 768
  intermediate_dim: int = 3072
  num_hidden_layers: int = 12
  num_attention_heads: int = 12
  vocab_size: int = 30522
  max_length: int = 512
  num_segments: int = 2
  bias_init: Initializer = nn.initializers.zeros
  kernel_init: Initializer = initializers.truncated_normal(stddev=0.02)
  layer_norm_epsilon: float = 1e-12
  dtype: jnp.dtype = jnp.float32
  # TODO: Set `hidden_activation` to jax.nn.gelu.
  dropout_rate: float = 0.1
  # TODO: Set `attention_probs_dropout_rate` to 0.1.
@dataclasses.dataclass
class BertLargeConfig(BertConfig):
  """BERT Large configuration (defaults for all `BertConfig` fields)."""
  hidden_size: int = 1024
  intermediate_dim: int = 4096
  num_hidden_layers: int = 24
  num_attention_heads: int = 16
  vocab_size: int = 30522
  max_length: int = 512
  num_segments: int = 2
  bias_init: Initializer = nn.initializers.zeros
  kernel_init: Initializer = initializers.truncated_normal(stddev=0.02)
  layer_norm_epsilon: float = 1e-12
  dtype: jnp.dtype = jnp.float32
  # TODO: Set `hidden_activation` to jax.nn.gelu.
  dropout_rate: float = 0.1
  # TODO: Set `attention_probs_dropout_rate` to 0.1.
| 3,132 | 29.417476 | 79 | py |
flaxformer | flaxformer-main/flaxformer/architectures/t5/t5_1_0.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of t5_base version 1.0."""
import abc
import dataclasses
import functools
from jax import numpy as jnp
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.architectures.t5 import t5_common_layers
# Defaults shared by all T5 1.0 model sizes built in this module.
DROPOUT_RATE = 0.0
ACTIVATIONS = ('relu',)
VOCAB_SIZE = 32128
@dataclasses.dataclass(frozen=True)
class Config(abc.ABC):
  """T5 configuration base dataclass.

  Captures the architectural hyperparameters that distinguish the T5 1.0
  model sizes (Small/Base/Large/XL/XXL) defined below.
  """
  # The size of the embeddings, hidden layers, and intermediates.
  embedding_dim: int
  mlp_dim: int
  # The dimension of each attention head.
  head_dim: int
  # The total number of layers and the number of attention heads in each layer.
  num_heads: int
  num_encoder_layers: int
  num_decoder_layers: int
# T5-Small.
SMALL_CONFIG = Config(
    embedding_dim=512,
    head_dim=64,
    mlp_dim=2048,
    num_heads=8,
    num_encoder_layers=6,
    num_decoder_layers=6,
)
# T5-Base.
BASE_CONFIG = Config(
    embedding_dim=768,
    head_dim=64,
    mlp_dim=3072,
    num_heads=12,
    num_encoder_layers=12,
    num_decoder_layers=12,
)
# T5-Large.
LARGE_CONFIG = Config(
    embedding_dim=1024,
    head_dim=64,
    mlp_dim=4096,
    num_heads=16,
    num_encoder_layers=24,
    num_decoder_layers=24,
)
# Also known as T5-3B.
XL_CONFIG = Config(
    embedding_dim=1024,
    head_dim=128,
    mlp_dim=16384,
    num_heads=32,
    num_encoder_layers=24,
    num_decoder_layers=24,
)
# Also known as T5-11B.
XXL_CONFIG = Config(
    embedding_dim=1024,
    head_dim=128,
    mlp_dim=65536,
    num_heads=128,
    num_encoder_layers=24,
    num_decoder_layers=24,
)
def encoder_decoder(embedding_dim,
                    mlp_dim,
                    num_heads,
                    num_encoder_layers,
                    num_decoder_layers,
                    head_dim,
                    vocabulary_size=VOCAB_SIZE,
                    dropout_rate=DROPOUT_RATE,
                    activations=ACTIVATIONS,
                    dtype=jnp.bfloat16):
  """Create a T5-1.0 style encoder-decoder stack.

  Args:
    embedding_dim: The size of the embedding for this stack.
    mlp_dim: The dimension of the multilayer perceptron.
    num_heads: The number of attention heads.
    num_encoder_layers: The number of encoder layers to create.
    num_decoder_layers: The number of decoder layers to create.
    head_dim: The dimension of the attention head.
    vocabulary_size: The size of the embedding vocabulary.
    dropout_rate: The dropout rate. Set to 0.0 to turn off dropout.
    activations: The activations to use for the MLP.
    dtype: The dtype for all layers in this encoder-decoder.

  Returns:
    A T5-style encoder-decoder.
  """
  # The token embedder is shared between the encoder and the decoder.
  make_embedder = functools.partial(
      t5_common_layers.embedding,
      vocabulary_size=vocabulary_size,
      embedding_dim=embedding_dim,
      dtype=dtype)
  make_encoder = functools.partial(
      t5_common_layers.encoder,
      num_layers=num_encoder_layers,
      num_heads=num_heads,
      head_dim=head_dim,
      mlp_dim=mlp_dim,
      dropout_rate=dropout_rate,
      activations=activations,
      dtype=dtype)
  make_decoder = functools.partial(
      t5_common_layers.decoder,
      num_layers=num_decoder_layers,
      num_heads=num_heads,
      head_dim=head_dim,
      mlp_dim=mlp_dim,
      dropout_rate=dropout_rate,
      activations=activations,
      dtype=dtype)
  return t5_architecture.EncoderDecoder(
      encoder_factory=make_encoder,
      decoder_factory=make_decoder,
      shared_token_embedder_factory=make_embedder,
      dtype=dtype)  # pytype: disable=wrong-keyword-args
| 4,088 | 26.628378 | 79 | py |
flaxformer | flaxformer-main/flaxformer/architectures/t5/t5_architecture_test_utils.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilties for t5_architecture_test, and related tests."""
from typing import Any, Optional
from aqt.jax_legacy.jax import quantization as aqt
from flax import linen as nn
from jax import numpy as jnp
from flaxformer.architectures.t5 import parallel_fused_decoder
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.components import dense
from flaxformer.components import embedding
from flaxformer.components import layer_norm
from flaxformer.components import relative_position_biases
from flaxformer.components.attention import dense_attention
# Initializers shared by the test model configurations built in this module.
EMBEDDING_INIT = nn.initializers.normal(stddev=1.0)
RELPOS_BIAS_INIT = nn.initializers.variance_scaling(1.0, 'fan_avg', 'uniform')
ATTENTION_KERNEL_INIT = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'normal'
)
MLP_KERNEL_INIT = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'truncated_normal'
)
FINAL_KERNEL_INIT = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'truncated_normal'
)
BIAS_INIT = nn.initializers.normal(stddev=1e-6)
def make_token_emb1(vocab_size, dtype, features=13):
  """Builds the token embedder used across these test configurations."""
  token_embedder = embedding.Embed(  # pytype: disable=wrong-arg-types  # jax-types
      num_embeddings=vocab_size,
      features=features,
      cast_input_dtype=jnp.int32,
      dtype=dtype,
      # Attend in float32 for logit training stability.
      attend_dtype=jnp.float32,
      embedding_init=EMBEDDING_INIT,
      name='token_embedder',
  )
  return token_embedder
def make_attention1(num_attn_heads, dtype, use_rotary_embedding=False):
  """Builds the multi-head attention module used by the test configs."""
  attention_module = dense_attention.MultiHeadDotProductAttention(  # pytype: disable=wrong-arg-types  # jax-types
      num_heads=num_attn_heads,
      head_dim=None,
      qkv_features=512,
      dtype=dtype,
      kernel_init=ATTENTION_KERNEL_INIT,
      bias_init=BIAS_INIT,
      use_bias=False,
      broadcast_dropout=True,
      dropout_rate=0.1,
      use_rotary_embedding=use_rotary_embedding,
  )
  return attention_module
def make_mlp1(dtype):
  """Builds the MLP block used by the test configurations."""
  mlp_block = dense.MlpBlock(
      use_bias=False,
      intermediate_dim=2048,
      activations=('relu',),
      kernel_init=MLP_KERNEL_INIT,
      bias_init=BIAS_INIT,
      intermediate_dropout_rate=0.1,
      final_dropout_rate=0.1,
      dtype=dtype,
  )
  return mlp_block
def _make_relative_position_bias(
    num_attn_heads: int, dtype: Any
) -> relative_position_biases.RelativePositionBiases:
  """Builds a T5-style relative position bias module for the test configs."""
  bias_module = relative_position_biases.RelativePositionBiases(
      num_buckets=32,
      max_distance=128,
      num_heads=num_attn_heads,
      dtype=dtype,
      embedding_init=RELPOS_BIAS_INIT,
  )
  return bias_module
def make_config1(
    scan_layers=False, layer_remat='legacy', sow_intermediates=False
) -> t5_architecture.EncoderDecoder:
  """Returns an EncoderDecoder.

  Encoder and decoder each build their own token embedder, and every layer
  builds its own relative position bias (nothing is shared).
  """
  dtype = jnp.float32
  num_attn_heads = 8
  make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
  make_layer_norm = layer_norm.T5LayerNorm
  def _make_encoder_layer(shared_relative_position_bias):
    # Nothing shared: each layer creates its own relative position bias.
    assert shared_relative_position_bias is None
    return t5_architecture.EncoderLayer(
        attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)
        ),
        scanned=scan_layers,
        sow_intermediates=sow_intermediates,
    )
  def _make_decoder_layer(shared_relative_position_bias):
    assert shared_relative_position_bias is None
    return t5_architecture.DecoderLayer(
        self_attention=make_attention1(num_attn_heads, dtype),
        encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)
        ),
        scanned=scan_layers,
        sow_intermediates=sow_intermediates,
    )
  def _make_encoder(shared_token_embedder):
    # The encoder builds its own token embedder (no sharing with the decoder).
    assert shared_token_embedder is None
    return t5_architecture.Encoder(
        num_layers=3,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=_make_encoder_layer,
        input_dropout_factory=make_dropout,
        output_dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        dtype=dtype,
        scan_layers=scan_layers,
        layer_remat=layer_remat,
        sow_intermediates=sow_intermediates,
    )
  def _make_decoder(shared_token_embedder):
    assert shared_token_embedder is None
    return t5_architecture.Decoder(
        num_layers=2,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=_make_decoder_layer,
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        # No separate logits projection; the decoder ties to its embedder.
        output_logits_factory=None,
        dtype=dtype,
        scan_layers=scan_layers,
        layer_remat=layer_remat,
        sow_intermediates=sow_intermediates,
    )
  return t5_architecture.EncoderDecoder(
      shared_token_embedder_factory=lambda: None,
      encoder_factory=_make_encoder,
      decoder_factory=_make_decoder,
      scan_layers=scan_layers,
  )
def make_parallel_transformer_config() -> t5_architecture.EncoderDecoder:
  """Returns an EncoderDecoder with parallel=True.

  Same wiring as `make_config1` but every layer runs attention and MLP in
  parallel, and computation is in bfloat16.
  """
  dtype = jnp.bfloat16
  num_attn_heads = 8
  make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
  make_layer_norm = layer_norm.T5LayerNorm
  def _make_encoder_layer(shared_relative_position_bias):
    assert shared_relative_position_bias is None
    return t5_architecture.EncoderLayer(
        attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)
        ),
        parallel=True,
    )
  def _make_decoder_layer(shared_relative_position_bias):
    assert shared_relative_position_bias is None
    return t5_architecture.DecoderLayer(
        self_attention=make_attention1(num_attn_heads, dtype),
        encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)
        ),
        parallel=True,
    )
  def _make_encoder(shared_token_embedder):
    assert shared_token_embedder is None
    return t5_architecture.Encoder(
        num_layers=3,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=_make_encoder_layer,
        input_dropout_factory=make_dropout,
        output_dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        dtype=dtype,
    )
  def _make_decoder(shared_token_embedder):
    assert shared_token_embedder is None
    return t5_architecture.Decoder(
        num_layers=2,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=_make_decoder_layer,
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        output_logits_factory=None,
        dtype=dtype,
    )
  return t5_architecture.EncoderDecoder(
      shared_token_embedder_factory=lambda: None,
      encoder_factory=_make_encoder,
      decoder_factory=_make_decoder,
  )
def make_parallel_fused_transformer_config(
    use_aqt: bool = False,
    weight_params: Optional[aqt.QuantOps.WeightParams] = None,
    possibly_use_quantized_vars: bool = False,
    is_quant_finetune_mode: bool = False,
) -> t5_architecture.DecoderOnly:
  """Returns a DecoderOnly built from parallel-fused decoder layers.

  Uses multi-query attention and a fused swish/linear MLP; the quantization
  arguments are forwarded to `ParallelFusedDecoderLayer`.
  """
  dtype = jnp.bfloat16
  num_attn_heads = 8
  num_features = 13
  make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
  make_layer_norm = layer_norm.T5LayerNorm
  def _make_mq_attention(num_attn_heads, dtype):
    """First test configuration for attention."""
    return dense_attention.MultiQueryDotProductAttention(  # pytype: disable=wrong-arg-types  # jax-types
        num_heads=num_attn_heads,
        dtype=dtype,
        qkv_features=512,
        out_features=num_features,
        head_dim=None,
        kernel_init=ATTENTION_KERNEL_INIT,
        bias_init=BIAS_INIT,
        use_bias=False,
        broadcast_dropout=True,
        dropout_rate=0.1,
        rescale_logits=True,
    )
  def _make_fusion_mlp(dtype):
    """First test configuration for the MLP."""
    return dense.MlpBlock(
        use_bias=False,
        intermediate_dim=2048,
        out_dim=13,
        # Intermediates are computed by the fused layer and passed in.
        precomputed_intermediates=True,
        fuse_kernels=False,
        activations=('swish', 'linear'),
        kernel_init=MLP_KERNEL_INIT,
        bias_init=BIAS_INIT,
        intermediate_dropout_rate=0.1,
        final_dropout_rate=0.1,
        dtype=dtype,
    )
  def _make_decoder_layer(shared_relative_position_bias):
    assert shared_relative_position_bias is None
    return parallel_fused_decoder.ParallelFusedDecoderLayer(
        self_attention=_make_mq_attention(num_attn_heads, dtype),
        mlp=_make_fusion_mlp(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)
        ),
        use_aqt=use_aqt,
        weight_params=weight_params,
        possibly_use_quantized_vars=possibly_use_quantized_vars,
        is_quant_finetune_mode=is_quant_finetune_mode,
    )
  def _make_output_logits():
    return dense.DenseGeneral(  # pytype: disable=wrong-arg-types  # jax-types
        4,
        dtype=dtype,
        kernel_init=FINAL_KERNEL_INIT,
        bias_init=BIAS_INIT,
        use_bias=False,
    )
  def _embedder():
    return make_token_emb1(2_000, dtype, num_features)
  def _make_decoder(shared_token_embedder):
    assert shared_token_embedder is None
    return t5_architecture.Decoder(
        num_layers=2,
        token_embedder_factory=_embedder,
        layer_factory=_make_decoder_layer,
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        output_logits_factory=_make_output_logits,
        dtype=dtype,
    )
  return t5_architecture.DecoderOnly(
      decoder_factory=_make_decoder,
  )
# TODO: DRY up with above configs.
def make_config2_shared_relative_position_bias() -> (
    t5_architecture.EncoderDecoder
):
  """Returns an EncoderDecoder with shared relative position biases.

  The encoder and decoder stacks each build one relative position bias that
  is passed into every one of their layers.
  """
  dtype = jnp.float32
  num_attn_heads = 8
  make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
  make_layer_norm = layer_norm.T5LayerNorm
  def _make_encoder_layer(shared_relative_position_bias):
    # The stack-level shared bias must be provided to each layer.
    assert shared_relative_position_bias is not None
    return t5_architecture.EncoderLayer(
        attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        shared_relative_position_bias=shared_relative_position_bias,
    )
  def _make_decoder_layer(shared_relative_position_bias):
    assert shared_relative_position_bias is not None
    return t5_architecture.DecoderLayer(
        self_attention=make_attention1(num_attn_heads, dtype),
        encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        shared_relative_position_bias=shared_relative_position_bias,
    )
  def _make_encoder(*, shared_token_embedder=None):
    assert shared_token_embedder is None
    return t5_architecture.Encoder(
        num_layers=3,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=_make_encoder_layer,
        input_dropout_factory=make_dropout,
        output_dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        shared_relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)
        ),
        dtype=dtype,
    )
  def _make_decoder(*, shared_token_embedder=None):
    assert shared_token_embedder is None
    return t5_architecture.Decoder(
        num_layers=2,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=_make_decoder_layer,
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        output_logits_factory=None,
        shared_relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)
        ),
        dtype=dtype,
    )
  return t5_architecture.EncoderDecoder(
      shared_token_embedder_factory=lambda: None,
      encoder_factory=_make_encoder,
      decoder_factory=_make_decoder,
  )
# TODO: DRY up with above configs.
def make_config3_shared_token_embedder() -> t5_architecture.EncoderDecoder:
  """Returns an EncoderDecoder with a shared token embedder.

  A single token embedder is created at the EncoderDecoder level and passed
  to both the encoder and the decoder; intermediates and gradients are
  captured (`sow_intermediates` / `capture_gradients`).
  """
  dtype = jnp.float32
  num_attn_heads = 8
  make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
  make_layer_norm = layer_norm.T5LayerNorm
  sow_intermediates = True
  capture_gradients = True
  def _make_encoder_layer(shared_relative_position_bias):
    assert shared_relative_position_bias is None
    return t5_architecture.EncoderLayer(
        attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)
        ),
        sow_intermediates=sow_intermediates,
    )
  def _make_decoder_layer(shared_relative_position_bias):
    assert shared_relative_position_bias is None
    return t5_architecture.DecoderLayer(
        self_attention=make_attention1(num_attn_heads, dtype),
        encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)
        ),
        sow_intermediates=sow_intermediates,
    )
  def _make_encoder(*, shared_token_embedder=None):
    # Unlike the other configs, the shared embedder is used here.
    return t5_architecture.Encoder(
        num_layers=3,
        shared_token_embedder=shared_token_embedder,
        layer_factory=_make_encoder_layer,
        input_dropout_factory=make_dropout,
        output_dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        dtype=dtype,
        sow_intermediates=sow_intermediates,
        capture_gradients=capture_gradients,
    )
  def _make_decoder(*, shared_token_embedder=None):
    return t5_architecture.Decoder(
        num_layers=2,
        shared_token_embedder=shared_token_embedder,
        layer_factory=_make_decoder_layer,
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        output_logits_factory=None,
        dtype=dtype,
        sow_intermediates=sow_intermediates,
        capture_gradients=capture_gradients,
    )
  return t5_architecture.EncoderDecoder(
      shared_token_embedder_factory=lambda: make_token_emb1(71, dtype),
      encoder_factory=_make_encoder,
      decoder_factory=_make_decoder,
  )
def test_make_decoder_only1() -> t5_architecture.DecoderOnly:
  """Returns a small DecoderOnly model for tests.

  Two decoder layers with per-layer relative position biases, a 4-entry
  token embedder, no encoder-decoder attention, and an explicit 4-way
  output logits projection.
  """
  activation_dtype = jnp.float32
  heads = 8

  def dropout_factory():
    return nn.Dropout(rate=0.1, broadcast_dims=(-2,))

  norm_factory = layer_norm.T5LayerNorm

  def relpos_bias_factory():
    return _make_relative_position_bias(heads, activation_dtype)

  def decoder_layer_factory(shared_relative_position_bias):
    # Per-layer biases are used, so no shared bias should be passed in.
    assert shared_relative_position_bias is None
    return t5_architecture.DecoderLayer(
        self_attention=make_attention1(heads, activation_dtype),
        encoder_decoder_attention=None,
        mlp=make_mlp1(activation_dtype),
        dropout_factory=dropout_factory,
        layer_norm_factory=norm_factory,
        relative_position_bias_factory=relpos_bias_factory,
    )

  def logits_factory():
    return dense.DenseGeneral(  # pytype: disable=wrong-arg-types  # jax-types
        4,
        dtype=activation_dtype,
        kernel_init=FINAL_KERNEL_INIT,
        bias_init=BIAS_INIT,
        use_bias=False,
    )

  def decoder_factory(*, shared_token_embedder=None):
    # This configuration does not use a shared embedder.
    assert shared_token_embedder is None
    return t5_architecture.Decoder(
        num_layers=2,
        token_embedder_factory=lambda: make_token_emb1(4, activation_dtype),
        layer_factory=decoder_layer_factory,
        dropout_factory=dropout_factory,
        layer_norm_factory=norm_factory,
        output_logits_factory=logits_factory,
        dtype=activation_dtype,
    )

  return t5_architecture.DecoderOnly(decoder_factory=decoder_factory)
# ---- dataset metadata (extraction artifact): | 17,616 | 33.408203 | 105 | py | ----
# Source: flaxformer/architectures/t5/t5_1_1.py
# Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of t5_base version 1.1."""
import abc
import dataclasses
import functools
from jax import numpy as jnp
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.architectures.t5 import t5_common_layers
from flaxformer.components import dense
# Default hyperparameters shared by all T5-1.1 configurations below.
DROPOUT_RATE = 0.0  # Dropout disabled by default.
ACTIVATIONS = ('gelu', 'linear')  # Gated-GELU MLP (gelu * linear).
VOCAB_SIZE = 32128  # SentencePiece vocabulary size used by T5.
HEAD_DIM = 64  # Per-head attention dimension.
@dataclasses.dataclass(frozen=True)
class Config(abc.ABC):
  """T5 configuration base dataclass.

  Holds the size hyperparameters that distinguish the T5-1.1 model family
  members (Small/Base/Large/XL/XXL) instantiated below.
  """

  # The size of the embeddings, hidden layers, and intermediates.
  embedding_dim: int  # Token embedding / model width.
  mlp_dim: int  # Hidden dimension of the feed-forward blocks.
  # The total number of layers and the number of attention heads in each layer.
  num_heads: int
  num_encoder_layers: int
  num_decoder_layers: int
# T5-1.1 Small.
SMALL_CONFIG = Config(
    embedding_dim=512,
    mlp_dim=1024,
    num_heads=6,
    num_encoder_layers=8,
    num_decoder_layers=8,
)
# T5-1.1 Base.
BASE_CONFIG = Config(
    embedding_dim=768,
    mlp_dim=2048,
    num_heads=12,
    num_encoder_layers=12,
    num_decoder_layers=12,
)
# T5-1.1 Large.
LARGE_CONFIG = Config(
    embedding_dim=1024,
    mlp_dim=2816,
    num_heads=16,
    num_encoder_layers=24,
    num_decoder_layers=24,
)
# Also known as T5-3B.
XL_CONFIG = Config(
    embedding_dim=2048,
    mlp_dim=5120,
    num_heads=32,
    num_encoder_layers=24,
    num_decoder_layers=24,
)
# Also known as T5-11B.
XXL_CONFIG = Config(
    embedding_dim=4096,
    mlp_dim=10240,
    num_heads=64,
    num_encoder_layers=24,
    num_decoder_layers=24,
)
def encoder_decoder(embedding_dim,
                    mlp_dim,
                    num_heads,
                    num_encoder_layers,
                    num_decoder_layers,
                    head_dim=HEAD_DIM,
                    vocabulary_size=VOCAB_SIZE,
                    dropout_rate=DROPOUT_RATE,
                    activations=ACTIVATIONS,
                    dtype=jnp.bfloat16):
  """Builds a T5-1.1 style encoder-decoder stack.

  Args:
    embedding_dim: Size of the token embeddings.
    mlp_dim: Hidden dimension of each feed-forward (MLP) block.
    num_heads: Number of attention heads per layer.
    num_encoder_layers: Number of layers in the encoder stack.
    num_decoder_layers: Number of layers in the decoder stack.
    head_dim: Dimension of each attention head.
    vocabulary_size: Number of entries in the token vocabulary.
    dropout_rate: Dropout rate; 0.0 disables dropout.
    activations: Activation functions used by the MLP blocks.
    dtype: Computation dtype for the layers in this stack.

  Returns:
    A T5-style encoder-decoder.
  """
  # One token embedder shared between the encoder and decoder inputs.
  token_embedder_factory = functools.partial(
      t5_common_layers.embedding,
      vocabulary_size=vocabulary_size,
      embedding_dim=embedding_dim,
      dtype=dtype)

  # T5 1.1 has decoupled embeddings, so the decoder gets a separate output
  # logits projection; the logits are produced in float32.
  logits_factory = functools.partial(
      dense.DenseGeneral,
      use_bias=False,
      features=vocabulary_size,
      dtype='float32',
      kernel_init=t5_common_layers.MLP_KERNEL_INIT,
      bias_init=t5_common_layers.BIAS_INIT,
  )

  make_encoder = functools.partial(
      t5_common_layers.encoder,
      num_heads=num_heads,
      head_dim=head_dim,
      mlp_dim=mlp_dim,
      num_layers=num_encoder_layers,
      dropout_rate=dropout_rate,
      activations=activations,
      dtype=dtype)

  make_decoder = functools.partial(
      t5_common_layers.decoder,
      num_heads=num_heads,
      head_dim=head_dim,
      mlp_dim=mlp_dim,
      num_layers=num_decoder_layers,
      dropout_rate=dropout_rate,
      activations=activations,
      output_logits_factory=logits_factory,
      dtype=dtype)

  return t5_architecture.EncoderDecoder(
      encoder_factory=make_encoder,
      decoder_factory=make_decoder,
      shared_token_embedder_factory=token_embedder_factory,
      dtype=dtype)  # pytype: disable=wrong-keyword-args
# ---- dataset metadata (extraction artifact): | 4,442 | 27.664516 | 79 | py | ----
# Source: flaxformer/architectures/t5/t5_architecture_test.py
# Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t5_architecture."""
import functools
from absl.testing import absltest
import jax
from jax import random
import numpy as np
from flaxformer import sharding
from flaxformer import testing_utils
from flaxformer.architectures.t5 import t5_architecture_test_utils as t5_test_utils
# Golden JSON files describing expected parameter trees live under testdata/.
expected_files = testing_utils.ExpectedJsonFiles(
    'flaxformer/architectures/t5/testdata')
# Shorthand used throughout: compare parameter shapes only (not axis names).
check_params = expected_files.check_params_shapes_only
class EncoderDecoderTest(absltest.TestCase):
  """Tests for the T5 `EncoderDecoder` architecture combinators."""

  def test_encoder_shapes_with_relative_attention_per_layer(self):
    """Checks encoder param shapes when each layer has its own relpos bias."""
    transformer = t5_test_utils.make_config1()
    inputs = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 7],
        ],
        dtype=np.int32)
    output, variables = transformer.init_with_output(
        random.PRNGKey(0),
        inputs,
        enable_dropout=False,
        method=transformer.encode,
    )
    reformatted = transformer.apply({},
                                    variables['params'],
                                    method=transformer.to_save_format)
    check_params(reformatted, 'encoder_shapes_per_layer_relpos_bias.json')
    self.assertEqual(output.shape, (2, 4, 13))

    # Convert back to Flax module structure format and test again.
    params2 = transformer.apply({},
                                reformatted,
                                method=transformer.from_save_format)
    output2 = transformer.apply(
        {'params': params2},
        inputs,
        enable_dropout=False,
        method=transformer.encode,
    )
    np.testing.assert_allclose(output, output2, rtol=1e-8)

  def test_parallel_transformer_config(self):
    """Checks param shapes and save-format round-trip for the parallel config."""
    transformer = t5_test_utils.make_parallel_transformer_config()
    inputs = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 7],
        ],
        dtype=np.int32)
    output, variables = transformer.init_with_output(
        random.PRNGKey(0),
        inputs,
        enable_dropout=False,
        method=transformer.encode,
    )
    reformatted = transformer.apply({},
                                    variables['params'],
                                    method=transformer.to_save_format)
    expected_files.check_params(reformatted,
                                'parallel_transformer_encoder_shapes.json')
    self.assertEqual(output.shape, (2, 4, 13))

    # Convert back to Flax module structure format and test again.
    params2 = transformer.apply({},
                                reformatted,
                                method=transformer.from_save_format)
    output2 = transformer.apply(
        {'params': params2},
        inputs,
        enable_dropout=False,
        method=transformer.encode,
    )
    np.testing.assert_allclose(output, output2, rtol=1e-8)

  def test_encode_shared_relative_position_bias(self):
    """Checks encoder param shapes when the relpos bias is shared by layers."""
    transformer = t5_test_utils.make_config2_shared_relative_position_bias()
    inputs = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 7],
        ],
        dtype=np.int32)
    output, variables = transformer.init_with_output(
        random.PRNGKey(0),
        inputs,
        enable_dropout=False,
        method=transformer.encode,
    )
    reformatted = transformer.apply({},
                                    variables['params'],
                                    method=transformer.to_save_format)
    check_params(reformatted, 'encoder_shapes_shared_relpos_bias.json')
    self.assertEqual(output.shape, (2, 4, 13))

    # Convert back to Flax module structure format and test again.
    params2 = transformer.apply({},
                                reformatted,
                                method=transformer.from_save_format)
    output2 = transformer.apply(
        {'params': params2},
        inputs,
        enable_dropout=False,
        method=transformer.encode,
    )
    np.testing.assert_allclose(output, output2, rtol=1e-8)

  def test_encoder_example_packing(self):
    """Packed (segmented) inputs must match the equivalent padded batches."""
    transformer = t5_test_utils.make_config1()
    encoder_input_tokens = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 0],
        ],
        dtype=np.int32)
    output, variables = transformer.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer.encode,
    )

    # Both examples above packed into a single row, with segment ids and
    # per-segment positions marking the boundary.
    encoder_input_tokens_packed = np.array([[101, 183, 20, 75, 101, 392, 19]],
                                           dtype=np.int32)
    encoder_segment_ids = np.array([[0, 0, 0, 0, 1, 1, 1]], dtype=np.int32)
    encoder_input_positions = np.array([[0, 1, 2, 3, 0, 1, 2]], dtype=np.int32)
    output_packed = transformer.apply(
        variables,
        encoder_input_tokens_packed,
        encoder_segment_ids=encoder_segment_ids,
        encoder_positions=encoder_input_positions,
        enable_dropout=False,
        method=transformer.encode,
    )

    # Check that the first element matches, which is entire first batch of the
    # padded setup, and the first 3 "tokens" of the packed example.
    np.testing.assert_allclose(
        output[0, :, :], output_packed[0, 0:4, :], rtol=1e-4)

    # Check that the second element matches, which is the first 3 "tokens" of
    # the padded example's second batch, and the last 3 of tokens the packed
    # example's first batch.
    np.testing.assert_allclose(
        output[1, 0:3, :], output_packed[0, 4:7, :], rtol=1e-4, atol=1e-4)

  def test_scan_and_remat(self):
    """Tests if encoder returns the same output for different scan/remat."""
    encoder_input_tokens = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 7],
        ],
        dtype=np.int32)

    transformer1 = t5_test_utils.make_config1(
        scan_layers=False, layer_remat='none')
    output1, _ = transformer1.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer1.encode,
    )

    transformer2 = t5_test_utils.make_config1(
        scan_layers=False, layer_remat='minimal')
    output2, _ = transformer2.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer2.encode,
    )

    transformer3 = t5_test_utils.make_config1(
        scan_layers=False, layer_remat='full')
    output3, _ = transformer3.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer3.encode,
    )

    transformer4 = t5_test_utils.make_config1(
        scan_layers=True, layer_remat='minimal')
    output4, _ = transformer4.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer4.encode,
    )

    transformer5 = t5_test_utils.make_config1(
        scan_layers=True, layer_remat='full')
    output5, _ = transformer5.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer5.encode,
    )

    # Check scan_layers=False results
    np.testing.assert_allclose(output1, output2, rtol=2e-4)
    np.testing.assert_allclose(output1, output3, atol=1e-5, rtol=1.5e-5)
    # Check scan_layers=True results
    np.testing.assert_allclose(output4, output5, rtol=1.5e-5)

  def test_scan_axis_annotations(self):
    """Checks sharding axis annotations for a scanned encoder."""
    encoder_input_tokens = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 7],
        ],
        dtype=np.int32)
    transformer = t5_test_utils.make_config1(
        scan_layers=True, layer_remat='minimal')
    variables = transformer.init(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer.encode,
    )

    # Check that the code can run when `params_axes` is not mutable too.
    transformer.apply(
        variables,
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer.encode,
    )

    sharding.check_params_and_axis_names_match(variables)
    # NOTE(review): `jax.tree_leaves` is deprecated in newer JAX releases;
    # `jax.tree_util.tree_leaves` is the modern spelling.
    for axis_names in jax.tree_leaves(sharding.get_axis_names(variables)):
      for name in axis_names:
        self.assertIn(
            name, {
                'embed', 'joined_kv', 'heads', 'head_dim', 'relpos_buckets',
                'mlp', 'vocab', 'layers'
            },
            msg='unrecognized axis in variable')

    expected_files.check_params_and_axes(
        variables['params'],
        variables['params_axes'],
        'encoder_scanned_per_layer_relpos_bias.json',
    )

  def test_entire_transformer_shared_embeds(self):
    """Runs a full encoder-decoder forward pass with a shared embedder."""
    encoder_input_tokens = np.zeros((16, 8), dtype=np.float32)
    decoder_input_tokens = np.zeros((16, 8), dtype=np.float32)
    decoder_target_tokens = np.zeros((16, 8), dtype=np.float32)

    transformer = t5_test_utils.make_config3_shared_token_embedder()
    output, variables = transformer.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
    )
    params = variables['params']
    reformatted = transformer.apply({},
                                    params,
                                    method=transformer.to_save_format)
    check_params(reformatted, 'encoder_decoder_shared_embedding_shapes.json')
    self.assertEqual(output.shape, (16, 8, 71))

    # Convert back to Flax module structure format and test again.
    params2 = transformer.apply({},
                                reformatted,
                                method=transformer.from_save_format)
    output2 = transformer.apply(
        {'params': params2},
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
    )
    np.testing.assert_allclose(output, output2, rtol=1e-8)

  def test_axis_names(self):
    """Checks sharding axis annotations on the full (unscanned) model."""
    encoder_input_tokens = np.zeros((16, 8), dtype=np.float32)
    decoder_input_tokens = np.zeros((16, 8), dtype=np.float32)
    decoder_target_tokens = np.zeros((16, 8), dtype=np.float32)

    transformer = t5_test_utils.make_config3_shared_token_embedder()
    # eval_shape avoids materializing parameters; only shapes are needed.
    variables = jax.eval_shape(
        functools.partial(transformer.init, enable_dropout=False),
        random.PRNGKey(0),
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
    )
    sharding.check_params_and_axis_names_match(variables)
    for axis_names in jax.tree_leaves(sharding.get_axis_names(variables)):
      for name in axis_names:
        self.assertIn(
            name, {
                'embed', 'joined_kv', 'heads', 'head_dim', 'relpos_buckets',
                'mlp', 'vocab'
            },
            msg='unrecognized axis in variable')

  def test_sow_intermediates(self):
    """Tests intermediate tracking using `Module.sow` in the EncoderDecoder."""
    encoder_input_tokens = np.zeros((16, 8), dtype=np.float32)
    decoder_input_tokens = np.zeros((16, 8), dtype=np.float32)
    decoder_target_tokens = np.zeros((16, 8), dtype=np.float32)

    transformer = t5_test_utils.make_config3_shared_token_embedder()
    variables = transformer.init(
        random.PRNGKey(0),
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
    )
    _, modified_variables = transformer.apply(
        {'params': variables['params']},
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        mutable='intermediates',
    )
    # Note: the 'intermediates' collection must be set to mutable in order to
    # get the tracked values back in `modified_variables`.

    # Check the shape of tracked intermediates.
    intermediates = modified_variables['intermediates']

    encoder_input_tokens = intermediates['encoder']['input_tokens_ids']
    self.assertLen(encoder_input_tokens, 1)
    self.assertEqual(encoder_input_tokens[0].shape, (16, 8))

    final_encoder_outputs = intermediates['encoder']['final_encoder_outputs']
    self.assertLen(final_encoder_outputs, 1)
    self.assertEqual(final_encoder_outputs[0].shape, (16, 8, 13))

    pre_logits = intermediates['decoder']['pre_logits_layer']
    self.assertLen(pre_logits, 1)
    self.assertEqual(pre_logits[0].shape, (16, 8, 13))

    logits = intermediates['decoder']['logits']
    self.assertLen(logits, 1)
    self.assertEqual(logits[0].shape, (16, 8, 71))

    encoder_embedded_inputs = intermediates['encoder']['embedder']['output']
    self.assertLen(encoder_embedded_inputs, 1)
    self.assertEqual(encoder_embedded_inputs[0].shape, (16, 8, 13))

    decoder_embedded_inputs = intermediates['decoder']['embedder']['output']
    self.assertLen(decoder_embedded_inputs, 1)
    self.assertEqual(decoder_embedded_inputs[0].shape, (16, 8, 13))

    # Per-layer activations are sowed under `layers_<i>` for each stack.
    encoder_num_layers = 3
    decoder_num_layers = 2
    for i in range(encoder_num_layers):
      activations = intermediates['encoder'][f'layers_{i}']['activations']
      self.assertLen(activations, 1)
      self.assertEqual(activations[0].shape, (16, 8, 13))
    for i in range(decoder_num_layers):
      activations = intermediates['decoder'][f'layers_{i}']['activations']
      self.assertLen(activations, 1)
      self.assertEqual(activations[0].shape, (16, 8, 13))

  def test_sow_intermediates_with_scan_model(self):
    """Tests if we obtain intermediates when using scan."""
    rs = np.random.RandomState(0)
    encoder_input_tokens = rs.randint(0, 71, size=(16, 8), dtype=np.int32)
    decoder_input_tokens = rs.randint(0, 71, size=(16, 7), dtype=np.int32)
    decoder_target_tokens = rs.randint(0, 71, size=(16, 7), dtype=np.int32)

    model = t5_test_utils.make_config1(
        scan_layers=True, layer_remat='full', sow_intermediates=True)
    variables = model.init(
        random.PRNGKey(0),
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
    )
    _, modified_variables = model.apply(
        {'params': variables['params']},
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        mutable=['intermediates'])

    # Under scan, the layer dimension is stacked into a single sowed array.
    intermediates = modified_variables['intermediates']
    encoder_layer_outputs = intermediates['encoder']['encoder']['activations']
    # Shape: [batch_size, seq_len, num_layers, hidden_size]
    self.assertEqual(encoder_layer_outputs[0].shape, (16, 8, 3, 13))
    decoder_layer_outputs = intermediates['decoder']['decoder']['activations']
    # Shape: [batch_size, seq_len, num_layers, hidden_size]
    self.assertEqual(decoder_layer_outputs[0].shape, (16, 7, 2, 13))

  def test_capture_input_gradients(self):
    """Tests that the input grads are captured."""
    rs = np.random.RandomState(0)  # Need nonzero inputs to get nonzero grads.
    encoder_input_tokens = rs.randint(0, 71, size=(16, 8), dtype=np.int32)
    decoder_input_tokens = rs.randint(0, 71, size=(16, 8), dtype=np.int32)
    decoder_target_tokens = rs.randint(0, 71, size=(16, 8), dtype=np.int32)

    transformer = t5_test_utils.make_config3_shared_token_embedder()
    variables = transformer.init(
        random.PRNGKey(0),
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
    )
    # On initialization there should be empty grads.
    self.assertContainsSubset(('grads',), variables)

    def fake_loss(variables, encoder_input_tokens, decoder_input_tokens,
                  decoder_target_tokens):
      """Returns a loss."""
      output, _ = transformer.apply(
          variables,
          encoder_input_tokens=encoder_input_tokens,
          decoder_input_tokens=decoder_input_tokens,
          decoder_target_tokens=decoder_target_tokens,
          enable_dropout=False,
          mutable=['grads'])  # Needed to enable gradient capture.
      return output.sum()

    grad_fn = jax.grad(fake_loss)
    grads_variables = grad_fn(variables, encoder_input_tokens,
                              decoder_input_tokens, decoder_target_tokens)
    grads = grads_variables['grads']

    encoder_embedder_grad = grads['encoder']['embedder']['output_grad']
    self.assertEqual(encoder_embedder_grad.shape, (16, 8, 13))
    self.assertNotAlmostEqual(encoder_embedder_grad.sum(), 0.0)

    decoder_embedder_grad = grads['decoder']['embedder']['output_grad']
    self.assertEqual(decoder_embedder_grad.shape, (16, 8, 13))
    self.assertNotAlmostEqual(decoder_embedder_grad.sum(), 0.0)
class DecoderOnlyTest(absltest.TestCase):
  """Tests for the `DecoderOnly` architecture."""

  def test_decoder_shapes_per_layer_relpos_bias(self):
    """Tests if the decoder parameter have the expected shapes."""
    decoder = t5_test_utils.test_make_decoder_only1()
    inputs = np.array(
        [
            # Batch 1.
            [183, 20, 75],
            # Batch 2.
            [392, 19, 7],
        ],
        dtype=np.int32)
    output, variables = decoder.init_with_output(
        random.PRNGKey(0),
        decoder_input_tokens=inputs,
        decoder_target_tokens=inputs,  # used for mask generation
        enable_dropout=False,
    )
    params = variables['params']
    reformatted = decoder.apply({}, params, method=decoder.to_save_format)
    check_params(reformatted, 'decoder_shapes_per_layer_relpos_bias.json')
    self.assertEqual(output.shape, (2, 3, 4))

    # Convert back to Flax module structure format and test again.
    params2 = decoder.apply({}, reformatted, method=decoder.from_save_format)
    output2 = decoder.apply(
        {'params': params2},
        decoder_input_tokens=inputs,
        decoder_target_tokens=inputs,  # used for mask generation
        enable_dropout=False,
    )
    np.testing.assert_allclose(output, output2, rtol=1e-8)

  def test_decoder_shapes_fused_parallel(self):
    """Tests if the decoder parameter have the expected shapes."""
    decoder = t5_test_utils.make_parallel_fused_transformer_config()
    inputs = np.array(
        [
            # Batch 1.
            [183, 20, 75],
            # Batch 2.
            [392, 19, 7],
        ],
        dtype=np.int32,
    )
    output, variables = decoder.init_with_output(
        random.PRNGKey(0),
        decoder_input_tokens=inputs,
        decoder_target_tokens=inputs,  # used for mask generation
        enable_dropout=False,
    )
    params = variables['params']
    reformatted = decoder.apply({}, params, method=decoder.to_save_format)
    check_params(reformatted, 'decoder_shapes_fused_parallel.json')
    self.assertEqual(output.shape, (2, 3, 4))

    # Convert back to Flax module structure format and test again.
    params2 = decoder.apply({}, reformatted, method=decoder.from_save_format)
    output2 = decoder.apply(
        {'params': params2},
        decoder_input_tokens=inputs,
        decoder_target_tokens=inputs,  # used for mask generation
        enable_dropout=False,
    )
    # Cast to float32 before comparing (the model may compute in bfloat16).
    output = output.astype(np.float32)
    output2 = output2.astype(np.float32)
    np.testing.assert_allclose(output, output2, rtol=1e-8)

  def test_decoder_shapes_explicit_attention_map(self):
    """Tests if the decoder parameter have the expected shapes."""
    decoder = t5_test_utils.make_parallel_fused_transformer_config()
    inputs = np.array(
        [
            # Batch 1.
            [183, 20, 75],
            # Batch 2.
            [392, 19, 7],
        ],
        dtype=np.int32,
    )
    output, variables = decoder.init_with_output(
        random.PRNGKey(0),
        decoder_input_tokens=inputs,
        decoder_target_tokens=None,  # not needed if attention mask is provided.
        # By specifying the attention mask explicitly we can mix e.g., prefix
        # LM with bidirectional LM, as done below.
        decoder_attention_mask=[
            [[
                [1, 1, 0],
                [1, 1, 0],
                [1, 1, 1],
            ]],
            [[
                [1, 0, 1],
                [1, 1, 0],
                [1, 1, 1],
            ]],
        ],
        enable_dropout=False,
    )
    params = variables['params']
    reformatted = decoder.apply({}, params, method=decoder.to_save_format)
    check_params(reformatted, 'decoder_shapes_fused_parallel.json')
    self.assertEqual(output.shape, (2, 3, 4))
# Run all tests when executed as a script.
if __name__ == '__main__':
  absltest.main()
# ---- dataset metadata (extraction artifact): | 21,698 | 35.653716 | 83 | py | ----
# Source: flaxformer/architectures/t5/t5_common_layers_test.py
# Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t5_layers."""
from absl.testing import absltest
from flax import linen as nn
from jax import numpy as jnp
from jax import random
import numpy as np
from flaxformer.architectures.t5 import t5_common_layers
from flaxformer.components import embedding
# Tiny model hyperparameters used throughout these tests (kept small so the
# tests initialize and run quickly).
EMBEDDING_DIM = 7
MLP_DIM = 32
NUM_HEADS = 2
NUM_LAYERS = 3
ACTIVATIONS = ('gelu',)
DROPOUT_RATE = 0.14
HEAD_DIM = 4
class T5BaseTest(absltest.TestCase):
  """Tests for the factory helpers in `t5_common_layers`."""

  def test_encoder_layer(self):
    """Checks wiring of dims, activations and dropout in an encoder layer."""
    layer = t5_common_layers.encoder_layer(
        num_heads=NUM_HEADS,
        head_dim=HEAD_DIM,
        mlp_dim=MLP_DIM,
        activations=ACTIVATIONS,
        dropout_rate=DROPOUT_RATE)
    inputs = np.array(
        [
            # Batch 1.
            [[101, 183, 20, 75, 10]],
            # Batch 2.
            [[101, 392, 19, 7, 20]],
        ],
        dtype=np.int32)
    _, variables = layer.init_with_output(
        random.PRNGKey(0),
        inputs,
        enable_dropout=False,
    )

    input_inner_dim = 5
    # Validate that the QKV dims are being set appropriately.
    attention_params = variables['params']['attention']
    expected_qkv_shape = [input_inner_dim, HEAD_DIM * NUM_HEADS]
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['query']['kernel']))
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['key']['kernel']))
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['value']['kernel']))

    # Validate that the MLP dims are being set appropriately.
    mlp_params = variables['params']['mlp']
    np.testing.assert_equal([input_inner_dim, MLP_DIM],
                            np.shape(mlp_params['wi']['kernel']))
    np.testing.assert_equal([MLP_DIM, input_inner_dim],
                            np.shape(mlp_params['wo']['kernel']))

    # Validate that the activations are being set.
    self.assertEqual(ACTIVATIONS, layer.mlp.activations)

    # Validate the dropout rate is being respected.
    self.assertEqual(DROPOUT_RATE, layer.attention.dropout_rate)
    self.assertEqual(DROPOUT_RATE, layer.mlp.intermediate_dropout_rate)
    self.assertEqual(0.0, layer.mlp.final_dropout_rate)

  def test_decoder_layer(self):
    """Checks wiring of dims, activations and dropout in a decoder layer."""
    layer = t5_common_layers.decoder_layer(
        num_heads=NUM_HEADS,
        head_dim=HEAD_DIM,
        mlp_dim=MLP_DIM,
        activations=ACTIVATIONS,
        dropout_rate=DROPOUT_RATE)
    targets = np.array(
        # Batch 1.
        [
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]],
            # Batch 2.
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]]
        ],
        dtype=np.float32)
    encoded = np.array(
        # Batch 1.
        [
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
            # Batch 2.
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]
        ],
        dtype=np.float32)
    _, variables = layer.init_with_output(
        random.PRNGKey(0),
        targets,
        enable_dropout=False,
        encoded=encoded,
    )

    input_inner_dim = 2
    # Validate that the QKV dims are being set appropriately.
    expected_qkv_shape = [input_inner_dim, HEAD_DIM * NUM_HEADS]
    attention_params = variables['params']['self_attention']
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['query']['kernel']))
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['key']['kernel']))
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['value']['kernel']))

    attention_params = variables['params']['encoder_decoder_attention']
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['query']['kernel']))
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['key']['kernel']))
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['value']['kernel']))

    # Validate that the MLP dims are being set appropriately.
    mlp_params = variables['params']['mlp']
    np.testing.assert_equal([input_inner_dim, MLP_DIM],
                            np.shape(mlp_params['wi']['kernel']))
    np.testing.assert_equal([MLP_DIM, input_inner_dim],
                            np.shape(mlp_params['wo']['kernel']))

    # Validate that the activations are being set.
    self.assertEqual(ACTIVATIONS, layer.mlp.activations)

    # Validate the dropout rate is being respected.
    self.assertEqual(DROPOUT_RATE, layer.self_attention.dropout_rate)
    self.assertEqual(DROPOUT_RATE, layer.mlp.intermediate_dropout_rate)
    self.assertEqual(0.0, layer.mlp.final_dropout_rate)

  def test_encoder(self):
    """Checks an encoder stack built with a shared token embedder."""
    shared_embedder = embedding.Embed(
        num_embeddings=5,
        features=EMBEDDING_DIM,
        cast_input_dtype=jnp.int32,
        dtype=jnp.float32,
        attend_dtype=jnp.float32,  # for logit training stability
        embedding_init=nn.initializers.normal(stddev=1.0),
        name='token_embedder')
    layer = t5_common_layers.encoder(
        num_heads=NUM_HEADS,
        head_dim=HEAD_DIM,
        mlp_dim=MLP_DIM,
        num_layers=NUM_LAYERS,
        shared_token_embedder=shared_embedder,
        activations=ACTIVATIONS,
        dropout_rate=DROPOUT_RATE)
    inputs = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 7],
        ],
        dtype=np.int32)
    _, variables = layer.init_with_output(
        random.PRNGKey(0),
        inputs,
        enable_dropout=False,
    )

    # Validate that there are 3 encoder layers.
    self.assertContainsSubset(['layers_0', 'layers_1', 'layers_2'],
                              list(variables['params'].keys()))

    # Validate that the QKV dims are being passed appropriately.
    attention_params = variables['params']['layers_2']['attention']
    expected_qkv_shape = [EMBEDDING_DIM, HEAD_DIM * NUM_HEADS]
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['query']['kernel']))
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['key']['kernel']))
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['value']['kernel']))

    # Validate that the MLP dims are being passed appropriately.
    mlp_params = variables['params']['layers_2']['mlp']
    np.testing.assert_equal([EMBEDDING_DIM, MLP_DIM],
                            np.shape(mlp_params['wi']['kernel']))
    np.testing.assert_equal([MLP_DIM, EMBEDDING_DIM],
                            np.shape(mlp_params['wo']['kernel']))

  def test_decoder(self):
    """Checks a decoder stack built with a shared token embedder."""
    shared_embedder = embedding.Embed(
        num_embeddings=5,
        features=EMBEDDING_DIM,
        cast_input_dtype=jnp.int32,
        dtype=jnp.float32,
        attend_dtype=jnp.float32,  # for logit training stability
        embedding_init=nn.initializers.normal(stddev=1.0),
        name='token_embedder')
    layer = t5_common_layers.decoder(
        num_heads=NUM_HEADS,
        head_dim=HEAD_DIM,
        mlp_dim=MLP_DIM,
        num_layers=NUM_LAYERS,
        shared_token_embedder=shared_embedder,
        activations=('relu',),
        dropout_rate=0.1)
    decoder_input_tokens = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75, 10],
            # Batch 2.
            [101, 392, 19, 7, 20],
        ],
        dtype=np.int32)
    encoder_outputs = np.array(
        # Batch 1.
        [
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]],
            # Batch 2.
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]]
        ],
        dtype=np.float32)
    _, variables = layer.init_with_output(
        random.PRNGKey(0),
        encoder_outputs,
        decoder_input_tokens,
        enable_dropout=False,
    )

    # Validate that there are 3 encoder layers.
    self.assertContainsSubset(['layers_0', 'layers_1', 'layers_2'],
                              list(variables['params'].keys()))

    # Validate that the QKV dims are being passed appropriately.
    expected_qkv_shape = [EMBEDDING_DIM, HEAD_DIM * NUM_HEADS]
    attention_params = variables['params']['layers_2']['self_attention']
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['query']['kernel']))
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['key']['kernel']))
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['value']['kernel']))

    # Cross-attention keys/values project from the encoder output dim (2).
    encoder_inner_dim = 2
    expected_encoder_kv_shape = [encoder_inner_dim, HEAD_DIM * NUM_HEADS]
    attention_params = variables['params']['layers_2'][
        'encoder_decoder_attention']
    np.testing.assert_equal(expected_qkv_shape,
                            np.shape(attention_params['query']['kernel']))
    np.testing.assert_equal(expected_encoder_kv_shape,
                            np.shape(attention_params['key']['kernel']))
    np.testing.assert_equal(expected_encoder_kv_shape,
                            np.shape(attention_params['value']['kernel']))

    # Validate that the MLP dims are being passed appropriately.
    mlp_params = variables['params']['layers_2']['mlp']
    np.testing.assert_equal([EMBEDDING_DIM, MLP_DIM],
                            np.shape(mlp_params['wi']['kernel']))
    np.testing.assert_equal([MLP_DIM, EMBEDDING_DIM],
                            np.shape(mlp_params['wo']['kernel']))
# Run all tests when executed as a script.
if __name__ == '__main__':
  absltest.main()
# ---- dataset metadata (extraction artifact): | 10,371 | 36.444043 | 74 | py | ----
# Source: flaxformer/architectures/t5/t5_architecture.py
# Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains "architecture" classes for T5 models.
These are combinators which assemble components (LayerNorm, MLP, etc.) into
networks.
"""
from __future__ import annotations
import enum
import inspect
from typing import Any, Callable, Optional, Tuple, Union
from flax import linen as nn
import jax
import jax.numpy as jnp
from typing_extensions import Protocol
from flaxformer import activation_partitioning
from flaxformer import transformer_common as common
from flaxformer.architectures.common import param_remapping
from flaxformer.components import embedding
from flaxformer.components import relative_position_biases
from flaxformer.components import rich_attention_position_scores
from flaxformer.components import transforms
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
from flaxformer.types import DType
# pylint: disable=not-callable
# pytype: disable=not-callable
class MakeEncoderLayerFn(Protocol):
  """Signature for functions that make an encoder layer.

  Structural (duck-typed) interface: any callable with this keyword-only
  signature conforms; no explicit subclassing is required.
  """

  def __call__(
      self, *,
      shared_relative_position_bias: Optional[nn.Module]) -> EncoderLayer:
    """Makes an encoder layer.

    Args:
      shared_relative_position_bias: Relative position bias shared for all
        layers within the encoder, which is the result of calling
        `shared_relative_position_bias_factory` at the top-level model. Due to
        Flax limitations, we need to pass this in as an attribute to modules.
        Please use this argument instead of using a Python closure.

    Returns:
      Encoder layer.
    """
class MakeDecoderLayerFn(Protocol):
  """Signature for functions that make a decoder layer.

  Structural (duck-typed) interface: any callable with this keyword-only
  signature conforms; no explicit subclassing is required.
  """

  def __call__(
      self, *,
      shared_relative_position_bias: Optional[nn.Module]) -> DecoderLayer:
    """Makes a decoder layer.

    Args:
      shared_relative_position_bias: Relative position bias shared for all
        layers within the decoder, which is the result of calling
        `shared_relative_position_bias_factory` at the top-level model. Due to
        Flax limitations, we need to pass this in as an attribute to modules.
        Please use this argument instead of using a Python closure.

    Returns:
      Decoder layer.
    """
class MakeEncoderFn(Protocol):
  """Signature for functions that will make a low-level Encoder.

  Structural (duck-typed) interface for encoder factories used by the
  top-level encoder-decoder model.
  """

  def __call__(
      self,
      *,
      shared_token_embedder: Optional[embedding.Embedder[Array]] = None,
      spmd_annotations: Any = None,
  ) -> Encoder:
    """Makes a low-level Encoder instance.

    Args:
      shared_token_embedder: Shared token embedder instance, which should be
        passed to the returned module. If this is non-None, you should use it
        instead of providing your own token embedder.
      spmd_annotations: Optional SPMD annotations for scanned layers.

    Returns:
      Encoder instance.
    """
class MakeDecoderFn(Protocol):
  """Signature for functions that will make a low-level Decoder.

  Structural (duck-typed) interface for decoder factories used by the
  top-level encoder-decoder model.
  """

  def __call__(
      self,
      *,
      shared_token_embedder: Optional[embedding.Embedder[Array]] = None,
      spmd_annotations: Any = None,
  ) -> Decoder:
    """Makes a low-level Decoder instance.

    Args:
      shared_token_embedder: Shared token embedder instance, which should be
        passed to the returned module. If this is non-None, you should use it
        instead of providing your own token embedder.
      spmd_annotations: Optional SPMD annotations for scanned layers.

    Returns:
      Decoder instance.
    """
@enum.unique
class LayerRemat(enum.Enum):
  """How to apply per-layer jax.remat for recomputation in the backward pass.

  Attributes:
    NONE: No use of jax.remat.
    LEGACY: Reverts to prior behavior for compatibility with existing configs,
      i.e., use FULL when scanning over layers and NONE otherwise.
    FULL: Recompute the whole layer in backprop.
    MINIMAL: Recompute only non-matmul ops in backprop.
  """
  NONE = 'none'
  LEGACY = 'legacy'
  FULL = 'full'
  MINIMAL = 'minimal'


# Callers may pass either the enum member or its string value.
_LayerRematOrStr = Union[LayerRemat, str]


def maybe_remat(
    lyrf: Callable[[], nn.Module],
    layer_remat: _LayerRematOrStr,
    scan_layers: bool,
    static_argnums: Tuple[int, ...],
) -> Callable[[], nn.Module]:
  """Maybe apply jax.remat with the indicated policy to a layer factory.

  Args:
    lyrf: Encoder or decoder layer factory.
    layer_remat: Config for per-layer remat.
    scan_layers: Whether to use jax.lax.scan for the stack of layers.
    static_argnums: The static_argnums to use for the jax.remat call.

  Returns:
    Potentially remat-wrapped layer factory.
  """
  # TODO: remove this conversion after all callers use the enum
  layer_remat = LayerRemat(layer_remat)
  # LEGACY resolves to a concrete mode based on whether layers are scanned.
  if layer_remat == LayerRemat.LEGACY:
    layer_remat = LayerRemat.FULL if scan_layers else LayerRemat.NONE
  # No remat requested: return the factory untouched.
  if layer_remat == LayerRemat.NONE:
    return lyrf
  # Map the remaining concrete modes to their jax.checkpoint policies.
  try:
    remat_policy = {
        LayerRemat.FULL:
            None,
        LayerRemat.MINIMAL:
            jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims,
    }[layer_remat]
  except KeyError:
    # Unreachable today (the enum coercion above validates), kept defensively.
    raise ValueError(f'Unknown LayerRemat value: {layer_remat}')
  return transforms.factory_remat(
      lyrf,
      concrete=False,
      prevent_cse=False,
      static_argnums=static_argnums,
      policy=remat_policy,
  )
class EncoderLayer(nn.Module, param_remapping.ParameterRemappable):
  """Transformer encoder layer.

  Attributes:
    attention: The attention module.
    mlp: The MLP module, applied after attention.
    dropout_factory: A callable that returns a new dropout instance. This is
      applied after the attention module.
    layer_norm_factory: A callable that returns a new layer norm. This is
      applied before the attention module and before the MLP.
    relative_position_bias_factory: A callable that returns relative position
      bias instances. This should only be used for per-layer relative position
      biases; please use `shared_relative_position_bias` if they are shared
      among layers.
    shared_relative_position_bias: Shared relative position bias module, usually
      owned by the Encoder.
    activation_partitioning_dims: When set to 2, partitions intermediate
      variables containing the input and output of the encoder layer.
    parallel: whether to call attention and mlp in parallel
    sow_intermediates: whether to track intermediates using Module.sow.
    scanned: whether this layer is being scanned over.
  """
  attention: nn.Module
  mlp: nn.Module
  dropout_factory: Callable[[], nn.Module]
  layer_norm_factory: Callable[[], nn.Module]
  relative_position_bias_factory: Optional[Callable[[], nn.Module]] = None
  shared_relative_position_bias: Optional[nn.Module] = None
  activation_partitioning_dims: int = 1
  parallel: bool = False
  sow_intermediates: bool = False
  scanned: bool = False

  def setup(self):
    # Per-layer and shared relative position biases are mutually exclusive.
    if (self.relative_position_bias_factory is not None and
        self.shared_relative_position_bias is not None):
      raise ValueError(
          'Please set at most one of relative_position_bias_factory and'
          ' shared_relative_position_bias. (They can both be None however, e.g.'
          ' for absolute position embeds.)'
      )
    self.relpos_bias = (
        self.relative_position_bias_factory()
        if self.relative_position_bias_factory is not None else
        self.shared_relative_position_bias)

    if self.parallel:
      # Parallel formulation: a single layer norm and dropout wrap the
      # combined attention + MLP computation.
      self.layer_norm = self.layer_norm_factory()
      self.dropout = self.dropout_factory()
    else:
      # Sequential formulation: separate norms/dropouts around attention
      # and around the MLP.
      self.pre_attention_layer_norm = self.layer_norm_factory()
      self.pre_mlp_layer_norm = self.layer_norm_factory()
      self.post_attention_dropout = self.dropout_factory()
      self.post_mlp_dropout = self.dropout_factory()

  def get_bias(self, layer_input: Array) -> Optional[Array]:
    """Computes the self-attention position bias, or None if not configured."""
    if not self.relpos_bias:
      return None
    if isinstance(
        self.relpos_bias, relative_position_biases.RelativePositionBiases
    ):
      # Bucketed T5-style biases only need the query/key lengths.
      encoder_bias = self.relpos_bias(
          layer_input.shape[-2], layer_input.shape[-2], bidirectional=True
      )
    elif isinstance(
        self.relpos_bias, rich_attention_position_scores.RichAttentionApi
    ):
      # Rich attention biases are computed from the activations themselves.
      encoder_bias = self.relpos_bias(
          layer_input, layer_input, bidirectional=True
      )
    else:
      raise TypeError(
          f'{type(self.relpos_bias)} is not a supported relative position '
          f'bias factory.\nInstance value: {self.relpos_bias}'
      )
    return encoder_bias

  def __call__(self,
               inputs: Array,
               encoder_mask: Optional[Array] = None,
               *,
               logit_mask: Optional[Array] = None,
               enable_dropout: bool = True):
    """Applies a single T5 encoder layer.

    Args:
      inputs: input data [batch, length, emb_dim].
      encoder_mask: encoder self-attention mask.
      logit_mask: encoder logits mask.
      enable_dropout: Enables dropout if set to True.

    Returns:
      output after transformer encoder block.
    """
    layer_input = inputs
    del inputs
    # Shared relative position embedding attention biases.

    assert layer_input.ndim == 3
    layer_input = activation_partitioning.with_sharding_migration(
        layer_input,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    if self.parallel:
      x = self.layer_norm(layer_input)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))
      encoder_bias = self.get_bias(layer_input=x)

      y = (
          self.attention(
              x, x, encoder_mask, encoder_bias, enable_dropout=enable_dropout) +
          self.mlp(x, enable_dropout=enable_dropout))
      # Rescale the sum of the two parallel branches by 1/sqrt(2).
      y *= 2**-0.5
      y = layer_input + self.dropout(y, deterministic=not enable_dropout)

    else:
      # Attention block.
      x = self.pre_attention_layer_norm(layer_input)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      if logit_mask is not None:
        x = logit_mask * x

      encoder_bias = self.get_bias(layer_input=x)

      # The shape should be maintained for the residual connection.
      # [batch, length, emb_dim] -> [batch, length, emb_dim]
      x = self.attention(
          x, x, encoder_mask, encoder_bias, enable_dropout=enable_dropout)
      x = layer_input + self.post_attention_dropout(
          x, deterministic=not enable_dropout)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # MLP block.
      y = self.pre_mlp_layer_norm(x)
      y = activation_partitioning.with_sharding_migration(
          y,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      if logit_mask is not None:
        y = logit_mask * y

      # [batch, length, emb_dim] -> [batch, length, emb_dim]
      y = self.mlp(y, enable_dropout=enable_dropout)
      y = x + self.post_mlp_dropout(y, deterministic=not enable_dropout)
    y = activation_partitioning.with_sharding_migration(
        y,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    if self.sow_intermediates:
      self.sow('intermediates', 'activations', y)

    # scan expects functions to have a signature: fn(carry, in) --> carry, out
    # TODO: automate this detail.
    if self.scanned:
      return y, None
    else:
      return y
class DecoderLayer(nn.Module, param_remapping.ParameterRemappable):
  """Transformer encoder-decoder layer.

  Attributes:
    self_attention: An instance of a self-attention module.
    encoder_decoder_attention: Encoder-decoder attention module. This must be
      non-None if attending to encoded representations.
    mlp: The MLP module, applied after both attention modules.
    dropout_factory: A callable that returns a new dropout instance. This is
      applied after the attention module.
    layer_norm_factory: A callable that returns a new layer norm. This is
      applied before the attention module and before the MLP.
    relative_position_bias_factory: A callable that returns relative position
      bias instances. This should only be used for per-layer relative position
      biases; please use `shared_relative_position_bias` if they are shared
      among layers.
    shared_relative_position_bias: An instance of a shared relative position
      bias module, usually owned by the Decoder.
    activation_partitioning_dims: When set to 2, partitions intermediate
      variables containing the input and output of the decoder layer.
    parallel: whether to call attention and mlp in parallel
    sow_intermediates: whether to track intermediates using Module.sow.
    scanned: whether this layer is being scanned over.
  """
  self_attention: nn.Module
  encoder_decoder_attention: Optional[nn.Module]
  mlp: nn.Module
  dropout_factory: Callable[[], nn.Module]
  layer_norm_factory: Callable[[], nn.Module]
  relative_position_bias_factory: Optional[Callable[[], nn.Module]] = None
  shared_relative_position_bias: Optional[nn.Module] = None
  activation_partitioning_dims: int = 1
  parallel: bool = False
  sow_intermediates: bool = False
  scanned: bool = False

  def setup(self):
    # Per-layer and shared relative position biases are mutually exclusive.
    if (self.relative_position_bias_factory is not None and
        self.shared_relative_position_bias is not None):
      raise ValueError(
          'Please set at most one of relative_position_bias_factory and'
          ' shared_relative_position_bias. (They can both be None however, e.g.'
          ' for absolute position embeds.)'
      )
    self.relpos_bias = (
        self.relative_position_bias_factory()
        if self.relative_position_bias_factory is not None else
        self.shared_relative_position_bias)

    if self.parallel:
      # Parallel formulation: a single layer norm and dropout wrap the
      # combined self-attention + cross-attention + MLP computation.
      self.layer_norm = self.layer_norm_factory()
      self.dropout = self.dropout_factory()
    else:
      # Sequential formulation: separate norms/dropouts for each sub-block.
      self.pre_self_attention_layer_norm = self.layer_norm_factory()
      self.post_self_attention_dropout = self.dropout_factory()
      self.pre_cross_attention_layer_norm = self.layer_norm_factory()
      self.post_cross_attention_dropout = self.dropout_factory()
      self.pre_mlp_layer_norm = self.layer_norm_factory()
      self.post_mlp_dropout = self.dropout_factory()

  def get_bias(self, max_decode_length: Optional[int], decode: bool,
               layer_input: Array,
               encoded: Array) -> Tuple[Optional[Array], Optional[Array]]:
    """Computes (self-attention bias, cross-attention bias); None if unset."""
    if not self.relpos_bias:
      return None, None
    # NOTE(review): the encoder path checks RelativePositionBiases while this
    # checks RelativeAttentionAPI — confirm this asymmetry is intentional.
    if isinstance(
        self.relpos_bias, relative_position_biases.RelativeAttentionAPI
    ):
      if max_decode_length:
        relpos_length = max_decode_length
      else:
        relpos_length = layer_input.shape[-2]

      # during decoding, the layer will be called with decode=True first to
      # initialize the decoder cache, including a cached relpos bias cache.
      # the prefill codepath will call this once again with decode=False,
      # which is slightly wasteful but generally harmless. During subsequent
      # decode steps, this will be called with decode=True and will reuse the
      # cached bias. this significantly improves performance during decoding
      # with many decode steps.
      decoder_bias = self.relpos_bias(
          relpos_length, relpos_length, False, decode=decode
      )
      encoder_decoder_bias = None
    elif isinstance(
        self.relpos_bias, rich_attention_position_scores.RichAttentionApi
    ):
      # Rich attention biases are computed from the activations themselves.
      decoder_bias = self.relpos_bias(
          layer_input,
          layer_input,
          bidirectional=False,
          is_cross_attention=False,
      )
      encoder_decoder_bias = self.relpos_bias(
          layer_input, encoded, bidirectional=False, is_cross_attention=True
      )
    else:
      raise TypeError(
          f'{type(self.relpos_bias)} is not a supported relative position '
          f'bias factory.\nInstance value: {self.relpos_bias}'
      )
    return decoder_bias, encoder_decoder_bias

  def _create_residuals_and_queries(self, layer_input: Array, x: Array,
                                    logit_mask: Array,
                                    **kwargs) -> Tuple[Array, Array, Array]:
    """Slice layer inputs to get versions to use as queries."""
    # This is a no-op unless overridden by a subclass.
    return layer_input, x, logit_mask

  def __call__(self,
               targets,
               encoded,
               decoder_mask=None,
               encoder_decoder_mask=None,
               *,
               logit_mask=None,
               enable_dropout: bool = True,
               decode: bool = False,
               max_decode_length: Optional[int] = None,
               prefill: bool = False,
               prefill_lengths: Optional[Array] = None,
               **kwargs):
    """Applies a single T5 decoder layer.

    Args:
      targets: Input data for decoder with shape [batch_size,
        decoder_seq_length, decoder_hidden_size].
      encoded: Input data from encoder with shape [batch_size,
        encoder_seq_length, decoder_hidden_size]. If None, block is Decoder
        only.
      decoder_mask: decoder self-attention mask.
      encoder_decoder_mask: encoder-decoder attention mask with shape [
        batch_size, 1, decoder_seq_length, encoder_seq_length].
      logit_mask: a mask (e.g., padding logit mask) to be applied to the
        attention logits.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.
      **kwargs: Remaining keyword arguments. Passed to
        _create_residuals_and_queries.

    Returns:
      output after transformer encoder-decoder block.
    """
    layer_input = targets
    del targets

    # Decoder block.
    assert layer_input.ndim == 3
    layer_input = activation_partitioning.with_sharding_migration(
        layer_input,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))

    if prefill and prefill_lengths is None:
      # Figure out how far each element in the batch fills the cache based
      # on the mask. We index each element in the batch, the first head
      # dim (because this is always set to one), and the first query
      # vector. If there is any prefix at all, the first element in the
      # prefix would be part of it.
      prefill_lengths = jnp.sum(
          decoder_mask[:, 0, 0, :], axis=-1).astype(jnp.int32)

    if self.parallel:
      x = self.layer_norm(
          layer_input,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # Normally a no-op unless overridden by a subclass.
      layer_input_residual, x_queries, logit_mask_queries = (
          self._create_residuals_and_queries(layer_input, x, logit_mask,
                                             **kwargs))

      # Shared relative position embedding attention biases.
      decoder_bias, encoder_decoder_bias = self.get_bias(
          max_decode_length, decode, layer_input=x, encoded=encoded)

      y = (
          self.self_attention(
              x_queries,
              x,
              decoder_mask,
              decoder_bias,
              enable_dropout=enable_dropout,
              decode=decode,
              prefill=prefill,
              prefill_lengths=prefill_lengths) + self.mlp(
                  x,
                  decode=decode,
                  prefill=prefill,
                  prefill_lengths=prefill_lengths,
                  enable_dropout=enable_dropout))
      if encoded is not None:
        y += self.encoder_decoder_attention(
            x,
            encoded,
            encoder_decoder_mask,
            encoder_decoder_bias,
            enable_dropout=enable_dropout)
      # Rescale the summed parallel branches by 1/sqrt(#branches).
      y *= (3 if encoded is not None else 2)**-0.5
      z = layer_input_residual + self.dropout(
          y, deterministic=not enable_dropout)
    else:
      # layer_input is derived from decoder_input_tokens.
      x = self.pre_self_attention_layer_norm(
          layer_input,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # Normally a no-op unless overridden by a subclass.
      layer_input_residual, x_queries, logit_mask_queries = (
          self._create_residuals_and_queries(layer_input, x, logit_mask,
                                             **kwargs))

      if logit_mask is not None:
        # When using QKV fusion, x and x_queries must be the exact same
        # Python object, so reuse the object if possible.
        if x is x_queries and logit_mask is logit_mask_queries:
          x = logit_mask * x
          x_queries = x
        else:
          x = logit_mask * x
          x_queries = logit_mask_queries * x_queries

      # Shared relative position embedding attention biases.
      decoder_bias, encoder_decoder_bias = self.get_bias(
          max_decode_length, decode, layer_input=x, encoded=encoded)

      # The first and second arguments to the attention are the same,
      # i.e., this is a self-attention layer.
      x = self.self_attention(
          x_queries,
          x,
          decoder_mask,
          decoder_bias,
          enable_dropout=enable_dropout,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
      x = layer_input_residual + self.post_self_attention_dropout(
          x, deterministic=not enable_dropout)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # Encoder-Decoder block.
      if encoded is None:
        # If encoder outputs not provided, skip attending from decoder to
        # encoder. This results in a decoder only block.
        y = x
      else:
        if self.encoder_decoder_attention is None:
          raise ValueError('Expected encoder_decoder_attention to be populated '
                           'when called with `encoded` inputs.')
        y = self.pre_cross_attention_layer_norm(
            x, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths)
        y = activation_partitioning.with_sharding_migration(
            y,
            self.activation_partitioning_dims,
            logical_axis_names=('batch', 'length', 'embed'))

        if logit_mask is not None:
          y = logit_mask_queries * y

        y = self.encoder_decoder_attention(
            y,
            encoded,
            encoder_decoder_mask,
            encoder_decoder_bias,
            enable_dropout=enable_dropout)
        y = x + self.post_cross_attention_dropout(
            y, deterministic=not enable_dropout)
        y = activation_partitioning.with_sharding_migration(
            y,
            self.activation_partitioning_dims,
            logical_axis_names=('batch', 'length', 'embed'))

      # MLP block.
      z = self.pre_mlp_layer_norm(
          y, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths)
      z = activation_partitioning.with_sharding_migration(
          z,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))
      if logit_mask is not None:
        z = logit_mask_queries * z
      z = self.mlp(
          z,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths,
          enable_dropout=enable_dropout)
      z = y + self.post_mlp_dropout(z, deterministic=not enable_dropout)
    z = activation_partitioning.with_sharding_migration(
        z,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    if self.sow_intermediates:
      self.sow('intermediates', 'activations', z)

    # scan expects functions to have a signature: fn(carry, in) --> carry, out
    # TODO: automate this detail.
    if self.scanned:
      return z, None
    else:
      return z
class Encoder(nn.Module, param_remapping.ParameterRemappable):
  """A stack of encoder layers.

  Attributes:
    layer_factory: A callable that returns an EncoderLayer.
    input_dropout_factory: A callable that returns the dropout to apply to the
      input.
    output_dropout_factory: A callable that returns the dropout to apply to the
      output. Perhaps for legacy rather than essential reasons, the broadcasting
      pattern is sometimes different from input_dropout_factory().
    layer_norm_factory: A callable that returns a layer norm.
    num_layers: Number of layers to generate.
    dtype: DType to cast the embedded inputs.
    layer_remat: whether and how to apply jax.remat to each layer to perform
      recomputation in the backward pass. See documentation for LayerRemat enum.
    scan_layers: whether to scan over layers with jax.lax.scan.
    spmd_annotations: spmd annotations needed for scanned layers.
    shared_relative_position_bias_factory: A callable that returns a relative
      position bias instance which will be shared for all encoder layers. Only
      set this if using shared relative position biases.
    token_embedder_factory: A callable that returns a token embedder. Please
      provide either this or `shared_token_embedder`.
    shared_token_embedder: A callable that returns a token embedder shared
      between both encoder and decoder.
    position_embedder_factory: A callable that returns an absolute position
      embedder. Only provide this if you want absolute position embeddings.
    scan_axis: axis over which to do scan over layers.
    sow_intermediates: whether to track intermediates using Module.sow.
    capture_gradients: whether to track input gradients using a variable in the
      `grads` collection. This captures the gradient of the (combined) embedded
      inputs, i.e. the input to the first encoder layer.
  """
  layer_factory: MakeEncoderLayerFn
  input_dropout_factory: Callable[[], nn.Module]
  output_dropout_factory: Callable[[], nn.Module]
  layer_norm_factory: Callable[[], nn.Module]
  num_layers: int
  dtype: DType = jnp.float32
  layer_remat: _LayerRematOrStr = LayerRemat.LEGACY
  scan_layers: bool = False
  spmd_annotations: Any = None
  shared_relative_position_bias_factory: Optional[Callable[[],
                                                           nn.Module]] = None
  scan_axis: int = 1

  # Embedders: Either a token_embedder_factory factory or shared token embedder
  # must be provided. The position embedder is optional and provided when
  # absolute position embeddings are desired.
  token_embedder_factory: Optional[Callable[[],
                                            embedding.Embedder[Array]]] = None
  shared_token_embedder: Optional[embedding.Embedder[Array]] = None
  position_embedder_factory: Optional[Callable[
      [], embedding.Embedder[Array]]] = None

  sow_intermediates: bool = False
  capture_gradients: bool = False

  def setup(self):
    # Set up the embedders.
    if (self.token_embedder_factory,
        self.shared_token_embedder).count(None) != 1:
      raise ValueError(
          'Please set exactly one of token_embedder_factory or '
          'shared_token_embedder. token_embedder_factory was %s, and '
          'shared_token_embedder was %s.' %
          (self.token_embedder_factory, self.shared_token_embedder))
    if self.shared_token_embedder is not None:
      embedders = {'token_ids': self.shared_token_embedder}
    else:
      self.token_embedder = self.token_embedder_factory()
      embedders = {'token_ids': self.token_embedder}
    if self.position_embedder_factory is not None:
      self.position_embedder = self.position_embedder_factory()
      embedders['position_ids'] = self.position_embedder
    self.embedder = embedding.MultiEmbed(
        embedders,
        sow_intermediates=self.sow_intermediates,
        capture_gradients=self.capture_gradients)
    self.input_dropout = self.input_dropout_factory()

    if self.scan_layers and self.shared_relative_position_bias_factory:
      raise ValueError("Scanned layer mode doesn't support shared relative "
                       'position biases.')
    self.relpos_bias = (
        self.shared_relative_position_bias_factory()
        if self.shared_relative_position_bias_factory is not None else None)

    lyrf = lambda: self.layer_factory(  # pylint: disable=g-long-lambda
        shared_relative_position_bias=self.relpos_bias)
    # Optionally wrap each layer in jax.remat per self.layer_remat.
    lyrf = maybe_remat(
        lyrf, self.layer_remat, self.scan_layers, static_argnums=(3,))
    if not self.scan_layers:
      self.layers = [lyrf() for _ in range(self.num_layers)]
      self.encoder = common.TransparentLayerSequence(self.layers)
    else:
      self.encoder = self._construct_scanned_encoder(lyrf, self.num_layers)

    self.encoder_norm = self.layer_norm_factory()
    self.output_dropout = self.output_dropout_factory()

  def _construct_scanned_encoder(self, lyrf: Callable[[], nn.Module],
                                 num_layers: int) -> nn.Module:
    """Constructs encoder from layer factory using scan."""
    initializing = self.is_mutable_collection('params')
    # We scan the parameters along axis scan_axis (default=1)
    # as an XLA layout optimization.
    params_spec = self.scan_axis if initializing else transforms.ScanIn(
        self.scan_axis)
    cache_spec = 0
    intermediates_spec = 2  # Stacks intermediate layer outputs in dimension 2.
    scan_annotation = (
        self.spmd_annotations['encoder']
        if self.spmd_annotations is not None else None)
    lyrf = transforms.factory_scan(
        lyrf,
        in_axes=(nn.broadcast, nn.broadcast, nn.broadcast),
        variable_axes={
            'params': params_spec,
            'cache': cache_spec,
            'intermediates': intermediates_spec,
        },
        split_rngs={
            'params': True,
            'dropout': True
        },
        length=num_layers,
        data_transform=transforms.inner_scan_spmd(scan_annotation,
                                                  self.scan_axis),
        axes_collections=('params', 'cache'),
    )
    return lyrf()

  def embed_and_combine_inputs(self,
                               inputs,
                               inputs_positions=None,
                               *,
                               segment_ids: Optional[Array] = None,
                               enable_dropout: bool = True):
    """Returns the combined embedded inputs for further encoding."""
    assert inputs.ndim == 2  # (batch, len)

    embedder_inputs = {'token_ids': inputs}
    if 'position_ids' in self.embedder.embedders:
      if inputs_positions is None:
        # Default to sequential positions when none are provided.
        seq_length = inputs.shape[-1]
        inputs_positions = jnp.arange(seq_length)[None, :]
      embedder_inputs['position_ids'] = inputs_positions
    # TODO: Pass `deterministic=not enable_dropout`?
    embedded_inputs = self.embedder(segment_ids=segment_ids, **embedder_inputs)

    embedded_inputs = self.input_dropout(
        embedded_inputs, deterministic=not enable_dropout)

    # TODO: Revert this cast or move to embedder.
    embedded_inputs = embedded_inputs.astype(self.dtype)

    return embedded_inputs

  def encode_from_continuous_inputs(self,
                                    inputs,
                                    encoder_mask=None,
                                    logit_mask=None,
                                    *,
                                    enable_dropout: bool = True):
    """Applies all the layers starting from the continuous (embedded) inputs."""
    # Apply all encoder layers. Because of residual connection, the width of the
    # network is kept at `cfg.emb_dim` throughout.
    encoder_outputs = self.encoder(
        inputs,
        encoder_mask,
        logit_mask=logit_mask,
        enable_dropout=enable_dropout)
    if self.scan_layers:
      # scan returns (carry, out); keep only the carry (activations).
      encoder_outputs = encoder_outputs[0]

    # Post-process the outputs of the final encoder layer.
    # TODO: We could do this in the common encoder.
    encoder_outputs = self.encoder_norm(encoder_outputs)
    encoder_outputs = self.output_dropout(
        encoder_outputs, deterministic=not enable_dropout)

    if logit_mask is not None:
      encoder_outputs = logit_mask * encoder_outputs

    return encoder_outputs

  def __call__(self,
               inputs,
               inputs_positions=None,
               encoder_mask=None,
               *,
               segment_ids: Optional[Array] = None,
               enable_dropout: bool = True):
    """Applies Transformer model on the inputs.

    Args:
      inputs: input data
      inputs_positions: input subsequence positions for packed examples.
      encoder_mask: encoder self-attention mask.
      segment_ids: Input segmentation info for packed examples.
      enable_dropout: Enables dropout if set to True.

    Returns:
      output of a transformer encoder.
    """
    if self.sow_intermediates:
      self.sow('intermediates', 'input_tokens_ids', inputs)
    embedded_inputs = self.embed_and_combine_inputs(
        inputs,
        inputs_positions=inputs_positions,
        segment_ids=segment_ids,
        enable_dropout=enable_dropout)
    # Mask out the embeddings of padding positions (token id 0).
    logit_mask = jnp.expand_dims(
        jnp.array((inputs > 0), dtype=embedded_inputs.dtype), axis=-1)
    encoder_outputs = self.encode_from_continuous_inputs(
        embedded_inputs,
        encoder_mask=encoder_mask,
        logit_mask=logit_mask,
        enable_dropout=enable_dropout)
    if self.sow_intermediates:
      self.sow('intermediates', 'final_encoder_outputs', encoder_outputs)
    return encoder_outputs
class Decoder(nn.Module, param_remapping.ParameterRemappable):
"""A stack of decoder layers.
This module can be used with or without the encoder stack. To use without an
encoder, pass in encoded=None. This will bypass the encoder-decoder attention.
Attributes:
layer_factory: A callable that returns a DecoderLayer.
dropout_factory: A callable that returns the dropout to apply to the input
and before the final logits.
layer_norm_factory: A callable that returns a layer norm.
output_logits_factory: A callable that returns the output logits. If not
provided, then the token embedders are used.
num_layers: Number of layers to generate.
dtype: DType to cast the embedded inputs.
layer_remat: whether and how to apply jax.remat to each layer to perform
recomputation in the backward pass. See documentation for LayerRemat enum.
scan_layers: whether to scan over layers.
spmd_annotations: spmd annotations needed for scanned layers.
shared_relative_position_bias_factory: A callable that returns a relative
position bias instance which will be shared for all encoder layers. Only
set this if using shared relative position biases.
token_embedder_factory: A callable that returns a token embedder. Please
provide either this or `shared_token_embedder`.
shared_token_embedder: A callable that returns a token embedder shared
between both encoder and decoder.
position_embedder_factory: A callable that returns an absolute position
embedder. Only provide this if you want absolute position embeddings.
sow_intermediates: whether to track intermediates using Module.sow.
scan_axis: axis over which to do scan over layers.
capture_gradients: whether to track input gradients using a variable in the
`grads` collection. This captures the gradient of the (combined) embedded
inputs, i.e. the input to the first encoder layer.
"""
layer_factory: MakeDecoderLayerFn
dropout_factory: Callable[[], nn.Module]
layer_norm_factory: Callable[[], nn.Module]
num_layers: int
dtype: DType = jnp.float32
layer_remat: _LayerRematOrStr = LayerRemat.LEGACY
scan_layers: bool = False
spmd_annotations: Any = None
shared_relative_position_bias_factory: Optional[Callable[[],
nn.Module]] = None
output_logits_factory: Optional[Callable[[], nn.Module]] = None
# Embedders: Either a token_embedder_factory factory or shared token embedder
# must be provided. The position embedder is optional and provided when
# absolute position embeddings are desired.
token_embedder_factory: Optional[Callable[[],
embedding.Embedder[Array]]] = None
shared_token_embedder: Optional[embedding.Embedder[Array]] = None
position_embedder_factory: Optional[Callable[
[], embedding.Embedder[Array]]] = None
sow_intermediates: bool = False
scan_axis: int = 1
capture_gradients: bool = False
def setup(self):
# Set up the embedders.
if (self.token_embedder_factory,
self.shared_token_embedder).count(None) != 1:
raise ValueError(
'Please set exactly one of token_embedder_factory or '
'shared_token_embedder. token_embedder_factory was %s, and '
'shared_token_embedder was %s.' %
(self.token_embedder_factory, self.shared_token_embedder))
if self.shared_token_embedder is not None:
embedders = {'token_ids': self.shared_token_embedder}
else:
self.token_embedder = self.token_embedder_factory()
embedders = {'token_ids': self.token_embedder}
if self.position_embedder_factory is not None:
self.position_embedder = self.position_embedder_factory()
embedders['position_ids'] = self.position_embedder
self.embedder = embedding.MultiEmbed(
embedders,
sow_intermediates=self.sow_intermediates,
capture_gradients=self.capture_gradients)
self.input_dropout = self.dropout_factory()
if self.scan_layers and self.shared_relative_position_bias_factory:
raise ValueError("Scanned layer mode doesn't support shared relative"
'position biases.')
self.relpos_bias = (
self.shared_relative_position_bias_factory()
if self.shared_relative_position_bias_factory is not None else None)
self.decoder = self._setup_layer_sequence()
self.decoder_norm = self.layer_norm_factory()
self.output_dropout = self.dropout_factory()
self.setup_output_logits()
def _setup_layer_sequence(self):
lyrf = lambda: self.layer_factory( # pylint: disable=g-long-lambda
shared_relative_position_bias=self.relpos_bias)
lyrf = maybe_remat(
lyrf,
self.layer_remat,
self.scan_layers,
static_argnums=(5, 6, 7, 8, 9))
if not self.scan_layers:
self.layers = [lyrf() for _ in range(self.num_layers)]
return common.TransparentLayerSequence(self.layers)
else:
return self._construct_scanned_decoder(lyrf, self.num_layers)
  def _construct_scanned_decoder(
      self,
      lyrf: Callable[[], nn.Module],
      num_layers: int,
      num_broadcast_args: int = 10) -> Callable[..., Array]:
    """Constructs decoder from layer factory using scan.

    Args:
      lyrf: Zero-argument factory returning a single decoder layer module.
      num_layers: Number of scan iterations, i.e. stacked layers.
      num_broadcast_args: Number of call-time arguments that are broadcast
        (passed unchanged) to every scan iteration.

    Returns:
      A scanned layer module whose parameters are stacked along `scan_axis`.
    """
    initializing = self.is_mutable_collection('params')
    # We scan the parameters along scan_axis (default =1) as
    # an XLA layout optimization.
    params_spec = self.scan_axis if initializing else transforms.ScanIn(
        self.scan_axis)
    # Cache variables (autoregressive decoding state) are stacked along the
    # leading axis.
    cache_spec = 0
    intermediates_spec = 2  # Stacks intermediate layer outputs in dimension 2.
    scan_annotation = (
        self.spmd_annotations['decoder']
        if self.spmd_annotations is not None else None)
    lyrf = transforms.factory_scan(
        lyrf,
        in_axes=(nn.broadcast,) * num_broadcast_args,
        variable_axes={
            'params': params_spec,
            'cache': cache_spec,
            'intermediates': intermediates_spec,
        },
        split_rngs={
            'params': True,
            'dropout': True
        },
        length=num_layers,
        data_transform=transforms.inner_scan_spmd(scan_annotation,
                                                  self.scan_axis),
        axis_name='layers',
        axes_collections=('params', 'cache'),
    )
    return lyrf()
@nn.nowrap
def setup_output_logits(self):
"""Sets up output logits; this method provides flexiblity for subclasses."""
# TODO: Re-merge with setup() once it's easier to Gin-configure
# shared modules, and directly pass submodules (instead of using factories).
if self.output_logits_factory:
# TODO: Consider renaming to "output_logits".
self.output_logits_factory: Callable[[], nn.Module]
self.logits_dense = self.output_logits_factory()
else:
self.logits_dense = None
def embed_and_combine_inputs(
self,
decoder_input_tokens,
decoder_positions=None,
*,
segment_ids: Optional[Array] = None,
enable_dropout: bool = True,
decode: bool = False,
):
"""Returns the combined embedded decoder inputs for further processing."""
assert decoder_input_tokens.ndim == 2 # (batch, len)
embedder_inputs = {'token_ids': decoder_input_tokens}
if 'position_ids' in self.embedder.embedders:
if decoder_positions is None:
seq_length = decoder_input_tokens.shape[-1]
decoder_positions = jnp.arange(seq_length)[None, :]
embedder_inputs['position_ids'] = decoder_positions
# TODO: Pass `deterministic=not enable_dropout`?
embedded_inputs = self.embedder(
segment_ids=segment_ids, decode=decode, **embedder_inputs)
embedded_inputs = self.input_dropout(
embedded_inputs, deterministic=not enable_dropout)
# TODO: Revert this cast or move to embedder.
embedded_inputs = embedded_inputs.astype(self.dtype)
return embedded_inputs
  def decode_from_continuous_inputs(
      self,
      embedded_inputs,
      encoder_outputs,
      decoder_positions=None,
      decoder_mask=None,
      encoder_decoder_mask=None,
      logit_mask=None,
      *,
      enable_dropout: bool = True,
      decode: bool = False,
      max_decode_length: Optional[int] = None,
      prefill: bool = False,
      prefill_lengths: Optional[Array] = None,
  ):
    """Applies the decoder on the continuous (embedded) inputs.

    Args:
      embedded_inputs: Already-embedded decoder inputs, (batch, len, emb_dim).
      encoder_outputs: Encoder activations to cross-attend to, or None for a
        decoder-only (language model) configuration.
      decoder_positions: Decoder subsequence positions for packed examples.
      decoder_mask: Decoder self-attention mask.
      encoder_decoder_mask: Cross-attention mask over encoder outputs.
      logit_mask: Multiplicative mask applied to the pre-logits activations.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: Optional maximum decoding length (used for relative
        position embedding parameters).
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: Lengths of the partial sequences being prefetched.

    Returns:
      Decoder output logits for next-token prediction.
    """
    # If encoded is not given, this block is decoder only and does not contain
    # attention from decoder to encoder.
    if encoder_outputs is not None:
      assert encoder_outputs.ndim == 3  # (batch, len, depth)
    # Apply the decoder layers, attending to the encoder outputs (if provided),
    # and attending to previous decoder inputs (by masking future inputs).
    decoder_outputs = self.decoder(
        embedded_inputs,
        encoder_outputs,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask,
        logit_mask=logit_mask,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length,
        prefill=prefill,
        prefill_lengths=prefill_lengths)
    if self.scan_layers:
      # The scanned stack returns a tuple; the first element is the layer
      # output (see _construct_scanned_decoder).
      decoder_outputs = decoder_outputs[0]
    # Post-process final decoder layer outputs.
    # TODO: We could do this in the common decoder.
    decoder_outputs = self.decoder_norm(decoder_outputs)
    decoder_outputs = self.output_dropout(
        decoder_outputs, deterministic=not enable_dropout)
    if logit_mask is not None:
      decoder_outputs = logit_mask * decoder_outputs
    if self.sow_intermediates:
      self.sow('intermediates', 'pre_logits_layer', decoder_outputs)
    # Decoded Logits
    if self.logits_dense is not None:
      logits = self.logits_dense(decoder_outputs)
    else:
      # Use the transpose of embedding matrix for logit transform.
      #
      # TODO: Module subclass API if we want to keep using this.
      logits = self.embedder.embedders['token_ids'].attend(decoder_outputs)  # pytype: disable=attribute-error
      # Correctly normalize pre-softmax logits for this shared case.
      logits = logits / jnp.sqrt(decoder_outputs.shape[-1])
    if self.sow_intermediates:
      self.sow('intermediates', 'logits', logits)
    return logits
  def __call__(self,
               encoder_outputs,
               decoder_input_tokens,
               decoder_positions=None,
               decoder_mask=None,
               encoder_decoder_mask=None,
               *,
               segment_ids: Optional[Array] = None,
               enable_dropout: bool = True,
               decode: bool = False,
               max_decode_length: Optional[int] = None,
               prefill: bool = False,
               prefill_lengths: Optional[Array] = None,
               **kwargs):
    """Applies Transformer model on the inputs.

    TODO: For consistency it would be better to flip the order of the
    first two positional arguments here.

    Args:
      encoder_outputs: The outputs from the encoder. If None, do not attend to
        encoder outputs, resulting in a decoder only model (i.e. language
        model).
      decoder_input_tokens: The decoder input token IDs.
      decoder_positions: Decoder subsequence positions for packed examples.
      decoder_mask: Decoder self-attention mask.
      encoder_decoder_mask: The attention mask for the encoder outputs.
      segment_ids: Input segmentation info for packed examples.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.
      **kwargs: Optional keyword arguments to pass to
        decode_from_continuous_inputs.

    Returns:
      The decoder output logits for next token prediction.
    """
    embedded_inputs = self.embed_and_combine_inputs(
        decoder_input_tokens,
        decoder_positions=decoder_positions,
        segment_ids=segment_ids,
        enable_dropout=enable_dropout,
        decode=decode,
    )
    # Mask derived from the input token ids; presumably zeros out padding
    # positions before the logit transform — see get_decoder_logit_mask.
    logit_mask = dense_attention.get_decoder_logit_mask(decoder_input_tokens,
                                                        embedded_inputs.dtype)
    logits = self.decode_from_continuous_inputs(
        embedded_inputs,
        encoder_outputs,
        decoder_positions=decoder_positions,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask,
        logit_mask=logit_mask,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
        **kwargs)
    return logits
class EncoderDecoder(nn.Module, param_remapping.ParameterRemappable):
  """Transformer Model for sequence to sequence translation.

  Attributes:
    encoder_factory: A callable that returns the lower-level Encoder object. If
      shared_token_embedder_factory is non-None, then the result of it will be
      passed as the `shared_token_embedder` argument to `encoder_factory`.
    decoder_factory: A callable that returns the lower-level Decoder object. If
      shared_token_embedder_factory is non-None, then the result of it will be
      passed as the `shared_token_embedder` argument to `decoder_factory`.
    dtype: DType for encoder/decoder to cast embedded inputs, and for attention
      mask generation.
    scan_layers: whether to scan over layers.
    shared_token_embedder_factory: A callable that returns an embedder that can
      be shared between the encoder and decoder.
  """
  # Core components: encoder and decoder embedders and layers.
  encoder_factory: MakeEncoderFn
  decoder_factory: MakeDecoderFn
  # Configures behavior when the model is called. Many of these might eventually
  # be better as call parameters.
  dtype: DType = jnp.float32
  scan_layers: bool = False  # only used to pass this option to predict_fn.
  spmd_annotations: Any = None  # only used for scanned spmd layers
  shared_token_embedder_factory: Optional[Callable[[], embedding.Embed]] = None

  def setup(self):
    # Build the (optional) token embedder shared between encoder and decoder.
    self.token_embedder = (
        self.shared_token_embedder_factory()
        if self.shared_token_embedder_factory else None)
    # TODO: Clean up SPMD annotation code.
    if self.spmd_annotations is None:
      encoder_annotations = None
      decoder_annotations = None
    else:
      encoder_annotations = self.spmd_annotations['encoder']
      decoder_annotations = self.spmd_annotations['decoder']
    # Pass spmd_annotations to the factories only if they accept the keyword,
    # so older factories keep working unchanged.
    encoder_factory_params = tuple(
        inspect.signature(self.encoder_factory).parameters.keys())
    if 'spmd_annotations' in encoder_factory_params:
      self.encoder = self.encoder_factory(
          shared_token_embedder=self.token_embedder,
          spmd_annotations=encoder_annotations)
    else:
      self.encoder = self.encoder_factory(
          shared_token_embedder=self.token_embedder)
    decoder_factory_params = tuple(
        inspect.signature(self.decoder_factory).parameters.keys())
    if 'spmd_annotations' in decoder_factory_params:
      self.decoder = self.decoder_factory(
          shared_token_embedder=self.token_embedder,
          spmd_annotations=decoder_annotations)
    else:
      self.decoder = self.decoder_factory(
          shared_token_embedder=self.token_embedder)

  def _make_padding_attention_mask(self, query_tokens: Array,
                                   key_tokens: Array) -> Array:
    # Token id 0 is treated as padding: only positions with id > 0 attend or
    # are attended to.
    return dense_attention.make_attention_mask(
        query_tokens > 0, key_tokens > 0, dtype=self.dtype)

  def encode(self,
             encoder_input_tokens,
             encoder_segment_ids=None,
             encoder_positions=None,
             *,
             enable_dropout: bool = True):
    """Applies Transformer encoder-branch on the inputs.

    Args:
      encoder_input_tokens: input data to the encoder.
      encoder_segment_ids: encoder input segmentation info for packed examples.
      encoder_positions: encoder input subsequence positions for packed
        examples.
      enable_dropout: Enables dropout if set to True.

    Returns:
      encoded feature array from the transformer encoder.
    """
    # Make padding attention mask.
    encoder_mask = self._make_padding_attention_mask(encoder_input_tokens,
                                                     encoder_input_tokens)
    # Add segmentation block-diagonal attention mask if using segmented data.
    if encoder_segment_ids is not None:
      encoder_mask = dense_attention.combine_masks(
          encoder_mask,
          dense_attention.make_attention_mask(
              encoder_segment_ids,
              encoder_segment_ids,
              jnp.equal,
              dtype=self.dtype))
    return self.encoder(  # pytype: disable=attribute-error
        encoder_input_tokens,
        inputs_positions=encoder_positions,
        encoder_mask=encoder_mask,
        segment_ids=encoder_segment_ids,
        enable_dropout=enable_dropout)

  def decode(
      self,
      encoded,
      encoder_input_tokens,  # only needed for masks
      decoder_input_tokens,
      decoder_target_tokens,
      encoder_segment_ids=None,
      decoder_segment_ids=None,
      decoder_positions=None,
      *,
      enable_dropout: bool = True,
      decode: bool = False,
      # Args below were ported from decoder only code.
      max_decode_length: Optional[int] = None,
      prefill: bool = False,
      prefill_lengths: Optional[Array] = None):
    """Applies Transformer decoder-branch on encoded-input and target.

    Args:
      encoded: encoded input data from encoder.
      encoder_input_tokens: input to the encoder (only needed for masking).
      decoder_input_tokens: input token to the decoder.
      decoder_target_tokens: target token to the decoder.
      encoder_segment_ids: encoder segmentation info for packed examples.
      decoder_segment_ids: decoder segmentation info for packed examples.
      decoder_positions: decoder subsequence positions for packed examples.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.

    Returns:
      logits array from transformer decoder.
    """
    # Make padding attention masks.
    if decode:
      # Do not mask decoder attention based on targets padding at
      # decoding/inference time.
      decoder_mask = None
      encoder_decoder_mask = self._make_padding_attention_mask(
          jnp.ones_like(decoder_target_tokens), encoder_input_tokens)
    else:
      decoder_mask = dense_attention.make_decoder_mask(
          decoder_target_tokens=decoder_target_tokens,
          dtype=self.dtype,
          decoder_segment_ids=decoder_segment_ids)
      encoder_decoder_mask = self._make_padding_attention_mask(
          decoder_target_tokens, encoder_input_tokens)
    # Add segmentation block-diagonal attention masks if using segmented data.
    if encoder_segment_ids is not None:
      if decode:
        raise ValueError(
            'During decoding, packing should not be used but '
            '`encoder_segment_ids` was passed to `Transformer.decode`.')
      encoder_decoder_mask = dense_attention.combine_masks(
          encoder_decoder_mask,
          dense_attention.make_attention_mask(
              decoder_segment_ids,
              encoder_segment_ids,
              jnp.equal,
              dtype=self.dtype))
    # When computing the logits, we don't need decoder_target_tokens, which is
    # needed for computing the loss.
    return self.decoder(
        encoded,
        decoder_input_tokens=decoder_input_tokens,
        decoder_positions=decoder_positions,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask,
        segment_ids=decoder_segment_ids,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length,
        prefill=prefill,
        prefill_lengths=prefill_lengths)

  @property
  def encoder_embedder(self) -> embedding.MultiEmbed:
    return self.encoder.embedder

  @property
  def decoder_embedder(self) -> embedding.MultiEmbed:
    return self.decoder.embedder

  def __call__(self,
               encoder_input_tokens,
               decoder_input_tokens,
               decoder_target_tokens,
               encoder_segment_ids=None,
               decoder_segment_ids=None,
               encoder_positions=None,
               decoder_positions=None,
               *,
               enable_dropout: bool = True,
               decode: bool = False,
               max_decode_length: Optional[int] = None):
    """Applies Transformer model on the inputs.

    This method requires both decoder_target_tokens and decoder_input_tokens,
    which is a shifted version of the former. For a packed dataset, it usually
    has additional processing applied. For example, the first element of each
    sequence has id 0 instead of the shifted EOS id from the previous sequence.

    Args:
      encoder_input_tokens: input data to the encoder.
      decoder_input_tokens: input token to the decoder.
      decoder_target_tokens: target token to the decoder.
      encoder_segment_ids: encoder segmentation info for packed examples.
      decoder_segment_ids: decoder segmentation info for packed examples.
      encoder_positions: encoder subsequence positions for packed examples.
      decoder_positions: decoder subsequence positions for packed examples.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.

    Returns:
      logits array from full transformer.
    """
    encoded = self.encode(
        encoder_input_tokens,
        encoder_segment_ids=encoder_segment_ids,
        encoder_positions=encoder_positions,
        enable_dropout=enable_dropout)
    return self.decode(
        encoded,
        encoder_input_tokens,  # Only used for masks.
        decoder_input_tokens,
        decoder_target_tokens,
        encoder_segment_ids=encoder_segment_ids,
        decoder_segment_ids=decoder_segment_ids,
        decoder_positions=decoder_positions,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length)
class DecoderOnly(nn.Module, param_remapping.ParameterRemappable):
  """Decoder-only model.

  This model sets up the relevant masking and uses Decoder to do the heavy
  lifting.

  Attributes:
    decoder_factory: Factory which will make the lower-level Decoder object. In
      the DecoderOnly usage, it will always be called with
      `shared_token_embedder` as None.
    dtype: DType for encoder/decoder to cast embedded inputs, and for attention
      mask generation.
  """
  # Core sub-component.
  decoder_factory: MakeDecoderFn
  # Only used to pass this option to predict_fn.
  scan_layers: bool = False
  # Configures behavior when the model is called. Many of these might eventually
  # be better as call parameters.
  dtype: DType = jnp.float32

  def setup(self):
    # The decoder owns its own token embedder; nothing is shared with an
    # encoder here.
    self.decoder = self.decoder_factory(shared_token_embedder=None)

  def __call__(
      self,
      decoder_input_tokens: Array,
      decoder_target_tokens: Optional[Array],
      decoder_segment_ids: Optional[Array] = None,
      decoder_positions: Optional[Array] = None,
      decoder_causal_attention: Optional[Array] = None,
      *,
      enable_dropout: bool = True,
      decode: bool = False,
      max_decode_length: Optional[int] = None,
      prefill: bool = False,
      prefill_lengths: Optional[Array] = None,
      **kwargs,
  ):
    """Applies LanguageModel on the inputs.

    This method requires both decoder_target_tokens and decoder_input_tokens,
    which is typically a shifted version of the former. For a packed dataset, it
    usually has additional processing applied. For example, the first element of
    each sequence has id 0 instead of the shifted EOS id from the previous
    sequence.

    Args:
      decoder_input_tokens: input token to the decoder.
      decoder_target_tokens: target token to the decoder.
      decoder_segment_ids: decoder segmentation info for packed examples.
      decoder_positions: decoder subsequence positions for packed examples.
      decoder_causal_attention: a binary mask indicating the "inputs" portion of
        the concatenated sequence for a prefix LM.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.
      **kwargs: Additional keyword arguments to pass on to the decoder. This may
        include `decoder_attention_mask`, which overrides the decoder attention
        mask. If specified, this must be broadcastable to `[batch, head,
        target_length, target_length]`. Meanwhile, `decoder_target_tokens` will
        be ignored and `decoder_causal_attention` should not be set.

    Returns:
      logits array from LanguageModel.
    """
    if decode and prefill:
      raise ValueError('Only one of `decode` and `prefill` can be set. Use '
                       '`prefill` to pre-populate the cache for Prefix LMs '
                       'before using `decode`')
    if decode:
      # Do not mask decoder self-attention based on target padding at
      # decoding/inference time.
      decoder_mask = None
    else:
      if 'decoder_attention_mask' in kwargs:
        # Caller supplied an explicit attention mask; it replaces the one
        # derived from targets / causal-attention info.
        decoder_attention_mask = kwargs.pop('decoder_attention_mask')
        if decoder_causal_attention is not None:
          raise ValueError(
              'Only one of `decoder_causal_attention` and '
              '`decoder_attention_mask` can be set.'
          )
        decoder_mask = jnp.asarray(decoder_attention_mask, dtype=self.dtype)
      else:
        decoder_mask = dense_attention.make_decoder_mask(
            decoder_target_tokens=decoder_target_tokens,
            dtype=self.dtype,
            decoder_causal_attention=decoder_causal_attention,
            decoder_segment_ids=decoder_segment_ids,
        )
    # We reuse Decoder class, which can optionally takes in encoded and
    # encoder_decoder_mask. These are used when Decoder is used in the context
    # of encoder-decoder model. For LM, we don't have an encoder. So set these
    # to None.
    return self.decoder(  # pytype: disable=attribute-error
        encoder_outputs=None,
        decoder_input_tokens=decoder_input_tokens,
        decoder_positions=decoder_positions,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=None,
        segment_ids=decoder_segment_ids,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
        **kwargs)
| 63,075 | 38.496556 | 110 | py |
flaxformer | flaxformer-main/flaxformer/architectures/t5/t5_common_layers.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of standard layers for the T5-1.0 and T5-1.1 model variants."""
import functools
from flax import linen as nn
from jax import numpy as jnp
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.components import dense
from flaxformer.components import embedding as embedding_layers
from flaxformer.components import layer_norm
from flaxformer.components import relative_position_biases
from flaxformer.components.attention import dense_attention
BIAS_INIT = nn.initializers.normal(stddev=1e-6)
MLP_KERNEL_INIT = nn.initializers.variance_scaling(1.0, 'fan_in',
'truncated_normal')
def attention_layer(num_heads, head_dim, dropout_rate, dtype=jnp.bfloat16):
  """Creates a multi-head dot-product attention layer for T5-style models."""
  qkv_kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'normal')
  return dense_attention.MultiHeadDotProductAttention(  # pytype: disable=wrong-arg-types  # jax-types
      num_heads=num_heads,
      head_dim=head_dim,
      qkv_features=None,
      use_bias=False,
      broadcast_dropout=True,
      dropout_rate=dropout_rate,
      kernel_init=qkv_kernel_init,
      bias_init=BIAS_INIT,
      dtype=dtype)
def mlp_block(mlp_dim, dropout_rate, activations, dtype=jnp.bfloat16):
  """Creates a T5-style feed-forward (MLP) block."""
  return dense.MlpBlock(
      intermediate_dim=mlp_dim,
      activations=activations,
      use_bias=False,
      kernel_init=MLP_KERNEL_INIT,
      bias_init=BIAS_INIT,
      intermediate_dropout_rate=dropout_rate,
      # Dropout after the final projection is disabled in this configuration.
      final_dropout_rate=0,
      dtype=dtype)
def relative_position_bias(num_heads, dtype=jnp.bfloat16):
  """Creates the bucketed relative position bias used by T5 attention."""
  bias_embedding_init = nn.initializers.variance_scaling(
      1.0, 'fan_avg', 'uniform')
  return relative_position_biases.RelativePositionBiases(
      num_heads=num_heads,
      num_buckets=32,
      max_distance=128,
      embedding_init=bias_embedding_init,
      dtype=dtype)
def embedding(vocabulary_size, embedding_dim, dtype=jnp.bfloat16):
  """Creates the shared token embedding table for T5-style architectures."""
  token_init = nn.initializers.normal(stddev=1.0)
  return embedding_layers.Embed(  # pytype: disable=wrong-arg-types  # jax-types
      num_embeddings=vocabulary_size,
      features=embedding_dim,
      cast_input_dtype=jnp.int32,
      dtype=dtype,
      attend_dtype=jnp.float32,  # for logit training stability
      embedding_init=token_init,
      one_hot=True,
      name='token_embedder')
def dropout(rate):
  """Creates a T5-style dropout layer (broadcast over the length dimension)."""
  layer = nn.Dropout(rate=rate, broadcast_dims=(-2,))
  return layer
def encoder_layer(num_heads,
                  head_dim,
                  mlp_dim,
                  dropout_rate,
                  activations,
                  shared_relative_position_bias=None,
                  dtype=jnp.bfloat16):
  """Creates a standard encoder layer for T5-style architectures."""
  make_dropout = functools.partial(dropout, rate=dropout_rate)
  make_norm = functools.partial(layer_norm.T5LayerNorm, dtype=dtype)
  attention = attention_layer(
      num_heads=num_heads,
      head_dim=head_dim,
      dropout_rate=dropout_rate,
      dtype=dtype)
  mlp = mlp_block(
      mlp_dim=mlp_dim,
      activations=activations,
      dropout_rate=dropout_rate,
      dtype=dtype)
  return t5_architecture.EncoderLayer(
      attention=attention,
      mlp=mlp,
      dropout_factory=make_dropout,
      layer_norm_factory=make_norm,
      shared_relative_position_bias=shared_relative_position_bias)  # pytype: disable=wrong-keyword-args
def decoder_layer(num_heads,
                  head_dim,
                  mlp_dim,
                  dropout_rate,
                  activations,
                  shared_relative_position_bias=None,
                  dtype=jnp.bfloat16):
  """Creates a standard decoder layer for T5-style architectures."""
  make_dropout = functools.partial(dropout, rate=dropout_rate)
  make_norm = functools.partial(layer_norm.T5LayerNorm, dtype=dtype)
  # Self-attention and cross-attention use identical configurations.
  make_attention = functools.partial(
      attention_layer,
      num_heads=num_heads,
      head_dim=head_dim,
      dropout_rate=dropout_rate,
      dtype=dtype)
  mlp = mlp_block(
      mlp_dim=mlp_dim,
      activations=activations,
      dropout_rate=dropout_rate,
      dtype=dtype)
  return t5_architecture.DecoderLayer(
      self_attention=make_attention(),
      encoder_decoder_attention=make_attention(),
      mlp=mlp,
      dropout_factory=make_dropout,
      layer_norm_factory=make_norm,
      shared_relative_position_bias=shared_relative_position_bias)  # pytype: disable=wrong-keyword-args
def encoder(num_heads,
            head_dim,
            mlp_dim,
            num_layers,
            shared_token_embedder,
            dropout_rate,
            activations,
            dtype=jnp.bfloat16):
  """Creates a standard encoder stack for T5-style architectures."""
  make_layer = functools.partial(
      encoder_layer,
      num_heads=num_heads,
      head_dim=head_dim,
      mlp_dim=mlp_dim,
      activations=activations,
      dropout_rate=dropout_rate,
      dtype=dtype)
  make_dropout = functools.partial(dropout, rate=dropout_rate)
  make_norm = functools.partial(layer_norm.T5LayerNorm, dtype=dtype)
  # A single relative position bias module is shared by all encoder layers.
  make_relpos_bias = functools.partial(
      relative_position_bias, num_heads=num_heads, dtype=dtype)
  return t5_architecture.Encoder(
      layer_factory=make_layer,
      input_dropout_factory=make_dropout,
      output_dropout_factory=make_dropout,
      layer_norm_factory=make_norm,
      num_layers=num_layers,
      shared_token_embedder=shared_token_embedder,
      shared_relative_position_bias_factory=make_relpos_bias,
      dtype=dtype)  # pytype: disable=wrong-keyword-args
def decoder(num_heads,
            head_dim,
            mlp_dim,
            num_layers,
            shared_token_embedder,
            dropout_rate,
            activations,
            output_logits_factory=None,
            dtype=jnp.bfloat16):
  """Creates a standard decoder stack for T5-style architectures."""
  make_layer = functools.partial(
      decoder_layer,
      num_heads=num_heads,
      head_dim=head_dim,
      mlp_dim=mlp_dim,
      activations=activations,
      dropout_rate=dropout_rate,
      dtype=dtype)
  make_dropout = functools.partial(dropout, rate=dropout_rate)
  make_norm = functools.partial(layer_norm.T5LayerNorm, dtype=dtype)
  # A single relative position bias module is shared by all decoder layers.
  make_relpos_bias = functools.partial(
      relative_position_bias, num_heads=num_heads, dtype=dtype)
  return t5_architecture.Decoder(
      layer_factory=make_layer,
      dropout_factory=make_dropout,
      layer_norm_factory=make_norm,
      num_layers=num_layers,
      shared_token_embedder=shared_token_embedder,
      shared_relative_position_bias_factory=make_relpos_bias,
      output_logits_factory=output_logits_factory,
      dtype=dtype)  # pytype: disable=wrong-keyword-args
| 7,623 | 35.830918 | 104 | py |
flaxformer | flaxformer-main/flaxformer/architectures/t5/parallel_fused_decoder.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parallel Transformer decoder layer with fused parameters."""
import functools
from typing import Callable, Optional, Tuple
from absl import logging
from aqt.jax_legacy.jax import flax_layers as aqt_flax_layers
from aqt.jax_legacy.jax import quant_config as aqt_config
from aqt.jax_legacy.jax import quantization as aqt
from flax import linen as nn
from jax import lax
import jax.numpy as jnp
from flaxformer import activation_partitioning
from flaxformer.architectures.common import param_remapping
from flaxformer.components import dense
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
from flaxformer.types import Initializer
# pylint: disable=not-callable
# pytype: disable=not-callable
class ParallelFusedDecoderLayer(nn.Module, param_remapping.ParameterRemappable):
"""Parallel Transformer decoder layer with fused parameters.
Attributes:
self_attention: An instance of a self-attention module.
mlp: The MLP module, applied after both attention modules.
dropout_factory: A callable that returns a new dropout instance. This is
applied after the attention module.
layer_norm_factory: A callable that returns a new layer norm. This is
applied before the attention module and before the MLP.
relative_position_bias_factory: A callable that returns relative position
bias instances. This should only be used for per-layer relative position
biases; please use `shared_relative_position_bias` if they are shared
among layers.
shared_relative_position_bias: An instance of a shared relative position
bias module, usually owned by the Decoder.
activation_partitioning_dims: When set to 2, partitions intermediate
variables containing the input and output of the decoder layer.
sow_intermediates: Whether to track intermediates using Module.sow.
is_quant_finetune_mode: Whether the layer is loaded for quantization
finetuning. It's only applied in the context of quantization.
"""
self_attention: nn.Module
mlp: nn.Module
dropout_factory: Callable[[], nn.Module]
layer_norm_factory: Callable[[], nn.Module]
relative_position_bias_factory: Optional[Callable[[], nn.Module]] = None
shared_relative_position_bias: Optional[nn.Module] = None
activation_partitioning_dims: int = 1
sow_intermediates: bool = False
scanned: bool = False
use_aqt: bool = False
weight_params: Optional[aqt.QuantOps.WeightParams] = None
act_params: Optional[aqt.QuantOps.ActHParams] = None
possibly_use_quantized_vars: bool = False
is_quant_finetune_mode: bool = False
q_wi_fused_kernel_init: Optional[Initializer] = None
kv_fused_kernel_init: Optional[Initializer] = None
o_wo_fused_kernel_init: Optional[Initializer] = None
dense_general_cls_factory: Optional[Callable[[], type[nn.Module]]] = None
  def setup(self):
    """Materializes sub-modules and precomputes fused projection configs.

    Validates the configuration (multiquery attention, explicit
    `out_features`, divisibility of the MLP intermediate dim by the head
    count), instantiates layer norm / dropout / relative position bias, and
    builds the kwargs for the three fused projections:
      * q_wi_fused: query heads fused with the MLP input projections.
      * kv_fused:   single (multiquery) key/value projection.
      * o_wo_fused: attention output fused with the MLP output projection.
    """
    if self.activation_partitioning_dims != 1:
      logging.warning(
          'ParallelFusedDecoderLayer.activation_partitioning_dims '
          'is deprecated and will soon be removed.'
      )
    if (
        self.relative_position_bias_factory is not None
        and self.shared_relative_position_bias is not None
    ):
      raise ValueError(
          'Please set at most one of relative_position_bias_factory and'
          ' shared_relative_position_bias. (They can both be None however, e.g.'
          ' for absolute position embeds.)'
      )
    # Per-layer bias (factory) takes precedence; otherwise fall back to a
    # shared instance (or None for absolute position embeddings).
    self.relpos_bias = (
        self.relative_position_bias_factory()
        if self.relative_position_bias_factory is not None
        else self.shared_relative_position_bias
    )
    self.layer_norm = self.layer_norm_factory()
    self.dropout = self.dropout_factory()
    # The fused kv projection below assumes a single KV head, which only
    # holds for multiquery attention.
    if not isinstance(
        self.self_attention, dense_attention.MultiQueryDotProductAttention
    ):
      raise TypeError(
          'ParallelFusedDecoderLayer requires Multiquery attention.'
      )
    num_heads = self.self_attention.num_heads
    if self.self_attention.head_dim is not None:
      head_dim = self.self_attention.head_dim
    else:
      head_dim = self.self_attention.qkv_features // num_heads
    if self.self_attention.out_features is None:
      raise ValueError(
          'ParallelFusedDecoderLayer requires self-attention'
          'with manually specified out_features.'
      )
    embed_dim = self.self_attention.out_features
    n_activations = len(self.mlp.activations)
    mlp_intermediate_dim = self.mlp.intermediate_dim
    if mlp_intermediate_dim % num_heads != 0:
      raise ValueError('num_heads must divide mlp intermediate dimension')
    # Per head: one slice of MLP input per activation plus the query slice.
    fused_out_dims = (
        num_heads,
        (mlp_intermediate_dim // num_heads) * n_activations + head_dim,
    )
    if self.dense_general_cls_factory:
      self.dense_general_cls = self.dense_general_cls_factory()
    else:
      self.dense_general_cls = dense.DenseGeneral
    # TODO: move the AQT branching code complexity out to the
    # configuration system here and other places in Flaxformer.
    def make_dense(
        axis,
        features,
        use_bias,
        dtype,
        kernel_init,
        bias_init,
        reshape_kernel,
        kernel_axis_names,
        name,
    ):
      """Builds a dense projection, quantized (AQT) or plain DenseGeneral."""
      if self.use_aqt:
        if self.weight_params is None and self.act_params is None:
          raise ValueError(
              'If use_aqt is True, either of weights or acts quantization need '
              'to be specified using arguments `weight_params` or `act_params`.'
          )
        aqt_context = aqt_config.DynamicContext(
            update_bounds=False, collect_acts_stats=False
        )
        weight_prec = self.weight_params.prec if self.weight_params else None
        half_shift = (
            self.weight_params.half_shift if self.weight_params else False
        )
        aqt_hparams = aqt_flax_layers.DenseAqt.HParams(
            weight_prec=weight_prec,
            weight_half_shift=half_shift,
            quant_act=self.act_params,  # currently supports fixed bounds only.
            quant_type=aqt.QuantType.AQT,
            weight_quant_granularity=aqt_config.QuantGranularity.PER_CHANNEL,
        )
        # DenseAqt has no reshape_kernel option, so the o_wo projection's two
        # leading kernel axes are presented as a single joined axis instead.
        if kernel_axis_names == ('heads', 'o_wo_fused', 'embed'):
          assert axis == (-2, -1)
          kernel_axis_names = ('joined_o_wo_fused', 'embed')
        aqt_dense = aqt_flax_layers.DenseAqt(
            features=features,
            hparams=aqt_hparams,
            train=self.is_quant_finetune_mode,
            dynamic_context=aqt_context,
            paxis_name=None,
            # No "cross-replica" reduction expressed in the XLA graph at this
            # stage. Will be imposed later, automatically, by XLA SPMD.
            use_bias=use_bias,
            kernel_init=kernel_init,
            bias_init=bias_init,
            dtype=dtype,
            name=name,
            possibly_use_quantized_vars=self.possibly_use_quantized_vars,
            kernel_axis_names=kernel_axis_names,
        )
        # we do not have reshape kernel option here but we explicitly
        # reshape kernel.
        return functools.partial(aqt_dense, padding_mask=None)
      else:
        return self.dense_general_cls(
            axis=axis,
            features=features,
            use_bias=use_bias,
            dtype=dtype,
            kernel_init=kernel_init,
            bias_init=bias_init,
            reshape_kernel=reshape_kernel,
            name=name,
            kernel_axis_names=kernel_axis_names,
        )

    self.make_dense = make_dense
    # Each fused projection defaults to the attention kernel init but can be
    # overridden independently via the corresponding layer attribute.
    q_wi_fused_kernel_init = self.self_attention.kernel_init
    if self.q_wi_fused_kernel_init is not None:
      q_wi_fused_kernel_init = self.q_wi_fused_kernel_init
    self.q_wi_fused_args = dict(
        axis=-1,
        features=fused_out_dims,
        use_bias=self.self_attention.use_bias,
        dtype=self.self_attention.dtype,
        kernel_init=q_wi_fused_kernel_init,
        bias_init=self.self_attention.bias_init,
        reshape_kernel=False,
        name='q_wi_fused',
        kernel_axis_names=('embed', 'heads', 'q_wi_fused'),
    )
    kv_fused_kernel_init = self.self_attention.kernel_init
    if self.kv_fused_kernel_init is not None:
      kv_fused_kernel_init = self.kv_fused_kernel_init
    self.kv_fused_args = dict(
        axis=-1,
        # Single multiquery head: key and value concatenated on the last dim.
        features=(1, 2 * head_dim),
        use_bias=self.self_attention.use_bias,
        dtype=self.self_attention.dtype,
        kernel_init=kv_fused_kernel_init,
        bias_init=self.self_attention.bias_init,
        reshape_kernel=False,
        name='kv_fused',
        kernel_axis_names=('embed', 'multiquery_heads', 'kv_fused'),
    )
    o_wo_fused_kernel_init = self.self_attention.kernel_init
    if self.o_wo_fused_kernel_init is not None:
      o_wo_fused_kernel_init = self.o_wo_fused_kernel_init
    self.o_wo_fused_args = dict(
        axis=(-2, -1),
        features=embed_dim,
        use_bias=self.self_attention.use_bias,
        dtype=self.self_attention.dtype,
        kernel_init=o_wo_fused_kernel_init,
        bias_init=self.self_attention.bias_init,
        reshape_kernel=False,
        name='o_wo_fused',
        # o_wo_fused = mlp//heads + head_dim
        kernel_axis_names=('heads', 'o_wo_fused', 'embed'),
    )
def _create_residuals_and_queries(
self, layer_input: Array, x: Array, logit_mask: Array, **kwargs
) -> Tuple[Array, Array, Array]:
"""Slice layer inputs to get versions to use as queries."""
# This is a no-op unless overridden by a subclass.
return layer_input, x, logit_mask
  @nn.compact
  def __call__(
      self,
      targets,
      encoded,
      decoder_mask=None,
      encoder_decoder_mask=None,
      *,
      logit_mask=None,
      enable_dropout: bool = True,
      decode: bool = False,
      max_decode_length: Optional[int] = None,
      prefill: bool = False,
      prefill_lengths: Optional[Array] = None,
      **kwargs,
  ):
    """Applies ParallelFusedDecoder1DBlock module.

    Args:
      targets: Input data for decoder with shape [batch_size,
        decoder_seq_length, decoder_hidden_size].
      encoded: required to be None, block is Decoder only, only kept for
        __call__ signature uniformity.
      decoder_mask: decoder self-attention mask.
      encoder_decoder_mask: required to be None, block is Decoder only, only
        kept for __call__ signature uniformity.
      logit_mask: a mask (e.g., padding logit mask) to be applied to the
        attention logits.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in
        the cache, lengths are inferred from the mask if not provided.
      **kwargs: Remaining keyword arguments. Passed to
        _create_residuals_and_queries.

    Returns:
      output after transformer encoder-decoder block.
    """
    assert encoded is None, 'only pure decoder layer supported.'
    assert encoder_decoder_mask is None, 'only pure decoder layer supported.'
    layer_input = targets
    del targets
    # Shared relative position embedding attention biases.
    if self.relpos_bias:
      if decode and max_decode_length:
        # During decoding, size the bias for the full decode length so cached
        # steps see a consistent bias table.
        decoder_bias = self.relpos_bias(
            max_decode_length, max_decode_length, False
        )
      else:
        decoder_bias = self.relpos_bias(
            layer_input.shape[-2], layer_input.shape[-2], False
        )
    else:
      decoder_bias = None

    # Decoder block.
    assert layer_input.ndim == 3
    layer_input = activation_partitioning.with_sharding_migration(
        layer_input,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'),
    )
    if prefill and prefill_lengths is None:
      # Figure out how far each element in the batch fills the cache based
      # on the mask. We index each element in the batch, the first head
      # dim (because this is always set to one), and the first query
      # vector. If there is any prefix at all, the first element in the
      # prefix would be part of it.
      prefill_lengths = jnp.sum(decoder_mask[:, 0, 0, :], axis=-1).astype(
          jnp.int32
      )
    # Pre-attention layer norm (shared by the parallel attention + MLP paths).
    x = self.layer_norm(
        layer_input,
        decode=decode,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
    )
    x = activation_partitioning.with_sharding_migration(
        x,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'),
    )
    num_heads = self.self_attention.num_heads
    if self.self_attention.head_dim is not None:
      head_dim = self.self_attention.head_dim
    else:
      head_dim = self.self_attention.qkv_features // num_heads
    n_activations = len(self.mlp.activations)
    mlp_intermediate_dim = self.mlp.intermediate_dim
    # Normally a no-op unless overridden by a subclass.
    layer_input_residual, x_queries, logit_mask_queries = (
        self._create_residuals_and_queries(layer_input, x, logit_mask, **kwargs)
    )
    del logit_mask_queries
    # Use local fused Q + W_i to calculate fused results.
    # [batch, length, embed], [heads, mlp//heads * n_act + head_dim] ->
    # [batch, length, heads, mlp//heads * n_act + head_dim]
    q_wi = self.make_dense(**self.q_wi_fused_args)(x_queries)
    # Slice out query: first `head_dim` entries of the fused last dim.
    query = lax.dynamic_slice_in_dim(q_wi, 0, head_dim, -1)
    # Slice out MLP inputs.
    int_size = mlp_intermediate_dim // num_heads
    # wi[i]: [batch, length, heads, mlp//heads]
    wi = [
        lax.dynamic_slice_in_dim(q_wi, head_dim + i * int_size, int_size, -1)
        for i in range(n_activations)
    ]
    # Use local fused K + V to calculate fused results.
    kv = self.make_dense(**self.kv_fused_args)(x)
    kv = activation_partitioning.with_sharding(kv, 1)
    # Slice out key, dropping the singleton multiquery-head dim.
    key = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 0, head_dim, -1), -2)
    # Slice out value.
    value = jnp.squeeze(
        lax.dynamic_slice_in_dim(kv, head_dim, head_dim, -1), -2
    )
    # Hand the pre-projected Q/K/V to attention so it skips its own
    # projections.
    precomputed_qkv = (query, key, value)
    # y_att: [batch, length, heads, head_dim]
    y_att = self.self_attention(
        x_queries,
        x,
        mask=decoder_mask,
        bias=decoder_bias,
        precomputed_qkv=precomputed_qkv,
        enable_dropout=enable_dropout,
        decode=decode,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
    )
    # y_mlp: [batch, length, heads, mlp//heads]
    y_mlp = self.mlp(
        wi,
        decode=decode,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
        enable_dropout=enable_dropout,
    )
    # y_fused: [batch, length, heads, mlp//heads + head_dim]
    y_fused = jnp.concatenate([y_att, y_mlp], axis=-1)
    if self.use_aqt and self.weight_params is not None:
      weight_prec = self.weight_params.prec if self.weight_params else None
      half_shift = (
          self.weight_params.half_shift if self.weight_params else False
      )
      aqt_hparams = aqt_flax_layers.DenseGeneralAqt.HParams(
          weight_prec=weight_prec,
          weight_half_shift=half_shift,
          quant_act=None,  # currently supports fixed bounds only.
          weight_quant_granularity=aqt_config.QuantGranularity.PER_CHANNEL,
      )
      y_out = aqt_flax_layers.DenseGeneralAqt(
          **self.o_wo_fused_args,
          hparams=aqt_hparams,
          train=self.is_quant_finetune_mode,
          possibly_use_quantized_vars=self.possibly_use_quantized_vars,
      )(y_fused)
    else:
      y_out = self.dense_general_cls(**self.o_wo_fused_args)(y_fused)
    # y *= 2**-0.5
    # Residual connection around the fused attention+MLP branch.
    z = layer_input_residual + self.dropout(
        y_out, deterministic=not enable_dropout
    )
    z = activation_partitioning.with_sharding_migration(
        z,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'),
    )
    if self.sow_intermediates:
      self.sow('intermediates', 'activations', z)
    # scan expects functions to have a signature: fn(carry, in) --> carry, out
    # TODO: automate this detail.
    if self.scanned:
      return z, None
    else:
      return z
| 16,959 | 37.371041 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/t5/parallel_fused_decoder_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t5_architecture."""
from typing import Any
from absl.testing import absltest
from aqt.jax_legacy.jax import quantization as aqt
from flax import linen as nn
from jax import random
import jax.numpy as jnp
import numpy as np
from flaxformer import testing_utils
from flaxformer.architectures.t5 import parallel_fused_decoder
from flaxformer.architectures.t5 import t5_architecture_test_utils as t5_test_utils
from flaxformer.components import dense
from flaxformer.components import layer_norm
from flaxformer.components import relative_position_biases
from flaxformer.components.attention import dense_attention
# Helpers for comparing parameter trees against golden JSON files checked in
# under the testdata/ directory.
expected_files = testing_utils.ExpectedJsonFiles(
    'flaxformer/architectures/t5/testdata'
)
check_params = expected_files.check_params_shapes_only
get_params = expected_files.get_params
class ParallelFusedDecoderOnlyTest(absltest.TestCase):
  """Tests for ParallelFusedDecoderLayer and its save-format round trips."""

  def test_decoder_shapes_fused_parallel(self):
    """Tests if the decoder parameter have the expected shapes."""
    decoder = t5_test_utils.make_parallel_fused_transformer_config()
    inputs = np.array(
        [
            # Batch 1.
            [183, 20, 75],
            # Batch 2.
            [392, 19, 7],
        ],
        dtype=np.int32,
    )
    output, variables = decoder.init_with_output(
        random.PRNGKey(0),
        decoder_input_tokens=inputs,
        decoder_target_tokens=inputs,  # used for mask generation
        enable_dropout=False,
    )
    params = variables['params']
    # Compare the save-format tree against the golden shapes file.
    reformatted = decoder.apply({}, params, method=decoder.to_save_format)
    check_params(reformatted, 'decoder_shapes_fused_parallel.json')
    self.assertEqual(output.shape, (2, 3, 4))

    # Convert back to Flax module structure format and test again.
    params2 = decoder.apply({}, reformatted, method=decoder.from_save_format)
    output2 = decoder.apply(
        {'params': params2},
        decoder_input_tokens=inputs,
        decoder_target_tokens=inputs,  # used for mask generation
        enable_dropout=False,
    )
    output = output.astype(np.float32)
    output2 = output2.astype(np.float32)
    np.testing.assert_allclose(output, output2, rtol=1e-8)

  def test_decoder_separate_init(self):
    """Tests if the decoder init can be controlled independently."""
    dtype = jnp.bfloat16
    num_attn_heads = 8
    num_features = 13
    make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
    make_layer_norm = layer_norm.T5LayerNorm
    bias_init = nn.initializers.normal(stddev=1.0)

    def _make_mq_attention(num_attn_heads, dtype):
      """First test configuration for attention."""
      return dense_attention.MultiQueryDotProductAttention(
          num_heads=num_attn_heads,
          dtype=dtype,
          qkv_features=512,
          out_features=num_features,
          head_dim=None,
          kernel_init=nn.initializers.variance_scaling(1.0, 'fan_in', 'normal'),
          bias_init=bias_init,
          use_bias=False,
          broadcast_dropout=True,
          dropout_rate=0.1,
          rescale_logits=True,
      )

    def _make_fusion_mlp(dtype):
      """First test configuration for the MLP."""
      return dense.MlpBlock(
          use_bias=False,
          intermediate_dim=2048,
          out_dim=13,
          precomputed_intermediates=True,
          fuse_kernels=False,
          activations=('swish', 'linear'),
          kernel_init=nn.initializers.variance_scaling(
              1.0, 'fan_in', 'truncated_normal'
          ),
          bias_init=bias_init,
          intermediate_dropout_rate=0.1,
          final_dropout_rate=0.1,
          dtype=dtype,
      )

    def _make_relative_position_bias(
        num_attn_heads: int, dtype: Any
    ) -> relative_position_biases.RelativePositionBiases:
      return relative_position_biases.RelativePositionBiases(
          num_buckets=32,
          max_distance=128,
          num_heads=num_attn_heads,
          dtype=dtype,
          embedding_init=nn.initializers.variance_scaling(
              1.0, 'fan_avg', 'uniform'
          ),
      )

    # Distinct constant initializers per fused kernel let us verify below
    # that each override is honored independently.
    decoder_layer = parallel_fused_decoder.ParallelFusedDecoderLayer(
        self_attention=_make_mq_attention(num_attn_heads, dtype),
        mlp=_make_fusion_mlp(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)
        ),
        use_aqt=False,
        weight_params=None,
        possibly_use_quantized_vars=False,
        is_quant_finetune_mode=False,
        q_wi_fused_kernel_init=nn.initializers.constant(0),
        kv_fused_kernel_init=nn.initializers.constant(1),
        o_wo_fused_kernel_init=nn.initializers.constant(2),
    )
    batch = 2
    seq_len = 4
    hidden_dim = 13
    inputs = np.ones((batch, seq_len, hidden_dim), dtype=np.float32)
    variables = decoder_layer.init(
        random.PRNGKey(0),
        targets=inputs,
        encoded=None,
        enable_dropout=False,
    )
    q_wi_fused = variables['params']['q_wi_fused']['kernel']
    kv_fused = variables['params']['kv_fused']['kernel']
    o_wo_fused = variables['params']['o_wo_fused']['kernel']
    np.testing.assert_allclose(
        q_wi_fused, np.zeros(q_wi_fused.shape), rtol=1e-8
    )
    np.testing.assert_allclose(kv_fused, np.ones(kv_fused.shape), rtol=1e-8)
    np.testing.assert_allclose(
        o_wo_fused, np.ones(o_wo_fused.shape) * 2, rtol=1e-8
    )

  def test_quantized_decoder_shapes_fused_parallel(self):
    """Tests if the decoder parameter have the expected shapes."""
    weight_params = aqt.QuantOps.WeightParams(
        prec=8, half_shift=False, axis=None
    )
    decoder = t5_test_utils.make_parallel_fused_transformer_config(
        use_aqt=True, weight_params=weight_params
    )
    inputs = np.array(
        [
            # Batch 1.
            [183, 20, 75],
            # Batch 2.
            [392, 19, 7],
        ],
        dtype=np.int32,
    )
    output, variables = decoder.init_with_output(
        random.PRNGKey(0),
        decoder_input_tokens=inputs,
        decoder_target_tokens=inputs,  # used for mask generation
        enable_dropout=False,
    )
    params = variables['params']
    reformatted = decoder.apply({}, params, method=decoder.to_save_format)
    check_params(reformatted, 'decoder_shapes_fused_parallel_quantized.json')
    self.assertEqual(output.shape, (2, 3, 4))

    # Convert back to Flax module structure format and test again.
    params2 = decoder.apply({}, reformatted, method=decoder.from_save_format)
    output2 = decoder.apply(
        {'params': params2},
        decoder_input_tokens=inputs,
        decoder_target_tokens=inputs,  # used for mask generation
        enable_dropout=False,
    )
    output = output.astype(np.float32)
    output2 = output2.astype(np.float32)
    np.testing.assert_allclose(output, output2, rtol=1e-8)

  def test_materialized_decoder_shapes_fused_parallel(self):
    """Tests if the decoder parameter have the expected shapes."""
    weight_params = aqt.QuantOps.WeightParams(
        prec=8, half_shift=False, axis=None
    )
    # With possibly_use_quantized_vars=True the layer materializes quantized
    # variables instead of quantizing on the fly.
    decoder = t5_test_utils.make_parallel_fused_transformer_config(
        use_aqt=True,
        weight_params=weight_params,
        possibly_use_quantized_vars=True,
        is_quant_finetune_mode=False,
    )
    inputs = np.array(
        [
            # Batch 1.
            [183, 20, 75],
            # Batch 2.
            [392, 19, 7],
        ],
        dtype=np.int32,
    )
    _, params = decoder.init_with_output(
        random.PRNGKey(0),
        decoder_input_tokens=inputs,
        decoder_target_tokens=inputs,  # used for mask generation
        enable_dropout=False,
    )
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(
            params['params'], params['params_axes']
        ),
        get_params('decoder_params_axes_fused_parallel_quantized.json'),
    )

  def test_dense_general_factory_parallel_fused(self):
    """Checks that an explicit DenseGeneral factory matches the default."""
    dtype = jnp.bfloat16
    num_attn_heads = 8
    num_features = 13
    make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
    make_layer_norm = layer_norm.T5LayerNorm
    bias_init = nn.initializers.normal(stddev=1.0)

    def _make_mq_attention(num_attn_heads, dtype):
      """First test configuration for attention."""
      return dense_attention.MultiQueryDotProductAttention(
          num_heads=num_attn_heads,
          dtype=dtype,
          qkv_features=512,
          out_features=num_features,
          head_dim=None,
          kernel_init=nn.initializers.variance_scaling(1.0, 'fan_in', 'normal'),
          bias_init=bias_init,
          use_bias=False,
          broadcast_dropout=True,
          dropout_rate=0.1,
          rescale_logits=True,
      )

    def _make_fusion_mlp(dtype):
      """First test configuration for the MLP."""
      return dense.MlpBlock(
          use_bias=False,
          intermediate_dim=2048,
          out_dim=13,
          precomputed_intermediates=True,
          fuse_kernels=False,
          activations=('swish', 'linear'),
          kernel_init=nn.initializers.variance_scaling(
              1.0, 'fan_in', 'truncated_normal'
          ),
          bias_init=bias_init,
          intermediate_dropout_rate=0.1,
          final_dropout_rate=0.1,
          dtype=dtype,
      )

    def _make_relative_position_bias(
        num_attn_heads: int, dtype: Any
    ) -> relative_position_biases.RelativePositionBiases:
      return relative_position_biases.RelativePositionBiases(
          num_buckets=32,
          max_distance=128,
          num_heads=num_attn_heads,
          dtype=dtype,
          embedding_init=nn.initializers.variance_scaling(
              1.0, 'fan_avg', 'uniform'
          ),
      )

    # Build the same layer once with the default class and once with an
    # explicit factory returning dense.DenseGeneral; parameters must match.
    variables_arr = []
    for dense_general_cls_factory in [None, lambda: dense.DenseGeneral]:
      decoder_layer = parallel_fused_decoder.ParallelFusedDecoderLayer(
          self_attention=_make_mq_attention(num_attn_heads, dtype),
          mlp=_make_fusion_mlp(dtype),
          dropout_factory=make_dropout,
          layer_norm_factory=make_layer_norm,
          relative_position_bias_factory=(
              lambda: _make_relative_position_bias(num_attn_heads, dtype)
          ),
          use_aqt=False,
          weight_params=None,
          possibly_use_quantized_vars=False,
          is_quant_finetune_mode=False,
          q_wi_fused_kernel_init=nn.initializers.constant(0),
          kv_fused_kernel_init=nn.initializers.constant(1),
          o_wo_fused_kernel_init=nn.initializers.constant(2),
          dense_general_cls_factory=dense_general_cls_factory,
      )
      batch = 2
      seq_len = 4
      hidden_dim = 13
      inputs = np.ones((batch, seq_len, hidden_dim), dtype=np.float32)
      variables = decoder_layer.init(
          random.PRNGKey(0),
          targets=inputs,
          encoded=None,
          enable_dropout=False,
      )
      variables_arr.append(variables)

    np.testing.assert_allclose(
        variables_arr[0]['params']['q_wi_fused']['kernel'],
        variables_arr[1]['params']['q_wi_fused']['kernel'],
    )
    np.testing.assert_allclose(
        variables_arr[0]['params']['kv_fused']['kernel'],
        variables_arr[1]['params']['kv_fused']['kernel'],
    )
    np.testing.assert_allclose(
        variables_arr[0]['params']['o_wo_fused']['kernel'],
        variables_arr[1]['params']['o_wo_fused']['kernel'],
    )
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| 12,093 | 33.067606 | 83 | py |
flaxformer | flaxformer-main/flaxformer/architectures/longt5/tensor_utils.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LongT5 utilities for transformering tensors (JAX arrays)."""
from typing import Optional
import jax
import jax.numpy as jnp
import numpy as np
from flaxformer.types import Array
def pad_to_multiple(array: Array,
                    factor: int,
                    axis: int,
                    mode: Optional[str] = 'constant',
                    constant_values=0) -> Array:
  """Pads the end of `axis` so its length becomes a multiple of `factor`.

  Padding is appended only, never prepended. When the axis length is already
  a multiple of `factor` the array passes through unchanged (apart from the
  `jnp.asarray` conversion).

  Args:
    array: Array with rank >= 1 to pad.
    factor: Positive integer multiple to pad up to.
    axis: Axis of `array` to pad; negative values are allowed.
    mode: Padding mode forwarded to `jnp.pad`. Defaults to 'constant'.
    constant_values: Pad value used when `mode` is 'constant'. Defaults to 0.

  Returns:
    The padded Array.
  """
  array = jnp.asarray(array)
  if factor < 1:
    raise ValueError(f'`factor` must be positive but got {factor}.')
  rank = array.ndim
  if not -rank <= axis < rank:
    raise ValueError(
        f'`axis` ({axis}) out of bounds for `array` rank ({rank}).')
  # Distance to the next multiple of `factor` (0 if already aligned).
  trailing = -array.shape[axis] % factor
  pad_width = [(0, trailing) if dim == axis % rank else (0, 0)
               for dim in range(rank)]
  if mode == 'constant':
    # Only 'constant' mode accepts the `constant_values` keyword.
    return jnp.pad(array, pad_width, mode=mode,
                   constant_values=constant_values)
  return jnp.pad(array, pad_width, mode=mode)
def split_into_blocks(array: Array,
                      block_len: int,
                      axis: int,
                      pad_value=0) -> Array:
  """Reshapes `axis` into (num_blocks, block_len), padding the tail if needed.

  The axis is first padded at its end with `pad_value` to the next multiple
  of `block_len`, then reshaped into two axes.

  Args:
    array: Array of shape [..., axis_len, ...].
    block_len: Positive integer size of each block.
    axis: Axis of `array` to split; negative values are allowed.
    pad_value: Scalar used to pad the final partial block. Must be the same
      type as `array`. Defaults to 0.

  Returns:
    Array of shape [..., num_blocks, block_len, ...] with
    num_blocks = ceil(axis_len / block_len).
  """
  array = jnp.asarray(array)
  if block_len < 1:
    raise ValueError(f'`block_len` must be positive but got {block_len}.')
  rank = array.ndim
  if axis < -rank or axis >= rank:
    raise ValueError(
        f'`axis` ({axis}) out of bounds for `array` rank ({rank}).')
  axis = axis + rank if axis < 0 else axis
  # Append `pad_value`s so the axis length divides evenly into blocks.
  tail = -array.shape[axis] % block_len
  pad_width = [(0, 0)] * rank
  pad_width[axis] = (0, tail)
  padded = jnp.pad(array, pad_width, constant_values=pad_value)
  num_blocks = padded.shape[axis] // block_len
  blocked_shape = (*array.shape[:axis], num_blocks, block_len,
                   *array.shape[axis + 1:])
  return padded.reshape(blocked_shape)
def concat_3_blocks(blocked_seq: Array,
                    block_axis: int,
                    seq_axis: int,
                    pad_value=0) -> Array:
  """Widens each block with its left and right neighbors along `seq_axis`.

  Given a blocked sequence (see `split_into_blocks`), every block is
  augmented to cover [previous block, itself, next block] so each token can
  attend to all tokens up to `block_len` away. The first (last) block gets a
  `pad_value`-filled block on its left (right).

  Args:
    blocked_seq: [..., num_blocks, block_len, ...] shaped Array.
    block_axis: integer axis of the `num_blocks` dimension.
    seq_axis: integer axis of the `block_len` dimension.
    pad_value: Scalar pad for the two boundary blocks. Defaults to 0.

  Returns:
    Array of shape [..., num_blocks, 3 * block_len, ...].
  """
  blocked_seq = jnp.asarray(blocked_seq)
  ndim = blocked_seq.ndim
  pad_width = [(1, 1) if d == block_axis % ndim else (0, 0)
               for d in range(ndim)]
  # [..., num_blocks + 2, block_len, ...]
  padded = jnp.pad(blocked_seq, pad_width, constant_values=pad_value)
  num_blocks = blocked_seq.shape[block_axis]

  def shifted_view(offset):
    """Window of `num_blocks` consecutive blocks starting at `offset`."""
    index = [slice(None)] * ndim
    index[block_axis] = slice(offset, offset + num_blocks)
    return padded[tuple(index)]

  # offset 0 / 1 / 2 == left neighbor / center / right neighbor.
  return jnp.concatenate(
      [shifted_view(offset) for offset in range(3)], axis=seq_axis)
def concat_3_blocks_one_hot(blocked_seq: Array,
                            block_axis: int,
                            seq_axis: int,
                            pad_value=0) -> Array:
  """Concatenates 3 consecutive blocks for each input block for local attention.

  This is an alternative implementation to `concat_3_blocks` that should
  return the same output. It is slightly slower for typical LongT5
  configurations but is substantially faster when training with `scan` due
  to some current inefficiencies in the XLA:SPMD compilation.

  Args:
    blocked_seq: [..., num_blocks, block_len, ...] shaped Array.
    block_axis: integer axis of the `num_blocks` dimension.
    seq_axis: integer axis of the `block_len` dimension.
    pad_value: The scalar pad value to use for the first and last input blocks.
      Defaults to 0.

  Returns:
    Array of shape [..., num_blocks, 3 * block_len, ...].
  """
  # TODO: This implementation follows a roll, then concat, then slice
  # with one-hot `tensordot` strategy. It may be worth considering other
  # alternative strategies like "slice with one-hot then concat" or
  # "one-hot-like convolutions" which could turn out to be faster if we try
  # them out and benchmark.
  blocked_seq = jnp.asarray(blocked_seq)
  num_blocks = blocked_seq.shape[block_axis]
  pad_width = [(0, 0)] * blocked_seq.ndim
  pad_width[block_axis] = (1, 1)
  # [..., num_blocks + 2, block_len, ...]
  padded_blocked_seq = jnp.pad(
      blocked_seq, pad_width, constant_values=pad_value)
  blocks_list = []
  # Left block: padded sequence as-is, so block i sees original block i - 1.
  blocks_list.append(padded_blocked_seq)
  # Center block: rolled by -1, so block i sees original block i.
  blocks_list.append(jnp.roll(padded_blocked_seq, -1, axis=block_axis))
  # Right block: rolled by -2, so block i sees original block i + 1.
  # NOTE: `roll` wraps around, but the wrapped entries only land in the two
  # trailing blocks that are dropped by the one-hot contraction below.
  blocks_list.append(jnp.roll(padded_blocked_seq, -2, axis=block_axis))
  # [..., num_blocks + 2, 3 * block_len, ...]
  result = jnp.concatenate(blocks_list, axis=seq_axis)

  # Use one-hot `tensordot` to drop the last two blocks so that the final shape
  # is [..., num_blocks, 3 * block_len, ...]. We avoid simple slicing here
  # since it results in poor XLA:SPMD compilations when training a model with
  # `scan`.
  # [num_blocks, num_blocks + 2]
  one_hot_matrix = jnp.eye(num_blocks, num_blocks + 2, dtype=result.dtype)
  # [..., 3 * block_len, ..., num_blocks]
  result = jnp.tensordot(result, one_hot_matrix, axes=([block_axis], [1]))
  # [..., num_blocks, 3 * block_len, ...]
  result = jnp.moveaxis(result, -1, block_axis)
  return result
def make_3block_local_att_mask(block_len: int,
                               input_mask: Array,
                               segment_ids: Optional[Array] = None,
                               use_full_block_att: bool = False,
                               use_causal_mask: bool = False) -> Array:
  """Makes a 3-blocked local attention mask.

  For example, let's say `block_len` is 2 and we have the following
  `input_mask` representing a single example containing 3 tokens padded to
  maximum `seq_len` 5:
  [[1, 1, 1, 0, 0]].
  With other arguments kept as defaults, the output is:
  [[
      [[0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0]],  #
      [[0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]],  #
      [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],  #
  ]]
  The output has `num_blocks = 3`, and each non-padding token is constrained
  to attend locally (local_radius = block_len - 1 = 1). Padding tokens have
  uniformly 0 mask values.

  Args:
    block_len: integer length of each block.
    input_mask: [batch, seq_len] shaped boolean Array.
    segment_ids: optional [batch, seq_len] shaped integer Array.
    use_full_block_att: if True, attention is not explicitly masked to prevent
      reaching beyond `block_len` (enforcing `local_radius`) and may reach any
      other tokens in the 3 blocks. Default False.
    use_causal_mask: if True, the attention is explicitly masked to prevent
      attending to tokens with positive relative position to the current
      query token. Default False.

  Returns:
    [batch, num_blocks, block_len, 3 * block_len] boolean Array with `True`
    for valid attention pairs and `False` for masking attention.
  """
  # Queries: [batch, num_blocks, block_len] shape.
  input_mask_blocked = split_into_blocks(
      input_mask, block_len, axis=-1, pad_value=False)
  # Keys (each block plus its neighbors): [batch, num_blocks, 3 * block_len].
  input_mask_3blocked = concat_3_blocks(
      input_mask_blocked, block_axis=-2, seq_axis=-1, pad_value=False)
  # Valid (query, key) pairs: both tokens must be non-padding.
  # [batch, num_blocks, block_len, 3 * block_len] shape.
  attention_mask = jnp.logical_and(input_mask_blocked[..., jnp.newaxis],
                                   input_mask_3blocked[..., jnp.newaxis, :])
  if not use_full_block_att:
    # Enforce that tokens are not allowed to attend farther than `local_radius`.
    # Note that `block_len = local_radius + 1`.
    # [block_len, 3 * block_len] shape
    relative_position = make_3block_relative_position(block_len)
    locality_mask = jnp.abs(relative_position) < block_len
    # [1, 1, block_len, 3 * block_len] shape
    locality_mask = locality_mask[jnp.newaxis, jnp.newaxis, :, :]
    attention_mask = jnp.logical_and(attention_mask, locality_mask)
  if use_causal_mask:
    # Enforce that tokens are not allowed to attend to tokens appearing 'later'
    # in the sequence
    # [block_len, 3 * block_len] shape
    relative_position = make_3block_relative_position(block_len)
    causal_mask = relative_position <= 0
    # [1, 1, block_len, 3 * block_len] shape
    causal_mask = causal_mask[jnp.newaxis, jnp.newaxis, :, :]
    attention_mask = jnp.logical_and(attention_mask, causal_mask)
  if segment_ids is None:
    return attention_mask
  # Restrict attention to within-segment pairs for packed examples. Padding
  # uses a sentinel id (-1) which can never match a real (non-negative) id.
  padding_segment_id = -1
  # [batch, num_blocks, block_len] shape.
  segment_ids_blocked = split_into_blocks(
      segment_ids, block_len, axis=-1, pad_value=padding_segment_id)
  # [batch, num_blocks, 3 * block_len] shape.
  segment_ids_3blocked = concat_3_blocks(
      segment_ids_blocked,
      block_axis=-2,
      seq_axis=-1,
      pad_value=padding_segment_id)
  # [batch, num_blocks, block_len, 3 * block_len] shape.
  segment_id_att_mask = jnp.equal(segment_ids_blocked[..., jnp.newaxis],
                                  segment_ids_3blocked[..., jnp.newaxis, :])
  return jnp.logical_and(attention_mask, segment_id_att_mask)
def make_3block_relative_position(block_len: int) -> np.ndarray:
  """Makes 3-blocked relative positions for local attention.

  Rows index query positions within the center block; columns index key
  positions across the [left, center, right] block triple.

  Args:
    block_len: integer length of each block.

  Returns:
    [block_len, 3 * block_len] integer Array of relative positions.

  Note: The sign convention we use is that the relative position is the
  position of the key minus the position of the query; i.e. it is the query
  position which receives a minus sign.
  """
  key_positions = np.arange(3 * block_len, dtype=np.int32)
  # Queries live in the middle block: positions block_len .. 2*block_len - 1.
  query_positions = np.arange(block_len, 2 * block_len, dtype=np.int32)
  return key_positions[np.newaxis, :] - query_positions[:, np.newaxis]
def make_custom_3block_relative_position(block_len: int,
                                         positions: Array) -> Array:
  """Makes customized 3-blocked relative positions for local attention.

  Unlike `make_3block_relative_position`, this function takes the
  `positions` input to customize the relative attention pattern, which may
  be different for each example.

  Args:
    block_len: integer length of each block.
    positions: [batch, seq_len] shaped integer Array.

  Returns:
    [batch, num_blocks, block_len, 3 * block_len] integer Array of relative
    positions.

  Note: The sign convention we use is that the relative position is the
  position of the key minus the position of the query; i.e. it is the query
  position which receives a minus sign.
  """
  positions = jnp.asarray(positions)
  # Sentinel for padded slots; relative positions involving it are arbitrary
  # and expected to be masked out by the caller.
  padding_position = -1
  # Query positions: [batch, num_blocks, block_len] shape.
  positions_blocked = split_into_blocks(
      positions, block_len, axis=-1, pad_value=padding_position)
  # Key positions: [batch, num_blocks, 3 * block_len] shape.
  positions_3blocked = concat_3_blocks(
      positions_blocked, block_axis=-2, seq_axis=-1, pad_value=padding_position)
  # key position minus query position:
  # [batch, num_blocks, block_len, 3 * block_len] shape.
  return (positions_3blocked[..., jnp.newaxis, :] -
          positions_blocked[..., jnp.newaxis])
def constant_init(value, dtype=jnp.float32):
  """Builds an initializer whose output arrays are filled with `value`.

  Args:
    value: Scalar fill value.
    dtype: Default dtype of the produced arrays; the returned initializer
      also accepts a `dtype` override at call time.

  Returns:
    An `init(key, shape, dtype)`-style initializer. The RNG key is ignored,
    so initialization is deterministic.
  """
  def init(unused_key, shape, dtype=dtype):
    # Multiply ones by `value` (rather than `jnp.full`) to keep the original
    # dtype-promotion semantics.
    return value * jnp.ones(shape, dtype)

  return init
def positions_from_segment_ids(segment_ids: Array) -> Array:
  """Computes packed positions from segment_ids.

  See the following for an example of how packed inputs are represented
  by `segment_ids` and `positions`:
  https://github.com/google/seqio/blob/main/seqio/utils.py#L292

  This functions derives the positions based on the segment_ids alone.

  Args:
    segment_ids: <int32>[batch, length] array of segmentation info for packed
      examples.

  Returns:
    <int32>[batch, length] array of position info for packed examples.
  """
  segment_ids = jnp.asarray(segment_ids)
  length = segment_ids.shape[-1]
  # Shift right by one (filling with 1) and diff: nonzero entries flag
  # positions where a new segment begins, excluding the very first segment.
  shifted = jnp.pad(segment_ids[:, :-1], ((0, 0), (1, 0)), constant_values=1)
  new_segment = segment_ids - shifted
  indices = jnp.arange(length)
  # Running maximum of the index at which the current segment started.
  segment_start = jax.lax.cummax(new_segment * indices, axis=1)
  is_token = segment_ids > 0
  # Position = absolute index minus segment start; zeroed on padding tokens.
  return (indices - segment_start) * is_token
| 14,661 | 34.936275 | 117 | py |
flaxformer | flaxformer-main/flaxformer/architectures/longt5/long_attention.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Long attention classes and mask/weighting functions."""
import abc
import functools
from typing import Any, Callable, Optional, Tuple, Union
from flax import linen as nn
from flax.linen import initializers
from flax.linen import partitioning
from flax.linen.linear import default_kernel_init
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
from flaxformer.architectures.longt5 import relative_position_biases_general
from flaxformer.architectures.longt5 import tensor_utils
from flaxformer.components import dense
from flaxformer.components import embedding
from flaxformer.components import layer_norm
from flaxformer.components.attention import dense_attention # GOOGLE-INTERNAL # pylint: disable=line-too-long
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
from flaxformer.types import PRNGKey
# Short alias: the fully qualified module path is verbose and this class is
# referenced throughout this file (both as a type and for its static methods).
RelativePositionBiasesGeneral = (
    relative_position_biases_general.RelativePositionBiasesGeneral)
def _softmax_with_extra_logit(
    x: Array,
    axis: Optional[Union[int, Tuple[int, ...]]] = -1,
) -> Array:
  """Softmax with an implicit extra logit fixed at zero.

  Kept for compatibility with some previously trained models. Equivalent to
  adding one to the softmax denominator; in the attention context this allows
  a query to attend to nothing.

  Args:
    x: input to softmax
    axis: the axis or axes along which the softmax should be computed. Either
      an integer or a tuple of integers.

  Returns:
    A tensor with the same shape as x.
  """
  # Clamp the stabilizing shift at zero so the virtual zero logit is also
  # protected from overflow after the shift.
  shift = jnp.maximum(lax.stop_gradient(x.max(axis, keepdims=True)), 0)
  exp_shifted = jnp.exp(x - shift)
  # The virtual zero logit contributes exp(0 - shift) to the denominator.
  total = exp_shifted.sum(axis, keepdims=True) + jnp.exp(-shift)
  return exp_shifted / total
# ------------------------------------------------------------------------------
# Long attention layers.
# ------------------------------------------------------------------------------
class LongSelfAttention(abc.ABC):
  """Abstract interface for long self-attention layers.

  Concrete implementations are expected to also be nn.Module instances.
  """

  @abc.abstractmethod
  def __call__(self,
               inputs: Array,
               inputs_mask: Array,
               *,
               segment_ids: Optional[Array] = None,
               positions: Optional[Array] = None,
               enable_dropout: bool = True) -> Array:
    """Applies long self-attention over `inputs`.

    See the following for an example of how packed inputs are represented
    by `segment_ids` and `positions`:
    https://github.com/google/seqio/blob/main/seqio/utils.py#L292

    Args:
      inputs: <float>[batch, length, emb_dim] array of embeddings to
        self-attend over.
      inputs_mask: <bool>[batch, length] array; True for non-padding tokens,
        False for padding.
      segment_ids: Optional <int32>[batch, length] encoder input segmentation
        info for packed examples.
      positions: Optional <int32>[batch, length] encoder input subsequence
        positions for packed examples.
      enable_dropout: Enables dropout if set to True.

    Returns:
      <float>[batch, length, out_dim] result of self attention.
    """
    raise NotImplementedError
class EncoderLocalSelfAttention(nn.Module, LongSelfAttention):
  """Local bidirectional sliding window self attention.

  This implements self-attention analogous to `MultiHeadDotProductAttention`
  but only applied to a local window of `local_radius` tokens to the left and to
  the right of each token. Unlike a "blocked" approach, this is a "sliding
  window" approach that can grow the receptive field with multiple stacks.

  Attributes:
    num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
      should be divisible by the number of heads.
    local_radius: how many tokens to the left/right for each token to locally
      self-attend to. For example, a value of 1 would allow each token to only
      attend to 1 token to the left and 1 token to the right of it. TPU-friendly
      values include 84, 127, 169, 255, with 127 being the LongT5 default.
    dtype: the dtype of the computation (default: float32)
    qkv_features: dimension of the key, query, and value.
    head_dim: dimension of each head. If unspecified, it defaults to
      qkv_features // num_heads.
    out_features: dimension of the last projection
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    dropout_rate: dropout rate
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    kernel_init: initializer for the kernel of the Dense layers.
    bias_init: initializer for the bias of the Dense layers.
    use_bias: bool: whether pointwise QKVO dense transforms use bias.
    rescale_logits: bool. Whether to rescale `query` logits by 1/sqrt(depth_kq).
    use_extra_logit: whether to include a virtual extra logit equal to zero.
    float32_logits: bool, if True then compute logits in float32 to avoid
      numerical issues with bfloat16.
    output_projection: Project the output of `attention_fn` to `out_features`.
      If False, returns the output of `attention_fn` without a projection.
    split_head_kernel: whether to store QKVO variables with a split head
      dimension.
    kernels_to_fuse: Which kernels to fuse, if any.
    use_rotary_embedding: whether to apply rotary position embeddings to the
      query and key projections before computing attention.
    rotary_embedding_max_timescale: maximum timescale for the fixed sinusoidal
      tables used to build the rotary position embeddings.
    concat_3_blocks_implementation: Optional string specifying an alternative
      (but functionally equivalent) local sparsity implementation. Leave as
      `None` to use the default implementation. The only current alternative is
      'onehot', which is more efficient when training with `scan`.
    relpos_bias: `RelativePositionBiasesGeneral` module to use for relative
      attention.
  """
  num_heads: int
  local_radius: int = 127  # TPU-friendly values include 84, 127, 169, 255
  dtype: DType = jnp.float32
  qkv_features: Optional[int] = None
  head_dim: Optional[int] = None
  out_features: Optional[int] = None
  broadcast_dropout: bool = True
  dropout_rate: float = 0.
  precision: Any = None
  kernel_init: Initializer = default_kernel_init # pytype: disable=annotation-type-mismatch # jax-types
  bias_init: Initializer = initializers.zeros
  use_bias: bool = True
  rescale_logits: bool = False
  use_extra_logit: bool = False
  float32_logits: bool = False
  output_projection: bool = True
  split_head_kernel: bool = False
  kernels_to_fuse: Optional[str] = None # Only 'qkv' is supported.
  use_rotary_embedding: bool = False
  rotary_embedding_max_timescale: float = 1e4
  concat_3_blocks_implementation: Optional[str] = None
  relpos_bias: Optional[RelativePositionBiasesGeneral] = None

  @nn.compact
  def __call__(self,
               inputs: Array,
               inputs_mask: Array,
               *,
               segment_ids: Optional[Array] = None,
               positions: Optional[Array] = None,
               enable_dropout: bool = True) -> Array:
    """Calls the attention layer (see `LongSelfAttention`)."""
    validate_long_attention_call_parameter_shapes(inputs, inputs_mask,
                                                  segment_ids, positions)

    block_len = self.local_radius + 1

    # [batch, num_blocks, 1, block_len, 3 * block_len] shape.
    mask = tensor_utils.make_3block_local_att_mask(
        block_len, inputs_mask, segment_ids)[:, :, jnp.newaxis, :, :]

    # Convert the boolean attention mask into an additive bias term.
    attention_bias = mask_to_bias(mask, self.dtype)

    if self.relpos_bias:
      # [block_len, 3 * block_len]
      relative_position = tensor_utils.make_3block_relative_position(block_len)
      rp_bucket = RelativePositionBiasesGeneral.relative_position_bucket(
          relative_position,
          bidirectional=True,
          num_buckets=self.relpos_bias.num_buckets,
          max_distance=self.relpos_bias.max_distance)
      # [1, 1, num_heads, block_len, 3 * block_len]
      bias = self.relpos_bias(rp_bucket)[jnp.newaxis, ...] # pylint: disable=not-callable
      attention_bias += bias

    features = self.out_features or inputs.shape[-1]
    qkv_features = self.qkv_features or inputs.shape[-1]
    if self.head_dim is None:
      head_dim = qkv_features // self.num_heads
    else:
      head_dim = self.head_dim

    if self.kernels_to_fuse and not self.split_head_kernel:
      raise ValueError('Un-reshaped kernels are required when using QKV fused '
                       'kernel optimization.')

    # Is attention logit rescaling explicit or folded into initializer?
    if self.rescale_logits:
      query_init = self.kernel_init
    else:
      if self.kernels_to_fuse:
        raise ValueError('Cannot fold in logit normalization to query '
                         'initializer when using fused kernels.')
      # Fold the 1/sqrt(head_dim) logit scaling into the query initializer
      # (equivalent under Adafactor; see note in `_local_self_attention`).
      depth_scaling = jnp.sqrt(head_dim).astype(self.dtype)
      query_init = lambda *args: self.kernel_init(*args) / depth_scaling

    make_dense = functools.partial(
        dense.DenseGeneral,
        axis=-1,
        bias_init=self.bias_init,
        use_bias=self.use_bias,
        dtype=self.dtype,
        precision=self.precision,
        reshape_kernel=not self.split_head_kernel,
    )

    # Project inputs to multi-headed q/k/v
    # dimensions are then [batch..., length, num_heads, features_per_head]
    if self.kernels_to_fuse is None:
      query = make_dense(
          kernel_init=query_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='query')(
              inputs)
      key = make_dense(
          kernel_init=self.kernel_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='key')(
              inputs)
      value = make_dense(
          kernel_init=self.kernel_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='value')(
              inputs)
    elif self.kernels_to_fuse == 'qkv':
      # Single fused projection; slice the leading "stack" axis into q/k/v.
      qkv = make_dense(
          kernel_init=self.kernel_init,
          features=(3, self.num_heads, head_dim),
          kernel_axis_names=['embed', 'stack', 'heads', 'kv'],
          name='qkv_fused')(
              inputs)
      query = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 0, 1, -3), -3)
      key = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 1, 1, -3), -3)
      value = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 2, 1, -3), -3)
    else:
      raise ValueError(
          f'Unsupported kernel fusion mode: "{self.kernels_to_fuse}"')

    dropout_rng = None
    if enable_dropout and self.dropout_rate > 0.:
      dropout_rng = self.make_rng('dropout')

    if self.use_rotary_embedding:
      # Rotate query/key projections using fixed sinusoidal tables.
      length = inputs.shape[-2]
      sin, cos = embedding.generate_fixed_pos_embedding(
          head_dim, length, max_timescale=self.rotary_embedding_max_timescale
      )
      query, key = embedding.apply_rotary_embedding(
          query, key, cos, sin, decode=False, rotary_index=None
      )

    # Apply attention.
    x = _local_self_attention(
        query,
        key,
        value,
        local_radius=self.local_radius,
        bias=attention_bias,
        broadcast_dropout=self.broadcast_dropout,
        rescale_logits=self.rescale_logits,
        dropout_rng=dropout_rng,
        dropout_rate=self.dropout_rate,
        enable_dropout=enable_dropout,
        dtype=self.dtype,
        precision=self.precision,
        use_extra_logit=self.use_extra_logit,
        float32_logits=self.float32_logits,
        concat_3_blocks_implementation=self.concat_3_blocks_implementation) # pytype: disable=wrong-keyword-args

    if not self.output_projection:
      return x

    # Back to the original inputs dimensions.
    out = dense.DenseGeneral(
        features=features,
        axis=(-2, -1),
        kernel_init=self.kernel_init,
        bias_init=self.bias_init,
        use_bias=self.use_bias,
        dtype=self.dtype,
        precision=self.precision,
        reshape_kernel=not self.split_head_kernel,
        kernel_axis_names=['heads', 'kv', 'embed'],
        name='out')( # pytype: disable=wrong-arg-types
            x)
    return out
def _local_self_attention(query: Array,
                          key: Array,
                          value: Array,
                          local_radius: int,
                          bias: Optional[Array] = None,
                          broadcast_dropout: bool = True,
                          rescale_logits: bool = False,
                          dropout_rng: Optional[PRNGKey] = None,
                          dropout_rate: float = 0.,
                          enable_dropout: bool = True,
                          dtype: DType = jnp.float32,
                          precision: Optional[lax.Precision] = None,
                          use_extra_logit: bool = False,
                          float32_logits: bool = False,
                          concat_3_blocks_implementation: Optional[str] = None):
  """Sliding window local self attention.

  This is analogous to `dot_product_attention` but only permits attention
  between tokens that are within `local_radius` of each other. This reduces
  length-dependent complexity from O(N^2) to O(NR), where N is the sequence
  length and R is `local_radius`. Only self-attention is supported, not
  cross attention.

  The current implementation mirrors the original implementation used in ETC
  (https://arxiv.org/abs/2004.08483).

  Args:
    query: queries for calculating attention with shape of `[batch..., length,
      num_heads, qk_depth_per_head]`.
    key: keys for calculating attention with shape of `[batch..., length,
      num_heads, qk_depth_per_head]`.
    value: values to be used in attention with shape of `[batch..., length,
      num_heads, v_depth_per_head]`.
    local_radius: How many tokens to the left/right for input tokens to locally
      self-attend to. For example, a value of 1 would allow each token to only
      attend to 1 token to the left and 1 token to the right of it. TPU-friendly
      values include 84, 127, 169, and 255 since the internal `block_len` will
      be `local_radius + 1`.
    bias: bias for the attention weights. This should be broadcastable to the
      shape `[batch..., num_blocks, num_heads, block_len, 3 * block_len]`. This
      can be used for incorporating causal masks, padding masks, proximity bias,
      etc. Note that `bias` must be responsible for enforcing that tokens do
      not attend beyond `local_radius` since the 3-block approach technically
      permits attention to tokens up to `2 * local_radius + 1` away.
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    rescale_logits: bool. Whether to rescale `query` logits by 1/sqrt(depth_kq).
    dropout_rng: JAX PRNGKey: to be used for dropout
    dropout_rate: dropout rate
    enable_dropout: bool, enable_dropout or not (to apply dropout)
    dtype: the dtype of the computation (default: float32)
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    use_extra_logit: whether to include a virtual extra logit equal to zero.
    float32_logits: bool, if True then compute logits in float32 to avoid
      numerical issues with bfloat16.
    concat_3_blocks_implementation: Optional string specifying an alternative
      implementation to use. Leave as `None` to use the default implementation.
      The only current alternative is 'onehot', which is more efficient when
      training with `scan`.

  Returns:
    Output of shape `[batch..., length, num_heads, v_depth_per_head]`.
  """
  assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'
  assert query.shape[:-3] == key.shape[:-3] == value.shape[:-3], (
      'q, k, v batch dims must match.')
  assert query.shape[-2] == key.shape[-2] == value.shape[-2], (
      'q, k, v num_heads must match.')
  assert query.shape[-3] == key.shape[-3] == value.shape[-3], (
      'q, k, v lengths must match.')
  assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'

  concat_3_blocks = _get_concat_3_blocks_implementation(
      concat_3_blocks_implementation)

  # calculate attention matrix
  # NOTE: T5 does not explicitly rescale the attention logits by
  # 1/sqrt(depth_kq)! This is folded into the initializers of the
  # linear transformations, which is equivalent under Adafactor.
  if rescale_logits:
    depth = query.shape[-1]
    query = query / jnp.sqrt(depth).astype(dtype)

  # split into blocks
  seq_len = query.shape[-3]
  block_len = local_radius + 1
  # [batch..., num_blocks, block_len, num_heads, *_depth_per_head] shape
  query = tensor_utils.split_into_blocks(query, block_len, axis=-3)
  key = tensor_utils.split_into_blocks(key, block_len, axis=-3)
  value = tensor_utils.split_into_blocks(value, block_len, axis=-3)

  # concatenate 3 blocks for keys and values
  # [batch..., num_blocks, 3 * block_len, num_heads, *_depth_per_head] shape
  key = concat_3_blocks(key, block_axis=-4, seq_axis=-3)
  value = concat_3_blocks(value, block_axis=-4, seq_axis=-3)

  # Casting logits and softmax computation for float32 for model stability.
  if float32_logits:
    query = query.astype(jnp.float32)
    key = key.astype(jnp.float32)

  # Each query block attends only to its own 3-block key window, so the
  # einsum cost is linear in the number of blocks.
  # [batch..., num_blocks, num_heads, block_len, 3 * block_len] shape
  attn_weights = jnp.einsum(
      '...qhd,...khd->...hqk', query, key, precision=precision)

  # apply attention bias: masking, dropout, proximity bias, etc.
  if bias is not None:
    attn_weights = attn_weights + bias.astype(attn_weights.dtype)

  # normalize the attention weights
  attn_weights = (_softmax_with_extra_logit if use_extra_logit else
                  jax.nn.softmax)(attn_weights).astype(dtype)

  # apply attention dropout
  if enable_dropout and dropout_rate > 0.:
    keep_prob = 1.0 - dropout_rate
    if broadcast_dropout:
      # T5 broadcasts along the "length" dim, but unclear which one that
      # corresponds to in positional dimensions here, assuming query dim.
      dropout_shape = list(attn_weights.shape)
      dropout_shape[-2] = 1
      keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape)
      keep = jnp.broadcast_to(keep, attn_weights.shape)
    else:
      keep = random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
    multiplier = (
        keep.astype(attn_weights.dtype) / jnp.asarray(keep_prob, dtype=dtype))
    attn_weights = attn_weights * multiplier

  # compute weighted sum over values for each query position
  # [batch..., num_blocks, block_len, num_heads, v_depth_per_head] shape
  y = jnp.einsum(
      '...hqk,...khd->...qhd', attn_weights, value, precision=precision)

  # undo blocking and return results
  unblocked_output = y.reshape(y.shape[:-4] + (-1,) + y.shape[-2:])
  # Trim any padding that `split_into_blocks` added beyond `seq_len`.
  return unblocked_output[..., :seq_len, :, :]
def _get_concat_3_blocks_implementation(name: Optional[str]):
if name is None:
return tensor_utils.concat_3_blocks
elif name == 'onehot':
return tensor_utils.concat_3_blocks_one_hot
else:
raise ValueError(f'Unknown concat_3_blocks implementation: {name}')
class EtcTransientGlobalSelfAttention(nn.Module, LongSelfAttention):
  """ETC-like self-attention with transient globals only.

  This augments `EncoderLocalSelfAttention` with transiently constructed
  global tokens as side inputs to attend to in addition to local self-attention.

  The transient "global tokens" are computed as averages of the long input
  tokens in a "fixed blocks" pattern. These block-average global tokens are
  computed at each layer and thrown away after the attention operation,
  allowing simpler drop-in replacement in the Transformer API since there
  isn't a separate "global input" array carried along from layer to layer.

  The configuration can be thought of as something like ETC without g2l and g2g
  components, only l2l and l2g. (See https://arxiv.org/abs/2004.08483 for
  more about ETC.)

  Attributes:
    num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
      should be divisible by the number of heads.
    tokens_per_block: positive integer number of tokens per transient global
      token. Typical values are 16 or 32.
    local_radius: how many tokens to the left/right for each token to locally
      self-attend to. For example, a value of 1 would allow each token to only
      attend to 1 token to the left and 1 token to the right of it. TPU-friendly
      values include 84, 127, 169, 255, with 127 being the LongT5 default.
    causal: bool. Whether to causally mask attention. Default false.
    dtype: the dtype of the computation (default: float32)
    qkv_features: dimension of the key, query, and value.
    head_dim: dimension of each head. If unspecified, it defaults to
      qkv_features // num_heads.
    out_features: dimension of the last projection
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    dropout_rate: dropout rate
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    kernel_init: initializer for the kernel of the Dense layers.
    bias_init: initializer for the bias of the Dense layers.
    use_bias: bool: whether pointwise QKVO dense transforms use bias.
    rescale_logits: bool. Whether to rescale `query` logits by 1/sqrt(depth_kq).
    use_extra_logit: whether to include a virtual extra logit equal to zero.
    float32_logits: bool, if True then compute logits in float32 to avoid
      numerical issues with bfloat16.
    output_projection: Project the output of `attention_fn` to `out_features`.
      If False, returns the output of `attention_fn` without a projection.
    split_head_kernel: whether to store QKVO variables with a split head
      dimension.
    kernels_to_fuse: Which kernels to fuse, if any.
    concat_3_blocks_implementation: Optional string specifying an alternative
      (but functionally equivalent) local sparsity implementation. Leave as
      `None` to use the default implementation. The only current alternative is
      'onehot', which is more efficient when training with `scan`.
    relpos_bias: `RelativePositionBiasesGeneral` module to use for relative
      attention between input tokens (local).
    side_relpos_bias: `RelativePositionBiasesGeneral` module to use for relative
      attention from input tokens to transient globals.
  """
  num_heads: int
  tokens_per_block: int # Typical values are 16 or 32.
  local_radius: int = 127 # TPU-friendly values include 84, 127, 169, 255.
  causal: bool = False
  dtype: DType = jnp.float32
  qkv_features: Optional[int] = None
  head_dim: Optional[int] = None
  out_features: Optional[int] = None
  broadcast_dropout: bool = True
  dropout_rate: float = 0.
  precision: Any = None
  kernel_init: Initializer = default_kernel_init # pytype: disable=annotation-type-mismatch # jax-types
  bias_init: Initializer = initializers.zeros
  use_bias: bool = True
  rescale_logits: bool = False
  use_extra_logit: bool = False
  float32_logits: bool = False
  output_projection: bool = True
  split_head_kernel: bool = False
  kernels_to_fuse: Optional[str] = None # Only 'kv' is supported.
  concat_3_blocks_implementation: Optional[str] = None
  relpos_bias: Optional[RelativePositionBiasesGeneral] = None
  side_relpos_bias: Optional[RelativePositionBiasesGeneral] = None

  @nn.compact
  def __call__(self,
               inputs: Array,
               inputs_mask: Array,
               *,
               segment_ids: Optional[Array] = None,
               positions: Optional[Array] = None,
               enable_dropout: bool = True) -> Array:
    """Calls the attention layer (see `LongSelfAttention`)."""
    validate_long_attention_call_parameter_shapes(inputs, inputs_mask,
                                                  segment_ids, positions)

    block_len = self.local_radius + 1

    # [batch, num_blocks, 1, block_len, 3 * block_len] shape.
    mask = tensor_utils.make_3block_local_att_mask(
        block_len, inputs_mask, segment_ids,
        use_causal_mask=self.causal)[:, :, jnp.newaxis, :, :]

    # Convert the boolean local attention mask into an additive bias term.
    attention_bias = mask_to_bias(mask, self.dtype)

    if self.relpos_bias:
      # [block_len, 3 * block_len] shape.
      relative_position = tensor_utils.make_3block_relative_position(block_len)
      rp_bucket = RelativePositionBiasesGeneral.relative_position_bucket(
          relative_position,
          bidirectional=not self.causal,
          num_buckets=self.relpos_bias.num_buckets,
          max_distance=self.relpos_bias.max_distance)
      # [1, 1, num_heads, block_len, 3 * block_len] shape.
      bias = self.relpos_bias(rp_bucket)[jnp.newaxis, ...] # pylint: disable=not-callable
      attention_bias += bias

    # Create side attention bias.
    block_ids, global_segment_ids = make_etc_fixed_block_ids(
        self.tokens_per_block,
        inputs_mask,
        segment_ids,
        positions,
        adopt_orphan_tokens=not self.causal)
    global_seq_len = global_segment_ids.shape[-1]
    if segment_ids is None:
      segment_ids = jnp.asarray(inputs_mask, jnp.int32)
    # Tokens may only attend to global tokens from the same segment.
    # [batch, seq_len, global_seq_len] shape.
    side_mask = jnp.equal(segment_ids[..., jnp.newaxis],
                          global_segment_ids[..., jnp.newaxis, :])
    # [batch, 1, seq_len, global_seq_len] shape.
    side_mask = side_mask[..., jnp.newaxis, :, :]
    attention_side_bias = mask_to_bias(side_mask, self.dtype)
    global_positions = jnp.arange(global_seq_len)
    if self.causal:
      orphans = identify_orphan_tokens(self.tokens_per_block, inputs_mask,
                                       positions)
      # Below is a slight hack to ensure that orphan tokens can attend to the
      # global tokens. By definition, orphan tokens can attend to all global
      # tokens in their segment; so, we set their "effective" block_id to be
      # global_seq_len, as this is greater than all global_positions and will
      # thus always satisfy the causality condition.
      effective_block_ids = block_ids * (1 - orphans) + global_seq_len * orphans
      causal_side_mask = jnp.less(global_positions,
                                  effective_block_ids[..., :, jnp.newaxis])
      causal_side_mask = causal_side_mask[..., jnp.newaxis, :, :]
      causal_side_bias = mask_to_bias(causal_side_mask, self.dtype)
      attention_side_bias += causal_side_bias
    if self.side_relpos_bias is None:
      raise ValueError('`side_relpos_bias` must be given.')
    side_relative_position = _make_side_relpos(
        self.tokens_per_block,
        inputs_mask,
        segment_ids,
        positions,
        adopt_orphan_tokens=not self.causal)
    side_rp_bucket = RelativePositionBiasesGeneral.relative_position_bucket(
        side_relative_position,
        bidirectional=not self.causal,
        num_buckets=self.side_relpos_bias.num_buckets,
        max_distance=self.side_relpos_bias.max_distance)
    # [1, num_heads, batch, seq_len, global_seq_len] shape.
    side_bias = self.side_relpos_bias(side_rp_bucket) # pylint: disable=not-callable
    # [batch, num_heads, seq_len, global_seq_len] shape.
    side_bias = jnp.swapaxes(side_bias[0], -4, -3)
    attention_side_bias += side_bias

    features = self.out_features or inputs.shape[-1]
    qkv_features = self.qkv_features or inputs.shape[-1]
    if self.head_dim is None:
      head_dim = qkv_features // self.num_heads
    else:
      head_dim = self.head_dim

    if self.kernels_to_fuse and not self.split_head_kernel:
      raise ValueError('Un-reshaped kernels are required when using QKV fused '
                       'kernel optimization.')

    # Is attention logit rescaling explicit or folded into initializer?
    if self.rescale_logits:
      query_init = self.kernel_init
    else:
      if self.kernels_to_fuse:
        raise ValueError('Cannot fold in logit normalization to query '
                         'initializer when using fused kernels.')
      depth_scaling = jnp.sqrt(head_dim).astype(self.dtype)
      query_init = lambda *args: self.kernel_init(*args) / depth_scaling

    # Create global aggregates.
    global_inputs = _create_global_aggregates(inputs, block_ids, global_seq_len)
    global_inputs = layer_norm.T5LayerNorm(dtype=self.dtype)(global_inputs)

    make_dense = functools.partial(
        dense.DenseGeneral,
        axis=-1,
        bias_init=self.bias_init,
        use_bias=self.use_bias,
        dtype=self.dtype,
        precision=self.precision,
        reshape_kernel=not self.split_head_kernel,
    )

    # Project inputs to multi-headed q/k/v
    # dimensions are then [batch..., length, num_heads, features_per_head]
    if self.kernels_to_fuse is None:
      query = make_dense(
          kernel_init=query_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='query')(
              inputs)
      key_dense = make_dense(
          kernel_init=self.kernel_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='key')
      value_dense = make_dense(
          kernel_init=self.kernel_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='value')
      key = key_dense(inputs)
      value = value_dense(inputs)
      # Share global key/value projections with long input for now.
      side_key = key_dense(global_inputs)
      side_value = value_dense(global_inputs)
    elif self.kernels_to_fuse == 'kv':
      query = make_dense(
          kernel_init=query_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='query')(
              inputs)
      kv_dense = make_dense(
          kernel_init=self.kernel_init,
          features=(2, self.num_heads, head_dim),
          kernel_axis_names=['embed', 'stack', 'heads', 'kv'],
          name='kv_fused')
      kv = kv_dense(inputs)
      key = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 0, 1, -3), -3)
      value = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 1, 1, -3), -3)
      # Share global key/value projections with long input for now.
      side_kv = kv_dense(global_inputs)
      side_key = jnp.squeeze(lax.dynamic_slice_in_dim(side_kv, 0, 1, -3), -3)
      side_value = jnp.squeeze(lax.dynamic_slice_in_dim(side_kv, 1, 1, -3), -3)
    else:
      raise ValueError(
          f'Unsupported kernel fusion mode: "{self.kernels_to_fuse}"')

    dropout_rng = None
    if enable_dropout and self.dropout_rate > 0.:
      dropout_rng = self.make_rng('dropout')

    # Apply attention.
    x = _local_plus_side_attention(
        query,
        key,
        value,
        side_key,
        side_value,
        local_radius=self.local_radius,
        bias=attention_bias,
        side_bias=attention_side_bias,
        broadcast_dropout=self.broadcast_dropout,
        rescale_logits=self.rescale_logits,
        dropout_rng=dropout_rng,
        dropout_rate=self.dropout_rate,
        enable_dropout=enable_dropout,
        dtype=self.dtype,
        precision=self.precision,
        use_extra_logit=self.use_extra_logit,
        float32_logits=self.float32_logits,
        concat_3_blocks_implementation=self.concat_3_blocks_implementation) # pytype: disable=wrong-keyword-args

    if not self.output_projection:
      return x

    # Back to the original inputs dimensions.
    out = dense.DenseGeneral(
        features=features,
        axis=(-2, -1),
        kernel_init=self.kernel_init,
        bias_init=self.bias_init,
        use_bias=self.use_bias,
        dtype=self.dtype,
        precision=self.precision,
        reshape_kernel=not self.split_head_kernel,
        kernel_axis_names=['heads', 'kv', 'embed'],
        name='out')( # pytype: disable=wrong-arg-types
            x)
    return out
def make_etc_fixed_block_ids(
    tokens_per_block: int,
    inputs_mask: Array,
    segment_ids: Optional[Array] = None,
    positions: Optional[Array] = None,
    adopt_orphan_tokens: bool = True) -> Tuple[Array, Array]:
  """Returns the "fixed block" global id corresponding to each long token.

  The array arguments follow `LongSelfAttention`.

  Args:
    tokens_per_block: Integer number of input tokens assigned to each "block"
      corresponding to a global token. Note that "blocks" in this sense have no
      connection with the internal "blocks" used for implementing sliding
      window local self-attention.
    inputs_mask: <bool>[batch, seq_len] shaped Array.
    segment_ids: Optional <int32>[batch, seq_len] shaped Array.
    positions: Optional <int32>[batch, seq_len] shaped Array.
    adopt_orphan_tokens: bool, determining the behavior when sequence lengths
      in the input do not evenly divide by tokens_per_block. See 'Note on
      orphan tokens' in the docstring of the helper function
      _make_etc_fixed_block_ids_1d().

  Returns:
    (block_ids, global_segment_ids) Tuple:
      block_ids: <int32>[batch, seq_len] shaped Array of global token ids for
        each (long) input token. Long input tokens that aren't assigned any
        global tokens will have id `-1` (which will be the case for any
        examples that have fewer than `tokens_per_block` tokens).
      global_segment_ids: <int32>[batch, global_seq_len] shaped Array of the
        "segment id" (i.e. example id) each global token belongs to.
        `global_seq_len` is inferred as `seq_len // tokens_per_block`.
  """
  inputs_mask = jnp.asarray(inputs_mask)
  # Fall back to treating the whole (unpacked) example as one segment.
  if segment_ids is None:
    segment_ids = inputs_mask.astype(jnp.int32)
  else:
    segment_ids = jnp.asarray(segment_ids)
  # Default positions: simple 0..seq_len-1 range, zeroed on padding.
  if positions is None:
    positions = jnp.arange(inputs_mask.shape[-1]) * inputs_mask
  else:
    positions = jnp.asarray(positions)
  # Map the single-example helper over the batch axis; the scalar arguments
  # (tokens_per_block, adopt_orphan_tokens) are broadcast unchanged.
  per_example_fn = jax.vmap(
      _make_etc_fixed_block_ids_1d, in_axes=(None, 0, 0, 0, None), out_axes=0)
  return per_example_fn(tokens_per_block, inputs_mask, segment_ids, positions,
                        adopt_orphan_tokens)
def _make_etc_fixed_block_ids_1d(
    tokens_per_block: int,
    inputs_mask: Array,
    segment_ids: Array,
    positions: Array,
    adopt_orphan_tokens: bool = True) -> Tuple[Array, Array]:
  """Helper for `make_etc_fixed_block_ids` applied to a single example.
  See the following for an example of what packed inputs look like:
  https://github.com/google/seqio/blob/main/seqio/utils.py#L292
  Args:
    tokens_per_block: Positive integer.
    inputs_mask: <bool>[seq_len] shaped Array.
    segment_ids: <int32>[seq_len] shaped Array.
    positions: <int32>[seq_len] shaped Array.
    adopt_orphan_tokens: bool, determining the behavior when sequence lengths in
      the input do not evenly divide by tokens_per_block. See 'Note on orphan
      tokens' below
  Returns:
    (block_ids, global_segment_ids) Tuple:
      block_ids: <int32>[seq_len] shaped Array of global token ids for each
        (long) input token. Long tokens that aren't assigned any global
        tokens will have id `-1`.
      global_segment_ids: <int32>[global_seq_len] shaped Array of the "segment
        id" (i.e. example id) each global token belongs to. `global_seq_len`
        is inferred as `seq_len // tokens_per_block`.
  Note on orphan tokens:
    If a sequence in the provided input has a length which does not evenly
    divide by tokens_per_block, the final tokens which do not correspond
    naturally to a block are known as orphan tokens. There are two ways this
    function may assign a block number to these orphans, depending on the value
    of the (bool) adopt_orphan_tokens argument.
    If adopt_orphan_tokens == True,
      the orphan tokens will be assigned to the same block as the final
      non-orphan tokens. E.g., if tokens_per_block == 2, and the inputs_mask is
      jnp.array([1,1,1,0]), the block_ids returned by this function will be
      jnp.array([0,0,0,-1]), indicated that the orphan token in position 2 is
      effectively a member of the same block as the non-orphan tokens in pos 0
      and 1.
    If adopt_orphan_tokens == False,
      the orphan tokens will not be assigned to a global-token block. In this
      case, if tokens_per_block == 2, and the inputs_mask is
      jnp.array([1,1,1,0]), the block_ids returned by this function will be
      jnp.array([0,0,-1,-1]), indicating that the orphan token in position 2 is
      NOT a member of the same block as the non-orphan tokens in pos 0 and 1.
  """
  assert 1 == inputs_mask.ndim == segment_ids.ndim == positions.ndim
  assert inputs_mask.shape[0] == segment_ids.shape[0] == positions.shape[0]
  seq_len = inputs_mask.shape[0]
  num_globals = seq_len // tokens_per_block
  # A token starts (resp. ends) a candidate block iff its packed position is
  # congruent to 0 (resp. tokens_per_block - 1) modulo tokens_per_block.
  position_mod = positions % tokens_per_block
  start_marker = position_mod == 0
  end_marker = position_mod == tokens_per_block - 1
  # 0-based candidate block index per token; padding tokens map to -1.
  candidate_blocks = jnp.cumsum(start_marker, axis=-1) * inputs_mask - 1
  # Zero out positions except at block start/end markers, then sum per
  # candidate block. (`segment_sum` drops indices outside [0, num_segments),
  # so the -1 padding entries are ignored.)
  positions_start_end = positions * jnp.logical_or(start_marker, end_marker)
  candidate_block_sums = jax.ops.segment_sum(
      positions_start_end, candidate_blocks, num_segments=seq_len)
  # A complete block contains a start p (p % B == 0) and an end p + B - 1,
  # summing to 2p + B - 1 which is nonzero mod B; a lone start sums to
  # p which is 0 mod B. So nonzero-mod sums identify blocks earning a global.
  blocks_with_starts = candidate_block_sums % tokens_per_block != 0
  global_start = jnp.logical_and(start_marker,
                                 blocks_with_starts[candidate_blocks])
  # Tokens in blocks that earned no global token, plus padding tokens,
  # should receive block id -1.
  blocks_without_globals = candidate_block_sums == 0
  token_without_global = blocks_without_globals[candidate_blocks]
  token_without_global = jnp.logical_or(token_without_global,
                                        jnp.logical_not(inputs_mask))
  # Each token gets the id of the most recent global start before/at it.
  block_ids = jnp.cumsum(global_start) * jnp.logical_not(
      token_without_global) - 1
  # Read each global token's segment id off its block's start token.
  global_segment_ids = jax.ops.segment_sum(
      segment_ids * global_start, block_ids, num_segments=num_globals)
  if not adopt_orphan_tokens:
    # Overwrite orphan tokens with -1 instead of letting them keep the
    # adopted (previous) block's id.
    orphan_tokens = _identify_orphan_tokens(tokens_per_block, inputs_mask,
                                            positions)
    not_orphan_tokens = 1 - orphan_tokens
    orphan_indicator = -1
    block_ids = not_orphan_tokens * block_ids + orphan_tokens * orphan_indicator
  block_ids = block_ids.astype(jnp.int32)
  return block_ids, global_segment_ids
def identify_orphan_tokens(tokens_per_block: int,
                           inputs_mask: Array,
                           positions: Optional[Array] = None) -> Array:
  """Returns an Array with 1s in places corresponding to "orphan" tokens.
  Orphan tokens are the trailing tokens of a sequence whose length does not
  divide evenly by `tokens_per_block`, i.e. tokens with no natural block.
  The array arguments follow `LongSelfAttention`, with the exception of
  segment_ids, which is not needed.
  Args:
    tokens_per_block: Integer number of input tokens assigned to each "block"
      corresponding to a global token. Unrelated to the internal "blocks" of
      sliding-window local self-attention.
    inputs_mask: <bool>[batch, seq_len] shaped Array.
    positions: Optional <int32>[batch, seq_len] shaped Array.
  Returns:
    orphan_tokens: <int32>[batch, seq_len] shaped Array containing 1 at each
      orphan token position and 0 everywhere else.
  """
  inputs_mask = jnp.asarray(inputs_mask)
  if positions is None:
    positions = jnp.arange(inputs_mask.shape[-1]) * inputs_mask
  else:
    positions = jnp.asarray(positions)
  # Apply the single-example helper across the batch dimension.
  per_example_fn = jax.vmap(
      _identify_orphan_tokens, in_axes=(None, 0, 0), out_axes=0)
  return per_example_fn(tokens_per_block, inputs_mask, positions)
def _identify_orphan_tokens(tokens_per_block: int, inputs_mask: Array,
                            positions: Array) -> Array:
  """Helper for `identify_orphan_tokens` applied to a single example.
  Args:
    tokens_per_block: Integer number of input tokens assigned to each "block"
      corresponding to a global token.
    inputs_mask: <bool>[seq_len] shaped Array.
    positions: <int32>[seq_len] shaped Array.
  Returns:
    orphan_tokens: <int32>[seq_len] shaped Array with 1 at orphan token
      positions and 0 elsewhere. For example, with 3 tokens per block and
      inputs_mask [1,1,1,1,1,1,1,1,0,0], the final two real tokens have no
      complete block, so the result is [0,0,0,0,0,0,1,1,0,0].
  """
  # Mark the last position of every complete block.
  block_end = (positions % tokens_per_block) == tokens_per_block - 1
  window = jnp.ones(tokens_per_block)
  # Sliding-window correlation: a token is "covered" iff a block-end marker
  # lies within the next `tokens_per_block - 1` positions (inclusive of it).
  covered = jnp.correlate(block_end, window, mode='full')[tokens_per_block - 1:]
  uncovered = 1 - covered
  # Restrict to real (non-padding) tokens.
  return jnp.logical_and(uncovered.astype(jnp.int32), inputs_mask).astype(
      jnp.int32)
def _create_global_aggregates(inputs: Array, block_ids: Array,
                              global_seq_len: int) -> Array:
  """Computes global aggregates by summing embeddings in each block.
  Args:
    inputs: <float>[batch..., seq_len, hidden_size] array of token embeddings
      to aggregate over.
    block_ids: <int32>[batch..., seq_len] array giving the block (i.e. global
      token) id of each token in `inputs`. Ids outside [0, global_seq_len)
      contribute nothing (their one-hot rows are all zeros).
    global_seq_len: integer number of global tokens to return in the result.
  Returns:
    [batch..., global_seq_len, hidden_size] array of per-block sums.
  """
  # [batch..., seq_len, global_seq_len]: row i selects token i's block slot.
  scatter = jax.nn.one_hot(block_ids, global_seq_len)
  # Contract over the token axis to sum each block's embeddings.
  return jnp.einsum('...ng,...nd->...gd', scatter, inputs)
def _local_plus_side_attention(
    query: Array,
    key: Array,
    value: Array,
    side_key: Array,
    side_value: Array,
    local_radius: int,
    bias: Optional[Array] = None,
    side_bias: Optional[Array] = None,
    broadcast_dropout: bool = True,
    rescale_logits: bool = False,
    dropout_rng: Optional[PRNGKey] = None,
    dropout_rate: float = 0.,
    enable_dropout: bool = True,
    dtype: DType = jnp.float32,
    precision: Optional[lax.Precision] = None,
    use_extra_logit: bool = False,
    float32_logits: bool = False,
    concat_3_blocks_implementation: Optional[str] = None):
  """Local self attention with side keys/values (e.g. from global memory).
  This is an extension to `_local_self_attention` that also attends to a side
  input in addition to the local window.
  Args:
    query: queries for calculating attention with shape of `[batch..., length,
      num_heads, qk_depth_per_head]`.
    key: keys for calculating attention with shape of `[batch..., length,
      num_heads, qk_depth_per_head]`.
    value: values to be used in attention with shape of `[batch..., length,
      num_heads, v_depth_per_head]`.
    side_key: `[batch..., side_len, num_heads, qk_depth_per_head]` array of keys
      for the side input.
    side_value: `[batch..., side_len, num_heads, v_depth_per_head]` array of
      values for the side input.
    local_radius: How many tokens to the left/right for input tokens to locally
      self-attend to. For example, a value of 1 would allow each token to only
      attend to 1 token to the left and 1 token to the right of it. TPU-friendly
      values include 84, 127, 169, and 255 since the internal `block_len` will
      be `local_radius + 1`.
    bias: bias for the attention weights. This should be broadcastable to the
      shape `[batch..., num_blocks, num_heads, block_len, 3 * block_len]`. This
      can be used for incorporating causal masks, padding masks, proximity bias,
      etc. Note that `bias` must be responsible for enforcing that tokens do
      not attend beyond `local_radius` since the 3-block approach technically
      permits attention to tokens up to `2 * local_radius + 1` away.
    side_bias: `[batch..., num_heads, length, side_len]` shaped array for side
      input attention bias.
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    rescale_logits: bool. Whether to rescale `query` logits by 1/sqrt(depth_kq).
    dropout_rng: JAX PRNGKey: to be used for dropout
    dropout_rate: dropout rate
    enable_dropout: bool, enable_dropout or not (to apply dropout)
    dtype: the dtype of the computation (default: float32)
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    use_extra_logit: whether to include a virtual extra logit equal to zero.
    float32_logits: bool, if True then compute logits in float32 to avoid
      numerical issues with bfloat16.
    concat_3_blocks_implementation: Optional string specifying an alternative
      implementation to use. Leave as `None` to use the default implementation.
      The only current alternative is 'onehot', which is more efficient when
      training with `scan`.
  Returns:
    Output of shape `[batch..., length, num_heads, v_depth_per_head]`.
  """
  assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'
  assert query.shape[:-3] == key.shape[:-3] == value.shape[:-3], (
      'q, k, v batch dims must match.')
  assert query.shape[-2] == key.shape[-2] == value.shape[-2], (
      'q, k, v num_heads must match.')
  assert query.shape[-3] == key.shape[-3] == value.shape[-3], (
      'q, k, v lengths must match.')
  assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'
  assert side_key.ndim == side_value.ndim
  assert query.shape[:-3] == side_key.shape[:-3] == side_value.shape[:-3], (
      'side k, v batch dims must match q.')
  assert query.shape[-2] == side_key.shape[-2] == side_value.shape[-2], (
      'side k, v num_heads must match q.')
  # Fixed duplicated word in the assertion message ("must must").
  assert side_key.shape[-3] == side_value.shape[-3], (
      'side k, v lengths must match.')
  assert query.shape[-1] == side_key.shape[-1], 'side k depth must match q.'
  assert value.shape[-1] == side_value.shape[-1], 'side v depth must match v.'
  assert (bias is None) == (side_bias is None), (
      'bias and side_bias must be either both present or both None')
  concat_3_blocks = _get_concat_3_blocks_implementation(
      concat_3_blocks_implementation)
  # NOTE: T5 does not explicitly rescale the attention logits by
  # 1/sqrt(depth_kq)! This is folded into the initializers of the
  # linear transformations, which is equivalent under Adafactor.
  if rescale_logits:
    depth = query.shape[-1]
    query = query / jnp.sqrt(depth).astype(dtype)
  # split into blocks
  seq_len = query.shape[-3]
  block_len = local_radius + 1
  # [batch..., num_blocks, block_len, num_heads, *_depth_per_head] shape.
  query = tensor_utils.split_into_blocks(query, block_len, axis=-3)
  key = tensor_utils.split_into_blocks(key, block_len, axis=-3)
  value = tensor_utils.split_into_blocks(value, block_len, axis=-3)
  # concatenate 3 blocks for keys and values so each query block can attend
  # to its own block plus both neighboring blocks
  # [batch..., num_blocks, 3 * block_len, num_heads, *_depth_per_head] shape.
  key = concat_3_blocks(key, block_axis=-4, seq_axis=-3)
  value = concat_3_blocks(value, block_axis=-4, seq_axis=-3)
  # casting logits and softmax computation for float32 for model stability.
  if float32_logits:
    query = query.astype(jnp.float32)
    key = key.astype(jnp.float32)
    side_key = side_key.astype(jnp.float32)
  # tile side inputs across blocks so they can be concatenated with the
  # per-block local keys/values below
  num_blocks = query.shape[-4]
  reps = [1] * (side_key.ndim + 1)
  reps[-4] = num_blocks
  # [batch..., num_blocks, side_len, num_heads, *_depth_per_head] shape.
  tiled_side_key = jnp.tile(side_key[..., jnp.newaxis, :, :, :], reps)
  tiled_side_value = jnp.tile(side_value[..., jnp.newaxis, :, :, :], reps)
  # [batch..., num_blocks, 3 * block_len + side_len, num_heads,
  # *_depth_per_head] shape.
  key = jnp.concatenate((key, tiled_side_key), axis=-3)
  value = jnp.concatenate((value, tiled_side_value), axis=-3)
  # [batch..., num_blocks, num_heads, block_len, 3 * block_len + side_len] shape
  attn_weights = jnp.einsum(
      '...qhd,...khd->...hqk', query, key, precision=precision)
  # apply attention bias: masking, dropout, proximity bias, etc.
  if bias is not None:
    num_heads = query.shape[-2]
    # [batch..., num_blocks, num_heads, block_len, 3 * block_len] shape.
    bias = jnp.broadcast_to(
        bias, attn_weights.shape[:-4] +
        (num_blocks, num_heads, block_len, 3 * block_len))
    # [batch..., num_heads, num_blocks, block_len, side_len] shape.
    side_bias = tensor_utils.split_into_blocks(side_bias, block_len, axis=-2)
    # [batch..., num_blocks, num_heads, block_len, side_len] shape.
    side_bias = jnp.swapaxes(side_bias, -4, -3)
    attn_weights += jnp.concatenate((bias, side_bias),
                                    axis=-1).astype(attn_weights.dtype)
  # normalize the attention weights
  attn_weights = (_softmax_with_extra_logit if use_extra_logit else
                  jax.nn.softmax)(attn_weights).astype(dtype)
  # apply attention dropout
  if enable_dropout and dropout_rate > 0.:
    keep_prob = 1.0 - dropout_rate
    if broadcast_dropout:
      # T5 broadcasts along the "length" dim, but unclear which one that
      # corresponds to in positional dimensions here, assuming query dim.
      dropout_shape = list(attn_weights.shape)
      dropout_shape[-2] = 1
      keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape)
      keep = jnp.broadcast_to(keep, attn_weights.shape)
    else:
      keep = random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
    multiplier = (
        keep.astype(attn_weights.dtype) / jnp.asarray(keep_prob, dtype=dtype))
    attn_weights = attn_weights * multiplier
  # compute weighted sum over values for each query position
  # [batch..., num_blocks, block_len, num_heads, v_depth_per_head] shape.
  y = jnp.einsum(
      '...hqk,...khd->...qhd', attn_weights, value, precision=precision)
  # undo blocking and trim the padding added by `split_into_blocks`
  unblocked_output = y.reshape(y.shape[:-4] + (-1,) + y.shape[-2:])
  return unblocked_output[..., :seq_len, :, :]
def mask_to_bias(mask: Array, dtype: jnp.dtype) -> Array:
  """Converts a mask to a bias-like Array suitable for adding to other biases.
  Args:
    mask: <bool> array of arbitrary shape
    dtype: jnp.dtype, desired dtype of the returned array
  Returns:
    bias: `dtype` array of the same shape as the input, with 0 in place of
      truthy values and -1e10 in place of falsy values of mask
  """
  # -1e10 effectively removes masked positions after a softmax.
  return lax.select(mask,
                    jnp.full(mask.shape, 0).astype(dtype),
                    jnp.full(mask.shape, -1e10).astype(dtype))
def _make_side_relpos(tokens_per_block: int,
                      inputs_mask: Array,
                      segment_ids: Optional[Array] = None,
                      positions: Optional[Array] = None,
                      adopt_orphan_tokens: bool = True) -> Array:
  """Makes the relative position tensor for local -> global attention.
  Args:
    tokens_per_block: Integer number of input tokens assigned to each "block"
      corresponding to a global token. Note that "blocks" in this sense have no
      connection with the internal "blocks" used for implementing sliding window
      local self-attention.
    inputs_mask: <bool>[batch, seq_len] shaped Array.
    segment_ids: Optional <int32>[batch, seq_len] shaped Array.
    positions: Optional <int32>[batch, seq_len] shaped Array.
    adopt_orphan_tokens: bool, determining the behavior when sequence lengths in
      the input do not evenly divide by tokens_per_block. See 'Note on orphan
      tokens' in the docstring of the helper function
      _make_etc_fixed_block_ids_1d().
  Returns:
    side_relative_position: <int32>[batch, seq_len, global_seq_len] shaped Array
      of relative positions between the local tokens and
      the corresponding global tokens in the segment.
  """
  # Block ids are computed with orphan adoption enabled regardless of
  # `adopt_orphan_tokens`; the non-adoption case is handled by shifting
  # orphan ids below rather than re-deriving the block assignment.
  block_ids, global_segment_ids = make_etc_fixed_block_ids(
      tokens_per_block,
      inputs_mask,
      segment_ids,
      positions,
      adopt_orphan_tokens=True)
  if not adopt_orphan_tokens:
    orphan_locations = identify_orphan_tokens(tokens_per_block, inputs_mask,
                                              positions)
    # NOTE(review): adding 1 at orphan positions moves each orphan's id from
    # the adopted (previous) block to the following block, so relative
    # distances treat orphans as preceding the next global token — confirm
    # this matches the intended non-adoption semantics.
    block_ids = block_ids + orphan_locations
  global_seq_len = global_segment_ids.shape[-1]
  global_positions = jnp.arange(global_seq_len, dtype=jnp.int32)
  # relpos[b, i, g] = g - block_id(b, i): signed distance from token i's
  # block to global token g.
  side_relative_position = global_positions - block_ids[..., jnp.newaxis]
  return side_relative_position.astype(jnp.int32)
def validate_long_attention_call_parameter_shapes(
    inputs: Array,
    inputs_mask: Array,
    segment_ids: Optional[Array],
    positions: Optional[Array],
    *,
    allow_positions_without_segment_ids: bool = False) -> None:
  """Validates the shapes of parameters to LongSelfAttention call methods.
  Args:
    inputs: <float>[batch, length, emb_dim] array of embeddings to self-attend
      over.
    inputs_mask: <bool>[batch, length] array indicating True for non-padding
      tokens and False for padding.
    segment_ids: Optional <int32>[batch, length] encoder input segmentation
      info for packed examples.
    positions: Optional <int32>[batch, length] encoder input subsequence
      positions for packed examples.
    allow_positions_without_segment_ids: If True, `segment_ids` can be None
      while `positions` is given. This will be the case for example if
      packing is off but tokens appear in a non-sequential order.
  Raises:
    ValueError if any arrays fail validation.
  """
  if inputs.ndim < 3:
    raise ValueError(f'Expected rank of inputs >= 3, was {inputs.ndim}')
  if inputs_mask.ndim != inputs.ndim - 1:
    raise ValueError(f'Mismatched ranks: expected '
                     f'inputs_mask.ndim ({inputs_mask.ndim}) to be one less '
                     f'than inputs.ndim ({inputs.ndim})')
  # Bug fix: the messages below previously interpolated slices of the array
  # itself (`inputs_mask[:-1]` / `inputs_mask[-1]`) instead of its shape.
  if inputs.shape[:-2] != inputs_mask.shape[:-1]:
    raise ValueError(f'Mismatched batch dims: expected '
                     f'inputs.shape[:-2] ({inputs.shape[:-2]}) == '
                     f'inputs_mask.shape[:-1] ({inputs_mask.shape[:-1]})')
  if inputs.shape[-2] != inputs_mask.shape[-1]:
    raise ValueError(f'Mismatched length dim: expected '
                     f'inputs.shape[-2] ({inputs.shape[-2]}) == '
                     f'inputs_mask.shape[-1] ({inputs_mask.shape[-1]})')
  if allow_positions_without_segment_ids:
    if positions is None and segment_ids is not None:
      raise ValueError(
          '`positions` must not be None when `segment_ids` is given')
  elif (segment_ids is None) != (positions is None):
    raise ValueError(
        f'segment_ids and positions must either be both given or both None '
        f'but got `segment_ids is None`: {segment_ids is None}, '
        f'`positions is None`: {positions is None}')
  if segment_ids is not None and segment_ids.shape != inputs_mask.shape:
    raise ValueError(f'Mismatched shapes: expected '
                     f'segment_ids.shape ({segment_ids.shape}) to match '
                     f'inputs_mask.shape ({inputs_mask.shape})')
  if positions is not None and positions.shape != inputs_mask.shape:
    raise ValueError(f'Mismatched shapes: expected '
                     f'positions.shape ({positions.shape}) to match '
                     f'inputs_mask.shape ({inputs_mask.shape})')
| 56,450 | 42.059497 | 113 | py |
flaxformer | flaxformer-main/flaxformer/architectures/longt5/longt5_architecture_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for longt5_architecture."""
import json
import pathlib
import re
from typing import Any, Optional
from absl.testing import absltest
from flax import linen as nn
from flax.core import frozen_dict
import jax
from jax import numpy as jnp
from jax import random
import numpy as np
from flaxformer.architectures.common import param_remapping
from flaxformer.architectures.longt5 import long_attention
from flaxformer.architectures.longt5 import longt5_architecture
from flaxformer.architectures.longt5 import relative_position_biases_general
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.components import dense
from flaxformer.components import embedding
from flaxformer.components import layer_norm
from flaxformer.components import relative_position_biases
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
def _dense_t5_testdata_dir() -> pathlib.Path:
  """Returns the directory of the dense T5 golden param-shape JSON files."""
  srcdir = pathlib.Path(absltest.get_default_test_srcdir())
  return srcdir / 'flaxformer/architectures/t5/testdata'
def check_dense_t5_params(actual_params: frozen_dict.FrozenDict[str, Any],
                          expected_filename: str) -> None:
  """Checks that the shapes of `actual_params` match golden JSON testdata.
  Args:
    actual_params: Parameter tree whose leaf array shapes are compared.
    expected_filename: Name of a JSON file under the dense T5 testdata dir.
  Raises:
    AssertionError: If the shape tree differs from the golden file (the
      actual shapes are printed first for easy updating).
  """
  actual = jax.tree_map(
      lambda x: list(x.shape),
      frozen_dict.unfreeze(param_remapping.filter_out_metadata(actual_params)))
  # Use a context manager so the golden file handle is closed deterministically
  # (the previous bare `open(...)` leaked it).
  with open(_dense_t5_testdata_dir() / expected_filename) as f:
    expected = json.load(f)
  if actual != expected:
    # Collapse multi-line shape lists onto one line for readable output.
    print(
        re.sub(r'\[\n\s+(\d+,\s+)*\d+\s+\]',
               lambda m: ''.join(m.group(0).split()).replace(',', ', '),
               json.dumps(actual, indent=2)))
    raise AssertionError(
        f'Didn\'t match JSON params in {expected_filename}. See actual '
        'values above.')
# Initializer configurations shared by the test model factories below.
EMBEDDING_INIT = nn.initializers.normal(stddev=1.0)
RELPOS_BIAS_INIT = nn.initializers.variance_scaling(1.0, 'fan_avg', 'uniform')
# Kernel initializers for the attention, MLP, and final projections.
ATTENTION_KERNEL_INIT = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'normal')
MLP_KERNEL_INIT = nn.initializers.variance_scaling(1.0, 'fan_in',
                                                   'truncated_normal')
FINAL_KERNEL_INIT = nn.initializers.variance_scaling(1.0, 'fan_in',
                                                     'truncated_normal')
# Near-zero bias initialization (biases are mostly disabled via use_bias=False).
BIAS_INIT = nn.initializers.normal(stddev=1e-6)
class DegenerateLongSelfAttention(dense_attention.MultiHeadDotProductAttention,
                                  long_attention.LongSelfAttention):
  """A degenerate implementation of `LongSelfAttention` for testing.
  This just performs full self-attention after creating full attention mask
  and bias arrays. We inherit from `MultiHeadDotProductAttention` to preserve
  the same parameter naming structure, allowing us to use the same
  json testdata as the standard ("dense") T5 architecture.
  """
  # Optional module producing relative position biases; when None, no bias
  # is added to the attention logits.
  relpos_bias: Optional[nn.Module] = None
  @nn.compact
  def __call__(
      self,
      inputs: Array,
      inputs_mask: Array,
      *,
      segment_ids: Optional[Array] = None,
      # `positions` is accepted for interface parity with `LongSelfAttention`
      # but is unused by this degenerate implementation.
      positions: Optional[Array] = None,
      enable_dropout: bool = True,
  ) -> Array:
    # Make padding attention mask.
    encoder_mask = dense_attention.make_attention_mask(
        inputs_mask, inputs_mask, dtype=self.dtype)
    # Add segmentation block-diagonal attention mask if using segmented data.
    if segment_ids is not None:
      encoder_mask = dense_attention.combine_masks(
          encoder_mask,
          dense_attention.make_attention_mask(
              segment_ids, segment_ids, jnp.equal, dtype=self.dtype))
    # Shared relative position embedding attention biases.
    if self.relpos_bias:
      encoder_bias = self.relpos_bias(inputs.shape[-2], inputs.shape[-2], True)  # pylint: disable=not-callable
    else:
      encoder_bias = None
    # Self-attention: queries, keys, and values all come from `inputs`.
    return super().__call__(
        inputs,
        inputs,
        encoder_mask,
        encoder_bias,
        enable_dropout=enable_dropout)
def make_token_emb1(vocab_size, dtype):
  """First test configuration for token embeddings."""
  return embedding.Embed(
      name='token_embedder',
      num_embeddings=vocab_size,
      features=13,
      dtype=dtype,
      cast_input_dtype=jnp.int32,
      # Attend in float32 for logit training stability.
      attend_dtype=jnp.float32,
      embedding_init=EMBEDDING_INIT)
def make_long_att_fn1(num_attn_heads, dtype):
  """First test configuration for long encoder self-attention."""
  def factory(relpos_bias):
    # The factory receives an (optional) relative position bias module and
    # closes over the remaining attention hyperparameters.
    return DegenerateLongSelfAttention(
        relpos_bias=relpos_bias,
        num_heads=num_attn_heads,
        dtype=dtype,
        qkv_features=512,
        head_dim=None,
        use_bias=False,
        kernel_init=ATTENTION_KERNEL_INIT,
        bias_init=BIAS_INIT,
        broadcast_dropout=True,
        dropout_rate=0.1)
  return factory
def make_attention1(num_attn_heads, dtype):
  """First test configuration for attention in decoder."""
  return dense_attention.MultiHeadDotProductAttention(
      num_heads=num_attn_heads,
      qkv_features=512,
      head_dim=None,
      dtype=dtype,
      use_bias=False,
      kernel_init=ATTENTION_KERNEL_INIT,
      bias_init=BIAS_INIT,
      broadcast_dropout=True,
      dropout_rate=0.1)
def make_mlp1(dtype):
  """First test configuration for the MLP."""
  return dense.MlpBlock(
      intermediate_dim=2048,
      activations=('relu',),
      use_bias=False,
      kernel_init=MLP_KERNEL_INIT,
      bias_init=BIAS_INIT,
      intermediate_dropout_rate=0.1,
      final_dropout_rate=0.1,
      dtype=dtype)
def _make_relative_position_bias(
    num_attn_heads: int,
    dtype: Any) -> relative_position_biases.RelativePositionBiases:
  """Builds the T5-style relative position bias module used in these tests."""
  return relative_position_biases.RelativePositionBiases(
      num_heads=num_attn_heads,
      num_buckets=32,
      max_distance=128,
      embedding_init=RELPOS_BIAS_INIT,
      dtype=dtype)
def _make_relpos_bias_general(
    num_attn_heads: int,
    dtype: Any
) -> relative_position_biases_general.RelativePositionBiasesGeneral:
  """Builds a general relative position bias module for these tests.
  Note: the return annotation previously (incorrectly) named
  `relative_position_biases.RelativePositionBiases`.
  """
  return relative_position_biases_general.RelativePositionBiasesGeneral(
      num_buckets=32,
      max_distance=128,
      num_heads=num_attn_heads,
      dtype=dtype,
      embedding_init=RELPOS_BIAS_INIT)
def make_config1(
    scan_layers: bool = False,
    layer_remat: str = 'legacy') -> longt5_architecture.LongEncoderDecoder:
  """Returns a LongEncoderDecoder with per-layer relative position biases."""
  dtype = jnp.float32
  num_attn_heads = 8
  def make_dropout():
    return nn.Dropout(rate=0.1, broadcast_dims=(-2,))
  make_layer_norm = layer_norm.T5LayerNorm
  def relpos_bias_factory():
    return _make_relative_position_bias(num_attn_heads, dtype)
  def encoder_layer_factory(shared_relpos_bias):
    # Per-layer biases: no shared bias module is expected.
    assert shared_relpos_bias is None
    return longt5_architecture.LongEncoderLayer(
        attention_factory=make_long_att_fn1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relpos_bias_factory=relpos_bias_factory,
        scanned=scan_layers)
  def decoder_layer_factory(shared_relative_position_bias):
    assert shared_relative_position_bias is None
    return t5_architecture.DecoderLayer(
        self_attention=make_attention1(num_attn_heads, dtype),
        encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=relpos_bias_factory,
        scanned=scan_layers)
  def encoder_factory(shared_token_embedder):
    assert shared_token_embedder is None
    return longt5_architecture.LongEncoder(
        num_layers=3,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=encoder_layer_factory,
        input_dropout_factory=make_dropout,
        output_dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        dtype=dtype,
        scan_layers=scan_layers,
        layer_remat=layer_remat,
    )
  def decoder_factory(shared_token_embedder):
    assert shared_token_embedder is None
    return t5_architecture.Decoder(
        num_layers=2,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=decoder_layer_factory,
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        output_logits_factory=None,
        dtype=dtype,
        scan_layers=scan_layers,
        layer_remat=layer_remat,
    )
  return longt5_architecture.LongEncoderDecoder(
      shared_token_embedder_factory=lambda: None,
      encoder_factory=encoder_factory,
      decoder_factory=decoder_factory,
      dtype=dtype,
      scan_layers=scan_layers,
  )
def make_config1_original_t5(
    scan_layers: bool = False) -> t5_architecture.EncoderDecoder:
  """Returns a standard (dense) T5 EncoderDecoder mirroring `make_config1`.
  Tests compare this model's encoder output against the long-attention
  variant, which should match exactly for the degenerate attention.
  """
  dtype = jnp.float32
  num_attn_heads = 8
  make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
  make_layer_norm = layer_norm.T5LayerNorm
  def _make_encoder_layer(shared_relative_position_bias):
    # Per-layer biases: no shared bias module is expected.
    assert shared_relative_position_bias is None
    return t5_architecture.EncoderLayer(
        attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)),
        scanned=scan_layers)
  def _make_decoder_layer(shared_relative_position_bias):
    assert shared_relative_position_bias is None
    return t5_architecture.DecoderLayer(
        self_attention=make_attention1(num_attn_heads, dtype),
        encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)),
        scanned=scan_layers)
  def _make_encoder(shared_token_embedder):
    assert shared_token_embedder is None
    return t5_architecture.Encoder(
        num_layers=3,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=_make_encoder_layer,
        input_dropout_factory=make_dropout,
        output_dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        dtype=dtype,
        scan_layers=scan_layers,
    )
  def _make_decoder(shared_token_embedder):
    assert shared_token_embedder is None
    return t5_architecture.Decoder(
        num_layers=2,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=_make_decoder_layer,
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        output_logits_factory=None,
        dtype=dtype,
        scan_layers=scan_layers,
    )
  return t5_architecture.EncoderDecoder(
      shared_token_embedder_factory=lambda: None,
      encoder_factory=_make_encoder,
      decoder_factory=_make_decoder,
      dtype=dtype,
      scan_layers=scan_layers,
  )
# TODO: DRY up with above configs.
def make_config2_shared_relative_position_bias(
) -> longt5_architecture.LongEncoderDecoder:
  """Returns a LongEncoderDecoder sharing one relpos bias across layers."""
  dtype = jnp.float32
  num_attn_heads = 8
  make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
  make_layer_norm = layer_norm.T5LayerNorm
  def _make_encoder_layer(shared_relpos_bias):
    # Shared-bias configuration: the shared module must be provided.
    assert shared_relpos_bias is not None
    return longt5_architecture.LongEncoderLayer(
        attention_factory=make_long_att_fn1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        shared_relpos_bias=shared_relpos_bias)
  def _make_decoder_layer(shared_relative_position_bias):
    assert shared_relative_position_bias is not None
    return t5_architecture.DecoderLayer(
        self_attention=make_attention1(num_attn_heads, dtype),
        encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        shared_relative_position_bias=shared_relative_position_bias)
  def _make_encoder(*, shared_token_embedder=None):
    assert shared_token_embedder is None
    return longt5_architecture.LongEncoder(
        num_layers=3,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=_make_encoder_layer,
        input_dropout_factory=make_dropout,
        output_dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        shared_relpos_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)),
        dtype=dtype,
    )
  def _make_decoder(*, shared_token_embedder=None):
    assert shared_token_embedder is None
    return t5_architecture.Decoder(
        num_layers=2,
        token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
        layer_factory=_make_decoder_layer,
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        output_logits_factory=None,
        shared_relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)),
        dtype=dtype,
    )
  return longt5_architecture.LongEncoderDecoder(
      shared_token_embedder_factory=lambda: None,
      encoder_factory=_make_encoder,
      decoder_factory=_make_decoder,
  )
# TODO: DRY up with above configs.
def make_config3_shared_token_embedder(
) -> longt5_architecture.LongEncoderDecoder:
  """Returns a LongEncoderDecoder whose encoder/decoder share an embedder."""
  dtype = jnp.float32
  num_attn_heads = 8
  make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
  make_layer_norm = layer_norm.T5LayerNorm
  def _make_encoder_layer(shared_relpos_bias):
    assert shared_relpos_bias is None
    return longt5_architecture.LongEncoderLayer(
        attention_factory=make_long_att_fn1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relpos_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)))
  def _make_decoder_layer(shared_relative_position_bias):
    assert shared_relative_position_bias is None
    return t5_architecture.DecoderLayer(
        self_attention=make_attention1(num_attn_heads, dtype),
        encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
        mlp=make_mlp1(dtype),
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        relative_position_bias_factory=(
            lambda: _make_relative_position_bias(num_attn_heads, dtype)))
  def _make_encoder(*, shared_token_embedder=None):
    # Unlike the other configs, the shared embedder is passed through here.
    return longt5_architecture.LongEncoder(
        num_layers=3,
        shared_token_embedder=shared_token_embedder,
        layer_factory=_make_encoder_layer,
        input_dropout_factory=make_dropout,
        output_dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        dtype=dtype,
    )
  def _make_decoder(*, shared_token_embedder=None):
    return t5_architecture.Decoder(
        num_layers=2,
        shared_token_embedder=shared_token_embedder,
        layer_factory=_make_decoder_layer,
        dropout_factory=make_dropout,
        layer_norm_factory=make_layer_norm,
        output_logits_factory=None,
        dtype=dtype,
    )
  return longt5_architecture.LongEncoderDecoder(
      shared_token_embedder_factory=lambda: make_token_emb1(71, dtype),
      encoder_factory=_make_encoder,
      decoder_factory=_make_decoder,
  )
class LongEncoderDecoderTest(absltest.TestCase):
  """Tests for LongT5 encoder/decoder architecture configurations."""

  def test_encoder_shapes_with_relative_attention_per_layer(self):
    """Checks encoder shapes, save-format round trip, and original-T5 parity."""
    transformer = make_config1()
    inputs = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 7],
        ],
        dtype=np.int32)
    output, variables = transformer.init_with_output(
        random.PRNGKey(0),
        inputs,
        enable_dropout=False,
        method=transformer.encode,
    )
    reformatted = transformer.apply({},
                                    variables['params'],
                                    method=transformer.to_save_format)
    check_dense_t5_params(reformatted,
                          'encoder_shapes_per_layer_relpos_bias.json')
    self.assertEqual(output.shape, (2, 4, 13))

    # Convert back to Flax module structure format and test again.
    params2 = transformer.apply({},
                                reformatted,
                                method=transformer.from_save_format)
    output2 = transformer.apply(
        {'params': params2},
        inputs,
        enable_dropout=False,
        method=transformer.encode,
    )
    np.testing.assert_allclose(output, output2, rtol=1e-8)

    # Compare with output from original T5 layers.
    transformer_t5 = make_config1_original_t5()
    output3, _ = transformer_t5.init_with_output(
        random.PRNGKey(0),
        inputs,
        enable_dropout=False,
        method=transformer_t5.encode,
    )
    np.testing.assert_allclose(output, output3, rtol=1e-8)
def test_encoder_shapes_with_relative_attention_per_layer_scan(self):
transformer = make_config1(scan_layers=True)
inputs = np.array(
[
# Batch 1.
[101, 183, 20, 75],
# Batch 2.
[101, 392, 19, 7],
],
dtype=np.int32)
output, _ = transformer.init_with_output(
random.PRNGKey(0),
inputs,
enable_dropout=False,
method=transformer.encode,
)
self.assertEqual(output.shape, (2, 4, 13))
# Compare with output from original T5 layers.
transformer_t5 = make_config1_original_t5(scan_layers=True)
output2, _ = transformer_t5.init_with_output(
random.PRNGKey(0),
inputs,
enable_dropout=False,
method=transformer_t5.encode,
)
np.testing.assert_allclose(output, output2, rtol=1e-8)
  def test_encode_shared_relative_position_bias(self):
    """Checks encoder shapes and save-format round trip with a shared bias."""
    transformer = make_config2_shared_relative_position_bias()
    inputs = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 7],
        ],
        dtype=np.int32)
    output, variables = transformer.init_with_output(
        random.PRNGKey(0),
        inputs,
        enable_dropout=False,
        method=transformer.encode,
    )
    reformatted = transformer.apply({},
                                    variables['params'],
                                    method=transformer.to_save_format)
    check_dense_t5_params(reformatted, 'encoder_shapes_shared_relpos_bias.json')
    self.assertEqual(output.shape, (2, 4, 13))

    # Convert back to Flax module structure format and test again.
    params2 = transformer.apply({},
                                reformatted,
                                method=transformer.from_save_format)
    output2 = transformer.apply(
        {'params': params2},
        inputs,
        enable_dropout=False,
        method=transformer.encode,
    )
    np.testing.assert_allclose(output, output2, rtol=1e-8)
  def test_encoder_example_packing(self):
    """Packed sequences should produce the same outputs as a padded batch."""
    transformer = make_config1()
    encoder_input_tokens = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 0],
        ],
        dtype=np.int32)
    output, variables = transformer.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer.encode,
    )

    # Same examples as above, but packed into a single row with segment ids
    # and per-segment positions.
    encoder_input_tokens_packed = np.array([[101, 183, 20, 75, 101, 392, 19]],
                                           dtype=np.int32)
    encoder_segment_ids = np.array([[0, 0, 0, 0, 1, 1, 1]], dtype=np.int32)
    encoder_input_positions = np.array([[0, 1, 2, 3, 0, 1, 2]], dtype=np.int32)
    output_packed = transformer.apply(
        variables,
        encoder_input_tokens_packed,
        encoder_segment_ids=encoder_segment_ids,
        encoder_positions=encoder_input_positions,
        enable_dropout=False,
        method=transformer.encode,
    )

    # Check that the first element matches, which is entire first batch of the
    # padded setup, and the first 3 "tokens" of the packed example.
    np.testing.assert_allclose(
        output[0, :, :], output_packed[0, 0:4, :], rtol=1e-4, atol=1e-4)

    # Check that the second element matches, which is the first 3 "tokens" of
    # the padded example's second batch, and the last 3 of tokens the packed
    # example's first batch.
    np.testing.assert_allclose(
        output[1, 0:3, :], output_packed[0, 4:7, :], rtol=1e-4, atol=1e-4)
  def test_scan_and_remat(self):
    """Tests if encoder returns the same output for different scan/remat."""
    encoder_input_tokens = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 7],
        ],
        dtype=np.int32)

    transformer1 = make_config1(scan_layers=False, layer_remat='none')
    output1, _ = transformer1.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer1.encode,
    )

    transformer2 = make_config1(scan_layers=False, layer_remat='minimal')
    output2, _ = transformer2.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer2.encode,
    )

    transformer3 = make_config1(scan_layers=False, layer_remat='full')
    output3, _ = transformer3.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer3.encode,
    )

    transformer4 = make_config1(scan_layers=True, layer_remat='minimal')
    output4, _ = transformer4.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer4.encode,
    )

    transformer5 = make_config1(scan_layers=True, layer_remat='full')
    output5, _ = transformer5.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer5.encode,
    )

    # Check scan_layers=False results
    np.testing.assert_allclose(output1, output2, rtol=2e-4)
    np.testing.assert_allclose(output1, output3, atol=1e-5, rtol=1.5e-5)
    # Check scan_layers=True results
    np.testing.assert_allclose(output4, output5, rtol=1.5e-5)
def test_entire_transformer_shared_embeds(self):
encoder_input_tokens = np.zeros((16, 8), dtype=np.float32)
decoder_input_tokens = np.zeros((16, 8), dtype=np.float32)
decoder_target_tokens = np.zeros((16, 8), dtype=np.float32)
transformer = make_config3_shared_token_embedder()
output, variables = transformer.init_with_output(
random.PRNGKey(0),
encoder_input_tokens=encoder_input_tokens,
decoder_input_tokens=decoder_input_tokens,
decoder_target_tokens=decoder_target_tokens,
enable_dropout=False,
)
params = variables['params']
reformatted = transformer.apply({},
params,
method=transformer.to_save_format)
check_dense_t5_params(reformatted,
'encoder_decoder_shared_embedding_shapes.json')
self.assertEqual(output.shape, (16, 8, 71))
# Convert back to Flax module structure format and test again.
params2 = transformer.apply({},
reformatted,
method=transformer.from_save_format)
output2 = transformer.apply(
{'params': params2},
encoder_input_tokens=encoder_input_tokens,
decoder_input_tokens=decoder_input_tokens,
decoder_target_tokens=decoder_target_tokens,
enable_dropout=False,
)
np.testing.assert_allclose(output, output2, rtol=1e-8)
  def test_encoder_local_self_attention_example_packing(self):
    """Verifies packed and padded inputs agree for local self-attention."""

    def make_config() -> longt5_architecture.LongEncoderDecoder:
      dtype = jnp.float32
      num_attn_heads = 8
      make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
      make_layer_norm = layer_norm.T5LayerNorm

      def attention_factory(relpos_bias):
        return long_attention.EncoderLocalSelfAttention(
            num_heads=num_attn_heads,
            local_radius=2,
            dtype=dtype,
            qkv_features=512,
            head_dim=None,
            kernel_init=ATTENTION_KERNEL_INIT,
            bias_init=BIAS_INIT,
            use_bias=False,
            broadcast_dropout=True,
            dropout_rate=0.1,
            relpos_bias=relpos_bias)

      def _make_encoder_layer(shared_relpos_bias):
        # Per-layer relpos biases only: the shared bias must not be provided.
        assert shared_relpos_bias is None
        return longt5_architecture.LongEncoderLayer(
            attention_factory=attention_factory,
            mlp=make_mlp1(dtype),
            dropout_factory=make_dropout,
            layer_norm_factory=make_layer_norm,
            relpos_bias_factory=(
                lambda: _make_relpos_bias_general(num_attn_heads, dtype)))

      def _make_decoder_layer(shared_relative_position_bias):
        assert shared_relative_position_bias is None
        return t5_architecture.DecoderLayer(
            self_attention=make_attention1(num_attn_heads, dtype),
            encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
            mlp=make_mlp1(dtype),
            dropout_factory=make_dropout,
            layer_norm_factory=make_layer_norm,
            relative_position_bias_factory=(
                lambda: _make_relative_position_bias(num_attn_heads, dtype)))

      def _make_encoder(shared_token_embedder):
        assert shared_token_embedder is None
        return longt5_architecture.LongEncoder(
            num_layers=3,
            token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
            layer_factory=_make_encoder_layer,
            input_dropout_factory=make_dropout,
            output_dropout_factory=make_dropout,
            layer_norm_factory=make_layer_norm,
            dtype=dtype,
        )

      def _make_decoder(shared_token_embedder):
        assert shared_token_embedder is None
        return t5_architecture.Decoder(
            num_layers=2,
            token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
            layer_factory=_make_decoder_layer,
            dropout_factory=make_dropout,
            layer_norm_factory=make_layer_norm,
            output_logits_factory=None,
            dtype=dtype,
        )

      return longt5_architecture.LongEncoderDecoder(
          shared_token_embedder_factory=lambda: None,
          encoder_factory=_make_encoder,
          decoder_factory=_make_decoder,
      )

    transformer = make_config()
    encoder_input_tokens = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 0],
        ],
        dtype=np.int32)
    output, variables = transformer.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer.encode,
    )

    # Same examples packed into a single row with segment ids and positions.
    encoder_input_tokens_packed = np.array([[101, 183, 20, 75, 101, 392, 19]],
                                           dtype=np.int32)
    encoder_segment_ids = np.array([[0, 0, 0, 0, 1, 1, 1]], dtype=np.int32)
    encoder_input_positions = np.array([[0, 1, 2, 3, 0, 1, 2]], dtype=np.int32)
    output_packed = transformer.apply(
        variables,
        encoder_input_tokens_packed,
        encoder_segment_ids=encoder_segment_ids,
        encoder_positions=encoder_input_positions,
        enable_dropout=False,
        method=transformer.encode,
    )

    # Check that the first element matches, which is entire first batch of the
    # padded setup, and the first 3 "tokens" of the packed example.
    np.testing.assert_allclose(
        output[0, :, :], output_packed[0, 0:4, :], rtol=1e-4)

    # Check that the second element matches, which is the first 3 "tokens" of
    # the padded example's second batch, and the last 3 of tokens the packed
    # example's first batch.
    np.testing.assert_allclose(
        output[1, 0:3, :], output_packed[0, 4:7, :], rtol=1e-4)
  def test_etc_transient_global_self_attention_example_packing(self):
    """Verifies packing parity for ETC transient-global self-attention."""

    def make_config() -> longt5_architecture.LongEncoderDecoder:
      dtype = jnp.float32
      num_attn_heads = 8
      make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
      make_layer_norm = layer_norm.T5LayerNorm

      def attention_factory(relpos_bias, side_relpos_bias):
        return long_attention.EtcTransientGlobalSelfAttention(
            num_heads=num_attn_heads,
            tokens_per_block=3,
            local_radius=2,
            dtype=dtype,
            qkv_features=512,
            head_dim=None,
            kernel_init=ATTENTION_KERNEL_INIT,
            bias_init=BIAS_INIT,
            use_bias=False,
            broadcast_dropout=True,
            dropout_rate=0.1,
            relpos_bias=relpos_bias,
            side_relpos_bias=side_relpos_bias)

      def _make_encoder_layer(shared_relpos_bias):
        # Per-layer relpos biases only: the shared bias must not be provided.
        assert shared_relpos_bias is None
        return longt5_architecture.LongEncoderLayer(
            attention_factory=attention_factory,
            mlp=make_mlp1(dtype),
            dropout_factory=make_dropout,
            layer_norm_factory=make_layer_norm,
            relpos_bias_factory=(
                lambda: _make_relpos_bias_general(num_attn_heads, dtype)),
            side_relpos_bias_factory=(
                lambda: _make_relpos_bias_general(num_attn_heads, dtype)))

      def _make_decoder_layer(shared_relative_position_bias):
        assert shared_relative_position_bias is None
        return t5_architecture.DecoderLayer(
            self_attention=make_attention1(num_attn_heads, dtype),
            encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
            mlp=make_mlp1(dtype),
            dropout_factory=make_dropout,
            layer_norm_factory=make_layer_norm,
            relative_position_bias_factory=(
                lambda: _make_relative_position_bias(num_attn_heads, dtype)))

      def _make_encoder(shared_token_embedder):
        assert shared_token_embedder is None
        return longt5_architecture.LongEncoder(
            num_layers=3,
            token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
            layer_factory=_make_encoder_layer,
            input_dropout_factory=make_dropout,
            output_dropout_factory=make_dropout,
            layer_norm_factory=make_layer_norm,
            dtype=dtype,
        )

      def _make_decoder(shared_token_embedder):
        assert shared_token_embedder is None
        return t5_architecture.Decoder(
            num_layers=2,
            token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
            layer_factory=_make_decoder_layer,
            dropout_factory=make_dropout,
            layer_norm_factory=make_layer_norm,
            output_logits_factory=None,
            dtype=dtype,
        )

      return longt5_architecture.LongEncoderDecoder(
          shared_token_embedder_factory=lambda: None,
          encoder_factory=_make_encoder,
          decoder_factory=_make_decoder,
      )

    transformer = make_config()
    encoder_input_tokens = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 0],
        ],
        dtype=np.int32)
    output, variables = transformer.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens,
        enable_dropout=False,
        method=transformer.encode,
    )

    # Same examples packed into a single row with segment ids and positions.
    encoder_input_tokens_packed = np.array([[101, 183, 20, 75, 101, 392, 19]],
                                           dtype=np.int32)
    encoder_segment_ids = np.array([[0, 0, 0, 0, 1, 1, 1]], dtype=np.int32)
    encoder_input_positions = np.array([[0, 1, 2, 3, 0, 1, 2]], dtype=np.int32)
    output_packed = transformer.apply(
        variables,
        encoder_input_tokens_packed,
        encoder_segment_ids=encoder_segment_ids,
        encoder_positions=encoder_input_positions,
        enable_dropout=False,
        method=transformer.encode,
    )

    # Check that the first element matches, which is entire first batch of the
    # padded setup, and the first 3 "tokens" of the packed example.
    np.testing.assert_allclose(
        output[0, :, :], output_packed[0, 0:4, :], rtol=1e-4, atol=1e-6)

    # Check that the second element matches, which is the first 3 "tokens" of
    # the padded example's second batch, and the last 3 of tokens the packed
    # example's first batch.
    np.testing.assert_allclose(
        output[1, 0:3, :], output_packed[0, 4:7, :], rtol=1e-4, atol=1e-6)
if __name__ == '__main__':
  # Run all absltest cases defined in this module.
  absltest.main()
| 33,452 | 34.816916 | 111 | py |
flaxformer | flaxformer-main/flaxformer/architectures/longt5/longt5_architecture.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains "architecture" classes for long input T5 models.
The classes are similar to the original T5 classes but allow custom long
attention classes in the encoder that don't depend on quadratic attention
masks or relative position biases. Currently the decoder side just uses
the original T5 classes (assuming outputs are not that long).
"""
import inspect
from typing import Callable, Optional, Any, Tuple
from flax import linen as nn
import jax.numpy as jnp
from typing_extensions import Protocol
from flaxformer import activation_partitioning
from flaxformer import transformer_common as common
from flaxformer.architectures.common import param_remapping
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.components import embedding
from flaxformer.components import transforms
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
from flaxformer.types import DType
# Type Stubs
# Alias so callers of this module can reference the decoder factory signature
# without importing t5_architecture directly.
MakeDecoderFn = t5_architecture.MakeDecoderFn

# pylint: disable=not-callable
# pytype: disable=not-callable
class MakeLongEncoderLayerFn(Protocol):
  """Signature for functions that make a long input encoder layer."""

  def __call__(
      self,
      *,
      shared_relpos_bias: Optional[nn.Module],
      shared_side_relpos_bias: Optional[nn.Module] = None
  ) -> 'LongEncoderLayer':
    """Makes a long input encoder layer.

    Args:
      shared_relpos_bias: Relative position bias shared for all layers within
        the encoder, which is the result of calling
        `shared_relpos_bias_factory` at the top-level model. Due to Flax
        limitations, we need to pass this in as an attribute to modules.
        Please use this argument instead of using a Python closure.
      shared_side_relpos_bias: Side relative position bias shared for all
        layers within the encoder, which is the result of calling
        `shared_side_relpos_bias_factory` at the top-level model. Most
        `LongSelfAttention` implementations do not use this, and instances of
        `MakeLongEncoderLayerFn` do not need to define this parameter if it's
        not used.

    Returns:
      LongEncoderLayer instance.
    """
    pass
class MakeLongEncoderFn(Protocol):
  """Signature for functions that will make a low-level LongEncoder."""

  def __call__(
      self,
      *,
      shared_token_embedder: Optional[embedding.Embed] = None,
      spmd_annotations: Any = None,
  ) -> 'LongEncoder':
    """Makes a low-level LongEncoder instance.

    Args:
      shared_token_embedder: Shared token embedder instance, which should be
        passed to the returned module. If this is non-None, you should use it
        instead of providing your own token embedder.
      spmd_annotations: Optional SPMD annotations for scanned layers.

    Returns:
      LongEncoder instance.
    """
    pass
class MakeLongSelfAttentionFn(Protocol):
  """Signature for functions that will make a LongSelfAttention module.

  See `long_attention.py` for the definition of `LongSelfAttention` and some
  particular implementations.
  """

  def __call__(
      self,
      *,
      relpos_bias: Optional[nn.Module] = None,
      side_relpos_bias: Optional[nn.Module] = None,
  ) -> nn.Module:
    """Makes a low-level LongSelfAttention instance.

    Args:
      relpos_bias: General relative position bias module, which should be
        passed to the returned module. If this is non-None, you should use it
        instead of providing your own module.
      side_relpos_bias: Side general relative position bias module, which
        should be passed to the returned module. Most `LongSelfAttention`
        implementations do not use this, and instances of
        `MakeLongSelfAttentionFn` do not need to define this parameter if it's
        not used.

    Returns:
      LongSelfAttention instance.
    """
    pass
class LongEncoderLayer(nn.Module, param_remapping.ParameterRemappable):
  """Transformer long input encoder layer.

  Attributes:
    attention_factory: Factory for making the long attention module.
    mlp: The MLP module, applied after attention.
    dropout_factory: A callable that returns a new dropout instance. This is
      applied after the attention module.
    layer_norm_factory: A callable that returns a new layer norm. This is
      applied before the attention module and before the MLP.
    relpos_bias_factory: A callable that returns general relative position
      bias instances. This should only be used for per-layer relative position
      biases; please use `shared_relpos_bias` if they are shared among layers.
    shared_relpos_bias: Shared general relative position bias module, usually
      owned by the Encoder.
    side_relpos_bias_factory: A callable that returns general relative
      position bias instances like `relpos_bias_factory`. Most
      `LongSelfAttention` implementations do not use this, so it can be simply
      left as None when unused.
    shared_side_relpos_bias: Optional shared side relative position bias
      module, usually owned by the Encoder. Most `LongSelfAttention`
      implementations do not use this, so it can be simply left as None when
      unused. This should not be used if `side_relpos_bias_factory` is used
      instead.
    activation_partitioning_dims: When set to 2, partitions intermediate
      variables containing the input and output of the encoder layer.
    parallel: whether to call attention and mlp in parallel
    sow_intermediates: whether to track intermediates using Module.sow.
    scanned: whether this layer is being scanned over.
    use_logit_mask: whether the input mask is used to zero out the padding
      representations.
  """
  attention_factory: MakeLongSelfAttentionFn
  mlp: nn.Module
  dropout_factory: Callable[[], nn.Module]
  layer_norm_factory: Callable[[], nn.Module]
  relpos_bias_factory: Optional[Callable[[], nn.Module]] = None
  shared_relpos_bias: Optional[nn.Module] = None
  side_relpos_bias_factory: Optional[Callable[[], nn.Module]] = None
  shared_side_relpos_bias: Optional[nn.Module] = None
  activation_partitioning_dims: int = 1
  parallel: bool = False
  sow_intermediates: bool = False
  scanned: bool = False
  use_logit_mask: bool = True

  def setup(self):
    # At most one of the per-layer factory and the shared module may be
    # provided for each kind of relative position bias.
    if (self.relpos_bias_factory is not None and
        self.shared_relpos_bias is not None):
      raise ValueError(
          'Please set at most one of relpos_bias_factory and shared_relpos_bias. '
          '(They can both be None however, e.g. for absolute position embeds.)')
    self.relpos_bias = (
        self.relpos_bias_factory()
        if self.relpos_bias_factory is not None else self.shared_relpos_bias)

    if (self.side_relpos_bias_factory is not None and
        self.shared_side_relpos_bias is not None):
      raise ValueError(
          'Please set at most one of side_relpos_bias_factory and '
          'shared_side_relpos_bias. (They can both be None however.)')
    self.side_relpos_bias = (
        self.side_relpos_bias_factory() if self.side_relpos_bias_factory
        is not None else self.shared_side_relpos_bias)

    # Only pass `side_relpos_bias` when present, since most attention
    # factories do not accept that keyword.
    attention_factory_kwargs = dict(relpos_bias=self.relpos_bias)
    if self.side_relpos_bias is not None:
      attention_factory_kwargs['side_relpos_bias'] = self.side_relpos_bias
    self.attention = self.attention_factory(**attention_factory_kwargs)  # pytype: disable=wrong-keyword-args  # dict-kwargs

    if self.parallel:
      # Parallel form uses a single pre-layer-norm and a single dropout.
      self.layer_norm = self.layer_norm_factory()
      self.dropout = self.dropout_factory()
    else:
      self.pre_attention_layer_norm = self.layer_norm_factory()
      self.pre_mlp_layer_norm = self.layer_norm_factory()
      self.post_attention_dropout = self.dropout_factory()
      self.post_mlp_dropout = self.dropout_factory()

  def __call__(self,
               inputs: Array,
               inputs_mask: Array,
               *,
               inputs_positions: Optional[Array] = None,
               inputs_segment_ids: Optional[Array] = None,
               enable_dropout: bool = True):
    """Applies a single LongT5 encoder layer.

    Args:
      inputs: input data [batch, length, emb_dim].
      inputs_mask: bool array with same shape as `inputs` indicating True for
        non-padding tokens and False for padding.
      inputs_positions: input subsequence positions for packed examples.
      inputs_segment_ids: input segmentation info for packed examples.
      enable_dropout: Enables dropout if set to True.

    Returns:
      output after transformer encoder block.
    """
    layer_input = inputs
    del inputs
    assert layer_input.ndim == 3
    layer_input = activation_partitioning.with_sharding_migration(
        layer_input,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))
    if self.parallel:
      x = self.layer_norm(layer_input)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))
      y = (
          self.attention(
              x,
              inputs_mask,
              positions=inputs_positions,
              segment_ids=inputs_segment_ids,
              enable_dropout=enable_dropout) +
          self.mlp(x, enable_dropout=enable_dropout))
      # Rescale the sum of the two parallel branches by 1/sqrt(2).
      y *= 2**-0.5
      y = layer_input + self.dropout(y, deterministic=not enable_dropout)
    else:
      # Attention block.
      x = self.pre_attention_layer_norm(layer_input)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # Convert padding representations to zero vectors. Note inputs_mask
      # and x are normally expected to have the same [batch, length] shape.
      # However, if this isn't the case, set use_logit_mask to False to
      # avoid a shape incompatibility error.
      if self.use_logit_mask:
        logit_mask = inputs_mask.astype(x.dtype)[:, :, jnp.newaxis]
        x = x * logit_mask

      # The shape should be maintained for the residual connection.
      # [batch, length, emb_dim] -> [batch, length, emb_dim]
      x = self.attention(
          x,
          inputs_mask,
          positions=inputs_positions,
          segment_ids=inputs_segment_ids,
          enable_dropout=enable_dropout)
      x = layer_input + self.post_attention_dropout(
          x, deterministic=not enable_dropout)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # MLP block.
      y = self.pre_mlp_layer_norm(x)
      y = activation_partitioning.with_sharding_migration(
          y,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # Convert padding representations to zero vectors
      if self.use_logit_mask:
        y = y * logit_mask.astype(y.dtype)

      # [batch, length, emb_dim] -> [batch, length, emb_dim]
      y = self.mlp(y, enable_dropout=enable_dropout)
      y = x + self.post_mlp_dropout(y, deterministic=not enable_dropout)
    y = activation_partitioning.with_sharding_migration(
        y,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))

    if self.sow_intermediates:
      self.sow('intermediates', 'activations', y)

    # scan expects functions to have a signature: fn(carry, in) --> carry, out
    # TODO: automate this detail.
    if self.scanned:
      return y, None
    else:
      return y
class LongEncoder(nn.Module, param_remapping.ParameterRemappable):
  """A stack of long input encoder layers.

  Attributes:
    layer_factory: A callable that returns an EncoderLayer.
    input_dropout_factory: A callable that returns the dropout to apply to the
      input.
    output_dropout_factory: A callable that returns the dropout to apply to
      the output. Perhaps for legacy rather than essential reasons, the
      broadcasting pattern is sometimes different from input_dropout_factory().
    layer_norm_factory: A callable that returns a layer norm.
    num_layers: Number of layers to generate.
    dtype: DType to cast the embedded inputs.
    layer_remat: whether and how to apply jax.remat to each layer to perform
      recomputation in the backward pass. Supported values are 'none', for no
      use of jax.remat; 'minimal', for a policy that recomputes only
      non-matmul operations (typically optimal); and 'full', for full
      recomputation of each layer. The (legacy) default is to use 'none' when
      `scan_layers=False` and 'full' when `scan_layers=True`.
    scan_layers: whether to scan over layers.
    spmd_annotations: spmd annotations needed for scanned layers.
    shared_relpos_bias_factory: A callable that returns a relative position
      bias instance which will be shared for all encoder layers. Only set this
      if using shared relative position biases.
    shared_side_relpos_bias_factory: A callable that returns a relative
      position bias instance for side inputs which will be shared for all
      encoder layers. Only set this if using shared side relative position
      biases. Most `LongSelfAttention` implementations do not use this, and it
      can be safely left as `None`.
    token_embedder_factory: A callable that returns a token embedder. Please
      provide either this or `shared_token_embedder`.
    shared_token_embedder: A callable that returns a token embedder shared
      between both encoder and decoder.
    position_embedder_factory: A callable that returns an absolute position
      embedder. Only provide this if you want absolute position embeddings.
  """
  layer_factory: MakeLongEncoderLayerFn
  input_dropout_factory: Callable[[], nn.Module]
  output_dropout_factory: Callable[[], nn.Module]
  layer_norm_factory: Callable[[], nn.Module]
  num_layers: int
  dtype: DType = jnp.float32
  layer_remat: str = 'legacy'
  scan_layers: bool = False
  spmd_annotations: Any = None
  shared_relpos_bias_factory: Optional[Callable[[], nn.Module]] = None
  shared_side_relpos_bias_factory: Optional[Callable[[], nn.Module]] = None

  # Embedders: Either a token_embedder_factory factory or shared token
  # embedder must be provided. The position embedder is optional and provided
  # when absolute position embeddings are desired.
  token_embedder_factory: Optional[Callable[[], embedding.Embed]] = None
  shared_token_embedder: Optional[embedding.Embed] = None
  position_embedder_factory: Optional[Callable[[], embedding.Embed]] = None

  def setup(self):
    # Set up the embedders.
    if (self.token_embedder_factory,
        self.shared_token_embedder).count(None) != 1:
      raise ValueError(
          'Please set exactly one of token_embedder_factory or '
          'shared_token_embedder. token_embedder_factory was %s, and '
          'shared_token_embedder was %s.' %
          (self.token_embedder_factory, self.shared_token_embedder))
    if self.shared_token_embedder is not None:
      embedders = {'token_ids': self.shared_token_embedder}
    else:
      self.token_embedder_factory: Callable[[], embedding.Embed]
      self.token_embedder = self.token_embedder_factory()
      embedders = {'token_ids': self.token_embedder}
    if self.position_embedder_factory is not None:
      self.position_embedder_factory: Callable[[], embedding.Embed]
      self.position_embedder = self.position_embedder_factory()
      embedders['position_ids'] = self.position_embedder
    self.embedder = embedding.MultiEmbed(embedders)

    self.input_dropout = self.input_dropout_factory()

    if self.scan_layers and (self.shared_relpos_bias_factory or
                             self.shared_side_relpos_bias_factory):
      # Bug fix: the two string fragments previously concatenated without a
      # separating space, rendering as "...shared relativeposition biases."
      raise ValueError("Scanned layer mode doesn't support shared relative "
                       'position biases.')
    self.relpos_bias = (
        self.shared_relpos_bias_factory()
        if self.shared_relpos_bias_factory is not None else None)
    self.side_relpos_bias = (
        self.shared_side_relpos_bias_factory()
        if self.shared_side_relpos_bias_factory is not None else None)

    # Only pass the side bias kwarg when present, since most layer factories
    # do not accept that keyword.
    layer_kwargs = dict(shared_relpos_bias=self.relpos_bias)
    if self.side_relpos_bias is not None:
      layer_kwargs['shared_side_relpos_bias'] = self.side_relpos_bias
    lyrf = lambda: self.layer_factory(**layer_kwargs)  # pytype: disable=wrong-keyword-args  # dict-kwargs
    lyrf = t5_architecture.maybe_remat(
        lyrf, self.layer_remat, self.scan_layers, static_argnums=(4,))
    if not self.scan_layers:
      self.layers = [lyrf() for _ in range(self.num_layers)]
      self.encoder = common.TransparentLayerSequence(self.layers)
    else:
      initializing = self.is_mutable_collection('params')
      # We scan the parameters along axis 1 as an XLA layout optimization.
      SCAN_AXIS = 1  # pylint: disable=invalid-name
      params_spec = SCAN_AXIS if initializing else transforms.ScanIn(SCAN_AXIS)
      cache_spec = 0
      scan_annotation = (
          self.spmd_annotations['encoder']
          if self.spmd_annotations is not None else None)
      lyrf = transforms.factory_scan(
          lyrf,
          in_axes=(nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast),
          variable_axes={
              'params': params_spec,
              'cache': cache_spec
          },
          split_rngs={
              'params': True,
              'dropout': True
          },
          length=self.num_layers,
          data_transform=transforms.inner_scan_spmd(scan_annotation,
                                                    SCAN_AXIS),
      )
      self.encoder = lyrf()

    self.encoder_norm = self.layer_norm_factory()
    self.output_dropout = self.output_dropout_factory()

  def embed_and_combine_inputs(self,
                               inputs,
                               inputs_positions=None,
                               *,
                               enable_dropout: bool = True):
    """Returns the combined embedded inputs for further encoding.

    Args:
      inputs: input token ids [batch, length].
      inputs_positions: optional absolute positions; defaults to
        `arange(length)` when a position embedder is configured.
      enable_dropout: Enables input dropout if set to True.

    Returns:
      Embedded inputs cast to `self.dtype`.
    """
    assert inputs.ndim == 2  # (batch, len)

    if 'position_ids' in self.embedder.embedders:
      if inputs_positions is None:
        seq_length = inputs.shape[-1]
        inputs_positions = jnp.arange(seq_length)[None, :]
      embedded_inputs = self.embedder(  # pytype: disable=wrong-arg-types  # jax-ndarray
          token_ids=inputs, position_ids=inputs_positions)
    else:
      embedded_inputs = self.embedder(token_ids=inputs)  # pytype: disable=wrong-arg-types  # jax-ndarray

    embedded_inputs = self.input_dropout(
        embedded_inputs, deterministic=not enable_dropout)

    embedded_inputs = embedded_inputs.astype(self.dtype)
    return embedded_inputs

  def encode_from_continuous_inputs(self,
                                    inputs,
                                    inputs_mask=None,
                                    *,
                                    inputs_positions=None,
                                    inputs_segment_ids=None,
                                    enable_dropout: bool = True):
    """Applies all the layers starting from the continuous (embedded) inputs."""
    # Apply all encoder layers. Because of residual connection, the width of
    # the network is kept at `cfg.emb_dim` throughout.
    encoder_outputs = self.encoder(
        inputs,
        inputs_mask,
        inputs_positions=inputs_positions,
        inputs_segment_ids=inputs_segment_ids,
        enable_dropout=enable_dropout)
    if self.scan_layers:
      # Scanned layers return (carry, out); keep only the carry.
      encoder_outputs = encoder_outputs[0]

    # Post-process the outputs of the final encoder layer.
    encoder_outputs = self.encoder_norm(encoder_outputs)
    encoder_outputs = self.output_dropout(
        encoder_outputs, deterministic=not enable_dropout)
    return encoder_outputs

  def __call__(self,
               inputs: Array,
               inputs_mask: Optional[Array] = None,
               *,
               inputs_positions: Optional[Array] = None,
               inputs_segment_ids: Optional[Array] = None,
               enable_dropout: bool = True):
    """Applies Transformer model on the inputs.

    Args:
      inputs: input data
      inputs_mask: bool array with same shape as `inputs` indicating True for
        non-padding tokens and False for padding. If `None` (the default), we
        automatically construct the mask based on which `inputs` are nonzero
        (rather than zero for padding).
      inputs_positions: input subsequence positions for packed examples.
      inputs_segment_ids: input segmentation info for packed examples.
      enable_dropout: Enables dropout if set to True.

    Returns:
      output of a transformer encoder.
    """
    if inputs_mask is None:
      inputs_mask = inputs > 0
    embedded_inputs = self.embed_and_combine_inputs(
        inputs,
        inputs_positions=inputs_positions,
        enable_dropout=enable_dropout)
    encoder_outputs = self.encode_from_continuous_inputs(
        embedded_inputs,
        inputs_mask=inputs_mask,
        inputs_positions=inputs_positions,
        inputs_segment_ids=inputs_segment_ids,
        enable_dropout=enable_dropout)
    return encoder_outputs
class LongEncoderDecoder(nn.Module, param_remapping.ParameterRemappable):
  """Transformer encoder-decoder for sequence-to-sequence with long inputs.

  Attributes:
    encoder_factory: Callable returning the lower-level LongEncoder object.
      When shared_token_embedder_factory is non-None, the embedder it produces
      is passed to this factory as the `shared_token_embedder` argument.
    decoder_factory: Callable returning the lower-level Decoder object. When
      shared_token_embedder_factory is non-None, the embedder it produces is
      passed to this factory as the `shared_token_embedder` argument.
    dtype: DType used by the encoder/decoder to cast embedded inputs and to
      generate attention masks.
    scan_layers: Whether layers are scanned; only used to pass this option to
      predict_fn.
    spmd_annotations: Only used for scanned spmd layers.
    shared_token_embedder_factory: Callable returning an embedder shared
      between the encoder and the decoder.
  """
  # Core components: factories for the encoder and decoder submodules.
  encoder_factory: MakeLongEncoderFn
  decoder_factory: MakeDecoderFn

  # Call-time behavior configuration. Many of these might eventually be better
  # as call parameters.
  dtype: DType = jnp.float32
  scan_layers: bool = False  # only used to pass this option to predict_fn.
  spmd_annotations: Any = None  # only used for scanned spmd layers
  shared_token_embedder_factory: Optional[Callable[[], embedding.Embed]] = None

  def setup(self):
    embedder_factory = self.shared_token_embedder_factory
    self.token_embedder = embedder_factory() if embedder_factory else None

    # TODO: Clean up SPMD annotation code.
    if self.spmd_annotations is None:
      enc_annotations = None
      dec_annotations = None
    else:
      enc_annotations = self.spmd_annotations['encoder']
      dec_annotations = self.spmd_annotations['decoder']

    def _accepts_spmd(factory_fn):
      # Factories may or may not declare an `spmd_annotations` parameter.
      return 'spmd_annotations' in inspect.signature(factory_fn).parameters

    if _accepts_spmd(self.encoder_factory):
      self.encoder = self.encoder_factory(
          shared_token_embedder=self.token_embedder,
          spmd_annotations=enc_annotations)
    else:
      self.encoder = self.encoder_factory(
          shared_token_embedder=self.token_embedder)

    if _accepts_spmd(self.decoder_factory):
      self.decoder = self.decoder_factory(
          shared_token_embedder=self.token_embedder,
          spmd_annotations=dec_annotations)
    else:
      self.decoder = self.decoder_factory(
          shared_token_embedder=self.token_embedder)

  def encode(self,
             encoder_input_tokens,
             encoder_segment_ids=None,
             encoder_positions=None,
             *,
             enable_dropout: bool = True):
    """Applies the Transformer encoder branch on the inputs.

    Args:
      encoder_input_tokens: Input data to the encoder.
      encoder_segment_ids: Encoder input segmentation info for packed
        examples.
      encoder_positions: Encoder input subsequence positions for packed
        examples.
      enable_dropout: Enables dropout if set to True.

    Returns:
      Encoded feature array from the transformer encoder.
    """
    nonpadding_mask = encoder_input_tokens > 0
    return self.encoder(  # pytype: disable=attribute-error
        encoder_input_tokens,
        inputs_mask=nonpadding_mask,
        inputs_positions=encoder_positions,
        inputs_segment_ids=encoder_segment_ids,
        enable_dropout=enable_dropout)

  def decode(
      self,
      encoded,
      encoder_input_tokens,  # only needed for masks
      decoder_input_tokens,
      decoder_target_tokens,
      encoder_segment_ids=None,
      decoder_segment_ids=None,
      decoder_positions=None,
      *,
      enable_dropout: bool = True,
      decode: bool = False,
      max_decode_length: Optional[int] = None):
    """Applies the Transformer decoder branch on encoded input and targets.

    Args:
      encoded: Encoded input data from the encoder.
      encoder_input_tokens: Encoder input, used here only to build masks.
      decoder_input_tokens: Input tokens for the decoder.
      decoder_target_tokens: Target tokens for the decoder.
      encoder_segment_ids: Encoder segmentation info for packed examples.
      decoder_segment_ids: Decoder segmentation info for packed examples.
      decoder_positions: Decoder subsequence positions for packed examples.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: Optional maximum decoding length; only used when
        defining relative position embedding parameters.

    Returns:
      Logits array from the transformer decoder.

    Raises:
      ValueError: If packing (`encoder_segment_ids`) is combined with
        autoregressive decoding.
    """
    encoder_nonpadding = encoder_input_tokens > 0
    if decode:
      # At decoding/inference time, decoder attention is not masked based on
      # targets padding.
      decoder_mask = None
      encoder_decoder_mask = dense_attention.make_attention_mask(
          jnp.ones_like(decoder_target_tokens),
          encoder_nonpadding,
          dtype=self.dtype)
    else:
      decoder_mask = dense_attention.make_decoder_mask(
          decoder_target_tokens=decoder_target_tokens,
          dtype=self.dtype,
          decoder_segment_ids=decoder_segment_ids)
      encoder_decoder_mask = dense_attention.make_attention_mask(
          decoder_target_tokens > 0, encoder_nonpadding, dtype=self.dtype)

    # Segmented (packed) data additionally needs block-diagonal masks.
    if encoder_segment_ids is not None:
      if decode:
        raise ValueError(
            'During decoding, packing should not be used but '
            '`encoder_segment_ids` was passed to `Transformer.decode`.')
      segment_mask = dense_attention.make_attention_mask(
          decoder_segment_ids,
          encoder_segment_ids,
          jnp.equal,
          dtype=self.dtype)
      encoder_decoder_mask = dense_attention.combine_masks(
          encoder_decoder_mask, segment_mask)

    # `decoder_target_tokens` itself is only needed when computing the loss,
    # not the logits, so it is not forwarded here.
    return self.decoder(
        encoded,
        decoder_input_tokens=decoder_input_tokens,
        decoder_positions=decoder_positions,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask,
        segment_ids=decoder_segment_ids,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length)

  @property
  def encoder_embedder(self) -> embedding.MultiEmbed:
    return self.encoder.embedder

  @property
  def decoder_embedder(self) -> embedding.MultiEmbed:
    return self.decoder.embedder

  def __call__(self,
               encoder_input_tokens,
               decoder_input_tokens,
               decoder_target_tokens,
               encoder_segment_ids=None,
               decoder_segment_ids=None,
               encoder_positions=None,
               decoder_positions=None,
               *,
               enable_dropout: bool = True,
               decode: bool = False,
               max_decode_length: Optional[int] = None):
    """Applies the full Transformer model on the inputs.

    Both `decoder_target_tokens` and its shifted counterpart
    `decoder_input_tokens` are required. For a packed dataset the inputs
    usually have additional processing applied; for example, the first element
    of each sequence has id 0 instead of the shifted EOS id from the previous
    sequence.

    Args:
      encoder_input_tokens: Input data to the encoder.
      decoder_input_tokens: Input tokens for the decoder.
      decoder_target_tokens: Target tokens for the decoder.
      encoder_segment_ids: Encoder segmentation info for packed examples.
      decoder_segment_ids: Decoder segmentation info for packed examples.
      encoder_positions: Encoder subsequence positions for packed examples.
      decoder_positions: Decoder subsequence positions for packed examples.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: Optional maximum decoding length; only used when
        defining relative position embedding parameters.

    Returns:
      Logits array from the full transformer.
    """
    encodings = self.encode(
        encoder_input_tokens,
        encoder_segment_ids=encoder_segment_ids,
        encoder_positions=encoder_positions,
        enable_dropout=enable_dropout)
    return self.decode(
        encodings,
        encoder_input_tokens,  # Only used for masks.
        decoder_input_tokens,
        decoder_target_tokens,
        encoder_segment_ids=encoder_segment_ids,
        decoder_segment_ids=decoder_segment_ids,
        decoder_positions=decoder_positions,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length)
| 30,917 | 40.224 | 124 | py |
flaxformer | flaxformer-main/flaxformer/architectures/longt5/tensor_utils_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor_utils."""
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
from flaxformer.architectures.longt5 import tensor_utils
class TensorUtilsTest(parameterized.TestCase):
  """Tests for the array-manipulation helpers in `tensor_utils`."""
  def test_pad_to_multiple_1d(self):
    """Pads a 1D array for several factors (int and jnp scalar)."""
    array = np.arange(3) + 1
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=3, axis=0), [1, 2, 3])
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=5, axis=0), [1, 2, 3, 0, 0])
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=7, axis=0),
        [1, 2, 3, 0, 0, 0, 0])
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=2, axis=0), [1, 2, 3, 0])
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=1, axis=0), [1, 2, 3])
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=jnp.array(7), axis=0),
        [1, 2, 3, 0, 0, 0, 0])
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=jnp.array(1), axis=0),
        [1, 2, 3])
  @parameterized.named_parameters(
      ('int_factor', 5),
      ('array_factor', np.array(5)),
  )
  def test_pad_to_multiple_padding_mode(self, factor):
    """Checks non-default numpy padding modes and custom constant values."""
    array = np.arange(3) + 1
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(
            array, factor=factor, axis=0, mode='reflect'), [1, 2, 3, 2, 1])
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(
            array, factor=factor, axis=0, mode='symmetric'), [1, 2, 3, 3, 2])
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(
            array, factor=factor, axis=0, constant_values=-1),
        [1, 2, 3, -1, -1])
  @parameterized.named_parameters(
      ('int_factor', 4),
      ('array_factor', np.array(4)),
  )
  def test_pad_to_multiple_2d(self, factor):
    """Pads a 2D array along both a leading and a trailing axis."""
    array = np.ones([3, 5], dtype=np.float32)
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=factor, axis=0),
        [
            [1, 1, 1, 1, 1], #
            [1, 1, 1, 1, 1], #
            [1, 1, 1, 1, 1], #
            [0, 0, 0, 0, 0], #
        ])
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=factor, axis=-1),
        [
            [1, 1, 1, 1, 1, 0, 0, 0], #
            [1, 1, 1, 1, 1, 0, 0, 0], #
            [1, 1, 1, 1, 1, 0, 0, 0], #
        ])
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=factor + 1, axis=1), array)
  @parameterized.named_parameters(
      ('int_factor', 3),
      ('array_factor', np.array(3)),
  )
  def test_pad_to_multiple_3d(self, factor):
    """Pads a 3D array, including the no-op case where the axis already fits."""
    array = np.ones([2, 3, 5], dtype=np.float32)
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=factor, axis=0),
        [
            [
                [1, 1, 1, 1, 1], #
                [1, 1, 1, 1, 1], #
                [1, 1, 1, 1, 1], #
            ], #
            [
                [1, 1, 1, 1, 1], #
                [1, 1, 1, 1, 1], #
                [1, 1, 1, 1, 1], #
            ], #
            [
                [0, 0, 0, 0, 0], #
                [0, 0, 0, 0, 0], #
                [0, 0, 0, 0, 0], #
            ]
        ])
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=factor, axis=-2), array)
    np.testing.assert_array_equal(
        tensor_utils.pad_to_multiple(array, factor=factor, axis=-1),
        [
            [
                [1, 1, 1, 1, 1, 0], #
                [1, 1, 1, 1, 1, 0], #
                [1, 1, 1, 1, 1, 0], #
            ], #
            [
                [1, 1, 1, 1, 1, 0], #
                [1, 1, 1, 1, 1, 0], #
                [1, 1, 1, 1, 1, 0], #
            ]
        ])
  @parameterized.named_parameters(
      ('int_block_len', lambda x: x),
      ('array_block_len', jnp.array),
  )
  def test_split_into_blocks_1d(self, wrap_fn):
    """Splits a 1D array into blocks for lengths 2..8, padding as needed."""
    array = np.arange(6) + 1
    np.testing.assert_array_equal(
        tensor_utils.split_into_blocks(array, block_len=wrap_fn(2), axis=0),
        [[1, 2], [3, 4], [5, 6]])
    np.testing.assert_array_equal(
        tensor_utils.split_into_blocks(array, block_len=wrap_fn(3), axis=0),
        [[1, 2, 3], [4, 5, 6]])
    np.testing.assert_array_equal(
        tensor_utils.split_into_blocks(array, block_len=wrap_fn(4), axis=0),
        [[1, 2, 3, 4], [5, 6, 0, 0]])
    np.testing.assert_array_equal(
        tensor_utils.split_into_blocks(array, block_len=wrap_fn(5), axis=0),
        [[1, 2, 3, 4, 5], [6, 0, 0, 0, 0]])
    np.testing.assert_array_equal(
        tensor_utils.split_into_blocks(array, block_len=wrap_fn(6), axis=0),
        [[1, 2, 3, 4, 5, 6]])
    np.testing.assert_array_equal(
        tensor_utils.split_into_blocks(array, block_len=wrap_fn(7), axis=0),
        [[1, 2, 3, 4, 5, 6, 0]])
    np.testing.assert_array_equal(
        tensor_utils.split_into_blocks(
            array, block_len=wrap_fn(8), axis=0, pad_value=-1),
        [[1, 2, 3, 4, 5, 6, -1, -1]])
  @parameterized.named_parameters(
      ('int_block_len', lambda x: x),
      ('array_block_len', jnp.array),
  )
  def test_split_into_blocks_3d(self, wrap_fn):
    """Splits a 3D array into blocks along different (signed) axes."""
    # shape: [2, 4, 2]
    array = [
        [[1, -1], [2, -2], [3, -3], [4, -4]], #
        [[11, 21], [12, 22], [13, 23], [14, 24]]
    ]
    np.testing.assert_array_equal(
        tensor_utils.split_into_blocks(array, block_len=wrap_fn(2), axis=-2),
        [
            [
                [[1, -1], [2, -2]], #
                [[3, -3], [4, -4]], #
            ],
            [
                [[11, 21], [12, 22]], #
                [[13, 23], [14, 24]], #
            ]
        ])
    np.testing.assert_array_equal(
        tensor_utils.split_into_blocks(array, block_len=wrap_fn(3), axis=1),
        [
            [
                [[1, -1], [2, -2], [3, -3]], #
                [[4, -4], [0, 0], [0, 0]], #
            ],
            [
                [[11, 21], [12, 22], [13, 23]], #
                [[14, 24], [0, 0], [0, 0]], #
            ]
        ])
    np.testing.assert_array_equal(
        tensor_utils.split_into_blocks(array, block_len=wrap_fn(3), axis=-1),
        [
            [
                [[1, -1, 0]], #
                [[2, -2, 0]], #
                [[3, -3, 0]], #
                [[4, -4, 0]], #
            ],
            [
                [[11, 21, 0]], #
                [[12, 22, 0]], #
                [[13, 23, 0]], #
                [[14, 24, 0]], #
            ],
        ])
  def test_concat_3_blocks(self):
    """Concatenates each block with its left and right neighbors (zero-padded at the ends)."""
    # shape: [batch=2, num_blocks=3, block_len=2, hidden_size=2]
    blocked_seq = [
        [
            [[1, -1], [2, -2]], #
            [[3, -3], [4, -4]], #
            [[5, -5], [6, -6]], #
        ], #
        [
            [[.1, -.1], [.2, -.2]], #
            [[.3, -.3], [.4, -.4]], #
            [[.5, -.5], [.6, -.6]], #
        ], #
    ]
    np.testing.assert_allclose(
        tensor_utils.concat_3_blocks(blocked_seq, block_axis=-3, seq_axis=-2),
        [
            [
                [[0, 0], [0, 0], [1, -1], [2, -2], [3, -3], [4, -4]], #
                [[1, -1], [2, -2], [3, -3], [4, -4], [5, -5], [6, -6]], #
                [[3, -3], [4, -4], [5, -5], [6, -6], [0, 0], [0, 0]], #
            ], #
            [
                [[0, 0], [0, 0], [.1, -.1], [.2, -.2], [.3, -.3], [.4, -.4]], #
                [[.1, -.1], [.2, -.2], [.3, -.3], [.4, -.4], [.5, -.5],
                 [.6, -.6]], #
                [[.3, -.3], [.4, -.4], [.5, -.5], [.6, -.6], [0, 0], [0, 0]], #
            ], #
        ])
  def test_concat_3_blocks_with_extra_dim(self):
    """Same as above but with an extra trailing (heads) dimension."""
    # shape: [batch=1, num_blocks=3, block_len=2, num_heads=1, size_per_head=2]
    blocked_seq = [[
        [[[1, -1]], [[2, -2]]], #
        [[[3, -3]], [[4, -4]]], #
        [[[5, -5]], [[6, -6]]], #
    ]]
    np.testing.assert_array_equal(
        tensor_utils.concat_3_blocks(blocked_seq, block_axis=1, seq_axis=2),
        [[
            [[[0, 0]], [[0, 0]], [[1, -1]], [[2, -2]], [[3, -3]], [[4, -4]]], #
            [[[1, -1]], [[2, -2]], [[3, -3]], [[4, -4]], [[5, -5]], [[6, -6]]],
            [[[3, -3]], [[4, -4]], [[5, -5]], [[6, -6]], [[0, 0]], [[0, 0]]], #
        ]])
  @parameterized.parameters(
      dict(shape=(2, 3, 4, 5), block_axis=-3, seq_axis=-2),
      dict(shape=(1, 2, 3, 4, 5), block_axis=-4, seq_axis=-2),
  )
  def test_concat_3_blocks_one_hot(self, shape, block_axis, seq_axis):
    """Cross-checks the one-hot implementation against the reference one."""
    # Make sure the output from `concat_3_blocks_one_hot` is the same as
    # `concat_3_blocks`.
    seed = 1234
    np.random.seed(seed)
    array = np.random.randn(*shape)
    output = tensor_utils.concat_3_blocks_one_hot(array, block_axis, seq_axis)
    expected = tensor_utils.concat_3_blocks(array, block_axis, seq_axis)
    np.testing.assert_array_equal(output, expected)
  def test_make_3block_local_att_mask_no_segment_ids(self):
    """Local attention mask from a padding mask only (radius-limited)."""
    input_mask = np.array(
        [
            [1, 1, 1, 1, 1], #
            [1, 1, 1, 0, 0], #
        ],
        dtype=np.bool_)
    np.testing.assert_array_equal(
        tensor_utils.make_3block_local_att_mask(2, input_mask),
        np.array(
            [
                [
                    [[0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0]], #
                    [[0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0]], #
                    [[0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                ], #
                [
                    [[0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0]], #
                    [[0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                ], #
            ],
            dtype=np.bool_))
  def test_make_3block_local_att_mask_w_causal_mask(self):
    """Local attention mask additionally restricted to causal (left) context."""
    input_mask = np.array(
        [
            [1, 1, 1, 1, 1], #
            [1, 1, 1, 0, 0], #
        ],
        dtype=np.bool_)
    np.testing.assert_array_equal(
        tensor_utils.make_3block_local_att_mask(
            2, input_mask, use_causal_mask=True),
        np.array(
            [
                [
                    [[0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0]], #
                    [[0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0]], #
                    [[0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                ], #
                [
                    [[0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0]], #
                    [[0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                    [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                ], #
            ],
            dtype=np.bool_))
  def test_make_3block_local_att_mask_with_segment_ids(self):
    """Local attention mask further restricted to same-segment tokens."""
    input_mask = np.array(
        [
            [1, 1, 1, 1, 1], #
            [1, 1, 1, 0, 0], #
        ],
        dtype=np.bool_)
    segment_ids = [
        [1, 2, 2, 3, 3], #
        [1, 1, 2, 0, 0], #
    ]
    np.testing.assert_array_equal(
        tensor_utils.make_3block_local_att_mask(2, input_mask, segment_ids),
        np.array(
            [
                [
                    [[0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0]], #
                    [[0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0]], #
                    [[0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                ], #
                [
                    [[0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0]], #
                    [[0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                    [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                ], #
            ],
            dtype=np.bool_))
  def test_make_3block_local_att_mask_no_segment_ids_full_block(self):
    """Full-block attention (no radius limit inside the 3-block window)."""
    input_mask = np.array(
        [
            [1, 1, 1, 1, 1], #
            [1, 1, 1, 0, 0], #
        ],
        dtype=np.bool_)
    np.testing.assert_array_equal(
        tensor_utils.make_3block_local_att_mask(
            2, input_mask, use_full_block_att=True),
        np.array(
            [
                [
                    [[0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1]], #
                    [[1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 0]], #
                    [[1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                ], #
                [
                    [[0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0]], #
                    [[1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                    [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                ], #
            ],
            dtype=np.bool_))
  def test_make_3block_local_att_mask_with_segment_ids_full_block(self):
    """Full-block attention combined with segment restrictions."""
    input_mask = np.array(
        [
            [1, 1, 1, 1, 1], #
            [1, 1, 1, 0, 0], #
        ],
        dtype=np.bool_)
    segment_ids = [
        [1, 2, 2, 3, 3], #
        [1, 1, 2, 0, 0], #
    ]
    np.testing.assert_array_equal(
        tensor_utils.make_3block_local_att_mask(
            2, input_mask, segment_ids, use_full_block_att=True),
        np.array(
            [
                [
                    [[0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0]], #
                    [[0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0]], #
                    [[0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                ], #
                [
                    [[0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0]], #
                    [[0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                    [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], #
                ], #
            ],
            dtype=np.bool_))
  def test_make_3block_relative_position(self):
    """Relative positions within a 3-block window for block_len=3."""
    np.testing.assert_array_equal(
        tensor_utils.make_3block_relative_position(3),
        [
            [-3, -2, -1, 0, 1, 2, 3, 4, 5], #
            [-4, -3, -2, -1, 0, 1, 2, 3, 4], #
            [-5, -4, -3, -2, -1, 0, 1, 2, 3], #
        ])
  def test_make_custom_3block_relative_position_simple_input(self):
    """Custom relative positions for plain 0..len-1 position ids."""
    positions = np.arange(3, dtype=np.int32)[np.newaxis, :]
    # Unlike `make_3block_relative_position`, the "position" for all
    # padding tokens is set to -1, but this shouldn't matter since attention
    # to padding tokens should be masked out.
    np.testing.assert_array_equal(
        tensor_utils.make_custom_3block_relative_position(3, positions),
        [[[
            [-1, -1, -1, 0, 1, 2, -1, -1, -1], #
            [-2, -2, -2, -1, 0, 1, -2, -2, -2], #
            [-3, -3, -3, -2, -1, 0, -3, -3, -3], #
        ]]])
  def test_make_custom_3block_relative_position_customized_input(self):
    """Custom relative positions for arbitrary (non-monotonic) position ids."""
    positions = [
        [5, 4, 3, 2, 1, 0], #
        [3, 4, 0, 1, 2, 5], #
    ]
    np.testing.assert_array_equal(
        tensor_utils.make_custom_3block_relative_position(2, positions),
        [
            [
                [
                    [-6, -6, 0, -1, -2, -3], #
                    [-5, -5, 1, 0, -1, -2], #
                ],
                [
                    [2, 1, 0, -1, -2, -3], #
                    [3, 2, 1, 0, -1, -2], #
                ],
                [
                    [2, 1, 0, -1, -2, -2], #
                    [3, 2, 1, 0, -1, -1], #
                ],
            ],
            [
                [
                    [-4, -4, 0, 1, -3, -2], #
                    [-5, -5, -1, 0, -4, -3], #
                ],
                [
                    [3, 4, 0, 1, 2, 5], #
                    [2, 3, -1, 0, 1, 4], #
                ],
                [
                    [-2, -1, 0, 3, -3, -3], #
                    [-5, -4, -3, 0, -6, -6], #
                ],
            ],
        ])
  def test_positions_from_segment_ids(self):
    """Positions restart from 0 at every segment boundary."""
    segment_ids = [
        [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3], #
        [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 0, 0], #
        [1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6], #
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #
    ]
    expected_positions = [
        [0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2], #
        [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 0, 1, 2, 3, 0, 0], #
        [0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7], #
        list(range(16)), #
    ]
    np.testing.assert_array_equal(
        expected_positions,
        tensor_utils.positions_from_segment_ids(segment_ids))
# Allows running this test module directly (e.g. `python tensor_utils_test.py`).
if __name__ == '__main__':
  absltest.main()
| 16,879 | 31.461538 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/longt5/relative_position_biases_general_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for relative_position_biases_general.
The parameter names and outputs should be identical to the original "dense"
`RelativePositionBiases` when we use `full_att_rp_bucket()`.
"""
from absl.testing import absltest
import jax
from jax import random
import jax.numpy as jnp
from flaxformer.architectures.longt5 import relative_position_biases_general as rp_biases_general
class RelativePositionBiasesGeneralTest(absltest.TestCase):
  """Tests for `RelativePositionBiasesGeneral`.

  With `full_att_rp_bucket()`, parameter names and outputs should be identical
  to the original "dense" `RelativePositionBiases`.
  """

  def setUp(self):
    """Builds a small bias module shared by every test."""
    self.num_heads = 3
    self.query_len = 5
    self.key_len = 7
    self.relative_attention = rp_biases_general.RelativePositionBiasesGeneral(
        num_buckets=12,
        max_distance=10,
        num_heads=3,
        dtype=jnp.float32,
    )
    super().setUp()

  def test_relative_attention_bidirectional_params(self):
    """Tests that bidirectional relative position biases have expected params."""
    rp_bucket = self.relative_attention.full_att_rp_bucket(
        self.query_len, self.key_len, bidirectional=True)
    params = self.relative_attention.init(
        random.PRNGKey(0), rp_bucket, mutable=['params'])
    # `jax.tree_map` is deprecated (and removed in recent JAX releases); use
    # the canonical `jax.tree_util.tree_map` instead.
    param_shapes = jax.tree_util.tree_map(lambda x: x.shape, params)
    self.assertEqual(param_shapes, {
        'params': {
            'rel_embedding': (3, 12),
        },
    })

  def test_regression_relative_attention_bidirectional_values(self):
    """Tests that bidirectional relative position biases match expected values.
    """
    rp_bucket = self.relative_attention.full_att_rp_bucket(
        self.query_len, self.key_len, bidirectional=True)
    outputs, unused_params = self.relative_attention.init_with_output(
        random.PRNGKey(0), rp_bucket)
    self.assertEqual(outputs.shape,
                     (1, self.num_heads, self.query_len, self.key_len))
    self.assertAlmostEqual(outputs[0, 0, 0, 0], -0.1094, places=5)
    self.assertAlmostEqual(outputs[0, 1, 2, 1], -0.22087, places=5)
    self.assertAlmostEqual(outputs[0, 1, 4, 6], 0.27360, places=5)
    self.assertAlmostEqual(outputs[0, 2, 4, 6], -0.31798, places=5)

  def test_relative_attention_unidirectional_params(self):
    """Tests that unidirectional relative position biases have expected params."""
    rp_bucket = self.relative_attention.full_att_rp_bucket(
        self.query_len, self.key_len, bidirectional=False)
    params = self.relative_attention.init(
        random.PRNGKey(0), rp_bucket, mutable=['params'])
    # See note above: `jax.tree_util.tree_map` replaces deprecated
    # `jax.tree_map`.
    param_shapes = jax.tree_util.tree_map(lambda x: x.shape, params)
    self.assertEqual(param_shapes, {
        'params': {
            'rel_embedding': (3, 12),
        },
    })

  def test_regression_relative_attention_unidirectional_values(self):
    """Tests that unidirectional relative position biases match expected values.
    """
    rp_bucket = self.relative_attention.full_att_rp_bucket(
        self.query_len, self.key_len, bidirectional=False)
    outputs, unused_params = self.relative_attention.init_with_output(
        random.PRNGKey(0), rp_bucket)
    self.assertEqual(outputs.shape,
                     (1, self.num_heads, self.query_len, self.key_len))
    self.assertAlmostEqual(outputs[0, 0, 0, 0], -0.109404, places=5)
    self.assertAlmostEqual(outputs[0, 1, 2, 1], -0.220874, places=5)
    self.assertAlmostEqual(outputs[0, 1, 4, 6], -0.189960, places=5)
    self.assertAlmostEqual(outputs[0, 2, 4, 6], 0.366049, places=5)
# Allows running this test module directly.
if __name__ == '__main__':
  absltest.main()
| 3,959 | 37.446602 | 97 | py |
flaxformer | flaxformer-main/flaxformer/architectures/longt5/long_attention_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for long attention classes."""
import dataclasses
from typing import Optional
from absl.testing import absltest
from absl.testing import parameterized
from flax.core import frozen_dict
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
from flaxformer.architectures.longt5 import long_attention
from flaxformer.architectures.longt5 import relative_position_biases_general
from flaxformer.components import relative_position_biases
from flaxformer.components.attention import dense_attention
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
# Short module-level alias for the bias class used throughout the tests below.
RelativePositionBiasesGeneral = (
    relative_position_biases_general.RelativePositionBiasesGeneral)
@dataclasses.dataclass(frozen=True)
class EncoderLocalSelfAttArgs:
  """Argument bundle for building and invoking EncoderLocalSelfAttention."""
  num_heads: int = 1
  local_radius: int = 2
  batch_size: int = 2
  qkv_features: int = 3
  out_features: int = 4
  q_len: int = 5
  features: int = 6
  broadcast_dropout: bool = True
  dropout_rate: float = 0.1
  enable_dropout: bool = True
  use_bias: bool = True
  rescale_logits: bool = True
  float32_logits: bool = False
  split_head_kernel: bool = False
  kernels_to_fuse: Optional[str] = None  # Only 'qkv' is supported.
  relpos_bias: Optional[RelativePositionBiasesGeneral] = None

  def init_args(self):
    """Returns constructor kwargs for EncoderLocalSelfAttention."""
    return {
        'num_heads': self.num_heads,
        'local_radius': self.local_radius,
        'qkv_features': self.qkv_features,
        'out_features': self.out_features,
        'broadcast_dropout': self.broadcast_dropout,
        'dropout_rate': self.dropout_rate,
        'use_bias': self.use_bias,
        'rescale_logits': self.rescale_logits,
        'float32_logits': self.float32_logits,
        'split_head_kernel': self.split_head_kernel,
        'kernels_to_fuse': self.kernels_to_fuse,
        'relpos_bias': self.relpos_bias,
    }

  def apply_args(self):
    """Returns call kwargs (all-ones inputs and mask) for the module."""
    return dict(
        inputs=jnp.ones((self.batch_size, self.q_len, self.features)),
        inputs_mask=jnp.ones((self.batch_size, self.q_len)),
        enable_dropout=self.enable_dropout)
class LongAttentionTest(parameterized.TestCase):
def test_local_self_attention_shape(self):
# This test only checks for shape but tries to make sure all code paths are
# reached.
dropout_rng = random.PRNGKey(0)
batch_size, num_heads, seq_len, qk_depth, v_depth = 1, 2, 8, 3, 5
local_radius = 1
block_len = local_radius + 1 # 2
num_blocks = seq_len // block_len + bool(seq_len % block_len) # 4
query = jnp.ones((batch_size, seq_len, num_heads, qk_depth))
key = jnp.ones((batch_size, seq_len, num_heads, qk_depth))
value = jnp.ones((batch_size, seq_len, num_heads, v_depth))
bias = jnp.ones(
(batch_size, num_blocks, num_heads, block_len, 3 * block_len))
args = dict(
query=query,
key=key,
value=value,
local_radius=local_radius,
bias=bias,
rescale_logits=True,
dropout_rng=dropout_rng,
dropout_rate=0.5,
enable_dropout=True,
)
output = long_attention._local_self_attention(
**args, broadcast_dropout=True)
self.assertEqual(output.shape, (batch_size, seq_len, num_heads, v_depth))
# Make sure we also reach the code path where we don't broadcast dropout.
output = long_attention._local_self_attention(
**args, broadcast_dropout=False)
self.assertEqual(output.shape, (batch_size, seq_len, num_heads, v_depth))
def test_encoder_local_self_attention(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = EncoderLocalSelfAttArgs()
model = long_attention.EncoderLocalSelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_encoder_local_self_attention_cast_logits_float32(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = EncoderLocalSelfAttArgs(float32_logits=True)
model = long_attention.EncoderLocalSelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_encoder_local_self_attention_no_rescale_logits(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = EncoderLocalSelfAttArgs(rescale_logits=False)
model = long_attention.EncoderLocalSelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_encoder_local_self_attention_no_out_features(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = EncoderLocalSelfAttArgs(out_features=None)
model = long_attention.EncoderLocalSelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.features))
def test_encoder_local_self_attention_with_kernel_fusion(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = EncoderLocalSelfAttArgs(
split_head_kernel=True, kernels_to_fuse='qkv')
model = long_attention.EncoderLocalSelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
@parameterized.named_parameters(
    ('even_blocking', 15),
    ('uneven_blocking', 16),
    ('degenerate_blocking', 35),
)
def test_encoder_local_self_attention_logic(self, local_radius):
  """Compares local attention against masked full attention.

  Args:
    local_radius: number of tokens each position may attend to on each side.
      The named cases exercise even, uneven, and degenerate (radius larger
      than a block) blocking of the 64-token sequence.
  """
  # This test checks the logic of the local attention calculations by
  # comparing with the output of `MultiHeadDotProductAttention`
  # (full attention) after manually applying the local sparsity pattern.
  # The outputs should be identical for non-padding tokens.
  keys = random.split(random.PRNGKey(0), 4)
  batch_size = 3
  seq_len = 64
  in_features = 11
  out_features = 12
  num_heads = 5
  dtype = jnp.float32
  inputs = random.normal(keys[0], (batch_size, seq_len, in_features))
  # ~10% of tokens are randomly marked as padding.
  inputs_mask = random.bernoulli(keys[1], 0.9, (batch_size, seq_len))
  inputs_mask = inputs_mask.astype(jnp.bool_)
  # Random segment boundaries: cumsum over sparse boundary markers yields
  # monotonically increasing segment ids, simulating packed examples.
  segment_ids = jnp.cumsum(
      random.bernoulli(keys[2], 0.1, (batch_size, seq_len)), axis=-1)
  # `positions` is unused in `EncoderLocalSelfAttention`, so we set to zeros.
  positions = jnp.zeros_like(segment_ids)
  att_config = dict(
      num_heads=num_heads,
      dtype=dtype,
      qkv_features=20,
      out_features=out_features,
      use_rotary_embedding=True,
  )
  relpos_bias = RelativePositionBiasesGeneral(
      num_heads=num_heads, num_buckets=32, max_distance=128, dtype=dtype)
  local_att = long_attention.EncoderLocalSelfAttention(
      local_radius=local_radius,
      relpos_bias=relpos_bias,
      **att_config,
  )
  full_att = dense_attention.MultiHeadDotProductAttention(
      use_bias=True, **att_config
  )
  local_att_output, local_att_vars = local_att.init_with_output(
      keys[3],
      inputs,
      inputs_mask,
      segment_ids=segment_ids,
      positions=positions,
      enable_dropout=False)
  relpos_bias_vars = dict(params=local_att_vars['params']['relpos_bias'])
  # Full attention uses the same variables as local attention (ignoring
  # `relpos_bias`).
  full_att_vars = local_att_vars
  rp_bucket = relpos_bias.full_att_rp_bucket(
      qlen=seq_len, klen=seq_len, bidirectional=True)
  bias = relpos_bias.apply(relpos_bias_vars, rp_bucket)
  # Combine padding mask and segment mask so cross-segment attention is
  # disallowed, matching the packed-input semantics of the local layer.
  mask = dense_attention.make_attention_mask(
      inputs_mask, inputs_mask, dtype=dtype)
  mask = dense_attention.combine_masks(
      mask,
      dense_attention.make_attention_mask(
          segment_ids, segment_ids, jnp.equal, dtype=dtype))
  # Overlay local sparsity attention mask for full attention case.
  range_array = np.arange(seq_len)
  locality_mask = np.abs(range_array[np.newaxis, :] -
                         range_array[:, np.newaxis]) <= local_radius
  # [1, 1, seq_len, seq_len] shape
  locality_mask = locality_mask[np.newaxis, np.newaxis, :, :]
  mask = dense_attention.combine_masks(mask, locality_mask)
  full_att_output = full_att.apply(
      full_att_vars, inputs, inputs, mask, bias, enable_dropout=False)
  np.testing.assert_array_equal(local_att_output.shape,
                                (batch_size, seq_len, out_features))
  np.testing.assert_array_equal(local_att_output.shape, full_att_output.shape)
  # Padding tokens may have different embeddings which we'll want to ignore
  # in our comparison, so we "clear" them to zero.
  def clear_padding(array):
    return array * inputs_mask[..., jnp.newaxis].astype(dtype)
  np.testing.assert_allclose(
      clear_padding(local_att_output),
      clear_padding(full_att_output),
      atol=1e-5)
@parameterized.named_parameters(
    ('even_blocking', 15),
    ('uneven_blocking', 16),
    ('degenerate_blocking', 35),
    ('uneven_blocking_use_kernel_fusion', 16, True),
    ('even_blocking_causal', 15, False, True),
    ('uneven_blocking_causal', 16, False, True),
    ('degenerate_blocking_causal', 35, False, True),
    ('uneven_blocking_use_kernel_fusion_causal', 16, True, True),
)
def test_etc_transient_global_self_attention(self,
                                             local_radius,
                                             use_kernel_fusion=False,
                                             causal=False):
  """Smoke test: the layer runs and yields the expected output shape.

  Args:
    local_radius: number of tokens attended to on each side of a position.
    use_kernel_fusion: whether to fuse the key/value kernels.
    causal: whether to apply a causal attention mask.
  """
  # This test just makes sure the layer successfully runs with different
  # input sizes.
  # Bug fix: `keys[3]` is consumed by `init_with_output` below, so we need
  # four subkeys; splitting into only 3 made `keys[3]` an out-of-bounds
  # index. (`keys[1]` is intentionally left unused.)
  keys = random.split(random.PRNGKey(0), 4)
  batch_size = 3
  seq_len = 64
  tokens_per_block = 4
  in_features = 11
  out_features = 12
  num_heads = 5
  dtype = jnp.float32
  inputs = random.normal(keys[0], (batch_size, seq_len, in_features))
  # Construct realistic packed inputs.
  new_segment_marker = random.bernoulli(keys[2], 0.1, (batch_size, seq_len))
  segment_ids = jnp.cumsum(new_segment_marker, axis=-1)
  # We make the last segment padding.
  is_padding = segment_ids == jnp.max(segment_ids, axis=-1, keepdims=True)
  inputs_mask = jnp.logical_not(is_padding)
  # Create positions based on segments: each token's position restarts at 0
  # at the start of its segment (running cumulative max of boundary indices).
  arange = np.broadcast_to(np.arange(seq_len), segment_ids.shape)
  positions = arange - np.maximum.accumulate(
      new_segment_marker * arange, axis=-1)
  positions *= inputs_mask
  relpos_bias = RelativePositionBiasesGeneral(
      num_heads=num_heads, num_buckets=32, max_distance=128, dtype=dtype)
  side_relpos_bias = RelativePositionBiasesGeneral(
      num_heads=num_heads, num_buckets=32, max_distance=128, dtype=dtype)
  att_layer = long_attention.EtcTransientGlobalSelfAttention(
      num_heads=num_heads,
      tokens_per_block=tokens_per_block,
      local_radius=local_radius,
      dtype=dtype,
      causal=causal,
      qkv_features=15,
      out_features=out_features,
      rescale_logits=use_kernel_fusion,
      split_head_kernel=use_kernel_fusion,
      kernels_to_fuse='kv' if use_kernel_fusion else None,
      relpos_bias=relpos_bias,
      side_relpos_bias=side_relpos_bias,
  )
  output, _ = att_layer.init_with_output(
      keys[3],
      inputs,
      inputs_mask,
      segment_ids=segment_ids,
      positions=positions,
      enable_dropout=False)
  np.testing.assert_array_equal(output.shape,
                                (batch_size, seq_len, out_features))
def test_make_etc_fixed_block_ids(self):
  """Checks block id assignment for packed inputs with orphan adoption.

  In the expected `block_ids`, -1 appears to mark tokens that belong to no
  global block (e.g. padding or segments too short for a full block) —
  NOTE(review): semantics inferred from the golden values; confirm against
  `make_etc_fixed_block_ids` docs.
  """
  # See this documentation for an example of what packed inputs look like:
  # https://github.com/google/seqio/blob/main/seqio/utils.py#L292
  inputs_mask = np.array(
      [
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
      ],
      dtype=np.bool_)
  segment_ids = [
      [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 0, 0],  #
      [1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
  ]
  positions = [
      [0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2],  #
      [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 0, 1, 2, 3, 0, 0],  #
      [0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7],  #
      list(range(16)),  #
  ]
  block_ids, global_segment_ids = long_attention.make_etc_fixed_block_ids(
      tokens_per_block=3,
      inputs_mask=inputs_mask,
      segment_ids=segment_ids,
      positions=positions)
  np.testing.assert_array_equal(
      block_ids,
      [
          [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],  #
          [0, 0, 0, 1, 1, 1, 1, 1, -1, -1, 2, 2, 2, 2, -1, -1],  #
          [-1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 1, 1, 1, 1, 1],  #
          [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4],  #
      ])
  np.testing.assert_array_equal(
      global_segment_ids,
      [
          [1, 1, 2, 2, 3],  #
          [1, 1, 3, 0, 0],  #
          [6, 6, 0, 0, 0],  #
          [1, 1, 1, 1, 1],  #
      ])
def test_make_etc_fixed_block_ids_without_packing(self):
  """Checks block id assignment when no segment_ids/positions are given."""
  inputs_mask = np.array(
      [
          [1, 1, 1, 1, 1, 1, 1, 1],  #
          [1, 1, 1, 1, 1, 1, 0, 0],  #
          [1, 1, 1, 1, 1, 0, 0, 0],  #
      ],
      dtype=np.bool_)
  block_ids, global_segment_ids = long_attention.make_etc_fixed_block_ids(
      tokens_per_block=3, inputs_mask=inputs_mask)
  np.testing.assert_array_equal(
      block_ids,
      [
          [0, 0, 0, 1, 1, 1, 1, 1],  #
          [0, 0, 0, 1, 1, 1, -1, -1],  #
          [0, 0, 0, 0, 0, -1, -1, -1],  #
      ])
  np.testing.assert_array_equal(
      global_segment_ids,
      [
          [1, 1],  #
          [1, 1],  #
          [1, 0],  #
      ])
def test_make_etc_fixed_block_ids_without_orphan_adoption(self):
  """Checks block ids for packed inputs with `adopt_orphan_tokens=False`.

  Compared to the adoption case, trailing tokens that do not fill a whole
  block get -1 instead of joining the preceding block.
  """
  # See this documentation for an example of what packed inputs look like:
  # https://github.com/google/seqio/blob/main/seqio/utils.py#L292
  inputs_mask = np.array(
      [
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
      ],
      dtype=np.bool_)
  segment_ids = [
      [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 0, 0],  #
      [1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
  ]
  positions = [
      [0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2],  #
      [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 0, 1, 2, 3, 0, 0],  #
      [0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7],  #
      list(range(16)),  #
  ]
  block_ids, global_segment_ids = long_attention.make_etc_fixed_block_ids(
      tokens_per_block=3,
      inputs_mask=inputs_mask,
      segment_ids=segment_ids,
      positions=positions,
      adopt_orphan_tokens=False)
  np.testing.assert_array_equal(
      block_ids,
      [
          [0, 0, 0, 1, 1, 1, -1, 2, 2, 2, 3, 3, 3, 4, 4, 4],  #
          [0, 0, 0, 1, 1, 1, -1, -1, -1, -1, 2, 2, 2, -1, -1, -1],  #
          [-1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1],  #
          [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, -1],  #
      ])
  np.testing.assert_array_equal(
      global_segment_ids,
      [
          [1, 1, 2, 2, 3],  #
          [1, 1, 3, 0, 0],  #
          [6, 6, 0, 0, 0],  #
          [1, 1, 1, 1, 1],  #
      ])
def test_make_etc_fixed_block_ids_without_packing_nor_adoption(self):
  """Checks block ids with neither packing nor orphan adoption."""
  inputs_mask = np.array(
      [
          [1, 1, 1, 1, 1, 1, 1, 1],  #
          [1, 1, 1, 1, 1, 1, 0, 0],  #
          [1, 1, 1, 1, 1, 0, 0, 0],  #
      ],
      dtype=np.bool_)
  block_ids, global_segment_ids = long_attention.make_etc_fixed_block_ids(
      tokens_per_block=3, inputs_mask=inputs_mask, adopt_orphan_tokens=False)
  np.testing.assert_array_equal(
      block_ids,
      [
          [0, 0, 0, 1, 1, 1, -1, -1],  #
          [0, 0, 0, 1, 1, 1, -1, -1],  #
          [0, 0, 0, -1, -1, -1, -1, -1],  #
      ])
  np.testing.assert_array_equal(
      global_segment_ids,
      [
          [1, 1],  #
          [1, 1],  #
          [1, 0],  #
      ])
def test_orphan_token_identification(self):
  """Checks `identify_orphan_tokens` with and without explicit positions.

  Orphans are the trailing tokens of a sequence (or segment, when
  `positions` restart) that do not fill a complete block.
  """
  # Case 1: positions implied by the mask only.
  inputs_mask = np.array(
      [
          [1, 1, 1, 1, 1, 1, 1, 1],  #
          [1, 1, 1, 1, 1, 1, 0, 0],  #
          [1, 1, 1, 1, 1, 0, 0, 0],  #
      ],
      dtype=np.bool_)
  orphan_tokens = long_attention.identify_orphan_tokens(
      tokens_per_block=3, inputs_mask=inputs_mask)
  np.testing.assert_array_equal(
      orphan_tokens,
      [
          [0, 0, 0, 0, 0, 0, 1, 1],  #
          [0, 0, 0, 0, 0, 0, 0, 0],  #
          [0, 0, 0, 1, 1, 0, 0, 0],  #
      ])
  # Case 2: packed inputs with explicit per-segment positions.
  inputs_mask = np.array(
      [
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
      ],
      dtype=np.bool_)
  positions = [
      [0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2],  #
      [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 0, 1, 2, 3, 0, 0],  #
      [0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7],  #
      list(range(16)),  #
  ]
  orphan_tokens = long_attention.identify_orphan_tokens(
      tokens_per_block=3, inputs_mask=inputs_mask, positions=positions)
  np.testing.assert_array_equal(
      orphan_tokens,
      [
          [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],  #
          [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1],  #
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],  #
      ])
def test_mask_to_bias(self):
  """mask_to_bias maps True -> 0 and False -> -1e10 in the requested dtype."""
  # 1-D mask.
  vector_mask = np.array([1, 0], dtype=bool)
  vector_bias = long_attention.mask_to_bias(vector_mask, dtype=np.float32)
  np.testing.assert_array_equal(
      vector_bias, np.array([0, -1e10], dtype=np.float32))
  assert vector_bias.dtype == np.float32
  # 2-D mask.
  matrix_mask = np.array([[1, 0], [0, 0]], dtype=bool)
  matrix_bias = long_attention.mask_to_bias(matrix_mask, dtype=np.float32)
  np.testing.assert_array_equal(
      matrix_bias, np.array([[0, -1e10], [-1e10, -1e10]], dtype=np.float32))
  assert matrix_bias.dtype == np.float32
def test_make_side_relpos(self):
  """Checks token-to-global-block relative positions from `_make_side_relpos`.

  Covers four combinations: unpacked vs packed inputs, each with
  `adopt_orphan_tokens` True and False. Entries whose bias would be masked
  out anyway are marked NaN in the golden arrays and excluded from the
  comparison.
  """
  tokens_per_block = 3
  inputs_mask = np.array([
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
  ])  #
  positions = np.array([
      [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],  #
      [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0],  #
      [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2],  #
      [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3]
  ])  #
  segment_ids = np.array([
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2]
  ])  #
  side_relative_positions = long_attention._make_side_relpos(
      tokens_per_block,
      inputs_mask,
      segment_ids,
      positions,
      adopt_orphan_tokens=True)
  # certain biases are not important b/c they will be masked out; we represent
  # these with NaNs and ignore their positions in testing.
  x = np.nan
  expected_relative_positions = np.array([
      [
          [0, 1, 2, 3],  #
          [0, 1, 2, 3],  #
          [0, 1, 2, 3],  #
          [-1, 0, 1, 2],  #
          [-1, 0, 1, 2],  #
          [-1, 0, 1, 2],  #
          [-2, -1, 0, 1],  #
          [-2, -1, 0, 1],  #
          [-2, -1, 0, 1],  #
          [-3, -2, -1, 0],  #
          [-3, -2, -1, 0],  #
          [-3, -2, -1, 0]
      ],  #
      [
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-2, -1, 0, x],  #
          [-2, -1, 0, x],  #
          [-2, -1, 0, x],  #
          [-2, -1, 0, x],  #
          [x, x, x, x],  #
          [x, x, x, x]
      ],  #
      [
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-2, -1, 0, x],  #
          [-2, -1, 0, x],  #
          [-2, -1, 0, x],  #
          [x, x, x, 0],  #
          [x, x, x, 0],  #
          [x, x, x, 0]
      ],  #
      [
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [x, x, 0, x],  #
          [x, x, 0, x],  #
          [x, x, 0, x],  #
          [x, x, 0, x]
      ]
  ])  #
  positions_to_compare = np.isfinite(expected_relative_positions)
  np.testing.assert_array_equal(
      side_relative_positions[positions_to_compare],
      expected_relative_positions[positions_to_compare])
  # Same inputs, but orphan tokens form their own (partial) block.
  side_relative_positions = long_attention._make_side_relpos(
      tokens_per_block,
      inputs_mask,
      segment_ids,
      positions,
      adopt_orphan_tokens=False)
  expected_relative_positions = np.array([
      [
          [0, 1, 2, 3],  #
          [0, 1, 2, 3],  #
          [0, 1, 2, 3],  #
          [-1, 0, 1, 2],  #
          [-1, 0, 1, 2],  #
          [-1, 0, 1, 2],  #
          [-2, -1, 0, 1],  #
          [-2, -1, 0, 1],  #
          [-2, -1, 0, 1],  #
          [-3, -2, -1, 0],  #
          [-3, -2, -1, 0],  #
          [-3, -2, -1, 0]
      ],  #
      [
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-2, -1, 0, x],  #
          [-2, -1, 0, x],  #
          [-2, -1, 0, x],  #
          [-3, -2, -1, x],  #
          [x, x, x, x],  #
          [x, x, x, x]
      ],  #
      [
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-2, -1, 0, x],  #
          [-2, -1, 0, x],  #
          [-2, -1, 0, x],  #
          [x, x, x, 0],  #
          [x, x, x, 0],  #
          [x, x, x, 0]
      ],  #
      [
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [0, 1, 2, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-1, 0, 1, x],  #
          [-2, -1, 0, x],  #
          [-2, -1, 0, x],  #
          [x, x, 0, x],  #
          [x, x, 0, x],  #
          [x, x, 0, x],  #
          [x, x, -1, x]
      ]
  ])  #
  positions_to_compare = np.isfinite(expected_relative_positions)
  np.testing.assert_array_equal(
      side_relative_positions[positions_to_compare],
      expected_relative_positions[positions_to_compare])
  # Longer packed inputs with multiple segments per row.
  inputs_mask = np.array(
      [
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
      ],
      dtype=np.bool_)
  segment_ids = [
      [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 0, 0],  #
      [1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6],  #
      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  #
  ]
  positions = [
      [0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2],  #
      [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 0, 1, 2, 3, 0, 0],  #
      [0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7],  #
      [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]  #
  ]
  side_relative_positions = long_attention._make_side_relpos(
      tokens_per_block,
      inputs_mask,
      segment_ids,
      positions,
      adopt_orphan_tokens=True)
  expected_relative_positions = np.array([
      [
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-2, -1, 0, 1, 2],  #
          [-2, -1, 0, 1, 2],  #
          [-2, -1, 0, 1, 2],  #
          [-3, -2, -1, 0, 1],  #
          [-3, -2, -1, 0, 1],  #
          [-3, -2, -1, 0, 1],  #
          [-4, -3, -2, -1, 0],  #
          [-4, -3, -2, -1, 0],  #
          [-4, -3, -2, -1, 0]
      ],  #
      [
          [0, 1, x, x, x],  #
          [0, 1, x, x, x],  #
          [0, 1, x, x, x],  #
          [-1, 0, x, x, x],  #
          [-1, 0, x, x, x],  #
          [-1, 0, x, x, x],  #
          [-1, 0, x, x, x],  #
          [-1, 0, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, 0, x, x],  #
          [x, x, 0, x, x],  #
          [x, x, 0, x, x],  #
          [x, x, 0, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x]
      ],  #
      [
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3]
      ],  #
      [
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-2, -1, 0, 1, 2],  #
          [-2, -1, 0, 1, 2],  #
          [-2, -1, 0, 1, 2],  #
          [-3, -2, -1, 0, 1],  #
          [-3, -2, -1, 0, 1],  #
          [-3, -2, -1, 0, 1],  #
          [-4, -3, -2, -1, 0],  #
          [-4, -3, -2, -1, 0],  #
          [-4, -3, -2, -1, 0],  #
          [-4, -3, -2, -1, 0]
      ]
  ])  #
  positions_to_compare = np.isfinite(expected_relative_positions)
  np.testing.assert_array_equal(
      side_relative_positions[positions_to_compare],
      expected_relative_positions[positions_to_compare])
  side_relative_positions = long_attention._make_side_relpos(
      tokens_per_block,
      inputs_mask,
      segment_ids,
      positions,
      adopt_orphan_tokens=False)
  expected_relative_positions = np.array([
      [
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-2, -1, 0, 1, 2],  #
          [-2, -1, 0, 1, 2],  #
          [-2, -1, 0, 1, 2],  #
          [-2, -1, 0, 1, 2],  #
          [-3, -2, -1, 0, 1],  #
          [-3, -2, -1, 0, 1],  #
          [-3, -2, -1, 0, 1],  #
          [-4, -3, -2, -1, 0],  #
          [-4, -3, -2, -1, 0],  #
          [-4, -3, -2, -1, 0]
      ],  #
      [
          [0, 1, x, x, x],  #
          [0, 1, x, x, x],  #
          [0, 1, x, x, x],  #
          [-1, 0, x, x, x],  #
          [-1, 0, x, x, x],  #
          [-1, 0, x, x, x],  #
          [-2, -1, x, x, x],  #
          [-2, -1, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, 0, x, x],  #
          [x, x, 0, x, x],  #
          [x, x, 0, x, x],  #
          [x, x, -1, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x]
      ],  #
      [
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [x, x, x, x, x],  #
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-2, -1, 0, 1, 2],  #
          [-2, -1, 0, 1, 2]
      ],  #
      [
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [0, 1, 2, 3, 4],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-1, 0, 1, 2, 3],  #
          [-2, -1, 0, 1, 2],  #
          [-2, -1, 0, 1, 2],  #
          [-2, -1, 0, 1, 2],  #
          [-3, -2, -1, 0, 1],  #
          [-3, -2, -1, 0, 1],  #
          [-3, -2, -1, 0, 1],  #
          [-4, -3, -2, -1, 0],  #
          [-4, -3, -2, -1, 0],  #
          [-4, -3, -2, -1, 0],  #
          [-5, -4, -3, -2, -1]
      ]
  ])  #
  positions_to_compare = np.isfinite(expected_relative_positions)
  np.testing.assert_array_equal(
      side_relative_positions[positions_to_compare],
      expected_relative_positions[positions_to_compare])
def test_validate_long_att_call_params_positions_without_segment_ids(self):
  """`allow_positions_without_segment_ids` relaxes the pairing convention.

  By default, `positions` and `segment_ids` must be provided together;
  with the flag set, `positions` alone is accepted (but `segment_ids`
  alone still is not).
  """
  inputs = jnp.array([
      [[.1, .2], [.3, .4], [.5, .6], [.7, .8], [0., 0.]],  #
      [[.1, .2], [.3, .4], [.5, .6], [.7, .8], [.9, 0.]],  #
  ])
  inputs_mask = jnp.array([
      [1, 1, 1, 1, 0],  #
      [1, 1, 1, 1, 1],  #
  ])
  segment_ids = inputs_mask
  positions = jnp.array([
      [0, 1, 2, 3, 0],  #
      [0, 1, 2, 3, 4],  #
  ])

  def validate(positions, segment_ids, **kwargs):
    long_attention.validate_long_attention_call_parameter_shapes(
        inputs=inputs,
        inputs_mask=inputs_mask,
        positions=positions,
        segment_ids=segment_ids,
        **kwargs)

  # Default convention: both or neither.
  validate(positions, segment_ids)
  validate(None, None)
  with self.assertRaises(ValueError):
    validate(positions, None)
  with self.assertRaises(ValueError):
    validate(None, segment_ids)

  # Relaxed convention: positions may appear on their own.
  validate(positions, segment_ids, allow_positions_without_segment_ids=True)
  validate(None, None, allow_positions_without_segment_ids=True)
  validate(positions, None, allow_positions_without_segment_ids=True)
  with self.assertRaises(ValueError):
    validate(None, segment_ids, allow_positions_without_segment_ids=True)
# Run the tests when executed as a script.
if __name__ == '__main__':
  absltest.main()
| 33,226 | 33.432124 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/longt5/relative_position_biases_general.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for relative position biases generalized to long inputs."""
from typing import Any, Callable
from flax import linen as nn
from flax.linen import partitioning
from jax import lax
import jax.numpy as jnp
import numpy as np
from flaxformer.types import Array
class RelativePositionBiasesGeneral(nn.Module):
  """Adds T5-style relative positional embeddings to the attention logits.

  This generalizes the original `RelativePositionBiases` implementation to
  accept an `rp_bucket` input of any shape, avoiding construction of
  an O(N^2) tensor for long inputs of length N. The original full attention
  `rp_bucket` can be retrieved with `full_att_rp_bucket()`.

  T5 uses a form of relative attention which biases the attention matrix, so
  each head effectively attends to things at different scales, irrespective of
  the contents of keys and queries.

  In the future, this class may be unified with classes which take into account
  key and query contents, like the original relative position embeddings of Shaw
  et al. and new proposals. However, this will rely on XLA to recover efficiency
  for this class (especially when, as in the original T5, the same bias matrix
  is shared for all layers).

  Attributes:
    num_buckets: Number of buckets to bucket distances between key and query
      positions into.
    max_distance: Maximum distance before everything is lumped into the last
      distance bucket.
    num_heads: Number of heads in the attention layer. Each head will get a
      different relative position weighting.
    dtype: Type of arrays through this module.
    embedding_init: initializer for relative embedding table.
  """
  num_buckets: int
  max_distance: int
  num_heads: int
  dtype: Any
  embedding_init: Callable[..., Array] = nn.linear.default_embed_init

  @staticmethod
  def relative_position_bucket(relative_position,
                               bidirectional=True,
                               num_buckets=32,
                               max_distance=128):
    """Translates relative position to a bucket number for relative attention.

    The relative position is defined as memory_position - query_position, i.e.
    the distance in tokens from the attending position to the attended-to
    position. If bidirectional=False, then positive relative positions are
    invalid.

    We use smaller buckets for small absolute relative_position and larger
    buckets for larger absolute relative_positions. All relative
    positions >=max_distance map to the same bucket. All relative
    positions <=-max_distance map to the same bucket. This should allow for
    more graceful generalization to longer sequences than the model has been
    trained on.

    Args:
      relative_position: an int32 array
      bidirectional: a boolean - whether the attention is bidirectional
      num_buckets: an integer
      max_distance: an integer

    Returns:
      a Tensor with the same shape as relative_position, containing int32
      values in the range [0, num_buckets)
    """
    ret = 0
    n = -relative_position
    if bidirectional:
      # Split the bucket space in half: the upper half is reserved for
      # positions where the key is ahead of the query (n < 0).
      num_buckets //= 2
      ret += (n < 0).astype(jnp.int32) * num_buckets
      n = jnp.abs(n)
    else:
      # Causal case: positive relative positions are invalid; clamp to 0.
      n = jnp.maximum(n, 0)
    # now n is in the range [0, inf)
    # Half the buckets cover distances exactly; the rest cover increasingly
    # larger distances logarithmically up to max_distance.
    max_exact = num_buckets // 2
    is_small = (n < max_exact)
    val_if_large = max_exact + (
        jnp.log(n.astype(jnp.float32) / max_exact + jnp.finfo(jnp.float32).eps)
        / jnp.log(max_distance / max_exact) *
        (num_buckets - max_exact)).astype(jnp.int32)
    # Clamp distances >= max_distance into the last bucket.
    val_if_large = jnp.minimum(val_if_large, num_buckets - 1)
    ret += jnp.where(is_small, n, val_if_large)
    return ret

  def full_att_rp_bucket(self, qlen, klen, bidirectional=True):
    """Gets relative position buckets for full attention.

    Args:
      qlen: attention query length.
      klen: attention key length.
      bidirectional: a boolean - whether the attention is bidirectional

    Returns:
      int32 (qlen, klen) shaped array containing values in the range
      [0, num_buckets).
    """
    # TODO: should we be computing this w. numpy as a program
    # constant?
    context_position = np.arange(qlen, dtype=jnp.int32)[:, None]
    memory_position = np.arange(klen, dtype=jnp.int32)[None, :]
    relative_position = memory_position - context_position  # shape (qlen, klen)
    rp_bucket = self.relative_position_bucket(
        relative_position,
        bidirectional=bidirectional,
        num_buckets=self.num_buckets,
        max_distance=self.max_distance)
    return rp_bucket

  @nn.compact
  def __call__(self, rp_bucket: Array):
    """Produces relative position embedding attention biases.

    Args:
      rp_bucket: int32 containing values in the range [0, num_buckets). In the
        full attention case, this should have shape (qlen, klen).

    Returns:
      output: Attention bias array with shape `(1, num_heads) + rp_bucket.shape`
    """
    relative_attention_bias = partitioning.param_with_axes(
        'rel_embedding',
        self.embedding_init, (self.num_heads, self.num_buckets),
        jnp.float32,
        axes=('heads', 'relpos_buckets'))
    relative_attention_bias = jnp.asarray(relative_attention_bias, self.dtype)
    # Instead of using a slow gather, we create a leading-dimension one-hot
    # array from rp_bucket and use it to perform the gather-equivalent via a
    # contraction. For example, if `rp_bucket` has shape (qlen, klen), the
    # contraction looks like:
    # (num_head, num_buckets) x (num_buckets one-hot, qlen, klen).
    # This is equivalent to relative_attention_bias[:, rp_bucket]
    bcast_iota_shape = [self.num_buckets] + [1] * rp_bucket.ndim
    bcast_iota = lax.broadcasted_iota(jnp.int32, bcast_iota_shape, 0)
    rp_bucket_one_hot = jnp.array(
        rp_bucket[jnp.newaxis, ...] == bcast_iota, dtype=self.dtype)
    # --> shape (num_heads, rp_bucket.shape)
    values = lax.dot_general(
        relative_attention_bias,
        rp_bucket_one_hot,
        (
            ((1,), (0,)),  # lhs, rhs contracting dims
            ((), ())))  # no batched dims
    # Add a singleton batch dimension.
    # --> shape (1, num_heads, rp_bucket.shape)
    return values[jnp.newaxis, ...]
| 6,812 | 39.313609 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/calm_t5/components.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extending components.attention.dense_attention to allow cache propagation."""
# pylint: disable=attribute-defined-outside-init,g-bare-generic
import functools
from typing import Optional
from flax import linen as nn
from flax.linen import partitioning as flax_partitioning
from flax.training import common_utils
from jax import lax
import jax.numpy as jnp
from flaxformer import activation_partitioning
from flaxformer.components import dense
from flaxformer.components import embedding
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
class MultiHeadDotProductAttention(dense_attention.MultiHeadDotProductAttention
                                  ):
  """Extends Multi-head dot-product attention class to enable cache propagation.

  Cache propagation is enabled by setting only_propagate_state in the __call__
  function. Used for early exiting, passing the last hidden state to the skipped
  layers.
  """

  @nn.compact
  def __call__(self,
               inputs_q: Array,
               inputs_kv: Array,
               mask: Optional[Array] = None,
               bias: Optional[Array] = None,
               *,
               precomputed_qkv: Optional[Array] = None,
               decode: bool = False,
               enable_dropout: bool = True,
               prefill: bool = False,
               prefill_lengths: Optional[Array] = None,
               only_propagate_state: bool = False) -> Array:
    """Applies multi-head dot product attention on the input data.

    Projects the inputs into multi-headed query, key, and value vectors,
    applies dot-product attention and project the results to an output vector.

    There are two modes: decoding and non-decoding (e.g., training). The mode is
    determined by `decode`.

    During decoding mode, this method is called twice, by `init` and
    `apply`. In the former, inputs_q: [batch..., length, qkv_features] and
    inputs_kv: [batch..., length, qkv_features]

    During apply, query, key and value all have the shape: [batch * beam, 1,
    qkv_features] where the batch dimension is added to include multiple beams.
    Note that the batch dimension is different during the init and apply calls.
    This is because the cached variables are directly passed-in during `apply`
    method. In other words, the cache variables such as `cached_key` are
    initialized with `batch` dim, expanded by tiling in the beam search function
    to `batch * beam` dimension, and passed to the `apply` method as part of a
    variable dict.

    Args:
      inputs_q: input queries of shape `[batch_sizes..., q_length, q_features]`.
      inputs_kv: key/values of shape `[batch_sizes..., kv_length, kv_features]`.
      mask: attention mask of shape `[batch_sizes..., num_heads, q_length,
        kv_length]`.
      bias: attention bias of shape `[batch_sizes..., num_heads, q_length,
        kv_length]`.
      precomputed_qkv: when using fused implementations QKVO are defined outside
        this module and we only use the module to run computations.
      decode: Whether to prepare and use an autoregressive cache.
      enable_dropout: Enables dropout if set to True.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.
      only_propagate_state: Whether to stop after the projected key-values are
        computed and stored in the auto-regressive cache. If true, the rest of
        the computation is skipped, and nothing is returned.

    Returns:
      If output_projection is True, then output of shape
      `[batch_sizes..., length, out_features]`, where out_features is set to
      features if not provided. If output_projection is False, then output of
      shape `[batch_sizes..., length, num_heads, head_dim]`.
    """
    dense_attention.validate_dense_attention_call_parameter_shapes(
        inputs_q, inputs_kv, mask, bias, self.num_heads)
    qkv_kernel_init = (
        self.qkv_kernel_init
        if self.qkv_kernel_init is not None else self.kernel_init)
    kv_kernel_init = (
        self.kv_kernel_init
        if self.kv_kernel_init is not None else self.kernel_init)
    q_kernel_init = (
        self.q_kernel_init
        if self.q_kernel_init is not None else self.kernel_init)

    if precomputed_qkv is not None:
      raise ValueError('Support for precomputed QKVO not implemented.')

    # Set below to the decode-cache position when decoding, so rotary
    # embeddings (if enabled) are applied at the current position.
    rotary_index = None
    features = self.out_features or inputs_q.shape[-1]
    qkv_features = self.qkv_features or inputs_q.shape[-1]
    if self.head_dim is None:
      head_dim = qkv_features // self.num_heads
    else:
      head_dim = self.head_dim

    if self.kernels_to_fuse and not self.split_head_kernel:
      raise ValueError('Un-reshaped kernels are required when using QKV fused '
                       'kernel optimization.')

    # Is attention logit rescaling explicit or folded into initializer?
    if self.rescale_logits:
      query_init = q_kernel_init
    else:
      if self.kernels_to_fuse:
        raise ValueError('Cannot fold in logit normalization to query '
                         'initializer when using fused kernels.')
      depth_scaling = jnp.sqrt(head_dim).astype(self.dtype)
      query_init = lambda *args: q_kernel_init(*args) / depth_scaling

    make_dense = functools.partial(
        dense.DenseGeneral,
        axis=-1,
        bias_init=self.bias_init,
        use_bias=self.use_bias,
        dtype=self.dtype,
        precision=self.precision,
        reshape_kernel=not self.split_head_kernel,
    )
    # Project inputs_q to multi-headed q/k/v
    # dimensions are then [batch..., length, num_heads, features_per_head]
    if self.kernels_to_fuse is None:
      query = make_dense(
          kernel_init=query_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='query')(
              inputs_q)
      key = make_dense(
          kernel_init=self.kernel_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='key')(
              inputs_kv)
      value = make_dense(
          kernel_init=self.kernel_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='value')(
              inputs_kv)
    # TODO: should we fuse/slice along depth or head dim?
    elif self.kernels_to_fuse == 'qkv':
      if inputs_q is not inputs_kv:
        raise ValueError('qkv fusion is only supported in self-attention mode '
                         '(when inputs_q is inputs_kv).')
      # 'qkv' fusion mode implies self-attention
      qkv = make_dense(
          kernel_init=qkv_kernel_init,
          features=(3, self.num_heads, head_dim),
          kernel_axis_names=['embed', 'stack', 'heads', 'kv'],
          name='qkv_fused')(
              inputs_q)
      query = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 0, 1, -3), -3)
      key = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 1, 1, -3), -3)
      value = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 2, 1, -3), -3)
    elif self.kernels_to_fuse == 'kv':
      query = make_dense(
          kernel_init=query_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='query')(
              inputs_q)
      kv = make_dense(
          kernel_init=kv_kernel_init,
          features=(2, self.num_heads, head_dim),
          kernel_axis_names=['embed', 'stack', 'heads', 'kv'],
          name='kv_fused')(
              inputs_kv)
      key = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 0, 1, -3), -3)
      value = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 1, 1, -3), -3)
    else:
      raise ValueError('Incorrect kernel fusion mode specified.')

    # Multi Dconv Head Attention options:
    if self.q_conv is not None:
      query = self.q_conv(  # pylint: disable=not-callable
          query,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
    if self.k_conv is not None:
      key = self.k_conv(  # pylint: disable=not-callable
          key,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
    if self.v_conv is not None:
      value = self.v_conv(  # pylint: disable=not-callable
          value,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)

    if self.sharding_over_head_dimension:
      # Note: We don't use `activation_partitioning.with_sharding_migration`
      # here because we do often want this 2D sharded. However, if rules are
      # valid, they should result in 2D sharding. We don't need to raise errors
      # if both result in 2D sharding (which with_sharding_migration does).
      if flax_partitioning.get_axis_rules():
        query = flax_partitioning.with_sharding_constraint(
            query, ('batch', 'length', 'heads', 'kv'))
        key = flax_partitioning.with_sharding_constraint(
            key, ('batch', 'length', 'heads', 'kv'))
        value = flax_partitioning.with_sharding_constraint(
            value, ('batch', 'length', 'heads', 'kv'))
      else:
        query = activation_partitioning.with_sharding(query, 2)
        key = activation_partitioning.with_sharding(key, 2)
        value = activation_partitioning.with_sharding(value, 2)

    query: Array = query  # hint to quiet pytype.
    key: Array = key
    value: Array = value

    if prefill and decode:
      # Bug fix: the original message was missing a space between 'same' and
      # 'time', rendering as "at the sametime.".
      raise ValueError('prefill and decode cannot both be true at the same '
                       'time. If you are using a prefix LM with bidirectional '
                       'attention on the inputs, please make a call with '
                       'prefill=True that includes an attention mask that '
                       'covers your inputs first and then make your decoding '
                       'calls.')
    if prefill or decode:
      # Detect if we're initializing by absence of existing cache data.
      is_initialized = self.has_variable('cache', 'cached_key')
      # The key and value have dimension
      # [batch..., length, num_heads, features_per_head], but we cache them as
      # [batch..., num_heads, features_per_head, length] as a TPU fusion
      # optimization. This also enable the "scatter via one-hot broadcast"
      # trick, which means we do a one-hot broadcast instead of a scatter/gather
      # operations, which gives a 3-4x speedup in practice.
      swap_dims = lambda x: x[:-3] + tuple(x[i] for i in [-2, -1, -3])
      cached_key = self.variable('cache', 'cached_key', jnp.zeros,
                                 swap_dims(key.shape), key.dtype)
      cached_value = self.variable('cache', 'cached_value', jnp.zeros,
                                   swap_dims(value.shape), value.dtype)
      cache_index = self.variable('cache', 'cache_index',
                                  lambda: jnp.array(0, dtype=jnp.int32))
      rotary_index = cache_index.value
      if is_initialized:
        # Here we are in "apply()".
        *batch_dims, num_heads, features_per_head, length = (
            cached_key.value.shape)
        if prefill:
          if prefill_lengths is None:
            # Figure out how far each element in the batch fills the cache based
            # on the mask. We index each element in the batch, the first head
            # dim (because this is always set to one), and the first query
            # vector. If there is any prefix at all, the first element in the
            # prefix would be part of it.
            prefill_lengths = jnp.sum(
                mask[:, 0, 0, :], axis=-1).astype(cache_index.value.dtype)
          (key, value, cur_index, cached_key_value, cached_value_value,
           cache_index_value) = self.update_cache_prefill(
               key, value, cached_key, cached_value, cache_index,
               prefill_lengths)
        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        elif decode:
          # Check the shape of the cached key against the input query.
          expected_shape = tuple(batch_dims) + (1, num_heads, features_per_head)
          if expected_shape != query.shape:
            raise ValueError('Autoregressive cache shape error, '
                             'expected query shape %s instead got %s.' %
                             (expected_shape, query.shape))
          (key, value, cur_index, cached_key_value, cached_value_value,
           cache_index_value) = self.update_cache_decode(
               key, value, cached_key, cached_value, cache_index)
          # Enforcing the Causal mask over previous positions and selecting only
          # the bias value for the current index is only needed during decode
          # mode where a single example is feed at a time. In prefill mode we
          # uses these as provided, that same way it is done in a normal forward
          # pass, like when computing logits during training.

          # Causal mask for cached decoder self-attention: our single query
          # position should only attend to those key positions that have already
          # been generated and cached, not the remaining zero elements.

          # (1, 1, length) represent (head dim, query length, key length)
          # query length is 1 because during decoding we deal with one
          # index.
          # The same mask is applied to all batch elements and heads.
          #
          # Add trailing dims to the current index so it can either
          # broadcast over the batch dim or it can just be batch size.
          mask = dense_attention.combine_masks(
              mask,
              jnp.broadcast_to(
                  jnp.arange(length),
                  tuple(batch_dims) +
                  (1, 1, length)) <= jnp.reshape(cur_index, (-1, 1, 1, 1)))
          # Grab the correct relative attention bias during decoding. This is
          # only required during single step decoding.
          if bias is not None:
            # The bias is a full attention matrix, but during decoding we only
            # have to take a slice of it.
            # This is equivalent to bias[..., cur_index:cur_index+1, :].
            # If we are doing prefix decoding where cur index is a vector the
            # result will be [batch, heads, 1, :]. If cur_index is a scalar
            # like in encdec decoding, the result will be [1, heads, 1, :].
            # We use a one-hot einsum rather than a slice to avoid introducing
            # a Gather op that is currently lowered poorly by SPMD passes,
            # adding expensive all-reduce and all-gather operations.
            bias = jnp.einsum(
                'bq, bhqk->bhk',
                common_utils.onehot(cur_index, num_classes=length), bias)
            bias = jnp.expand_dims(bias, 2)
        # Currently, updating a variable inside of a method is not handled
        # in flax, so we return the actual values and assign them in the main
        # compacted call for now.
        # TODO: Move variable assignment inside of the
        # cache update functions once variable references are tracked across
        # transform boundaries.
        cache_index.value = cache_index_value
        cached_key.value = cached_key_value
        cached_value.value = cached_value_value

    if only_propagate_state:
      # Early exit for CALM: the projected key/values are now in the cache;
      # skip the attention computation itself. Returns None by design.
      return  # pytype: disable=bad-return-type  # jax-ndarray

    # Convert the boolean attention mask to an attention bias.
    if mask is not None:
      # attention mask in the form of attention bias
      attention_bias = lax.select(
          mask > 0,
          jnp.full(mask.shape, 0.).astype(self.dtype),
          jnp.full(mask.shape, -1e10).astype(self.dtype))
    else:
      attention_bias = None

    # Add provided bias term (e.g. relative position embedding).
    if bias is not None:
      attention_bias = dense_attention.combine_biases(attention_bias, bias)

    dropout_rng = None
    if enable_dropout and self.dropout_rate > 0.:
      dropout_rng = self.make_rng('dropout')

    if self.use_rotary_embedding:
      # use rotary embeddings before attention
      # https://arxiv.org/abs/2104.09864
      # TODO: Put it in a new class
      dim = query.shape[-1]
      max_length = max(query.shape[1], key.shape[1])
      sin, cos = embedding.generate_fixed_pos_embedding(
          dim, max_length, max_timescale=self.rotary_embedding_max_timescale)
      query, key = embedding.apply_rotary_embedding(
          query, key, cos, sin, decode=decode, rotary_index=rotary_index)

    # Compute attention.
    x = self.attention_fn(
        query,
        key,
        value,
        bias=attention_bias,
        broadcast_dropout=self.broadcast_dropout,
        rescale_logits=self.rescale_logits,
        dropout_rng=dropout_rng,
        dropout_rate=self.dropout_rate,
        enable_dropout=enable_dropout,
        dtype=self.dtype,
        precision=self.precision,
        use_extra_logit=self.use_extra_logit,
        float32_logits=self.float32_logits,
    )  # pytype: disable=wrong-keyword-args

    if not self.output_projection:
      return x

    # Back to the original inputs dimensions.
    out = dense.DenseGeneral(
        features=features,
        axis=(-2, -1),
        kernel_init=self.kernel_init,
        bias_init=self.bias_init,
        use_bias=self.use_bias,
        dtype=self.dtype,
        precision=self.precision,
        reshape_kernel=not self.split_head_kernel,
        kernel_axis_names=['heads', 'kv', 'embed'],
        name='out')(  # pytype: disable=wrong-arg-types
            x)
    return out
| 18,386 | 43.306024 | 80 | py |
flaxformer | flaxformer-main/flaxformer/architectures/calm_t5/calm_architecture.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides T5 architecture with CALM decoding-time early-exiting."""
from __future__ import annotations
import dataclasses
import inspect
from typing import Any, Callable, List, Optional, Tuple, Union
from flax import linen as nn
import jax.numpy as jnp
from flaxformer import activation_partitioning
from flaxformer import transformer_common as common
from flaxformer.architectures.calm_t5 import components as calm_components
from flaxformer.architectures.common import param_remapping
from flaxformer.architectures.t5 import t5_architecture
from flaxformer.components import embedding
from flaxformer.components import transforms
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
from flaxformer.types import DType
# pylint: disable=not-callable
# pytype: disable=not-callable
@dataclasses.dataclass(frozen=True)
class TransparentLayerSequencePartial(common.TransparentLayerSequence):
  """Extending TransparentLayerSequence for CALM.

  Supports passing only through a subsequence of all layers, and also allows
  returning all intermediate activations (used for anytime prediction training).

  Attributes:
    layers: List of nn.Modules, which should be owned by a parent Flax module.
    return_all_representations: Whether to return intermediate encodings.
  """
  layers: List[nn.Module]
  return_all_representations: bool = False

  def __call__(self, inputs: Array, *args, **kwargs) -> Array:
    """Runs the (sub)stack of Transformer layers over `inputs`.

    Args:
      inputs: The inputs to the first layer <float>[..., seq_len, hidden_size].
        Typically these are the embedded token IDs, combined with embedded
        position IDs (or sinusoidal position encodings) and segment IDs.
      *args: Positional arguments forwarded to every layer.
      **kwargs: Keyword arguments forwarded to every layer. The special keys
        'start_idx' and 'end_idx' are consumed here to select the layer range.

    Returns:
      The encoded inputs <float>[..., seq_len, hidden_size], or — when
      return_all_representations is set — an Array
      <float>[..., end_idx - start_idx, seq_len, hidden_size].
    """
    # Pull the range selectors out of kwargs so they are not forwarded to the
    # individual layers.
    first_layer = kwargs.pop('start_idx', 0)
    last_layer = kwargs.pop('end_idx', None)
    return self.apply_range_of_layers(first_layer, last_layer, inputs, *args,
                                      **kwargs)

  def apply_range_of_layers(self, start_idx: int, end_idx: Optional[int],
                            inputs: Array, *args, **kwargs) -> Array:
    """Applies layers [start_idx, end_idx) to `inputs` and returns the result.

    Args:
      start_idx: Index of the first layer to apply (layer numbering starts at
        zero).
      end_idx: One past the last layer to apply, i.e. layers in the half-open
        interval [start_idx, end_idx) are run. Pass None to run through the
        final layer.
      inputs: The inputs to the first selected layer.
        [batch_size..., length, features]
      *args: Positional arguments forwarded to every layer.
      **kwargs: Keyword arguments forwarded to every layer.

    Returns:
      The output of the last applied layer when return_all_representations is
      False (or when decoding). Otherwise, the stacked outputs of every
      applied layer, including the final one.
    """
    in_decode_mode = kwargs.get('decode', False)
    hidden = inputs
    collected = []
    for sublayer in self.layers[start_idx:end_idx]:
      hidden = sublayer(hidden, *args, **kwargs)  # pytype: disable=not-callable
      collected.append(hidden)
    if not self.return_all_representations or in_decode_mode:
      return hidden
    # Stack the per-layer activations along a new leading axis.
    return jnp.array(collected)
class DecoderLayer(t5_architecture.DecoderLayer):
  """Extends DecoderLayer to allow cache propagation."""

  def __call__(self,
               targets,
               encoded,
               decoder_mask=None,
               encoder_decoder_mask=None,
               *,
               logit_mask=None,
               enable_dropout: bool = True,
               decode: bool = False,
               max_decode_length: Optional[int] = None,
               prefill: bool = False,
               prefill_lengths: Optional[Array] = None,
               only_propagate_state: bool = False,
               **kwargs):
    """Applies EncoderDecoder1DBlock module.

    Args:
      targets: Input data for decoder with shape [batch_size,
        decoder_seq_length, decoder_hidden_size].
      encoded: Input data from encoder with shape [batch_size,
        encoder_seq_length, decoder_hidden_size]. If None, block is Decoder
        only.
      decoder_mask: decoder self-attention mask.
      encoder_decoder_mask: encoder-decoder attention mask with shape [
        batch_size, 1, decoder_seq_length, encoder_seq_length].
      logit_mask: a mask (e.g., padding logit mask) to be applied to the
        attention logits.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.
      only_propagate_state: Will run the decoder layer only until the key-value
        self-attention values are computed and stored in cache, and will exit
        after.
      **kwargs: Remaining keyword arguments. Passed to
        _create_residuals_and_queries.

    Returns:
      Output after transformer encoder-decoder block.
    """
    layer_input = targets
    del targets

    # Decoder block.
    assert layer_input.ndim == 3
    layer_input = activation_partitioning.with_sharding_migration(
        layer_input,
        self.activation_partitioning_dims,
        logical_axis_names=('batch', 'length', 'embed'))

    if only_propagate_state:
      # Calls the self attention in order to compute the key-values and
      # (automatically) store them in cache. They are computed with a dense
      # Transformation on over the inputs inside the self_attention module.
      if not isinstance(self.self_attention,
                        calm_components.MultiHeadDotProductAttention):
        raise TypeError(
            'Self-attention should be the one implemented in '
            'architectures/calm_t5/components.py to allow cache propagation. '
            f'Got {type(self.self_attention)}.'
        )
      if prefill and prefill_lengths is None:
        # Figure out how far each element in the batch fills the cache based
        # on the mask. We index each element in the batch, the first head
        # dim (because this is always set to one), and the first query
        # vector. If there is any prefix at all, the first element in the
        # prefix would be part of it.
        prefill_lengths = jnp.sum(
            decoder_mask[:, 0, 0, :], axis=-1).astype(jnp.int32)

    if self.parallel:
      # Parallel formulation: self-attention, MLP (and cross-attention) all
      # read the same layer-normed input and their outputs are summed.
      x = self.layer_norm(
          layer_input,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # Normally a no-op unless overridden by a subclass.
      layer_input_residual, x_queries, logit_mask_queries = (
          self._create_residuals_and_queries(layer_input, x, logit_mask,
                                             **kwargs))

      # Shared relative position embedding attention biases.
      decoder_bias, encoder_decoder_bias = self.get_bias(
          max_decode_length, decode, layer_input=x, encoded=encoded)

      if only_propagate_state:
        # Run self-attention only for its cache side effect, then return the
        # layer input unchanged (this layer is being skipped by early exit).
        self.self_attention(
            x_queries,
            x,
            decoder_mask,
            decoder_bias,
            enable_dropout=enable_dropout,
            decode=decode,
            prefill=prefill,
            prefill_lengths=prefill_lengths,
            only_propagate_state=True)
        return layer_input

      y = (
          self.self_attention(
              x_queries,
              x,
              decoder_mask,
              decoder_bias,
              enable_dropout=enable_dropout,
              decode=decode,
              prefill=prefill,
              prefill_lengths=prefill_lengths) + self.mlp(
                  x,
                  decode=decode,
                  prefill=prefill,
                  prefill_lengths=prefill_lengths,
                  enable_dropout=enable_dropout))
      if encoded is not None:
        y += self.encoder_decoder_attention(
            x,
            encoded,
            encoder_decoder_mask,
            encoder_decoder_bias,
            enable_dropout=enable_dropout)
      # Rescale the summed branches by 1/sqrt(number of branches).
      y *= (3 if encoded is not None else 2)**-0.5
      z = layer_input_residual + self.dropout(
          y, deterministic=not enable_dropout)
    else:
      # layer_input is derived from decoder_input_tokens.
      x = self.pre_self_attention_layer_norm(
          layer_input,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # Normally a no-op unless overridden by a subclass.
      layer_input_residual, x_queries, logit_mask_queries = (
          self._create_residuals_and_queries(layer_input, x, logit_mask,
                                             **kwargs))

      if logit_mask is not None:
        # When using QKV fusion, x and x_queries must be the exact same
        # Python object, so reuse the object if possible.
        if x is x_queries and logit_mask is logit_mask_queries:
          x = logit_mask * x
          x_queries = x
        else:
          x = logit_mask * x
          x_queries = logit_mask_queries * x_queries

      # Shared relative position embedding attention biases.
      decoder_bias, encoder_decoder_bias = self.get_bias(
          max_decode_length, decode, layer_input=x, encoded=encoded)

      if only_propagate_state:
        # Run self-attention only for its cache side effect, then return the
        # layer input unchanged (this layer is being skipped by early exit).
        self.self_attention(
            x_queries,
            x,
            decoder_mask,
            decoder_bias,
            enable_dropout=enable_dropout,
            decode=decode,
            prefill=prefill,
            prefill_lengths=prefill_lengths,
            only_propagate_state=True)
        return layer_input

      # The first and second arguments to the attention are the same,
      # i.e., this is a self-attention layer.
      x = self.self_attention(
          x_queries,
          x,
          decoder_mask,
          decoder_bias,
          enable_dropout=enable_dropout,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
      x = layer_input_residual + self.post_self_attention_dropout(
          x, deterministic=not enable_dropout)
      x = activation_partitioning.with_sharding_migration(
          x,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

      # Encoder-Decoder block.
      if encoded is None:
        # If encoder outputs not provided, skip attending from decoder to
        # encoder. This results in a decoder only block.
        y = x
      else:
        if self.encoder_decoder_attention is None:
          raise ValueError('Expected encoder_decoder_attention to be populated '
                           'when called with `encoded` inputs.')
        y = self.pre_cross_attention_layer_norm(
            x, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths)
        y = activation_partitioning.with_sharding_migration(
            y,
            self.activation_partitioning_dims,
            logical_axis_names=('batch', 'length', 'embed'))

        if logit_mask is not None:
          y = logit_mask_queries * y

        y = self.encoder_decoder_attention(
            y,
            encoded,
            encoder_decoder_mask,
            encoder_decoder_bias,
            enable_dropout=enable_dropout)
        y = x + self.post_cross_attention_dropout(
            y, deterministic=not enable_dropout)
        y = activation_partitioning.with_sharding_migration(
            y,
            self.activation_partitioning_dims,
            logical_axis_names=('batch', 'length', 'embed'))

      # MLP block.
      z = self.pre_mlp_layer_norm(
          y, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths)
      z = activation_partitioning.with_sharding_migration(
          z,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))
      if logit_mask is not None:
        z = logit_mask_queries * z
      z = self.mlp(
          z,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths,
          enable_dropout=enable_dropout)
      z = y + self.post_mlp_dropout(z, deterministic=not enable_dropout)
      z = activation_partitioning.with_sharding_migration(
          z,
          self.activation_partitioning_dims,
          logical_axis_names=('batch', 'length', 'embed'))

    if self.sow_intermediates:
      self.sow('intermediates', 'activations', z)
    # scan expects functions to have a signature: fn(carry, in) --> carry, out
    if self.scanned:
      return z, None
    else:
      return z
class Decoder(nn.Module, param_remapping.ParameterRemappable):
  """A stack of decoder layers.

  This module can be used with or without the encoder stack. To use without an
  encoder, pass in encoded=None. This will bypass the encoder-decoder attention.

  Attributes:
    layer_factory: A callable that returns a DecoderLayer.
    dropout_factory: A callable that returns the dropout to apply to the input
      and before the final logits.
    layer_norm_factory: A callable that returns a layer norm.
    output_logits_factory: A callable that returns the output logits. If not
      provided, then the token embedders are used.
    num_layers: Number of layers to generate.
    dtype: DType to cast the embedded inputs.
    layer_remat: whether and how to apply jax.remat to each layer to perform
      recomputation in the backward pass. Supported values are 'none', for no
      use of jax.remat; 'minimal', for a policy that recomputes only non-matmul
      operations (typically optimal); and 'full', for full recomputation of each
      layer. The (legacy) default is to use 'none' when `scan_layers=False` and
      and 'full' when `scan_layers=True`.
    scan_layers: whether to scan over layers.
    spmd_annotations: spmd annotations needed for scanned layers.
    shared_relative_position_bias_factory: A callable that returns a relative
      position bias instance which will be shared for all encoder layers. Only
      set this if using shared relative position biases.
    token_embedder_factory: A callable that returns a token embedder. Please
      provide either this or `shared_token_embedder`.
    shared_token_embedder: A callable that returns a token embedder shared
      between both encoder and decoder.
    position_embedder_factory: A callable that returns an absolute position
      embedder. Only provide this if you want absolute position embeddings.
    return_all_logits: If true, instead of returning only the logits of the last
      layer. All logits of potential "exit" layers (determined by `first_exit`
      and `exit_interval`) are returned. Adds a dimension to the returned Array.
    first_exit: First layer to compute the logits from for `return_all_logits`.
    exit_interval: Interval between exit layers for `return_all_logits`,
      counted from layer `first_exit`.
    meta_cls_factory: A callable that returns an optional meta classifier
      module, instantiated in `setup_meta_cls`.
    sow_intermediates: whether to track intermediates using Module.sow.
    scan_axis: axis over which to do scan over layers.
    capture_gradients: whether to track input gradients using a variable in the
      `grads` collection. This captures the gradient of the (combined) embedded
      inputs, i.e. the input to the first encoder layer.
  """
  layer_factory: t5_architecture.MakeDecoderLayerFn
  dropout_factory: Callable[[], nn.Module]
  layer_norm_factory: Callable[[], nn.Module]
  num_layers: int
  dtype: DType = jnp.float32
  layer_remat: str = 'legacy'
  scan_layers: bool = False
  spmd_annotations: Any = None
  shared_relative_position_bias_factory: Optional[Callable[[],
                                                           nn.Module]] = None
  output_logits_factory: Optional[Callable[[], nn.Module]] = None

  # Embedders: Either a token_embedder_factory factory or shared token embedder
  # must be provided. The position embedder is optional and provided when
  # absolute position embeddings are desired.
  token_embedder_factory: Optional[Callable[[],
                                            embedding.Embedder[Array]]] = None
  shared_token_embedder: Optional[embedding.Embed] = None
  position_embedder_factory: Optional[Callable[
      [], embedding.Embedder[Array]]] = None

  return_all_logits: bool = False
  first_exit: int = 0  # Zero means the first contextual representations.
  exit_interval: int = 1
  meta_cls_factory: Optional[Callable[[], nn.Module]] = None

  sow_intermediates: bool = False
  scan_axis: int = 1
  capture_gradients: bool = False
def setup(self):
# Set up the embedders.
if (self.token_embedder_factory,
self.shared_token_embedder).count(None) != 1:
raise ValueError(
'Please set exactly one of token_embedder_factory or '
'shared_token_embedder. token_embedder_factory was %s, and '
'shared_token_embedder was %s.' %
(self.token_embedder_factory, self.shared_token_embedder))
if self.shared_token_embedder is not None:
embedders = {'token_ids': self.shared_token_embedder}
else:
self.token_embedder_factory: Callable[[], embedding.Embed]
self.token_embedder = self.token_embedder_factory()
embedders = {'token_ids': self.token_embedder}
if self.position_embedder_factory is not None:
self.position_embedder_factory: Callable[[], embedding.Embed]
self.position_embedder = self.position_embedder_factory()
embedders['position_ids'] = self.position_embedder
self.embedder = embedding.MultiEmbed(
embedders,
sow_intermediates=self.sow_intermediates,
capture_gradients=self.capture_gradients)
self.input_dropout = self.dropout_factory()
if self.scan_layers and self.shared_relative_position_bias_factory:
raise ValueError("Scanned layer mode doesn't support shared relative"
'position biases.')
self.relpos_bias = (
self.shared_relative_position_bias_factory()
if self.shared_relative_position_bias_factory is not None else None)
self.decoder = self._setup_layer_sequence()
self.decoder_norm = self.layer_norm_factory()
self.output_dropout = self.dropout_factory()
self.setup_output_logits()
self.setup_meta_cls()
  def _setup_layer_sequence(self):
    """Builds the decoder layers and wraps them in a layer sequence.

    Returns:
      A TransparentLayerSequencePartial over `self.num_layers` layers; it
      returns all intermediate representations when `self.return_all_logits`
      is set.

    Raises:
      ValueError: if `self.scan_layers` is True (early exiting is incompatible
        with scanned layers).
    """
    lyrf = lambda: self.layer_factory(  # pylint: disable=g-long-lambda
        shared_relative_position_bias=self.relpos_bias)
    # Optionally wrap the factory with jax.remat according to
    # self.layer_remat. NOTE(review): static_argnums presumably mark the
    # layer __call__'s non-array flags — confirm against maybe_remat.
    lyrf = t5_architecture.maybe_remat(
        lyrf,
        self.layer_remat,
        self.scan_layers,
        static_argnums=(5, 6, 7, 8, 9))
    if not self.scan_layers:
      self.layers = [lyrf() for _ in range(self.num_layers)]
      return TransparentLayerSequencePartial(
          layers=self.layers, return_all_representations=self.return_all_logits)
    else:
      # TODO: add adaptive computation support with scan_layers.
      raise ValueError('Early exiting is not supported with scan_layers.')
      # return self._construct_scanned_decoder(lyrf, self.num_layers)
def _construct_scanned_decoder(
    self,
    lyrf: Callable[[], nn.Module],
    num_layers: int,
    num_broadcast_args: int = 10) -> Callable[..., Array]:
  """Constructs decoder from layer factory using scan.

  Args:
    lyrf: Factory returning a single decoder layer module.
    num_layers: Number of scan iterations (stacked layers).
    num_broadcast_args: Number of call arguments that are broadcast unchanged
      to every scan iteration rather than sliced per layer.

  Returns:
    The scanned decoder module produced by calling the transformed factory.
  """
  initializing = self.is_mutable_collection('params')
  # We scan the parameters along scan_axis (default =1) as
  # an XLA layout optimization.
  params_spec = self.scan_axis if initializing else transforms.ScanIn(
      self.scan_axis)
  # Decoding cache entries are stacked along the leading dimension.
  cache_spec = 0
  intermediates_spec = 2  # Stacks intermediate layer outputs in dimension 2.
  scan_annotation = (
      self.spmd_annotations['decoder']
      if self.spmd_annotations is not None else None)
  lyrf = transforms.factory_scan(
      lyrf,
      in_axes=(nn.broadcast,) * num_broadcast_args,
      variable_axes={
          'params': params_spec,
          'cache': cache_spec,
          'intermediates': intermediates_spec,
      },
      # Each layer gets fresh param/dropout RNGs.
      split_rngs={
          'params': True,
          'dropout': True
      },
      length=num_layers,
      data_transform=transforms.inner_scan_spmd(scan_annotation,
                                                self.scan_axis),
      axis_name='layers',
      axes_collections=('params', 'cache'),
  )
  return lyrf()
@nn.nowrap
def setup_output_logits(self):
  """Instantiates the output logits module, if a factory was configured.

  This is a separate hook so subclasses can customize how logits are
  produced. When no factory is set, `self.logits_dense` is left as None and
  `compute_logits` falls back to the shared input embedding table.
  """
  # TODO: Re-merge with setup() once it's easier to Gin-configure
  # shared modules, and directly pass submodules (instead of using factories).
  factory = self.output_logits_factory
  self.logits_dense = factory() if factory else None
@nn.nowrap
def setup_meta_cls(self):
  """Instantiates the optional meta-classifier head.

  `self.meta_cls` stays None when no factory is configured, in which case the
  decoder returns logits only (no auxiliary meta predictions).
  """
  factory = self.meta_cls_factory
  self.meta_cls = factory() if factory else None
def embed_and_combine_inputs(
    self,
    decoder_input_tokens,
    decoder_positions=None,
    *,
    segment_ids: Optional[Array] = None,
    enable_dropout: bool = True,
    decode: bool = False,
):
  """Embeds decoder tokens (and optionally positions) into one array.

  Args:
    decoder_input_tokens: <int>[batch, len] decoder input token IDs.
    decoder_positions: Optional explicit positions for packed examples; when
      absent and a position embedder is configured, absolute positions
      0..len-1 are used.
    segment_ids: Input segmentation info for packed examples.
    enable_dropout: Enables input dropout if set to True.
    decode: True when running in autoregressive decode mode.

  Returns:
    The embedded inputs, cast to `self.dtype`.
  """
  assert decoder_input_tokens.ndim == 2  # (batch, len)

  inputs_to_embed = {'token_ids': decoder_input_tokens}
  if 'position_ids' in self.embedder.embedders:
    if decoder_positions is None:
      # Default to absolute positions, broadcast across the batch.
      decoder_positions = jnp.arange(
          decoder_input_tokens.shape[-1])[None, :]
    inputs_to_embed['position_ids'] = decoder_positions

  embedded = self.embedder(
      segment_ids=segment_ids, decode=decode, **inputs_to_embed)
  embedded = self.input_dropout(embedded, deterministic=not enable_dropout)
  return embedded.astype(self.dtype)
def compute_logits(
    self,  # pytype: disable=annotation-type-mismatch  # jax-ndarray
    decoder_outputs: Array,
    logit_mask: Array = None,
    enable_dropout: bool = True,
) -> Array:
  """Projects the final decoder activations to vocabulary logits.

  Args:
    decoder_outputs: Final decoder layer activations.
    logit_mask: Optional multiplicative mask applied to the activations.
    enable_dropout: Enables output dropout if set to True.

  Returns:
    The logits array.
  """
  # LayerNorm followed by dropout on the final representations.
  hidden = self.output_dropout(
      self.decoder_norm(decoder_outputs), deterministic=not enable_dropout)
  if logit_mask is not None:
    hidden = logit_mask * hidden
  if self.sow_intermediates:
    self.sow('intermediates', 'pre_logits_layer', hidden)

  if self.logits_dense is None:
    # Tied embeddings: use the transpose of the input embedding matrix, then
    # rescale to correctly normalize the pre-softmax logits.
    tied_logits = self.embedder.embedders['token_ids'].attend(hidden)  # pytype: disable=attribute-error
    return tied_logits / jnp.sqrt(hidden.shape[-1])
  return self.logits_dense(hidden)
def decode_from_continuous_inputs(
    self,
    embedded_inputs,
    encoder_outputs,
    decoder_positions=None,
    decoder_mask=None,
    encoder_decoder_mask=None,
    logit_mask=None,
    *,
    enable_dropout: bool = True,
    decode: bool = False,
    max_decode_length: Optional[int] = None,
    prefill: bool = False,
    prefill_lengths: Optional[Array] = None,
    return_prelogits: bool = False,
    **kwargs) -> Union[Array, Tuple[Array, Array]]:
  """Applies the decoder on the continuous (embedded) inputs.

  Args:
    embedded_inputs: Already-embedded decoder inputs.
    encoder_outputs: Encoder activations to cross-attend to; None makes this
      a decoder-only (language model) pass.
    decoder_positions: Decoder subsequence positions for packed examples.
    decoder_mask: Decoder self-attention mask.
    encoder_decoder_mask: Cross-attention mask over encoder outputs.
    logit_mask: Multiplicative mask applied before the logit transform.
    enable_dropout: Enables dropout if set to True.
    decode: Whether to use an autoregressive cache.
    max_decode_length: Optional maximum decoding length.
    prefill: Whether to run a partial sequence to prefill the cache.
    prefill_lengths: Lengths of the partial sequences filling the cache.
    return_prelogits: If True, returns decoder states without the logit
      computation.
    **kwargs: Passed through to the decoder layers.

  Returns:
    Logits (or prelogits), optionally paired with meta-classifier
    predictions when `self.meta_cls` is configured.
  """
  # If encoded is not given, this block is decoder only and does not contain
  # attention from decoder to encoder.
  if encoder_outputs is not None:
    assert encoder_outputs.ndim == 3  # (batch, len, depth)
  # Apply the decoder layers, attending to the encoder outputs (if provided),
  # and attending to previous decoder inputs (by masking future inputs).
  decoder_outputs = self.decoder(
      embedded_inputs,
      encoder_outputs,
      decoder_mask=decoder_mask,
      encoder_decoder_mask=encoder_decoder_mask,
      logit_mask=logit_mask,
      enable_dropout=enable_dropout,
      decode=decode,
      max_decode_length=max_decode_length,
      prefill=prefill,
      prefill_lengths=prefill_lengths,
      **kwargs)
  if self.scan_layers:
    # Scan returns (outputs, carry); keep the outputs only.
    decoder_outputs = decoder_outputs[0]
  if self.sow_intermediates:
    self.sow('intermediates', 'pre_logits_layer', decoder_outputs)
  if self.return_all_logits and not return_prelogits and not decode:
    # Keep only part of the layers (first_exit, first_exit+exit_interval, ...)
    # NOTE(review): assumes decoder_outputs is stacked per-layer on axis 0
    # when return_all_logits is set — confirm against the layer sequence.
    keep_inds = jnp.arange(
        self.first_exit, self.num_layers - 1, step=self.exit_interval)
    # And always keep the last layer.
    decoder_outputs = jnp.concatenate([
        decoder_outputs.take(keep_inds, 0),
        jnp.expand_dims(decoder_outputs[-1, ...], 0)
    ], 0)
    # Broadcast the logit mask across the stacked layer dimension.
    all_logits = self.compute_logits(
        decoder_outputs, jnp.resize(logit_mask, decoder_outputs.shape),
        enable_dropout)
    outputs = all_logits
  elif return_prelogits:
    outputs = decoder_outputs
  else:
    logits = self.compute_logits(decoder_outputs, logit_mask, enable_dropout)
    if self.sow_intermediates:
      self.sow('intermediates', 'logits', logits)
    outputs = logits
  if self.meta_cls is not None:
    meta_preds = self.meta_cls(decoder_outputs)
    outputs = (outputs, meta_preds)
  return outputs
def __call__(self,
             encoder_outputs,
             decoder_input_tokens,
             decoder_positions=None,
             decoder_mask=None,
             encoder_decoder_mask=None,
             *,
             segment_ids: Optional[Array] = None,
             enable_dropout: bool = True,
             decode: bool = False,
             max_decode_length: Optional[int] = None,
             prefill: bool = False,
             prefill_lengths: Optional[Array] = None,
             decoder_embedded_input: Optional[Array] = None,
             return_prelogits: bool = False,
             **kwargs):
  """Applies Transformer model on the inputs.

  Args:
    encoder_outputs: The outputs from the encoder. If None, do not attend to
      encoder outputs, resulting in a decoder only model (i.e. language
      model).
    decoder_input_tokens: The decoder input token IDs.
    decoder_positions: Decoder subsequence positions for packed examples.
    decoder_mask: Decoder self-attention mask.
    encoder_decoder_mask: The attention mask for the encoder outputs.
    segment_ids: Input segmentation info for packed examples.
    enable_dropout: Enables dropout if set to True.
    decode: Whether to prepare and use an autoregressive cache.
    max_decode_length: An optional integer specifying the maximum decoding
      length. Note that this is only used for defining the relative position
      embedding parameters.
    prefill: Whether to run a partial sequence to prefill the cache.
    prefill_lengths: The length of each partial sequence we are filling in the
      cache, lengths are inferred from the mask if not provided.
    decoder_embedded_input: If given, it is passed directly to the decoder as
      the embedded_inputs instead of embedding the `decoder_input_tokens`. Can
      be useful for calling the decoder multiple times for the same token with
      different intervals of layers, passing the hidden-state between calls.
    return_prelogits: Returns the decoder output directly, before the logits
      computation.
    **kwargs: Optional keyword arguments to pass to
      decode_from_continuous_inputs.

  Returns:
    The decoder output logits for next token prediction.
  """
  if decoder_embedded_input is None:
    embedded_inputs = self.embed_and_combine_inputs(
        decoder_input_tokens,
        decoder_positions=decoder_positions,
        segment_ids=segment_ids,
        enable_dropout=enable_dropout,
        decode=decode,
    )
  else:
    # Caller supplied pre-embedded inputs (e.g. hidden state from an earlier
    # partial pass over the layers).
    embedded_inputs = decoder_embedded_input
  # Mask used to zero out activations before the logit transform —
  # presumably derived from padding positions in the token ids; confirm in
  # `dense_attention.get_decoder_logit_mask`.
  logit_mask = dense_attention.get_decoder_logit_mask(decoder_input_tokens,
                                                      embedded_inputs.dtype)
  logits = self.decode_from_continuous_inputs(
      embedded_inputs,
      encoder_outputs,
      decoder_positions=decoder_positions,
      decoder_mask=decoder_mask,
      encoder_decoder_mask=encoder_decoder_mask,
      logit_mask=logit_mask,
      enable_dropout=enable_dropout,
      decode=decode,
      max_decode_length=max_decode_length,
      prefill=prefill,
      prefill_lengths=prefill_lengths,
      return_prelogits=return_prelogits,
      **kwargs)
  return logits
class EncoderDecoder(nn.Module, param_remapping.ParameterRemappable):
  """Transformer Model for sequence to sequence translation.

  Attributes:
    encoder_factory: A callable that returns the lower-level Encoder object. If
      shared_token_embedder_factory is non-None, then the result of it will be
      passed as the `shared_token_embedder` argument to `encoder_factory`.
    decoder_factory: A callable that returns the lower-level Decoder object. If
      shared_token_embedder_factory is non-None, then the result of it will be
      passed as the `shared_token_embedder` argument to `decoder_factory`.
    dtype: DType for encoder/decoder to cast embedded inputs, and for attention
      mask generation.
    scan_layers: whether to scan over layers.
    spmd_annotations: Optional mapping with 'encoder' and 'decoder' entries;
      only used for scanned spmd layers.
    shared_token_embedder_factory: A callable that returns an embedder that can
      be shared between the encoder and decoder.
  """
  # Core components: encoder and decoder embedders and layers.
  encoder_factory: t5_architecture.MakeEncoderFn
  decoder_factory: t5_architecture.MakeDecoderFn

  # Configures behavior when the model is called. Many of these might eventually
  # be better as call parameters.
  dtype: DType = jnp.float32
  scan_layers: bool = False  # only used to pass this option to predict_fn.
  spmd_annotations: Any = None  # only used for scanned spmd layers
  shared_token_embedder_factory: Optional[Callable[[], embedding.Embed]] = None

  def setup(self):
    # Optional token embedder shared between the encoder and decoder.
    self.token_embedder = (
        self.shared_token_embedder_factory()
        if self.shared_token_embedder_factory else None)

    # TODO: Clean up SPMD annotation code.
    if self.spmd_annotations is None:
      encoder_annotations = None
      decoder_annotations = None
    else:
      encoder_annotations = self.spmd_annotations['encoder']
      decoder_annotations = self.spmd_annotations['decoder']

    # Only forward `spmd_annotations` to factories whose signature accepts it.
    encoder_factory_params = tuple(
        inspect.signature(self.encoder_factory).parameters.keys())
    if 'spmd_annotations' in encoder_factory_params:
      self.encoder = self.encoder_factory(
          shared_token_embedder=self.token_embedder,
          spmd_annotations=encoder_annotations)
    else:
      self.encoder = self.encoder_factory(
          shared_token_embedder=self.token_embedder)

    decoder_factory_params = tuple(
        inspect.signature(self.decoder_factory).parameters.keys())
    if 'spmd_annotations' in decoder_factory_params:
      self.decoder = self.decoder_factory(
          shared_token_embedder=self.token_embedder,
          spmd_annotations=decoder_annotations)
    else:
      self.decoder = self.decoder_factory(
          shared_token_embedder=self.token_embedder)

  def compute_logits(
      self,  # pytype: disable=annotation-type-mismatch  # jax-ndarray
      decoder_outputs: Array,
      logit_mask: Array = None,
      enable_dropout: bool = True,
  ) -> Array:
    """Delegates the logit computation to the decoder submodule."""
    return self.decoder.compute_logits(
        decoder_outputs=decoder_outputs,
        logit_mask=logit_mask,
        enable_dropout=enable_dropout)

  def encode(self,
             encoder_input_tokens,
             encoder_segment_ids=None,
             encoder_positions=None,
             *,
             enable_dropout: bool = True) -> Array:
    """Applies Transformer encoder-branch on the inputs.

    Args:
      encoder_input_tokens: input data to the encoder.
      encoder_segment_ids: encoder input segmentation info for packed examples.
      encoder_positions: encoder input subsequence positions for packed
        examples.
      enable_dropout: Enables dropout if set to True.

    Returns:
      Encoded feature array from the transformer encoder.
    """
    # Make padding attention mask (token id 0 is treated as padding).
    encoder_mask = dense_attention.make_attention_mask(
        encoder_input_tokens > 0, encoder_input_tokens > 0, dtype=self.dtype)
    # Add segmentation block-diagonal attention mask if using segmented data.
    if encoder_segment_ids is not None:
      encoder_mask = dense_attention.combine_masks(
          encoder_mask,
          dense_attention.make_attention_mask(
              encoder_segment_ids,
              encoder_segment_ids,
              jnp.equal,
              dtype=self.dtype))

    return self.encoder(  # pytype: disable=attribute-error
        encoder_input_tokens,
        inputs_positions=encoder_positions,
        encoder_mask=encoder_mask,
        segment_ids=encoder_segment_ids,
        enable_dropout=enable_dropout)

  def decode(
      self,
      encoded,
      encoder_input_tokens,  # only needed for masks
      decoder_input_tokens,
      decoder_target_tokens,
      encoder_segment_ids=None,
      decoder_segment_ids=None,
      decoder_positions=None,
      *,
      enable_dropout: bool = True,
      decode: bool = False,
      # Args below were ported from decoder only code.
      max_decode_length: Optional[int] = None,
      prefill: bool = False,
      prefill_lengths: Optional[Array] = None,
      return_prelogits: bool = False,
      **kwargs):
    """Applies Transformer decoder-branch on encoded-input and target.

    Args:
      encoded: encoded input data from encoder.
      encoder_input_tokens: input to the encoder (only needed for masking).
      decoder_input_tokens: input token to the decoder.
      decoder_target_tokens: target token to the decoder.
      encoder_segment_ids: encoder segmentation info for packed examples.
      decoder_segment_ids: decoder segmentation info for packed examples.
      decoder_positions: decoder subsequence positions for packed examples.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.
      return_prelogits: Returns the decoder output directly, without the logit
        computation.
      **kwargs: additional keyword arguments to pass to the decoder layers.

    Returns:
      Logits array from transformer decoder. If return_prelogits is True,
      returns the decoder state without computing the logits.
    """
    # Make padding attention masks.
    if decode:
      # Do not mask decoder attention based on targets padding at
      # decoding/inference time.
      decoder_mask = None
      encoder_decoder_mask = dense_attention.make_attention_mask(
          jnp.ones_like(decoder_target_tokens),
          encoder_input_tokens > 0,
          dtype=self.dtype)
    else:
      decoder_mask = dense_attention.make_decoder_mask(
          decoder_target_tokens=decoder_target_tokens,
          dtype=self.dtype,
          decoder_segment_ids=decoder_segment_ids)
      encoder_decoder_mask = dense_attention.make_attention_mask(
          decoder_target_tokens > 0, encoder_input_tokens > 0, dtype=self.dtype)

    # Add segmentation block-diagonal attention masks if using segmented data.
    if encoder_segment_ids is not None:
      if decode:
        raise ValueError(
            'During decoding, packing should not be used but '
            '`encoder_segment_ids` was passed to `Transformer.decode`.')

      encoder_decoder_mask = dense_attention.combine_masks(
          encoder_decoder_mask,
          dense_attention.make_attention_mask(
              decoder_segment_ids,
              encoder_segment_ids,
              jnp.equal,
              dtype=self.dtype))

    # When computing the logits, we don't need decoder_target_tokens, which is
    # needed for computing the loss.
    return self.decoder(
        encoded,
        decoder_input_tokens=decoder_input_tokens,
        decoder_positions=decoder_positions,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask,
        segment_ids=decoder_segment_ids,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length,
        prefill=prefill,
        prefill_lengths=prefill_lengths,
        return_prelogits=return_prelogits,
        **kwargs)

  @property
  def encoder_embedder(self) -> embedding.MultiEmbed:
    # Convenience accessor for the encoder's embedder submodule.
    return self.encoder.embedder

  @property
  def decoder_embedder(self) -> embedding.MultiEmbed:
    # Convenience accessor for the decoder's embedder submodule.
    return self.decoder.embedder

  def __call__(self,
               encoder_input_tokens,
               decoder_input_tokens,
               decoder_target_tokens,
               encoder_segment_ids=None,
               decoder_segment_ids=None,
               encoder_positions=None,
               decoder_positions=None,
               *,
               enable_dropout: bool = True,
               decode: bool = False,
               max_decode_length: Optional[int] = None,
               only_propagate_state: Optional[bool] = False):
    """Applies Transformer model on the inputs.

    This method requires both decoder_target_tokens and decoder_input_tokens,
    which is a shifted version of the former. For a packed dataset, it usually
    has additional processing applied. For example, the first element of each
    sequence has id 0 instead of the shifted EOS id from the previous sequence.

    Args:
      encoder_input_tokens: input data to the encoder.
      decoder_input_tokens: input token to the decoder.
      decoder_target_tokens: target token to the decoder.
      encoder_segment_ids: encoder segmentation info for packed examples.
      decoder_segment_ids: decoder segmentation info for packed examples.
      encoder_positions: encoder subsequence positions for packed examples.
      decoder_positions: decoder subsequence positions for packed examples.
      enable_dropout: Enables dropout if set to True.
      decode: Whether to prepare and use an autoregressive cache.
      max_decode_length: An optional integer specifying the maximum decoding
        length. Note that this is only used for defining the relative position
        embedding parameters.
      only_propagate_state: Specifies if only the state should be propagated
        from the last executed layer.

    Returns:
      Logits array from full transformer.
    """
    encoded = self.encode(
        encoder_input_tokens,
        encoder_segment_ids=encoder_segment_ids,
        encoder_positions=encoder_positions,
        enable_dropout=enable_dropout)

    return self.decode(
        encoded,
        encoder_input_tokens,  # Only used for masks.
        decoder_input_tokens,
        decoder_target_tokens,
        encoder_segment_ids=encoder_segment_ids,
        decoder_segment_ids=decoder_segment_ids,
        decoder_positions=decoder_positions,
        enable_dropout=enable_dropout,
        decode=decode,
        max_decode_length=max_decode_length,
        only_propagate_state=only_propagate_state)
| 41,532 | 39.28419 | 110 | py |
flaxformer | flaxformer-main/flaxformer/components/embedding.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library with embedding classes and functions."""
import abc
import collections
import dataclasses
import enum
import functools
import math
from typing import (Any, Callable, DefaultDict, Dict, Generic, List, Mapping,
Optional, Sequence, TypeVar, Union)
import chex
from flax import linen as nn
from flax.linen import partitioning
import jax
from jax import lax
from jax import numpy as jnp
import numpy as np
from flaxformer.components import initializers
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
# Note: We don't use this in real models, but keep the default initializers the
# same as in Flax. Variance-scaling with fan-in matches the (num_embeddings,
# features) table layout used by `Embed` below.
default_embed_init = nn.initializers.variance_scaling(
    1.0, 'fan_in', 'normal', out_axis=0)
@enum.unique
class EmbedCombineMethod(enum.IntEnum):
  """How `MultiEmbed.combine_embeddings` merges per-feature embeddings."""
  # We use IntEnum here so that this class is serializable. Enum isn't.
  SUM = 1  # Element-wise sum; requires matching embedding sizes.
  CONCAT = 2  # Concatenate along the last (feature) axis.
# Type of the inputs accepted by an `Embedder` (e.g. `Array` for tokens).
_Inputs = TypeVar('_Inputs')


class Embedder(Generic[_Inputs], metaclass=abc.ABCMeta):
  """Abstract interface for modules that embed inputs of type `_Inputs`."""

  @abc.abstractmethod
  def __call__(self,
               inputs: _Inputs,
               *,
               segment_ids: Optional[Array] = None,
               decode: bool = False,
               enable_dropout: bool = True) -> Array:
    """Returns embeddings of the inputs.

    The generic type parameter `_Inputs` allows this interface to be used for
    embedding any type. For example, the base-level `Embed` class defined below
    inherits from `Embedder[Array]` since it is an embedder of `Array`s. At the
    other end of the spectrum, one could define a custom dataclass with that
    holds a combination of text, audio, and image inputs and interit from
    `Embedder[MyDataclass]`.

    Args:
      inputs: The inputs to embed.
      segment_ids: Input segmentation info for packed examples.
      decode: True if running in single-position autoregressive decode mode.
      enable_dropout: Enables dropout if set to True.

    Returns:
      The embedded inputs.
    """
class InspectableMultiEmbedder(Generic[_Inputs], Embedder[_Inputs]):
  """Interface for embedders that provide hooks for interpretability tools.

  Splitting embedding into `get_individual_embeddings` and
  `combine_embeddings` lets tools inspect or modify the per-feature
  embeddings before they are merged.
  """

  def __call__(self,
               inputs: _Inputs,
               *,
               segment_ids: Optional[Array] = None,
               decode: bool = False,
               enable_dropout: bool = True) -> Array:
    """Embeds inputs using get_individual_embeddings and combine_embeddings."""
    # Embed the inputs and pass results directly into `combine_embeddings`.
    return self.combine_embeddings(
        self.get_individual_embeddings(
            inputs,
            segment_ids=segment_ids,
            decode=decode,
            enable_dropout=enable_dropout))

  @abc.abstractmethod
  def get_individual_embeddings(self,
                                inputs: Array,
                                *,
                                segment_ids: Optional[Array] = None,
                                decode: bool = False,
                                enable_dropout: bool = True) -> Sequence[Array]:
    """Embeds the contents of each input array and returns the results."""

  @abc.abstractmethod
  def combine_embeddings(self, embeddings: Sequence[Array]) -> Array:
    """Combines the separate embeddings into a single array."""
class DictEmbedder(nn.Module, InspectableMultiEmbedder[Mapping[str, Any]]):
  """Embeds any number of inputs and combines them for further processing.

  Attributes:
    embedders: A dictionary with the name of the embedders as keys, and their
      embedding modules as values (usually Embed for input tokens, but can be
      any module). To embed inputs with these embedders, the dict used to call
      this class need to match the names of the embedders in this dictionary. If
      the resulting embeddings are to be summed, the `embedding_size` attributes
      of all embedders need to match, but that is not a requirement in case they
      are to be concatenated.
    embeddings_combiner: A function that determines how the results of the
      individual embedders should be combined. Defaults to element-wise sum.
  """
  embedders: Mapping[str, Embedder[Any]]
  embeddings_combiner: Callable[[Sequence[Array]], Array] = (
      sum  # pytype: disable=annotation-type-mismatch  # jax-ndarray
  )

  def get_individual_embeddings(
      self,  # pytype: disable=signature-mismatch  # jax-ndarray
      inputs: Mapping[str, Any],
      *,
      segment_ids: Optional[Array] = None,
      decode: bool = False,
      enable_dropout: bool = True,
  ) -> Sequence[Array]:
    """Embeds each keyword argument with its corresponding embedder.

    Args:
      inputs: The inputs to be embedded. All keys in `inputs` must be present in
        `self.embedders`. The shape of each input tensor should be <int>[...,
        seq_len]. When using a first batch dimension, the batch dimensions also
        need to match, or be 1 (to broadcast).
      segment_ids: Input segmentation info for packed examples.
      decode: Decoding parameter to pass through to all embedders.
      enable_dropout: Enables dropout if set to True.

    Returns:
      A list of individual embeddings, in the iteration order `inputs`. A tensor
      for an embedder with name `k` is shaped <float32>[..., embedding_size_k].
      If the embeddings are to be summed by `combine_embeddings`, then their
      embedding sizes should match.
    """
    # Keys must match exactly so no embedder or input is silently ignored.
    if inputs.keys() != self.embedders.keys():
      raise ValueError(f'Expected input keys {self.embedders.keys()}, '
                       f'but got {inputs.keys()}')

    embeddings = []
    for k, v in inputs.items():
      embeddings.append(self.embedders[k](
          v,
          segment_ids=segment_ids,
          decode=decode,
          enable_dropout=enable_dropout))
    return embeddings

  def combine_embeddings(self, embeddings: Sequence[Array]) -> Array:
    """Combines the dictionary of embeddings using the combine method."""
    return self.embeddings_combiner(embeddings)
class EmbedderWithDecode(Generic[_Inputs], Embedder[_Inputs]):
  """Denotes embedder classes that support the `decode` parameter.

  `MultiEmbed.get_individual_embeddings` uses an isinstance check on this
  marker class to decide whether to forward `decode` to the embedder.
  """
  pass
class EmbedderWithDeterministic(Generic[_Inputs], Embedder[_Inputs]):
  """Denotes embedder classes that support the `deterministic` parameter.

  `MultiEmbed.get_individual_embeddings` uses an isinstance check on this
  marker class to decide whether to forward `deterministic` to the embedder.
  """
  pass
class Embed(nn.Module, Embedder[Array]):
  """An embedder for `Array`s.

  A parameterized function from integers [0, n) to d-dimensional vectors.

  Attributes:
    num_embeddings: number of embeddings.
    features: number of feature dimensions for each embedding.
    cast_input_dtype: if set, inputs are cast to this dtype before lookup.
    dtype: the dtype of the embedding vectors (default: float32).
    attend_dtype: if set, dtype used by `attend` in place of `dtype`.
    embedding_init: embedding initializer.
    one_hot: performs the gather with a one-hot contraction rather than a true
      gather. This is currently needed for SPMD partitioning.
    axes: default axis metadata names for the embedding table.
    input_axis_names: default axis metadata names for the input activations.
  """
  num_embeddings: int
  features: int
  cast_input_dtype: Optional[DType] = None
  dtype: DType = jnp.float32
  attend_dtype: Optional[DType] = None
  embedding_init: Initializer = default_embed_init  # pytype: disable=annotation-type-mismatch  # jax-types
  one_hot: bool = False
  axes: Sequence[str] = ('vocab', 'embed')
  input_axis_names: Sequence[str] = ('batch', 'length')
  # Populated in setup(); excluded from the generated constructor.
  embedding: Array = dataclasses.field(init=False)

  def setup(self):
    # param_with_axes records the logical axis names so the table can be
    # partitioned; the table itself is stored in float32.
    self.embedding = partitioning.param_with_axes(
        'embedding',
        self.embedding_init,
        (self.num_embeddings, self.features),
        jnp.float32,
        axes=tuple(self.axes),
    )

  def __call__(
      self,
      inputs: Array,
      *,
      segment_ids: Optional[Array] = None,
      decode: bool = False,
      enable_dropout: bool = True,
      input_axis_names: Optional[Sequence[str]] = None,
  ) -> Array:
    """Embeds the inputs along the last dimension.

    Args:
      inputs: input data, all dimensions are considered batch dimensions.
      segment_ids: Input segmentation info for packed examples.
      decode: True if running in single-position autoregressive decode mode.
      enable_dropout: Enables dropout if set to True.
      input_axis_names: Names of axes of input array. Used for logical
        activation sharding annotation. If None, then no output sharding
        annotation will be generated.

    Returns:
      Output which is embedded input data. The output shape follows the input,
      with an additional `features` dimension appended.
    """
    del segment_ids  # Unused.
    if input_axis_names is None:
      input_axis_names = self.input_axis_names
    if self.cast_input_dtype:
      inputs = inputs.astype(self.cast_input_dtype)
    if not jnp.issubdtype(inputs.dtype, jnp.integer):
      raise ValueError('Input type must be an integer or unsigned integer.')
    if self.one_hot:
      # Express the lookup as one_hot(inputs) @ table so SPMD partitioning
      # can shard the contraction; annotate the one-hot with the vocab axis.
      iota = lax.iota(jnp.int32, self.num_embeddings)
      one_hot = jnp.array(inputs[..., jnp.newaxis] == iota, dtype=self.dtype)
      if input_axis_names is not None and self.axes:
        one_hot = partitioning.with_sharding_constraint(
            one_hot,
            tuple(input_axis_names) + (self.axes[0],))
      output = jnp.dot(one_hot, jnp.asarray(self.embedding, self.dtype))
    else:
      # Plain gather, annotated with the embed axis on the output.
      output = jnp.asarray(self.embedding, self.dtype)[inputs]
      if input_axis_names is not None and self.axes:
        output = partitioning.with_sharding_constraint(
            output,
            tuple(input_axis_names) + (self.axes[1],))
    return output

  def attend(self, query: Array) -> Array:
    """Attend over the embedding using a query array.

    Args:
      query: array with last dimension equal the feature depth `features` of the
        embedding.

    Returns:
      An array with final dim `num_embeddings` corresponding to the batched
      inner-product of the array of query vectors against each embedding.
      Commonly used for weight-sharing between embeddings and logit transform
      in NLP models.
    """
    dtype = self.attend_dtype if self.attend_dtype is not None else self.dtype
    return jnp.dot(query, jnp.asarray(self.embedding, dtype).T)
# DEPRECATED.
# TODO: Delete this in favor of the type-safe `DictEmbedder`.
class MultiEmbed(nn.Module):
  """Embeds any number of inputs and combines them for further processing.

  Attributes:
    embedders: A dictionary with the name of the embedders as keys, and their
      embedding modules as values (usually Embed for input tokens, but can be
      any module). To embed inputs with these embedders, the keyword arguments
      provided to the __call__-method of this class need to match the names of
      the embedders in this dictionary. If the resulting embeddings are to be
      summed, the `embedding_size` attributes of all embedders need to match,
      but that is not a requirement in case they are to be concatenated.
    sow_intermediates: whether to track intermediates using Module.sow.
    capture_gradients: whether to track input gradients using a variable in the
      `grads` collection. This captures the gradient of the (combined) embedded
      inputs, i.e. the output of this module which is usually the input to the
      first encoder layer.
  """
  embedders: Dict[str, Union[Embedder[Array], Callable[[Array], Array]]]
  sow_intermediates: bool = False
  capture_gradients: bool = False

  def get_individual_embeddings(
      self,
      decode: bool = False,
      deterministic: bool = False,
      segment_ids: Optional[Array] = None,
      **input_kwargs: Mapping[str, Array]) -> Dict[str, Array]:
    """Embeds each keyword argument with its corresponding embedder.

    The names of the keyword arguments need to match. To embed the input keyword
    argument 'word_embed', self.embedders['word_embed'] needs to exist.

    Args:
      decode: Decoding parameter to pass through to all embedders.
      deterministic: Deterministic parameter to pass through to all embedders.
      segment_ids: Input segmentation info for packed examples.
      **input_kwargs: The input tensors to be embedded, with a name that matches
        the embedder in self.embedders. The shape of each input tensor should be
        <int64>[..., seq_len]. When using a first batch dimension, the batch
        dimensions also need to match, or be 1 (to broadcast).

    Returns:
      A dictionary mapping the input keys to their embedded inputs. A tensor for
      an embedder with name `k` is shaped <float32>[..., embedding_size_k].
      If the embeddings are to be summed by `combine_embeddings`, then their
      embedding sizes should match.
    """
    # When an embedder is registered under 'segment_ids', feed the segment ids
    # to it as a regular input as well.
    if 'segment_ids' in self.embedders:
      if segment_ids is not None:
        input_kwargs = dict(**input_kwargs, segment_ids=segment_ids)

    embeddings = {}
    for k, v in input_kwargs.items():
      embedder: Callable[..., Array] = self.embedders[k]
      # Only forward optional kwargs that the embedder declares support for
      # (via the marker base classes), so plain callables keep working.
      passthru_kwargs = {}
      if isinstance(embedder, EmbedderWithDecode):
        passthru_kwargs['decode'] = decode
      if isinstance(embedder, EmbedderWithDeterministic):
        passthru_kwargs['deterministic'] = deterministic
      if isinstance(embedder, Embedder):
        passthru_kwargs['segment_ids'] = segment_ids
      embeddings[k] = embedder(v, **passthru_kwargs)
    return embeddings

  def combine_embeddings(
      self,
      embeddings: Dict[str, Array],
      combine_method: EmbedCombineMethod = EmbedCombineMethod.SUM) -> Array:
    """Combines the dictionary of embeddings using the combine method.

    Args:
      embeddings: A dictionary containing the embeddings to be combined, with
        the names of the embeddings as keys, and embedding tensors as values.
        Each embedding `k` is shaped <float32>[..., seq_len, embedding_size_k].
        Embedding sizes need to match if they are to be summed.
      combine_method: The method used for combination: sum or concat.

    Returns:
      A tensor with the combined embeddings <float32>[..., embedding_size] in
      case of summing, and <float32>[..., size_1 + size_2 + ..] in case of
      concatenation.

    Raises:
      ValueError: If the given combine_method is unknown.
    """
    if combine_method == EmbedCombineMethod.SUM:
      return jax.tree_util.tree_reduce(
          lambda total, embedding: total + embedding, embeddings)
    elif combine_method == EmbedCombineMethod.CONCAT:
      # Use jax.tree_util.tree_leaves: the top-level jax.tree_leaves alias is
      # deprecated and removed in recent JAX releases; this also matches the
      # jax.tree_util.tree_reduce usage in the SUM branch above.
      return jnp.concatenate(jax.tree_util.tree_leaves(embeddings), axis=-1)
    else:
      raise ValueError((
          f'Invalid combine_method {combine_method} given to combine_embeddings'
          '. Allowed values: sum, concat.'))

  @nn.compact
  def __call__(self,
               combine_method: EmbedCombineMethod = EmbedCombineMethod.SUM,
               *,
               segment_ids: Optional[Array] = None,
               decode: bool = False,
               deterministic: bool = False,
               **input_kwargs: Mapping[str, Array]) -> Array:
    """Embeds each input with its corresponding embedder and combines them.

    Args:
      combine_method: The method used for combination: sum or concat.
      segment_ids: Input segmentation info for packed examples.
      decode: Parameter to pass through to all embedders.
      deterministic: Parameter to pass through to all embedders.
      **input_kwargs: The input tensors to be embedded, with a name that matches
        the embedder in self.embedders, and each shaped: <int64>[..., seq_len].

    Returns:
      A tensor with the combined embeddings <float32>[..., embedding_size] in
      case of summing, and
      <float32>[..., embedding_size_1 + embedding_size_2 + ..] in case of
      concatenation.
    """
    y = self.combine_embeddings(
        self.get_individual_embeddings(
            segment_ids=segment_ids,
            decode=decode,
            deterministic=deterministic,
            **input_kwargs),
        combine_method=combine_method)

    # We sow the embedded (continuous) inputs and grads for feature attribution.
    if self.sow_intermediates:
      self.sow('intermediates', 'output', y)

    if not self.sow_intermediates and self.capture_gradients:
      raise ValueError('Must sow intermediates when capture_gradients is True.')

    # Capture the gradients by adding a zeros variable that will catch grads.
    # We only do this when `grads` is mutable because that prevents the grads
    # variable from being added for `Model.predict`.
    if (self.sow_intermediates and self.capture_gradients and
        self.scope.is_mutable_collection('grads')):
      eps = partitioning.variable_with_axes(
          'grads',
          'output_grad',
          lambda: jnp.zeros_like(y),
          axes=('batch', 'length', 'embed'))
      y = y + eps.value
    return y
class FixedEmbed(nn.Module, EmbedderWithDecode[Array]):
  """Fixed (not learnable) embeddings specified by the initializer function.

  Note: This embedding is not currently compatible with using prefixes when
  decoding because it assumes that the decoding loop starts at position 0.

  Attributes:
    features: The number of features (size) of each embedding vector.
    max_length: The maximum supported length.
    embedding_init: The initializer function that defines the embeddings.
    dtype: The DType to use for the embeddings.
  """
  features: int
  max_length: int = 2048
  embedding_init: Initializer = initializers.sinusoidal()
  dtype: jnp.dtype = jnp.float32

  def setup(self):
    # The key is set to None because sinusoid init is deterministic.
    shape = (self.max_length, self.features)
    # Note: not a Flax param — the table is recomputed, never learned.
    self.embedding = self.embedding_init(None, shape, self.dtype)  # pytype: disable=wrong-arg-types  # jax-ndarray

  @nn.compact
  def __call__(self,
               inputs,
               *,
               segment_ids: Optional[Array] = None,
               decode: bool = False,
               enable_dropout: bool = True):
    """Returns the fixed position embeddings specified by the initializer.

    Args:
      inputs: <int>[batch_size, seq_len] input position indices.
      segment_ids: Input segmentation info for packed examples.
      decode: True if running in single-position autoregressive decode mode.
      enable_dropout: Enables dropout if set to True.

    Returns:
      The fixed position embeddings <float32>[batch_size, seq_len, features].
    """
    del segment_ids  # Unused.
    # We use a cache position index for tracking decoding position.
    # TODO: Keep track of this index in the decoder instead.
    if decode:
      # The counter starts at -1 so that, after the cache-initialization call
      # increments it, the first real decode step reads position 0.
      # NOTE(review): this assumes exactly one init call whose output is
      # discarded — confirm against the decoding loop.
      position_embedder_index = self.variable(
          'cache', 'position_embedder_index',
          lambda: jnp.array(-1, dtype=jnp.uint32))
      i = position_embedder_index.value
      position_embedder_index.value = i + 1
      # In decode mode `inputs` is ignored; the current row of the table is
      # sliced out by the tracked index.
      return jax.lax.dynamic_slice(self.embedding, jnp.array((i, 0)),
                                   np.array((1, self.features)))
    return jnp.take(self.embedding, inputs, axis=0)
class PositionEmbed(nn.Module, EmbedderWithDecode[Array]):
  """Learned absolute positional embeddings for the inputs.

  Note: This embedding is not currently compatible with using prefixes when
  decoding because it assumes that the decoding loop starts at position 0.

  Attributes:
    num_embeddings: The maximum supported length. We learn this many positions.
    features: The number of features (size) for each position embedding.
    dtype: The DType to use for the position embeddings.
    embedding_init: Initialize the position embeddings with this function.
  """
  num_embeddings: int
  features: int
  dtype: DType = jnp.float32
  embedding_init: Initializer = default_embed_init  # pytype: disable=annotation-type-mismatch  # jax-types

  def setup(self):
    shape = (self.num_embeddings, self.features)
    # Learned table, annotated with logical axes for SPMD partitioning.
    self.pos_embedding = partitioning.param_with_axes(
        'pos_embedding',
        self.embedding_init,
        shape,
        jnp.float32,
        axes=('abspos_buckets', 'embed'))

  @nn.compact
  def __call__(self,
               inputs,
               *,
               segment_ids: Optional[Array] = None,
               decode: bool = False,
               enable_dropout: bool = True):
    """Applies PositionEmbed module.

    Args:
      inputs: <int>[batch_size, seq_len] input position indices.
      segment_ids: Input segmentation info for packed examples.
      decode: True if running in single-position autoregressive decode mode.
      enable_dropout: Enables dropout if set to True.

    Returns:
      The position embeddings <float32>[batch_size, seq_len, features].
    """
    del segment_ids  # Unused.
    # We use a cache position index for tracking decoding position.
    # TODO: Keep track of this index in the decoder instead.
    if decode:
      # Initialized to -1 and incremented on every call; the first real
      # decode step therefore reads position 0 (the cache-initialization
      # call's output is discarded). NOTE(review): confirm against the
      # decoding loop.
      position_embedder_index = self.variable(
          'cache', 'position_embedder_index',
          lambda: jnp.array(-1, dtype=jnp.uint32))
      i = position_embedder_index.value
      position_embedder_index.value = i + 1
      # In decode mode `inputs` is ignored; a single row is sliced out.
      return jax.lax.dynamic_slice(self.pos_embedding, jnp.array((i, 0)),
                                   np.array((1, self.features)))
    return jnp.take(self.pos_embedding, inputs, axis=0)
def rotate_half(x):
  """Maps `[a, b]` (split along the last axis) to `[-b, a]`.

  This is the pairwise rotation used by rotary position embeddings: the
  second half of the feature dimension is negated and moved in front of the
  first half.

  Args:
    x: Array whose last dimension has even size.

  Returns:
    An array of the same shape with the two halves rotated.
  """
  first_half, second_half = jnp.split(x, 2, axis=-1)
  return jnp.concatenate((-second_half, first_half), axis=-1)
@functools.partial(jax.jit, static_argnums=(4,))
def apply_rotary_embedding(q, k, cos, sin, decode=False, rotary_index=None):
  """Applies rotary position embeddings to query and key tensors.

  Args:
    q: <float>[batch, q_len, q_heads, dim] queries.
    k: <float>[batch, kv_len, kv_heads, dim] keys, or
      <float>[batch, kv_len, dim] for multi-query attention.
    cos: <float>[max_len, dim] cosine table.
    sin: <float>[max_len, dim] sine table.
    decode: Whether we are in single-step autoregressive decoding.
    rotary_index: <int>[batch] per-example decoding position; only consulted
      when `decode` is True and the query length is 1.

  Returns:
    A `(rotated_q, rotated_k)` pair with the same shapes as the inputs.
  """

  def rotate(t):
    # Same transform as `rotate_half`, inlined so this jitted function is
    # self-contained: [a, b] -> [-b, a] along the last axis.
    lo, hi = jnp.split(t, 2, axis=-1)
    return jnp.concatenate([-hi, lo], axis=-1)

  multiquery = len(k.shape) == 3
  if multiquery:
    # Multi-query attention shares one key head; insert a singleton heads
    # axis so the broadcasting below is uniform, and squeeze it back at the
    # end.
    k = jnp.expand_dims(k, 2)
  batch, qlen, qheads, dim = q.shape
  kbatch, klen, kheads, kdim = k.shape
  assert batch == kbatch, f'{batch} != {kbatch}'
  assert dim == kdim, f'{dim} != {kdim}'
  # cos/sin: [len, dim]; rotary_index: [batch].
  if decode and qlen == 1 and rotary_index is not None:
    # Single-step decoding: gather each example's row of the tables.  The
    # qlen == 1 check keeps cache initialization on the static branch below.
    qcos = jax.lax.broadcast_in_dim(
        cos[rotary_index, :], (batch, qlen, qheads, dim), (0, 3))
    qsin = jax.lax.broadcast_in_dim(
        sin[rotary_index, :], (batch, qlen, qheads, dim), (0, 3))
  else:
    # Prefix-fill / training: positions are simply 0..qlen-1.
    qcos = jax.lax.broadcast_in_dim(
        cos[:qlen, :], (batch, qlen, qheads, dim), (1, 3))
    qsin = jax.lax.broadcast_in_dim(
        sin[:qlen, :], (batch, qlen, qheads, dim), (1, 3))
  kcos = jax.lax.broadcast_in_dim(
      cos[:klen, :], (batch, klen, kheads, dim), (1, 3))
  ksin = jax.lax.broadcast_in_dim(
      sin[:klen, :], (batch, klen, kheads, dim), (1, 3))
  out_q = (q * qcos) + (rotate(q) * qsin)
  out_k = (k * kcos) + (rotate(k) * ksin)
  if multiquery:
    out_k = jnp.squeeze(out_k, 2)
  return out_q, out_k
def generate_fixed_pos_embedding(features,
                                 length,
                                 min_timescale=1.0,
                                 max_timescale=10000.0):
  """Generates the Sin/Cos tables used by rotary embeddings.

  (features // 2) sinusoid timescales are spaced as a geometric series from
  `min_timescale` to `max_timescale` (the max endpoint itself is excluded,
  but would be the next element of the series), evaluated at integer
  positions i in [0, length):

    output_sin[i, j] = sin(i / timescale[j % (features // 2)])
    output_cos[i, j] = cos(i / timescale[j % (features // 2)])

  i.e. the sinusoid values are tiled twice along the feature dimension.

  Args:
    features: an integer
    length: an integer
    min_timescale: an optional float
    max_timescale: an optional float

  Returns:
    output_sin: a float32 Tensor with shape [length, features]
    output_cos: a float32 Tensor with shape [length, features]
  """
  exponents = jnp.arange(0, features, 2, dtype=jnp.float32) / features
  timescale = min_timescale * (max_timescale / min_timescale)**exponents
  inv_timescale = 1. / timescale
  # Must use high precision einsum here, since rounding off to a bfloat16 is
  # catastrophic. bfloat16 rounds 257 to 256, but sin(257) is very different
  # from sin(256).
  angles = jnp.einsum(
      'i , j -> i j',
      jnp.arange(length),
      inv_timescale,
      precision=jax.lax.Precision.HIGHEST)
  # Tile the (features // 2) sinusoids twice along the feature axis.
  angles = jnp.concatenate([angles, angles], axis=-1)
  return jnp.sin(angles), jnp.cos(angles)
def _generate_primes(num_primes: int) -> Sequence[int]:
  """Returns the first `num_primes` primes, in increasing order."""
  primes = []
  candidate = 2
  while len(primes) < num_primes:
    if _is_prime(candidate):
      primes.append(candidate)
    candidate += 1
  return primes
def _is_prime(n: int) -> bool:
  """Returns True if `n` is prime.

  Uses trial division by every integer up to floor(sqrt(n)); this is plenty
  fast for the small prime counts the hash embedders request.

  Args:
    n: The integer to test.

  Returns:
    True if `n` is prime, False otherwise.
  """
  # 0, 1, and negative numbers are not prime. (Previously the empty
  # trial-division loop made them all report True.)
  if n < 2:
    return False
  # math.isqrt avoids float rounding for large `n`, unlike int(math.sqrt(n)).
  for i in range(2, math.isqrt(n) + 1):
    if n % i == 0:  # A number >=2 evenly divides `n`, so it is not prime.
      return False
  return True
class HashEmbed(nn.Module, Embedder[Array]):
  """Embeds integer identifiers using multiple hashing.

  Each input identifier's embedding vector is the concatenation of multiple
  shards, with each shard coming from a separate embedding table. To reduce the
  effect of hash collisions, the identifier is retrieved from each different
  embedding table using a different hash function.

  Attributes:
    features: Dimensionality of final embedding.
    num_embeddings_per_table: Size ("vocabulary") of each embedding table.
    num_tables: Number of embedding tables (a.k.a. hash functions / shards).
    cast_input_dtype: DType to cast input to.
    dtype: DType of resulting embeddings.
    embedding_init: Initializer for embeddings.
    one_hot: Performs the gather with a one-hot contraction rather than a true
      gather. This is currently needed for SPMD partitioning.
  """
  features: int
  num_embeddings_per_table: int
  num_tables: int = 8
  cast_input_dtype: Optional[DType] = None
  dtype: DType = jnp.float32
  embedding_init: Initializer = default_embed_init  # pytype: disable=annotation-type-mismatch  # jax-types
  one_hot: bool = False

  # Populated by `setup`; excluded from the generated constructor.
  _tables: Sequence[Embed] = dataclasses.field(init=False)
  _primes: Sequence[int] = dataclasses.field(init=False)

  def setup(self):
    # Each table contributes an equal-sized shard of the final vector.
    if self.features % self.num_tables != 0:
      raise ValueError(f'Expected `features` ({self.features}) % '
                       f'`num_tables` ({self.num_tables}) == 0')
    if self.num_tables <= 8:
      # For compatibility with the public Canine checkpoints.
      self._primes = [31, 43, 59, 61, 73, 97, 103, 113][:self.num_tables]
    else:
      self._primes = _generate_primes(self.num_tables)
    shard_embedding_size = self.features // self.num_tables
    tables = []
    for i in range(self.num_tables):
      tables.append(
          Embed(
              name=f'hash_embedder_table_{i}',
              num_embeddings=self.num_embeddings_per_table,
              features=shard_embedding_size,
              dtype=self.dtype,
              embedding_init=self.embedding_init,
              one_hot=self.one_hot))
    self._tables = tables

  def __call__(self,
               input_ids: Array,
               *,
               segment_ids: Optional[Array] = None,
               decode: bool = False,
               enable_dropout: bool = True) -> Array:
    """Converts IDs into embeddings via multiple hashing.

    Args:
      input_ids: The IDs to be hashed. <int>[..., seq_length]
      segment_ids: Input segmentation info for packed examples.
      decode: True if running in single-position autoregressive decode mode.
      enable_dropout: Enables dropout if set to True.

    Returns:
      The embeddings (concatenated across hash shards).
      <float>[..., seq_length, features]
    """
    if self.cast_input_dtype:
      input_ids = input_ids.astype(self.cast_input_dtype)
    embedding_shards = []
    for table, prime in zip(self._tables, self._primes):
      # Hash is `(id + 1) * prime mod table_size`. The `+ 1` keeps ID 0 from
      # landing in bucket 0 of every table, since 0 * prime == 0 for all
      # primes.
      # `hash_bucket_ids`: <int>[batch, seq]
      hash_bucket_ids = (((input_ids + 1) * prime) %
                         self.num_embeddings_per_table)
      # `shard_embeddings`: <float>[batch, seq, features/num_tables]
      shard_embeddings: Array = table(
          hash_bucket_ids,
          segment_ids=segment_ids,
          decode=decode,
          enable_dropout=enable_dropout)
      embedding_shards.append(shard_embeddings)
    # RESULT: <float>[batch, seq, features]
    return jnp.concatenate(embedding_shards, axis=-1)
class NgramHashEmbed(nn.Module, Embedder[Array]):
  """Produces embeddings for ngrams of identifiers.

  This is similar to `HashEmbed`, but instead of embedding just the input IDs,
  it embeds ngrams of those IDs.

  Attributes:
    ngram_orders: The sizes of ngrams to embed.
    padding_id: The ID to be used for padding the ends of the inputs.
    features: Dimensionality of final embedding.
    num_embeddings_per_table: Size ("vocabulary") of each embedding table.
    num_tables: Number of embedding tables (a.k.a. hash functions / shards).
    cast_input_dtype: DType to cast input to.
    dtype: DType of resulting embeddings.
    embedding_init: Initializer for embeddings.
    one_hot: Performs the gather with a one-hot contraction rather than a true
      gather. This is currently needed for SPMD partitioning.
  """
  ngram_orders: Sequence[int]
  padding_id: int
  features: int
  num_embeddings_per_table: int
  num_tables: int = 8
  cast_input_dtype: Optional[DType] = None
  dtype: DType = jnp.float32
  embedding_init: Initializer = default_embed_init  # pytype: disable=annotation-type-mismatch  # jax-types
  one_hot: bool = False

  # Populated by `setup`; excluded from the generated constructor.
  _tables_by_order: Mapping[str, Sequence[Embed]] = (
      dataclasses.field(init=False))
  _primes_by_table: Sequence[int] = dataclasses.field(init=False)

  def setup(self):
    # Each table contributes an equal-sized shard of the final vector.
    if self.features % self.num_tables != 0:
      raise ValueError(f'Expected `features` ({self.features}) % '
                       f'`num_tables` ({self.num_tables}) == 0')
    # One distinct prime multiplier per table so collisions differ per table.
    self._primes_by_table = _generate_primes(self.num_tables)
    shard_embedding_size = self.features // self.num_tables
    tables_by_order: DefaultDict[int, List[Embed]] = (
        collections.defaultdict(list))
    for order in self.ngram_orders:
      for i in range(self.num_tables):
        tables_by_order[order].append(
            Embed(
                name=f'{order}gram_hash_embed_table_{i}',
                num_embeddings=self.num_embeddings_per_table,
                features=shard_embedding_size,
                dtype=self.dtype,
                embedding_init=self.embedding_init,
                one_hot=self.one_hot))
    # Keyed by str(order) — NOTE(review): presumably because Flax handles
    # string-keyed mappings of submodules as attributes; confirm.
    self._tables_by_order = {
        str(order): table for order, table in tables_by_order.items()
    }

  def __call__(self,
               input_ids: Array,
               *,
               segment_ids: Optional[Array] = None,
               decode: bool = False,
               enable_dropout: bool = True) -> Array:
    """Converts IDs to ngram embeddings via multiple hashing.

    This can run entirely on the TPU and so requires no modifications to the
    CPU-side input function. Rather than computing string-wise n-grams, this
    function approximates n-grams by using multiple hash functions over a
    window of character IDs.

    Args:
      input_ids: The IDs to be hashed. <int>[batch..., seq_length]
      segment_ids: Segment IDs for packed examples. <int>[batch..., seq_length]
      decode: True if running in single-position autoregressive decode mode.
      enable_dropout: Enables dropout if set to True.

    Returns:
      The embeddings. <float>[batch..., seq_length, features]
    """
    if self.cast_input_dtype:
      input_ids = input_ids.astype(self.cast_input_dtype)
    if segment_ids is not None:
      chex.assert_shape(segment_ids, input_ids.shape)
      segment_ids = segment_ids.astype(self.cast_input_dtype)
    if segment_ids is not None:
      # Create an array that, when multiplied by the input, will zero out the
      # final position of each segment.
      boundary_mask = segment_ids == self._shift_left(segment_ids)
      # Create an array that contains `self.padding_id` at every boundary
      # position, and zeros elsewhere.
      boundary_padding = jnp.logical_not(boundary_mask) * self.padding_id
    # Compute hash values for all orders of ngrams of `input_ids`, for each
    # embedding lookup table. Note that the initial (empty) value ensures that,
    # unigram hashes will be at index 1, bigrams at index 2, etc.
    hashes_by_table_by_order: List[List[Array]] = [[]]
    cur_ids = input_ids
    for order in range(1, max(self.ngram_orders) + 1):
      hashes_by_table = []
      for table_idx in range(self.num_tables):
        # Each `n`-gram's hash value is computed by "extending" the `n-1`-gram's
        # hash value with the `n`th ID and re-hashing.
        prev_hash = hashes_by_table_by_order[-1][table_idx] if order > 1 else 0
        prime: int = self._primes_by_table[table_idx]
        hashed: Array = (prev_hash + cur_ids) * prime
        hashes_by_table.append(hashed)
      hashes_by_table_by_order.append(hashes_by_table)
      # Shift so that, on the next iteration, position i also sees the ID at
      # position i + order.
      cur_ids = self._shift_left(cur_ids)
      if segment_ids is not None:
        # Prevent leaking information across segments by zeroing out each
        # position that contains an ID that crossed from another segment, and
        # then replacing (only) those zeros with `self.padding_id`.
        cur_ids *= boundary_mask
        cur_ids += boundary_padding
    # Construct a mapping from ngram orders to lists of arrays, where each
    # <int>[batch..., seq_len] array contains the hashed ngram lookup keys for a
    # particular embedding table.
    hash_keys_by_table_by_order: Dict[int, List[Array]] = {}
    for order in self.ngram_orders:
      hash_keys_by_table_by_order[order] = [
          hashed % self.num_embeddings_per_table
          for hashed in hashes_by_table_by_order[order]
      ]
    # `ngram_embeddings`: A <float>[batch..., seq, dim] array for each order.
    ngram_embeddings: List[Array] = []
    for order in self.ngram_orders:
      tables: Sequence[Embed] = self._tables_by_order[str(order)]
      hash_keys_by_table: Sequence[Array] = hash_keys_by_table_by_order[order]
      embedding_shards: List[Array] = []
      for table, hash_keys in zip(tables, hash_keys_by_table):
        embedding_shards.append(
            table(
                hash_keys,
                segment_ids=segment_ids,
                decode=decode,
                enable_dropout=enable_dropout))
      # Shards concatenate along features; orders are summed below.
      ngram_embeddings.append(jnp.concatenate(embedding_shards, axis=-1))
    # TODO: Fancier aggregation function?
    result = sum(ngram_embeddings)
    chex.assert_shape(result, (*input_ids.shape, self.features))
    return result

  def _shift_left(self, ids: Array) -> Array:
    """Shifts `ids` left by one sequence position, padding the right."""
    sliced_ids = ids[..., 1:]
    batch_sizes = ids.shape[:-1]
    # Fill the vacated final position with `padding_id`.
    padding = jnp.expand_dims(jnp.tile(self.padding_id, batch_sizes), axis=-1)
    result = jnp.concatenate([sliced_ids, padding], axis=-1)
    chex.assert_shape(result, ids.shape)
    return result
| 35,706 | 38.19539 | 115 | py |
flaxformer | flaxformer-main/flaxformer/components/relative_position_biases_export_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for relative_position_biases."""
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import jax
from jax.experimental import jax2tf # type: ignore[import]
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
from flaxformer.components import relative_position_biases
class RelativePositionBiasesTest(parameterized.TestCase):
  """Tests SavedModel export behavior of RelativePositionBiases."""

  def setUp(self):
    super().setUp()
    self.num_heads = 3
    self.query_len = 5
    self.key_len = 7

  @parameterized.product(on_device_computation=[True, False])
  def test_relative_position_with_on_device_computation_exporting(
      self, on_device_computation):
    """Checks that `on_device_computation` controls graph-embedded constants.

    With `on_device_computation=True` the bias tensor should be computed in
    the exported graph rather than baked in as a constant of shape
    [1, query_len, key_len].
    """

    class TestModule(nn.Module):
      """A test Module that simply uses relpos layer."""

      @nn.compact
      def __call__(self, x):
        return relative_position_biases.RelativePositionBiases(
            num_buckets=12,
            max_distance=10,
            num_heads=3,
            dtype=jnp.float32,
            on_device_computation=on_device_computation,
        )(qlen=x.shape[0], klen=x.shape[-1], bidirectional=True, decode=False)

    inputs = np.ones((self.query_len, self.key_len))
    test_module = TestModule()
    params = test_module.init(jax.random.PRNGKey(0), inputs)

    class ExportableModule(tf.Module):
      """A mini Module that is exportable to TensorFlow."""

      def __init__(self, params, apply_fn):

        def create_var(value):
          return tf.Variable(value)

        # Mirror the JAX params as tf.Variables so they are tracked by the
        # SavedModel rather than inlined as constants.
        self._params = jax.tree_map(create_var, params)
        # Use jax2tf graph serialization because test inspects the graph.
        self._apply = jax2tf.convert(apply_fn, native_serialization=False)
        self._apply = tf.autograph.experimental.do_not_convert(self._apply)

      def __call__(self, x):
        return self._apply(self._params, x)

    # Export the module to SavedModel.
    module = ExportableModule(params=params, apply_fn=test_module.apply)

    @tf.function(autograph=False)
    def forward(x):
      return module(x)

    to_save = tf.Module()
    to_save.forward = forward
    to_save.params = list(module.variables)
    signatures = {
        'serving_default':
            forward.get_concrete_function(
                tf.TensorSpec((self.query_len, self.key_len), dtype=tf.int32)),
    }
    export_dir = self.create_tempdir('export_test').full_path
    tf.saved_model.save(
        to_save,
        export_dir,
        signatures=signatures,
    )
    # Inspect whether the graph has a constant embedded.
    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
      metagraph = tf.compat.v1.saved_model.loader.load(sess, ['serve'],
                                                       export_dir)
      has_embedded_const_tensor = False
      # Scan every function in the graph library for a Const node whose shape
      # matches the full bias tensor [1, query_len, key_len].
      for f in metagraph.graph_def.library.function:
        for n in f.node_def:
          if n.op == 'Const':
            if [d.size for d in n.attr['value'].tensor.tensor_shape.dim
               ] == [1, self.query_len, self.key_len]:
              has_embedded_const_tensor = True
              break
      # Using on_device_computation should give us a graph without constants.
      self.assertEqual(has_embedded_const_tensor, not on_device_computation)
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| 3,830 | 32.313043 | 79 | py |
flaxformer | flaxformer-main/flaxformer/components/initializers.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initializers for Flaxformer models."""
from typing import Union
import jax
from jax import numpy as jnp
import numpy as np
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
from flaxformer.types import PRNGKey
from flaxformer.types import Shape
def sinusoidal(min_scale: float = 1.0,
max_scale: float = 10000.0,
dtype: DType = jnp.float32) -> Initializer:
"""Creates 1D Sinusoidal Position Embedding Initializer.
Args:
min_scale: Minimum frequency-scale in sine grating.
max_scale: Maximum frequency-scale in sine grating.
dtype: The DType of the returned values.
Returns:
The sinusoidal initialization function.
"""
def init(key: PRNGKey, shape: Shape, dtype: DType = dtype) -> Array:
"""Sinusoidal init."""
del key
if dtype != np.float32:
raise ValueError('The sinusoidal initializer only supports float32.')
if len(list(shape)) != 2:
raise ValueError(
f'Expected a 2D shape (max_len, features), but got {shape}.')
max_len, features = shape
pe = np.zeros((max_len, features), dtype=dtype)
position = np.arange(0, max_len)[:, np.newaxis]
scale_factor = -np.log(max_scale / min_scale) / (features // 2 - 1)
div_term = min_scale * np.exp(np.arange(0, features // 2) * scale_factor)
pe[:, :features // 2] = np.sin(position * div_term)
pe[:, features // 2:2 * (features // 2)] = np.cos(position * div_term)
return jnp.array(pe)
return init
def truncated_normal(mean: Union[float, Array] = 0.0,
stddev: Union[float, Array] = 0.05,
dtype: DType = jnp.float32) -> Initializer:
"""Returns an initialization function "truncated normal".
This is the initialization that is used in the original BERT implementation.
Args:
mean: The mean of the random values to generate.
stddev: The standard deviation of the random values to generate.
dtype: dtype of the initialized values.
Returns:
The truncated normal initializer.
"""
def init(key: PRNGKey, shape: Shape, dtype: DType = dtype) -> Array:
return jax.random.truncated_normal(
key=key, lower=-2., upper=2., shape=shape, dtype=dtype) * stddev + mean
return init
| 2,868 | 33.566265 | 79 | py |
flaxformer | flaxformer-main/flaxformer/components/dense.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T5 Transformer model."""
# pylint: disable=attribute-defined-outside-init,g-bare-generic
import functools
import operator
from typing import Any, Callable, Iterable, Mapping, Optional, Sequence, Tuple, Union
from aqt.jax_legacy.jax import flax_layers as aqt_flax_layers
from aqt.jax_legacy.jax import quant_config as aqt_config
from aqt.jax_legacy.jax import quantization as aqt
from flax import linen as nn
from flax.core import frozen_dict
from flax.linen import partitioning
from flax.linen.linear import default_kernel_init
from jax import lax
import jax.numpy as jnp
import numpy as np
from flaxformer import activation_partitioning
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
# ------------------------------------------------------------------------------
# Adafactor-compatible DenseGeneral for attention layers.
# ------------------------------------------------------------------------------
def _normalize_axes(axes: Iterable[int], ndim: int) -> Tuple[int, ...]:
  """Converts possibly-negative axis indices into non-negative ones.

  Returns a tuple by convention; len() of the result then also gives the
  rank efficiently.
  """
  return tuple(axis + ndim if axis < 0 else axis for axis in axes)
def _canonicalize_tuple(x):
  """Returns `x` as a tuple, wrapping non-iterable scalars in a 1-tuple."""
  return tuple(x) if isinstance(x, Iterable) else (x,)
# The Flaxformer sharding API emits some names that are too detailed, so we
# remap them here. Any values that don't match keys here are unchanged.
# The 'heads * kv' key matches the name DenseGeneral builds by joining the
# fused kernel axes with ' * ' when `reshape_kernel` is enabled.
_RESHAPED_KERNEL_AXIS_NAME_MAP = frozen_dict.freeze({
    'heads * kv': 'joined_kv',
})
class DenseGeneral(nn.Module):
  """A linear transformation with flexible axes.

  Kernel stored as 2d parameter for compatibility with Adafactor optimizer.

  Attributes:
    features: tuple with numbers of output features.
    use_bias: whether to add a bias to the output (default: False).
    axis: tuple with axes to apply the transformation on.
    dtype: the dtype of the computation (default: float32).
    kernel_init: initializer function for the weight matrix.
    bias_init: initializer function for the bias.
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    kernel_axis_names: logical axis names to use for kernel sharding. Each
      should be one of _VALID_AXIS_NAMES in sharding.py.
    reshaped_kernel_axis_name_map: Rules for renaming fused kernel axes. We
      keep this as a separate parameter than kernel_axis_names so that
      experiments can toggle `reshape_kernel` without having to keep
      `kernel_axis_names` in sync.
    reshape_kernel: whether to reshape the kernel parameter to 2D for
      Adafactor.
  """
  features: Union[Iterable[int], int]
  use_bias: bool
  axis: Union[Iterable[int], int] = -1
  dtype: DType = jnp.float32
  kernel_init: Initializer = default_kernel_init  # pytype: disable=annotation-type-mismatch  # jax-types
  bias_init: Initializer = nn.initializers.zeros
  precision: Any = None
  kernel_axis_names: Optional[Sequence[str]] = None
  reshaped_kernel_axis_name_map: Mapping[str, str] = (
      _RESHAPED_KERNEL_AXIS_NAME_MAP)
  reshape_kernel: bool = True

  @nn.compact
  def __call__(self, inputs: Array) -> Array:
    """Applies a linear transformation to the inputs along multiple dimensions.

    Args:
      inputs: The nd-array to be transformed.

    Returns:
      The transformed input.
    """
    features = _canonicalize_tuple(self.features)
    axis = _canonicalize_tuple(self.axis)
    inputs = jnp.asarray(inputs, self.dtype)
    axis = _normalize_axes(axis, inputs.ndim)
    # Logical kernel shape: one dim per contracted input axis, then the
    # output feature dims. The stored parameter may be flattened to 2D
    # (contracted dims fused, feature dims fused) for Adafactor.
    kernel_shape = tuple([inputs.shape[ax] for ax in axis]) + features
    if self.reshape_kernel:
      kernel_param_shape = (np.prod([inputs.shape[ax] for ax in axis]),
                            np.prod(features))
    else:
      kernel_param_shape = kernel_shape
    # Determine axes names metadata for partitioning/adafactor rules.
    if self.kernel_axis_names is None:
      kernel_axis_names = ['unmodeled'] * len(kernel_param_shape)
    else:
      kernel_axis_names = self.kernel_axis_names
      if len(kernel_axis_names) != len(kernel_shape):
        raise ValueError(f"Kernel axis names {kernel_axis_names} doesn't match "
                         f'kernel shape {kernel_shape}.')
      if self.reshape_kernel:

        def _reshaped_axis_names(names):
          # Fuse names with ' * ', then remap fused names that the map knows
          # about (e.g. 'heads * kv' -> 'joined_kv').
          result = ' * '.join(names)
          return self.reshaped_kernel_axis_name_map.get(result, result)

        kernel_axis_names = (
            _reshaped_axis_names(kernel_axis_names[:len(axis)]),
            _reshaped_axis_names(kernel_axis_names[len(axis):]),
        )
    kernel = partitioning.param_with_axes(
        'kernel',
        self.kernel_init,
        kernel_param_shape,
        jnp.float32,
        axes=tuple(kernel_axis_names))
    # Cast to the compute dtype and restore the logical (unfused) shape.
    kernel = jnp.asarray(kernel, self.dtype)
    kernel = jnp.reshape(kernel, kernel_shape)
    # Contract the selected input axes against the kernel's leading dims.
    contract_ind = tuple(range(0, len(axis)))
    out = lax.dot_general(
        inputs,
        kernel, ((axis, contract_ind), ((), ())),
        precision=self.precision)
    if self.use_bias:
      # Bias is stored flattened and sharded like the fused feature axis.
      bias = partitioning.param_with_axes(
          'bias',
          self.bias_init, (np.prod(features),),
          jnp.float32,
          axes=(kernel_axis_names[-1],))
      bias = jnp.asarray(bias, self.dtype)
      bias = jnp.reshape(bias, features)
      # Reshape bias for broadcast.
      expand_dims = sorted(set(range(inputs.ndim)) - set(axis))
      for ax in expand_dims:
        bias = jnp.expand_dims(bias, ax)
      out = out + bias
    return out
def _convert_to_activation_function(
    fn_or_string: Union[str, Callable]) -> Callable:
  """Converts a name or callable into an activation function.

  The string 'linear' maps to the identity; any other string is looked up as
  an attribute of `flax.linen` (e.g. 'relu' -> `nn.relu`); callables are
  returned unchanged.
  """
  if fn_or_string == 'linear':
    return lambda x: x
  if isinstance(fn_or_string, str):
    return getattr(nn, fn_or_string)
  if callable(fn_or_string):
    return fn_or_string
  raise ValueError("don't know how to convert %s to an activation function" %
                   (fn_or_string,))
class MlpBlock(nn.Module):
"""Transformer MLP / feed-forward block.
Attributes:
use_bias: Whether to use bias in the dense layers.
intermediate_dim: Shared dimension of hidden layers.
activations: Type of activations for each layer. Each element is either
'linear', a string function name in flax.linen, or a function.
kernel_init: Kernel function, passed to the dense layers.
wi_fused_kernel_init: Optional wi_fused kernel function, passed to the
dense layers. If None, then kernel_init will be passed instead.
bias_init: Bias initializer.
enable_dropout: Enables non-deterministic dropout when set to True.
intermediate_dropout_rate: Dropout rate used after the intermediate layers.
final_dropout_rate: Dropout rate used after the final layer.
intermediate_dropout: Optional Dropout layer used after the intermediate
layers.
final_dropout: Optional Dropout layer used after the final layer.
dtype: Type for the dense layer.
out_dim: Final dimension of the output. If not set, it will be the same as
the input dimension.
intermediate_conv: Optional module applied to the first factor of the
intermediate layer, after activation.
precomputed_intermediates: whether we're using outside W_i and W_o
computations, merely using this layer for intermediate computations.
fuse_kernels: whether to fuse the kernels for gated activation.
input_axis_name: Axis name for input activations.
activations_axis_name: Axis name for intermediate activations.
intermediate_axis_name: Axis name for output activations.
data_sharding_constraints: Sharding constraint for data. If unspecified
(default), sharding constraints are inferred from the data shape; see
_get_logical_axes().
activation_partitioning_dims: Activation partition for the intermediate
activations.
use_aqt: Whether to use aqt quantization.
weight_params: Parameters for weight quantization.
act_params: Parameters for activation quantization.
"""
use_bias: bool
intermediate_dim: int = 2048
activations: Sequence[Union[str, Callable]] = ('relu',)
kernel_init: Callable = nn.initializers.xavier_uniform()
wi_fused_kernel_init: Optional[Callable] = None
bias_init: Callable = nn.initializers.normal(stddev=1e-6)
intermediate_dropout_rate: float = 0.1
final_dropout_rate: float = 0.1
intermediate_dropout: Optional[nn.Module] = None
final_dropout: Optional[nn.Module] = None
dtype: Any = jnp.float32
out_dim: Optional[int] = None
intermediate_conv: Optional[nn.Module] = None
precomputed_intermediates: bool = False
fuse_kernels: bool = False
input_axis_name: str = 'embed'
activations_axis_name: str = 'mlp_activations'
intermediate_axis_name: str = 'mlp'
output_axis_name: str = 'embed'
data_sharding_constraints: Optional[Tuple[str, ...]] = None
activation_partitioning_dims: Optional[int] = 2
use_aqt: Optional[bool] = False
weight_params: Optional[aqt.QuantOps.WeightParams] = None
act_params: Optional[aqt.QuantOps.ActHParams] = None
possibly_use_quantized_vars: bool = False
dense_general_factory: Callable[..., nn.Module] = DenseGeneral
  @nn.compact
  def __call__(self,
               inputs,
               decode: bool = False,
               prefill: bool = False,
               prefill_lengths: Optional[Array] = None,
               *,
               enable_dropout: bool = True):
    """Applies Transformer MlpBlock module.

    Args:
      inputs: Input activations. When `precomputed_intermediates` is True,
        this is instead an iterable of precomputed intermediate activations
        (one per entry in `self.activations`) to be combined and projected.
      decode: Whether we are in autoregressive decode mode; forwarded to
        `intermediate_conv` only.
      prefill: Whether we are in prefill mode; forwarded to
        `intermediate_conv` only.
      prefill_lengths: Per-example prefill lengths; forwarded to
        `intermediate_conv` only.
      enable_dropout: Enables dropout when True; when False, dropout layers
        run deterministically.

    Returns:
      The transformed output activations. When `precomputed_intermediates`
      is True, the combined intermediate activations are returned without
      the final 'wo' projection (that fusion happens outside this module).
    """
    wi_fused_kernel_init = (
        self.wi_fused_kernel_init
        if self.wi_fused_kernel_init is not None else self.kernel_init)

    actual_out_dim = (
        inputs.shape[-1] if self.out_dim is None else self.out_dim)

    # Local helper: one dense projection, either AQT-quantized or via the
    # configured `dense_general_factory`.
    def dense(features, name, inputs, kernel_axis_names):
      if self.use_aqt:
        if self.weight_params is None and self.act_params is None:
          raise ValueError(
              'If use_aqt is True, either of weights or acts quantization need '
              'to be specified using arguments `weight_params` or `act_params`.'
          )
        # TODO: Push the "quantized vs not" decision down into the
        # AQT library. Currently we make that decision here, because the AQT
        # library doesn't support DenseGeneral, so there's extra reshapes here
        # whose performance impact I don't know.
        aqt_context = aqt_config.DynamicContext(
            update_bounds=False, collect_acts_stats=False)
        weight_prec = self.weight_params.prec if self.weight_params else None
        half_shift = self.weight_params.half_shift if self.weight_params else False
        aqt_hparams = aqt_flax_layers.DenseAqt.HParams(
            weight_prec=weight_prec,
            weight_half_shift=half_shift,
            quant_act=self.act_params,  # currently supports fixed bounds only.
            quant_type=aqt.QuantType.AQT,
            weight_quant_granularity=aqt_config.QuantGranularity.PER_CHANNEL,
        )
        # DenseAqt only supports rank-2 inputs, so flatten (batch, seq) into
        # one leading dimension and restore it afterwards.
        batch, seq_len, channels = inputs.shape
        inputs = inputs.reshape((batch * seq_len, channels))
        result = aqt_flax_layers.DenseAqt(
            features=features,
            hparams=aqt_hparams,
            train=enable_dropout,
            dynamic_context=aqt_context,
            paxis_name=None,
            # No "cross-replica" reduction expressed in the XLA graph at this
            # stage. Will be imposed later, automatically, by XLA SPMD.
            use_bias=self.use_bias,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init,
            dtype=self.dtype,
            kernel_axis_names=kernel_axis_names,
            name=name,
            possibly_use_quantized_vars=self.possibly_use_quantized_vars,
        )(inputs, padding_mask=None)
        return result.reshape((batch, seq_len, features))
      else:
        return self.dense_general_factory(
            features=features,
            use_bias=self.use_bias,
            dtype=self.dtype,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init,
            kernel_axis_names=kernel_axis_names,
            name=name,
        )(inputs)

    # Iterate over specified MLP input activation functions.
    # e.g. ('relu',) or ('gelu', 'linear') for gated-gelu.
    activations = []
    # TODO: don't bother w/ fusion if only a single input matrix?
    if not self.fuse_kernels:
      if self.precomputed_intermediates:
        # Intermediates were computed elsewhere; only apply the activation
        # functions (and optional conv) here.
        for idx, (inpt, act_fn) in enumerate(zip(inputs, self.activations)):
          x = _convert_to_activation_function(act_fn)(inpt)
          if idx == 0 and self.intermediate_conv is not None:
            x = self.intermediate_conv(  # pylint: disable=not-callable
                x,
                decode=decode,
                prefill=prefill,
                prefill_lengths=prefill_lengths)
          activations.append(x)
      else:
        # One 'wi' projection per activation function ('wi' when there is
        # only one, 'wi_0', 'wi_1', ... otherwise).
        for idx, act_fn in enumerate(self.activations):
          dense_name = 'wi' if len(self.activations) == 1 else f'wi_{idx}'
          x = dense(self.intermediate_dim, dense_name, inputs,
                    (self.input_axis_name, self.intermediate_axis_name))
          x = _convert_to_activation_function(act_fn)(x)
          if idx == 0 and self.intermediate_conv is not None:
            x = self.intermediate_conv(  # pylint: disable=not-callable
                x,
                decode=decode,
                prefill=prefill,
                prefill_lengths=prefill_lengths)
          activations.append(x)
    else:
      if self.weight_params is not None or self.act_params is not None:
        # TODO: need to make quantization work with fused kernels.
        # NOTE(review): the comma here passes two separate args to
        # NotImplementedError; string concatenation was likely intended.
        raise NotImplementedError('Quantization is not supported yet for ',
                                  'fused kernels.')
      if self.precomputed_intermediates:
        if self.out_dim is None:
          raise ValueError('Must specify mlp out_dim when using precomputed '
                           'intermediates.')
        xs = inputs
      else:
        # Single fused 'wi' kernel with an extra activations axis; sliced
        # per-activation below.
        xs = self.dense_general_factory(
            features=(len(self.activations), self.intermediate_dim),
            use_bias=self.use_bias,
            dtype=self.dtype,
            kernel_init=wi_fused_kernel_init,
            bias_init=self.bias_init,
            reshape_kernel=False,
            kernel_axis_names=(
                self.input_axis_name,
                self.activations_axis_name,
                self.intermediate_axis_name,
            ),
            name='wi_fused',
        )(inputs)
      for idx, act_fn in enumerate(self.activations):
        x = jnp.squeeze(lax.dynamic_slice_in_dim(xs, idx, 1, -2), -2)
        x = _convert_to_activation_function(act_fn)(x)
        if idx == 0 and self.intermediate_conv is not None:
          x = self.intermediate_conv(  # pylint: disable=not-callable
              x,
              decode=decode,
              prefill=prefill,
              prefill_lengths=prefill_lengths)
        activations.append(x)

    # Take elementwise product of above intermediate activations.
    x = functools.reduce(operator.mul, activations)

    # Apply dropout and final dense output projection.
    # TODO: Change the `None` branch to not applying dropout
    # instead of fallback to default dropout.
    if self.intermediate_dropout:
      x = self.intermediate_dropout(x, deterministic=not enable_dropout)  # pylint: disable=not-callable
    else:
      x = nn.Dropout(
          rate=self.intermediate_dropout_rate, broadcast_dims=(-2,))(
              x, deterministic=not enable_dropout)  # Broadcast along length.

    # Note: We don't use `activation_partitioning.with_sharding_migration` here
    # because we do often want this 2D sharded. However, if rules are valid,
    # they should result in 2D sharding. We don't need to raise errors if both
    # result in 2D sharding (which with_sharding_migration does).
    if partitioning.get_axis_rules():
      logical_axis_resources = (
          self.data_sharding_constraints or _get_logical_axes(x))
      x = partitioning.with_sharding_constraint(
          x, logical_axis_resources=logical_axis_resources)
    else:
      x = activation_partitioning.with_sharding(
          x, self.activation_partitioning_dims)

    if self.precomputed_intermediates:
      # we fuse W_out and attention 'O' matrix outside.
      output = x
    else:
      output = dense(actual_out_dim, 'wo', x,
                     (self.intermediate_axis_name, self.output_axis_name))

    # TODO: Change the `None` branch to not applying dropout
    # instead of fallback to default dropout.
    if self.final_dropout:
      output = self.final_dropout(output, deterministic=not enable_dropout)  # pylint: disable=not-callable
    else:
      output = nn.Dropout(
          rate=self.final_dropout_rate, broadcast_dims=(-2,))(
              output, deterministic=not enable_dropout)
    return output
def _get_logical_axes(x: Array) -> Tuple[str, ...]:
  """Returns array-shape-dependent logical axis resources."""
  # Logical sharding axis names keyed by array rank.
  resources_by_rank = {
      2: ('length', 'mlp'),
      3: ('batch', 'length', 'mlp'),
      4: ('batch', 'length', 'heads', 'mlp_per_head'),
  }
  if x.ndim not in resources_by_rank:
    raise ValueError(
        f'Unexpected array shape. Cannot partition array of shape {x.shape}')
  return resources_by_rank[x.ndim]
| 17,908 | 39.981693 | 109 | py |
flaxformer | flaxformer-main/flaxformer/components/layer_norm_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for layer_norm."""
from absl.testing import absltest
import chex
from flax import linen as nn
from flax.core import unfreeze
from jax import numpy as jnp
from jax import random
from flaxformer.components import layer_norm
class T5LayerNormTest(absltest.TestCase):
  """Unit tests for layer_norm.T5LayerNorm."""

  def test_layer_norm(self):
    """Output keeps the input shape and honors the requested dtype."""
    root_key = random.PRNGKey(0)
    data_key, init_key_f32, init_key_i32 = random.split(root_key, 3)
    inputs = random.normal(data_key, (2, 3, 4))

    def build(dtype):
      return layer_norm.T5LayerNorm(dtype=dtype)

    outputs, _ = build(jnp.float32).init_with_output(init_key_f32, inputs)
    self.assertEqual(inputs.shape, outputs.shape)
    self.assertEqual(outputs.dtype, jnp.float32)

    outputs, _ = build(jnp.int32).init_with_output(init_key_i32, inputs)
    self.assertEqual(outputs.dtype, jnp.int32)

  def test_default_axis_name(self):
    """The scale param is per-feature and annotated with the 'embed' axis."""
    layer = layer_norm.T5LayerNorm()
    variables = layer.init(
        random.PRNGKey(0), jnp.zeros([2, 3, 4], dtype=jnp.float32))
    chex.assert_trees_all_equal_shapes(
        unfreeze(variables["params"]),
        {"scale": jnp.zeros([4])},
    )
    chex.assert_trees_all_equal(
        unfreeze(variables["params_axes"]),
        {"scale_axes": nn.partitioning.AxisMetadata(names=("embed",))},
    )
# Run the tests via absl's test runner when executed as a script.
if __name__ == "__main__":
  absltest.main()
| 1,843 | 29.733333 | 74 | py |
flaxformer | flaxformer-main/flaxformer/components/rich_attention_position_scores.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines an API for "rich attention" mechanisms.
These require the entire input vector.
"""
import abc
from typing import Any, Callable
from flax import linen as nn
from flax.linen import initializers
from flax.linen import partitioning
from jax import lax
import jax.numpy as jnp
import numpy as np
from flaxformer.components import dense
from flaxformer.types import Array
class RichAttentionApi(metaclass=abc.ABCMeta):
  """Interface for relative attention APIs that need the entire input vector."""

  @abc.abstractmethod
  def __call__(self,
               q_inputs: Array,
               k_inputs: Array,
               bidirectional: bool = True,
               is_cross_attention: bool = False) -> Array:
    """Computes position-aware attention scores from raw inputs.

    Unlike bias-only relative position APIs, implementations receive the
    full input activations (not the projected queries/keys).

    Args:
      q_inputs: Inputs used on the query side of attention.
      k_inputs: Inputs used on the key side of attention.
      bidirectional: Whether attention is bidirectional (as opposed to
        causal/left-to-right).
      is_cross_attention: Whether queries and keys come from different
        sequences (e.g. encoder-decoder attention).

    Returns:
      An array of attention scores/biases; the exact shape is defined by
      each implementation.
    """
    raise NotImplementedError()
| 1,331 | 27.956522 | 80 | py |
flaxformer | flaxformer-main/flaxformer/components/dense_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dense modules."""
import functools
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from aqt.jax_legacy.jax import quantization as aqt
import flax
from flax import linen as nn
from flax.linen import partitioning
import jax
from jax import dtypes
from jax import random
from jax.nn import initializers
import jax.numpy as jnp
import numpy as np
from flaxformer import sharding
from flaxformer import testing_utils
from flaxformer.components import dense
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
def assert_same_tree(a, b):
  """Asserts that pytrees `a` and `b` are leaf-wise allclose.

  Uses atol=1e-6 and rtol=1e-6 for every leaf comparison.

  Args:
    a: First pytree of array-likes.
    b: Second pytree; must have the same structure as `a`.

  Raises:
    AssertionError: If any pair of corresponding leaves differs beyond the
      tolerances, or if the tree structures do not match.
  """
  # `jax.tree_map` was deprecated in JAX 0.4.25 and removed in JAX 0.6;
  # `jax.tree_util.tree_map` is the canonical, identical function.
  jax.tree_util.tree_map(
      functools.partial(np.testing.assert_allclose, atol=1e-6, rtol=1e-6), a, b)
class DenseTest(parameterized.TestCase):
  """Tests for dense.DenseGeneral and dense.MlpBlock.

  Several tests assert golden numeric values produced from a fixed PRNG key;
  those values are implementation- and seed-specific.
  """

  def _mock_initializer(self, key, shape, dtype=jnp.float_, val=1.0):  # pylint: disable=unused-argument
    """Constant-valued stand-in for a kernel initializer."""
    return jnp.ones(shape, dtypes.canonicalize_dtype(dtype)) * val

  def test_dense_general_no_bias(self):
    """With an all-ones kernel and no bias, output sums the input features."""
    rng = random.PRNGKey(0)
    x = jnp.ones((1, 3))
    model = dense.DenseGeneral(
        features=4,
        use_bias=False,
        kernel_init=initializers.ones,
    )
    y, _ = model.init_with_output(rng, x)
    self.assertEqual(y.shape, (1, 4))
    np.testing.assert_allclose(y, np.full((1, 4), 3.))

  def test_dense_general_with_bias(self):
    """An all-ones bias adds one to each output element."""
    rng = random.PRNGKey(0)
    x = jnp.ones((1, 3))
    model = dense.DenseGeneral(
        features=4,
        use_bias=True,
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
    )
    y, _ = model.init_with_output(rng, x)
    self.assertEqual(y.shape, (1, 4))
    np.testing.assert_allclose(y, np.full((1, 4), 4.))

  def test_dense_general_two_features(self):
    """Tuple `features` produces multiple output dims with merged axis names."""
    rng = random.PRNGKey(0)
    x = jnp.ones((1, 3))
    model = dense.DenseGeneral(
        features=(2, 2),
        use_bias=False,
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
        kernel_axis_names=('a', 'b', 'c'),
    )
    y, variables = model.init_with_output(rng, x)
    # We transform the last input dimension to two output dimensions (2, 2).
    np.testing.assert_allclose(y, np.full((1, 2, 2), 3.))

    # The output sharding dimensions have been collapsed.
    sharding.check_params_and_axis_names_match(variables)
    self.assertEqual(variables['params_axes']['kernel_axes'],
                     sharding.axis_names('a', 'b * c'))

  def test_dense_general_two_axes(self):
    """Tuple `axis` contracts multiple input dims with merged axis names."""
    rng = random.PRNGKey(0)
    x = jnp.ones((1, 2, 2))
    model = dense.DenseGeneral(
        features=3,
        use_bias=False,
        axis=(-2, 2),  # Note: this is the same as (1, 2).
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
        kernel_axis_names=('a', 'b', 'c'),
    )
    y, variables = model.init_with_output(rng, x)
    # We transform the last two input dimensions (2, 2) to one output dimension.
    np.testing.assert_allclose(y, np.full((1, 3), 4.))

    # The input sharding dimensions have been collapsed.
    sharding.check_params_and_axis_names_match(variables)
    self.assertEqual(variables['params_axes']['kernel_axes'],
                     sharding.axis_names('a * b', 'c'))

  def test_mlp_same_out_dim(self):
    """MlpBlock defaults `out_dim` to the input feature size (golden values)."""
    module = dense.MlpBlock(
        use_bias=False,
        intermediate_dim=4,
        activations=('relu',),
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        dtype=jnp.float32,
    )
    inputs = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32)
    params = module.init(random.PRNGKey(0), inputs, enable_dropout=False)
    assert_same_tree(
        flax.core.unfreeze(params['params']),
        {
            'wi': {
                'kernel': [
                    [
                        -0.2650487422943115,
                        -0.9350943565368652,
                        -0.09850478172302246,
                        -0.3685007095336914,
                    ],
                    [
                        0.4673573970794678,
                        0.058478593826293945,
                        -0.5871121883392334,
                        -0.7413773536682129,
                    ],
                ],
            },
            'wo': {
                'kernel': [
                    [-0.7278401851654053, 0.6603918075561523],
                    [-0.4713869094848633, -0.37511157989501953],
                    [-0.15709185600280762, 0.7399897575378418],
                    [-0.7014286518096924, -0.2968623638153076],
                ],
            },
        },
    )

    self.assertDictEqual(
        flax.core.unfreeze(params['params_axes']),
        {
            'wi': {
                'kernel_axes': partitioning.AxisMetadata(names=('embed', 'mlp'))
            },
            'wo': {
                'kernel_axes': partitioning.AxisMetadata(names=('mlp', 'embed'))
            },
        },
    )
    result = module.apply(params, inputs, enable_dropout=False)
    np.testing.assert_allclose(
        result.tolist(),
        [[[-0.14724837243556976, 0.13360297679901123],
          [-0.14724837243556976, 0.13360297679901123],
          [-0.4874098598957062, 0.44224196672439575]],
         [[-0.2944967448711395, 0.26720595359802246], [0.0, 0.0],
          [-0.2944967448711395, 0.26720595359802246]]],
        rtol=1e-6,
    )

  def test_mlp_different_out_dim(self):
    """Explicit `out_dim` changes the final projection width (golden values)."""
    module = dense.MlpBlock(
        use_bias=False,
        intermediate_dim=4,
        activations=('relu',),
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        dtype=jnp.float32,
        out_dim=3,
    )
    inputs = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32)
    variables = module.init(
        random.PRNGKey(0),
        inputs,
        enable_dropout=False,
        mutable=['params', 'params_axes'])
    assert_same_tree(
        flax.core.unfreeze(variables['params']),
        {
            'wi': {
                'kernel': [
                    [
                        -0.2650487422943115,
                        -0.9350943565368652,
                        -0.09850478172302246,
                        -0.3685007095336914,
                    ],
                    [
                        0.4673573970794678,
                        0.058478593826293945,
                        -0.5871121883392334,
                        -0.7413773536682129,
                    ],
                ],
            },
            'wo': {
                'kernel': [
                    [
                        0.549019455909729,
                        -0.7615442276000977,
                        0.2908056378364563,
                    ],
                    [
                        0.8247717618942261,
                        -0.37039434909820557,
                        0.14754922688007355,
                    ],
                    [
                        -0.4929429590702057,
                        0.34858351945877075,
                        -0.27896377444267273,
                    ],
                    [
                        -0.5565190315246582,
                        -0.8740609288215637,
                        0.6347796320915222,
                    ],
                ],
            },
        },
    )
    self.assertDictEqual(
        flax.core.unfreeze(variables['params_axes']),
        {
            'wi': {
                'kernel_axes': partitioning.AxisMetadata(names=('embed', 'mlp'))
            },
            'wo': {
                'kernel_axes': partitioning.AxisMetadata(names=('mlp', 'embed'))
            },
        },
    )
    result = module.apply(variables, inputs, enable_dropout=False)
    np.testing.assert_allclose(
        result.tolist(),
        [[[0.1110713854432106, -0.1540669947862625, 0.05883249640464783],
          [0.1110713854432106, -0.1540669947862625, 0.05883249640464783],
          [0.36765968799591064, -0.509980320930481, 0.19474266469478607]],
         [[0.2221427708864212, -0.308133989572525, 0.11766499280929565],
          [0.0, 0.0, 0.0],
          [0.2221427708864212, -0.308133989572525, 0.11766499280929565]]],
        rtol=1e-6,
    )

  def test_mlp_input_shapes(self):
    """2D (no batch) and 3D inputs produce consistent results."""
    module = dense.MlpBlock(
        use_bias=False,
        intermediate_dim=4,
        activations=('relu',),
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        dtype=jnp.float32,
    )
    axis_rules = [('batch', 'data'), ('embed', None), ('length', None),
                  ('mlp', 'model')]

    # 2D inputs.
    inputs = np.array(
        [
            [1, 2, 3],  # Batch 1.
            [4, 5, 6],  # Batch 2.
        ],
        dtype=np.float32)
    with mock.patch(
        'flax.linen.partitioning._AxisRules.rules',
        new_callable=mock.PropertyMock,
        return_value=axis_rules):
      result, _ = module.init_with_output(
          random.PRNGKey(0), inputs, enable_dropout=False)
    expected_result = [[
        1.1578339338302612, -2.476144552230835, 1.1046674251556396
    ], [2.4860682487487793, -5.988793849945068, 2.46048641204834]]
    np.testing.assert_allclose(
        result.tolist(),
        expected_result,
        rtol=1e-6,
    )

    # 3D inputs
    inputs_with_batch_dim = inputs[np.newaxis, ...]
    with mock.patch(
        'flax.linen.partitioning._AxisRules.rules',
        new_callable=mock.PropertyMock,
        return_value=axis_rules):
      batch_result, _ = module.init_with_output(
          random.PRNGKey(0), inputs_with_batch_dim, enable_dropout=False)
    np.testing.assert_allclose(batch_result, result[np.newaxis, ...])

  def test_user_defined_data_sharding_constraints(self):
    """Custom `data_sharding_constraints` are honored during apply."""
    customized_module = dense.MlpBlock(
        use_bias=False,
        intermediate_dim=4,
        activations=('relu',),
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        dtype=jnp.float32,
        data_sharding_constraints=('my_constraint', 'embed'))
    axis_rules = [('embed', None), ('my_constraint', 'model')]

    inputs = np.array(
        [
            [1, 2, 3],  # Batch 1.
            [4, 5, 6],  # Batch 2.
        ],
        dtype=np.float32)
    with mock.patch(
        'flax.linen.partitioning._AxisRules.rules',
        new_callable=mock.PropertyMock,
        return_value=axis_rules):
      result, _ = customized_module.init_with_output(
          random.PRNGKey(0), inputs, enable_dropout=False)

    expected_result = [[
        1.1578339338302612, -2.476144552230835, 1.1046674251556396
    ], [2.4860682487487793, -5.988793849945068, 2.46048641204834]]
    np.testing.assert_allclose(
        result.tolist(),
        expected_result,
        rtol=1e-6,
    )

  def test_quantization_no_params_specified(self):
    """use_aqt=True without weight/act params raises a ValueError."""
    module = dense.MlpBlock(
        use_bias=False,
        intermediate_dim=4,
        activations=('relu',),
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        dtype=jnp.float32,
        use_aqt=True,
    )
    inputs = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32)
    with self.assertRaisesRegex(
        ValueError,
        'If use_aqt is True, either of weights or acts quantization'):
      module.init(random.PRNGKey(0), inputs, enable_dropout=False)

  def test_mlp_materialized_weights(self):
    """AQT with materialized quantized vars creates int8 qkernels + qscales."""
    weight_params = aqt.QuantOps.WeightParams(
        prec=8, half_shift=False, axis=None)
    module = dense.MlpBlock(
        use_bias=False,
        intermediate_dim=4,
        activations=('relu',),
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        dtype=jnp.float32,
        use_aqt=True,
        weight_params=weight_params,
        possibly_use_quantized_vars=True,
    )
    # enable_dropout
    inputs = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32)
    result, variables = module.init_with_output(
        random.PRNGKey(0), inputs, enable_dropout=False)
    assert_same_tree(
        flax.core.unfreeze(variables['params']),
        {
            'wi': {
                'qkernel': [[0, 0, 0, 0], [0, 0, 0, 0]],
                'qscale': [[
                    2.818772e-07,
                    -9.838715e-07,
                    1.211104e-06,
                    2.669436e-07,
                ]],
            },
            'wo': {
                'qkernel': [[0, 0], [0, 0], [0, 0], [0, 0]],
                'qscale': [[-1.854524e-06, 1.883966e-06]],
            },
        },
    )

    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(variables['params'],
                                               variables['params_axes']),
        {
            'wi': {
                'qkernel': ['int8', 'embed=2', 'mlp=4'],
                'qscale': ['float32', 'embed_qscale=1', 'mlp=4']
            },
            'wo': {
                'qkernel': ['int8', 'mlp=4', 'embed=2'],
                'qscale': ['float32', 'mlp_qscale=1', 'embed=2']
            }
        })

    np.testing.assert_allclose(
        result.tolist(),
        [[[-0.0, -0.0], [-0.0, -0.0], [-0.0, -0.0]],
         [[-0.0, -0.0], [-0.0, -0.0], [-0.0, -0.0]]],
        rtol=1e-6,
    )

  def test_mlp_quantized_weights(self):
    """AQT weight quantization keeps float kernels but quantizes in apply."""
    weight_params = aqt.QuantOps.WeightParams(
        prec=8, half_shift=False, axis=None)
    module = dense.MlpBlock(
        use_bias=False,
        intermediate_dim=4,
        activations=('relu',),
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        dtype=jnp.float32,
        use_aqt=True,
        weight_params=weight_params,
    )
    inputs = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32)
    result, variables = module.init_with_output(
        random.PRNGKey(0), inputs, enable_dropout=False)
    assert_same_tree(
        flax.core.unfreeze(variables['params']),
        {
            'wi': {
                'kernel': [
                    [
                        -0.2650487422943115,
                        -0.9350943565368652,
                        -0.09850478172302246,
                        -0.3685007095336914,
                    ],
                    [
                        0.4673573970794678,
                        0.058478593826293945,
                        -0.5871121883392334,
                        -0.7413773536682129,
                    ],
                ],
            },
            'wo': {
                'kernel': [
                    [-0.7278401851654053, 0.6603918075561523],
                    [-0.4713869094848633, -0.37511157989501953],
                    [-0.15709185600280762, 0.7399897575378418],
                    [-0.7014286518096924, -0.2968623638153076],
                ],
            },
        },
    )

    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(variables['params'],
                                               variables['params_axes']),
        {
            'wi': {
                'kernel': ['float32', 'embed=2', 'mlp=4']
            },
            'wo': {
                'kernel': ['float32', 'mlp=4', 'embed=2']
            }
        })

    np.testing.assert_allclose(
        result.tolist(),
        [[[-0.14731408655643463, 0.1332627385854721],
          [-0.14731408655643463, 0.1332627385854721],
          [-0.48747575283050537, 0.4409784972667694]],
         [[-0.29462817311286926, 0.2665254771709442], [0.0, 0.0],
          [-0.29462817311286926, 0.2665254771709442]]],
        rtol=1e-6,
    )

  def test_fuse_kernels(self):
    """Fused and unfused wi kernels give identical outputs for equal weights."""
    x = np.random.randn(2, 3)
    fused = dense.MlpBlock(
        use_bias=False,
        intermediate_dim=4,
        fuse_kernels=True,
        activations=('gelu', 'linear'))

    # Check default axis names.
    variables = fused.init(
        random.PRNGKey(0),
        x,
        enable_dropout=False,
        mutable=['params', 'params_axes'])
    self.assertEqual(
        jax.tree_map(lambda a: a.tolist(), variables['params_axes']), {
            'wi_fused': {
                'kernel_axes':
                    nn.partitioning.AxisMetadata(
                        names=('embed', 'mlp_activations', 'mlp')),
            },
            'wo': {
                'kernel_axes':
                    nn.partitioning.AxisMetadata(names=('mlp', 'embed')),
            },
        })

    not_fused = dense.MlpBlock(
        use_bias=False,
        intermediate_dim=4,
        fuse_kernels=False,
        activations=('gelu', 'linear'))

    wi_0 = np.random.randn(3, 4)
    wi_1 = np.random.randn(3, 4)
    wo = np.random.randn(4, 3)

    params_not_fused = {
        'wi_0': {
            'kernel': wi_0
        },
        'wi_1': {
            'kernel': wi_1
        },
        'wo': {
            'kernel': wo
        }
    }
    params_fused = {
        'wi_fused': {
            'kernel':
                np.stack([wi_0, wi_1], axis=1)  # shape: [3, 2, 4]
        },
        'wo': {
            'kernel': wo
        }
    }

    y_fused = fused.apply({'params': params_fused}, x, enable_dropout=False)
    y_not_fused = not_fused.apply({'params': params_not_fused},
                                  x,
                                  enable_dropout=False)
    np.testing.assert_allclose(y_fused, y_not_fused, rtol=1e-5)

  @parameterized.named_parameters([
      ('fuse_kernel_set_wi_fused_init', True, True),
      ('fuse_kernel_no_set_wi_fused_init', True, False),
      ('no_fuse_kernel_no_set_wi_fused_init', False, False),
      ('no_fuse_kernel_set_wi_fused_init', False, True)
  ])
  def test_fuse_kernels_kernel_init(self, fuse_kernels, set_wi_fused_init):
    """wi_fused_kernel_init is used for fused kernels; ignored otherwise."""
    module = dense.MlpBlock(
        use_bias=False,
        intermediate_dim=4,
        fuse_kernels=fuse_kernels,
        activations=('relu', 'linear'),
        kernel_init=initializers.ones,
        wi_fused_kernel_init=(functools.partial(
            self._mock_initializer, val=2.0) if set_wi_fused_init else None),
        bias_init=initializers.zeros,
        dtype=jnp.float32,
    )
    inputs = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32)
    params = module.init(random.PRNGKey(0), inputs, enable_dropout=False)

    # Construct expected params
    wi_0 = [[1., 1., 1., 1.], [1., 1., 1., 1.]]
    wi_1 = [[1., 1., 1., 1.], [1., 1., 1., 1.]]
    wo = [[1., 1.], [1., 1.], [1., 1.], [1., 1.]]
    if fuse_kernels:
      if set_wi_fused_init:
        wi_0 = [[2., 2., 2., 2.], [2., 2., 2., 2.]]
        wi_1 = [[2., 2., 2., 2.], [2., 2., 2., 2.]]
      expected_params = {
          'wi_fused': {
              'kernel': np.stack([wi_0, wi_1], axis=1).tolist()
          },
          'wo': {
              'kernel': wo
          }
      }
    else:
      expected_params = {
          'wi_0': {
              'kernel': wi_0
          },
          'wi_1': {
              'kernel': wi_1
          },
          'wo': {
              'kernel': wo
          }
      }

    self.assertDictEqual(
        jax.tree_map(
            lambda a: a.tolist(), flax.core.unfreeze(params['params'])
        ),
        expected_params,
    )
# Run the tests via absl's test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
| 20,540 | 30.552995 | 104 | py |
flaxformer | flaxformer-main/flaxformer/components/embedding_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flaxformer.embedding."""
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
from flax.core import pop
from flax.linen import partitioning as flax_partitioning
import jax
import numpy as np
from flaxformer.components import embedding
from flaxformer.components import initializers
class EmbedTest(parameterized.TestCase):
  """Tests for embedding.Embed."""

  def test_embedder_raises_exception_for_incorrect_input_type(self):
    """Tests that inputs are integers and that an exception is raised if not."""
    embed = embedding.Embed(num_embeddings=10, features=5)
    inputs = np.expand_dims(np.arange(5, dtype=np.int64), 1)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    bad_inputs = inputs.astype(np.float32)
    with self.assertRaisesRegex(
        ValueError, 'Input type must be an integer or unsigned integer.'):
      _ = embed.apply(variables, bad_inputs)

  @parameterized.named_parameters(
      {
          'testcase_name': 'with_ones',
          'init_fn': jax.nn.initializers.ones,
          'num_embeddings': 10,
          'features': 5,
          'matrix_sum': 5 * 10,
      }, {
          'testcase_name': 'with_zeros',
          'init_fn': jax.nn.initializers.zeros,
          'num_embeddings': 10,
          'features': 5,
          'matrix_sum': 0,
      })
  def test_embedding_initializes_correctly(self, init_fn, num_embeddings,
                                           features, matrix_sum):
    """Tests if the Embed class initializes with the requested initializer."""
    embed = embedding.Embed(
        num_embeddings=num_embeddings,
        features=features,
        embedding_init=init_fn)
    inputs = np.expand_dims(np.arange(5, dtype=np.int64), 1)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    embedding_matrix = variables['params']['embedding']
    self.assertEqual(int(np.sum(embedding_matrix)), matrix_sum)

  def test_embedding_matrix_shape(self):
    """Tests that the embedding matrix has the right shape."""
    num_embeddings = 10
    features = 5
    embed = embedding.Embed(num_embeddings=num_embeddings, features=features)
    inputs = np.expand_dims(np.arange(features, dtype=np.int64), 1)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    embedding_matrix = variables['params']['embedding']
    self.assertEqual((num_embeddings, features), embedding_matrix.shape)

  def test_embedding_attend(self):
    """Tests that attending with ones returns sum of embedding vectors."""
    features = 5
    embed = embedding.Embed(num_embeddings=10, features=features)
    inputs = np.array([[1]], dtype=np.int64)
    variables = embed.init(jax.random.PRNGKey(0), inputs)
    query = np.ones(features, dtype=np.float32)
    result = embed.apply(variables, query, method=embed.attend)
    expected = np.sum(variables['params']['embedding'], -1)
    np.testing.assert_array_almost_equal(result, expected)

  def test_embedding_axis_names(self):
    """Input axis names passed at call time are accepted under axis rules."""
    rules = [
        ('my_batch_dim', 'data'),
        ('embed', None),
        ('vocab', None),
    ]
    with flax_partitioning.axis_rules(rules):
      embed = embedding.Embed(num_embeddings=10, features=5)
      inputs = np.array([1], dtype=np.int64)
      embed.init(
          jax.random.PRNGKey(0), inputs, input_axis_names=('my_batch_dim',))

  def test_embedding_axis_names_ctor(self):
    """Input axis names passed to the constructor are accepted."""
    rules = [
        ('my_batch_dim', 'data'),
        ('embed', None),
        ('vocab', None),
    ]
    with flax_partitioning.axis_rules(rules):
      embed = embedding.Embed(
          num_embeddings=10, features=5, input_axis_names=('my_batch_dim',))
      inputs = np.array([1], dtype=np.int64)
      embed.init(jax.random.PRNGKey(0), inputs)

  def test_embedding_axis_names_call_overrides(self):
    """Call-time input axis names override the constructor's."""
    rules = [
        ('my_batch_dim', 'data'),
        ('embed', None),
        ('vocab', None),
    ]
    with flax_partitioning.axis_rules(rules):
      embed = embedding.Embed(
          num_embeddings=10, features=5, input_axis_names=('other_batch_dim',))
      inputs = np.array([1], dtype=np.int64)
      embed.init(
          jax.random.PRNGKey(0), inputs, input_axis_names=('my_batch_dim',))
class MultiEmbedTest(parameterized.TestCase):
def test_multi_embed_returns_correct_shape(self):
"""Tests that we can build a generic combined embedder."""
features = 5
embedders = {
'token_embed': nn.Embed(num_embeddings=10, features=features),
'segment_embed': nn.Embed(num_embeddings=2, features=features),
'position_embed': nn.Embed(num_embeddings=12, features=features)
}
model = embedding.MultiEmbed(embedders)
token_ids = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int64)
segment_ids = np.array([[0, 1, 1], [0, 0, 1]], dtype=np.int64)
position_ids = np.arange(3, dtype=np.int64)[None]
output, _ = model.init_with_output(
jax.random.PRNGKey(0),
token_embed=token_ids,
segment_embed=segment_ids,
position_embed=position_ids)
self.assertEqual(output.shape, token_ids.shape + (features,))
@parameterized.named_parameters(
{
'testcase_name': 'with_sum_method',
'method': embedding.EmbedCombineMethod.SUM,
'features': 7,
'expected_shape': (2, 3, 7),
}, {
'testcase_name': 'with_concat_method',
'method': embedding.EmbedCombineMethod.CONCAT,
'features': 7,
'expected_shape': (2, 3, 14),
})
def test_multi_embed_combines_embeddings_correctly(self, method, features,
expected_shape):
"""Tests that embeddings are correctly summed or concatenated."""
embedders = {
'token_embed': nn.Embed(num_embeddings=10, features=features),
'segment_embed': nn.Embed(num_embeddings=2, features=features)
}
model = embedding.MultiEmbed(embedders)
token_ids = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int64)
segment_ids = np.array([[0, 1, 1], [0, 0, 1]], dtype=np.int64)
variables = model.init(
jax.random.PRNGKey(0), token_embed=token_ids, segment_embed=segment_ids)
embeddings = model.apply(
variables,
token_embed=token_ids,
segment_embed=segment_ids,
method=model.get_individual_embeddings)
output = model.apply(
variables,
embeddings,
combine_method=method,
method=model.combine_embeddings)
self.assertEqual(output.shape, expected_shape)
def test_multi_embed_combine_embeddings_raises_exception(self):
"""Tests that an exception is raised for an invalid combine method."""
model = embedding.MultiEmbed({
'token_embed': nn.Embed(num_embeddings=10, features=5),
})
token_ids = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int64)
variables = model.init(jax.random.PRNGKey(0), token_embed=token_ids)
embeddings = {'token_embed': np.ones([2, 3, 5], dtype=np.float32)}
with self.assertRaises(ValueError):
_ = model.apply(
variables,
embeddings,
combine_method='invalid_combine_method',
method=model.combine_embeddings)
def test_multi_embed_can_return_individual_embeddings(self):
"""Tests that MultiEmbed returns a dictionary with each embedded input."""
features = 5
embedders = {
'token_embed': nn.Embed(num_embeddings=10, features=features),
'segment_embed': nn.Embed(num_embeddings=2, features=features),
'position_embed': nn.Embed(num_embeddings=12, features=features)
}
model = embedding.MultiEmbed(embedders)
token_ids = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int64)
segment_ids = np.array([[0, 1, 1], [0, 0, 1]], dtype=np.int64)
position_ids = np.arange(3, dtype=np.int64)[None]
variables = model.init(
jax.random.PRNGKey(0),
token_embed=token_ids,
segment_embed=segment_ids,
position_embed=position_ids)
embeddings = model.apply(
variables,
token_embed=token_ids,
segment_embed=segment_ids,
position_embed=position_ids,
method=model.get_individual_embeddings)
self.assertIn('token_embed', embeddings)
self.assertIn('segment_embed', embeddings)
self.assertIn('position_embed', embeddings)
def test_multi_embed_returns_nonempty_gradient(self):
  """Checks that capture_gradients exposes a nonzero 'grads' collection."""
  dim = 5
  model = embedding.MultiEmbed(
      {
          'token_embed': nn.Embed(num_embeddings=10, features=dim),
          'segment_embed': nn.Embed(num_embeddings=2, features=dim),
          'position_embed': nn.Embed(num_embeddings=12, features=dim),
      },
      sow_intermediates=True,
      capture_gradients=True)
  tokens = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int64)
  segments = np.array([[0, 1, 1], [0, 0, 1]], dtype=np.int64)
  positions = np.arange(3, dtype=np.int64)[None]
  variables = model.init(
      jax.random.PRNGKey(0),
      token_embed=tokens,
      segment_embed=segments,
      position_embed=positions)
  # Initialization should already create the gradient-capture collection.
  self.assertContainsSubset(('grads',), variables)

  def fake_loss(v, tokens, segments, positions):
    """Scalar loss whose backward pass populates the 'grads' collection."""
    out, _ = model.apply(
        v,
        token_embed=tokens,
        segment_embed=segments,
        position_embed=positions,
        mutable=['grads'])
    return out.sum()

  grad_tree = jax.grad(fake_loss)(variables, tokens, segments, positions)
  captured = grad_tree['grads']
  self.assertContainsSubset(('output_grad',), captured)
  self.assertNotAlmostEqual(captured['output_grad'].sum(), 0.0)
class EmbeddingTest(parameterized.TestCase):
  """Tests for learned (PositionEmbed) and fixed (FixedEmbed) position embeddings."""

  def test_add_position_embs(self):
    """Tests that positional embeddings are correctly applied."""
    positions = np.array([[[0, 1, 2], [0, 1, 2]]], dtype=np.int32)
    position_embedder = embedding.PositionEmbed(
        num_embeddings=10,
        features=4,
        dtype=np.float32,
        embedding_init=jax.nn.initializers.ones)
    variables = position_embedder.init(jax.random.PRNGKey(0), inputs=positions)
    output_embeddings = position_embedder.apply(variables, inputs=positions)
    # With an all-ones embedding table, every gathered embedding is exactly 1.
    np.testing.assert_array_equal(output_embeddings, 1)

  def test_add_position_embs_decoder(self):
    """Tests that position embeddings are correctly applied in decoding mode."""
    positions = np.array([[[0, 1, 2], [0, 1, 2]]], dtype=np.int32)
    position_embedder = embedding.PositionEmbed(
        num_embeddings=10,
        features=3,
        dtype=np.float32,
        embedding_init=jax.nn.initializers.ones)
    variables = position_embedder.init(
        jax.random.PRNGKey(0), inputs=positions, decode=True)
    # Split the trainable 'params' collection from the remaining (cache) state.
    state, params = pop(variables, 'params')
    output_embeddings, state = position_embedder.apply(
        {
            'params': params,
            **state
        },
        inputs=positions,
        decode=True,
        mutable=['cache'])
    np.testing.assert_array_equal(output_embeddings, 1)
    # The first decode step advances the cached position index from 0 to 1.
    np.testing.assert_array_equal(state['cache']['position_embedder_index'], 1)
    # Test that repeated access increments the cache_index.
    output_embeddings, state = position_embedder.apply(
        {
            'params': params,
            **state
        },
        inputs=positions,
        decode=True,
        mutable=['cache'])
    np.testing.assert_array_equal(output_embeddings, 1)
    np.testing.assert_array_equal(state['cache']['position_embedder_index'], 2)

  def test_add_sinusoidal_position_embs(self):
    """Tests that sinusoidal positional embeddings are applied."""
    positions = np.array([[0, 1]])
    sinusoid_embedder = embedding.FixedEmbed(
        embedding_init=initializers.sinusoidal(),
        features=5,
        max_length=10,
        dtype=np.float32)
    variables = sinusoid_embedder.init(jax.random.PRNGKey(0), inputs=positions)
    output_embeddings = sinusoid_embedder.apply(variables, inputs=positions)
    # Position 0 is [sin(0), ..., cos(0), ..., pad]; position 1 mixes
    # sin/cos at the sinusoidal frequencies.
    expected_output_embeddings = np.array(
        [[[0.0, 0.0, 1.0, 1.0, 0.0], [0.84147, 0.0001, 0.540302, 1.0, 0.0]]],
        dtype=np.float32)
    np.testing.assert_array_almost_equal(output_embeddings,
                                         expected_output_embeddings)

  def test_regression_add_position_embeddings_sine(self):
    """Regression test pinning exact sinusoidal embedding values."""
    sequence_length = 7
    hidden_dim = 5
    positions = np.arange(sequence_length)[None, :]
    embed = embedding.FixedEmbed(
        embedding_init=initializers.sinusoidal(),
        features=hidden_dim,
        max_length=32,
        dtype=np.float32)
    outputs, params = embed.init_with_output(
        jax.random.PRNGKey(0), inputs=positions)
    # The fixed (non-learned) embedding has no trainable parameters.
    self.assertEqual(params, {})
    expected = np.array([
        [
            [0.0, 0.0, 1.0, 1.0, 0.0],
            [
                0.8414709568023682, 9.999999747378752e-05, 0.5403022766113281,
                1.0, 0.0
            ],
            [
                0.9092974066734314, 0.00019999999494757503, -0.416146844625473,
                1.0, 0.0
            ],
            [
                0.14112000167369843, 0.00029999998514540493,
                -0.9899924993515015, 0.9999999403953552, 0.0
            ],
            [
                -0.756802499294281, 0.00039999998989515007, -0.6536436080932617,
                0.9999999403953552, 0.0
            ],
            [
                -0.9589242935180664, 0.0004999999655410647, 0.28366219997406006,
                0.9999998807907104, 0.0
            ],
            [
                -0.279415488243103, 0.0005999999702908099, 0.9601702690124512,
                0.9999998211860657, 0.0
            ],
        ],
    ])
    np.testing.assert_allclose(outputs, expected, rtol=1e-5)

  def test_regression_add_position_embeddings_learned(self):
    """Regression test pinning learned position embedding init values."""
    sequence_length = 7
    hidden_dim = 5
    positions = np.arange(sequence_length)[None, :]
    embeds = embedding.PositionEmbed(
        num_embeddings=32,
        features=hidden_dim,
        embedding_init=jax.nn.initializers.normal(
            stddev=1e-6),  # Use learned embeds.
        dtype=np.float32)
    outputs, params = embeds.init_with_output(
        jax.random.PRNGKey(0), inputs=positions)
    param_shapes = jax.tree_map(lambda x: x.shape, params)
    self.assertEqual(param_shapes['params'], {
        'pos_embedding': (32, 5),
    })
    # Golden values produced by PRNGKey(0) with the normal(1e-6) initializer.
    np.testing.assert_allclose(
        outputs, [[[
            4.486782358981145e-07, -3.5457077274259063e-07,
            1.0727929122822388e-07, 1.3890753791656607e-07,
            -7.792502287884417e-07
        ],
                   [
                       -2.3853303332543874e-07, 8.467901011499634e-07,
                       -8.650059157844225e-08, 3.407756707929366e-07,
                       -2.226069852895307e-07
                   ],
                   [
                       -1.1156219414942825e-07, -6.10807489920262e-07,
                       1.5079198192324839e-06, 1.714607265057566e-07,
                       -1.9881858861481305e-06
                   ],
                   [
                       2.980690965159738e-07, 4.950079102172822e-08,
                       1.9940485174174682e-07, 7.134963198041078e-07,
                       1.5179145975707797e-06
                   ],
                   [
                       7.398467261054975e-08, -4.966278197571228e-07,
                       8.895806757891478e-08, -4.5914887891740364e-07,
                       1.1332289204801782e-06
                   ],
                   [
                       1.3519083950086497e-06, -1.0249741535517387e-06,
                       -1.1988712458332884e-06, -6.748282288526752e-08,
                       1.18359389489342e-06
                   ],
                   [
                       -1.6119436168082757e-06, 1.0756450308235799e-07,
                       -8.505542155035073e-07, 1.1778743100876454e-06,
                       -1.000783186100307e-06
                   ]]],
        rtol=1e-5)
class HashEmbedTest(parameterized.TestCase):
  """Checks HashEmbed parameter structure and output shapes."""

  def test_hash_embedder(self):
    """Checks the parameters and return value shapes."""
    num_tables = 2
    num_embeddings_per_table = 32
    features = 16
    embedder = embedding.HashEmbed(
        features=features,
        num_embeddings_per_table=num_embeddings_per_table,
        num_tables=num_tables)
    batch_size = 2
    seq_len = 8
    ids = np.ones([batch_size, seq_len], dtype=np.int32)
    outputs, variables = embedder.init_with_output(jax.random.PRNGKey(0), ids)
    self.assertSequenceEqual(outputs.shape, (batch_size, seq_len, features))
    # Each table holds features // num_tables of the output dimensions.
    param_shapes = jax.tree_map(lambda p: list(p.shape), variables['params'])
    table_shape = [num_embeddings_per_table, features // num_tables]
    self.assertSameStructure(
        param_shapes, {
            'hash_embedder_table_0': {'embedding': table_shape},
            'hash_embedder_table_1': {'embedding': table_shape},
        })

  def test_hash_embedder_4d(self):
    """Checks the parameters and return value shapes."""
    num_tables = 2
    num_embeddings_per_table = 32
    features = 16
    embedder = embedding.HashEmbed(
        features=features,
        num_embeddings_per_table=num_embeddings_per_table,
        num_tables=num_tables)
    batch_size = 2
    seq_len = 8
    another_dim = 4
    # Extra trailing id dimension: the feature axis is simply appended.
    ids = np.ones([batch_size, seq_len, another_dim], dtype=np.int32)
    outputs, variables = embedder.init_with_output(jax.random.PRNGKey(0), ids)
    self.assertSequenceEqual(outputs.shape,
                             (batch_size, seq_len, another_dim, features))
    param_shapes = jax.tree_map(lambda p: list(p.shape), variables['params'])
    table_shape = [num_embeddings_per_table, features // num_tables]
    self.assertSameStructure(
        param_shapes, {
            'hash_embedder_table_0': {'embedding': table_shape},
            'hash_embedder_table_1': {'embedding': table_shape},
        })
class NgramHashEmbedTest(parameterized.TestCase):
  """Shape and sequence-packing tests for NgramHashEmbed."""

  @parameterized.product(
      batch_sizes=[(2,), (2, 3)],
      use_segment_ids=[False, True],
  )
  def test_hash_embedder(self, batch_sizes, use_segment_ids):
    """Checks the parameters and return value shapes."""
    num_tables = 2
    per_table = 32
    features = 16
    embedder = embedding.NgramHashEmbed(
        ngram_orders=[1, 3, 4],
        padding_id=0,
        features=features,
        num_embeddings_per_table=per_table,
        num_tables=num_tables)
    seq_len = 8
    ids = np.ones([*batch_sizes, seq_len], dtype=np.int32)
    if use_segment_ids:
      segment_ids = np.tile([[0] * 5 + [1] * 3], (*batch_sizes, 1))
    else:
      segment_ids = None
    outputs, variables = embedder.init_with_output(
        jax.random.PRNGKey(0), ids, segment_ids=segment_ids)
    self.assertSequenceEqual(outputs.shape, (*batch_sizes, seq_len, features))
    param_shapes = jax.tree_map(lambda p: list(p.shape), variables['params'])
    table_shape = [per_table, features // num_tables]
    # One table pair per configured ngram order.
    expected = {
        f'{order}gram_hash_embed_table_{table}': {'embedding': table_shape}
        for order in (1, 3, 4)
        for table in (0, 1)
    }
    self.assertSameStructure(param_shapes, expected)

  @parameterized.product(
      batch_sizes=[(2,), (2, 3)],)
  def test_packing_correctness(self, batch_sizes):
    """Verifies changing one packed segment does not affect the other."""
    embedder = embedding.NgramHashEmbed(
        ngram_orders=[1, 3, 4],
        padding_id=0,
        features=16,
        num_embeddings_per_table=32,
        num_tables=2)
    seq_len = 8
    segment_ids = np.tile([[0] * 5 + [1] * 3], (*batch_sizes, 1))
    first_ids = np.ones([*batch_sizes, seq_len], dtype=np.int32)
    first_out, variables = embedder.init_with_output(
        jax.random.PRNGKey(0), first_ids, segment_ids=segment_ids)
    # Change only the IDs inside segment 1; segment 0 IDs stay the same.
    second_ids = first_ids + segment_ids
    second_out = embedder.apply(variables, second_ids, segment_ids=segment_ids)
    # Outputs at the untouched segment-0 positions must be unchanged.
    np.testing.assert_allclose(first_out[..., :5, :], second_out[..., :5, :])
class RotaryTest(absltest.TestCase):
  """Shape checks for embedding.apply_rotary_embedding."""

  def test_rotary_embedding(self):
    """Multi-head keys: rotated outputs keep the input shapes."""
    batch, maxlen = 2, 8
    qlen, qheads = 3, 4
    klen, kheads = 6, 7
    dim = 2 * 5  # Rotary embeddings require an even feature dimension.
    queries = np.ones((batch, qlen, qheads, dim))
    keys = np.ones((batch, klen, kheads, dim))
    cos_table = np.ones((maxlen, dim))
    sin_table = np.ones((maxlen, dim))
    out_q, out_k = embedding.apply_rotary_embedding(
        queries, keys, cos_table, sin_table)
    self.assertEqual(out_q.shape, queries.shape)
    self.assertEqual(out_k.shape, keys.shape)

  def test_rotary_embedding_multiquery(self):
    """Multi-query (3-D) keys: rotated outputs keep the input shapes."""
    batch, maxlen = 2, 8
    qlen, qheads = 3, 4
    klen = 6
    dim = 2 * 5
    queries = np.ones((batch, qlen, qheads, dim))
    keys = np.ones((batch, klen, dim))
    cos_table = np.ones((maxlen, dim))
    sin_table = np.ones((maxlen, dim))
    out_q, out_k = embedding.apply_rotary_embedding(
        queries, keys, cos_table, sin_table)
    self.assertEqual(out_q.shape, queries.shape)
    self.assertEqual(out_k.shape, keys.shape)

  def test_rotary_embedding_decode(self):
    """Decode mode with a rotary_index: outputs keep the input shapes."""
    batch, maxlen = 2, 8
    qlen, qheads = 1, 4  # Decoding processes a single query position.
    klen = 6
    dim = 2 * 5
    queries = np.ones((batch, qlen, qheads, dim))
    keys = np.ones((batch, klen, dim))
    cos_table = np.ones((maxlen, dim))
    sin_table = np.ones((maxlen, dim))
    rotary_index = np.ones((batch,), dtype=np.int32)
    out_q, out_k = embedding.apply_rotary_embedding(
        queries, keys, cos_table, sin_table, decode=True,
        rotary_index=rotary_index)
    self.assertEqual(out_q.shape, queries.shape)
    self.assertEqual(out_k.shape, keys.shape)
if __name__ == '__main__':
absltest.main()
| 23,262 | 35.634646 | 80 | py |
flaxformer | flaxformer-main/flaxformer/components/initializers_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flaxformer.initializers."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import numpy as jnp
import numpy as np
from flaxformer.components import initializers
class InitializersTest(parameterized.TestCase):
def test_sinusoidal_returns_correct_shape(self):
"""Tests that we get the expected shape, starting with a 1 dimension."""
max_len = 10
min_scale = 1.0
max_scale = 10000.0
init_fn = initializers.sinusoidal(min_scale=min_scale, max_scale=max_scale)
rng = jax.random.PRNGKey(0)
features = 5
shape = (max_len, features)
result = init_fn(rng, shape, jnp.float32)
self.assertEqual(result.shape, shape)
def test_sinusoidal_has_deterministic_outputs(self):
"""Tests that we always get the same sinusoids given the same shape."""
max_len = 10
min_scale = 1.0
max_scale = 10000.0
init_fn = initializers.sinusoidal(min_scale=min_scale, max_scale=max_scale)
rng = jax.random.PRNGKey(0)
features = 5
shape = (max_len, features)
result = init_fn(rng, shape, jnp.float32)
expected = [
[
0.0000000e+00, 0.0000000e+00, 1.0000000e+00, 1.0000000e+00,
0.0000000e+00
],
[
8.4147096e-01, 9.9999997e-05, 5.4030228e-01, 1.0000000e+00,
0.0000000e+00
],
[
9.0929741e-01, 1.9999999e-04, -4.1614684e-01, 1.0000000e+00,
0.0000000e+00
],
[
1.4112000e-01, 2.9999999e-04, -9.8999250e-01, 9.9999994e-01,
0.0000000e+00
],
[
-7.5680250e-01, 3.9999999e-04, -6.5364361e-01, 9.9999994e-01,
0.0000000e+00
],
[
-9.5892429e-01, 4.9999997e-04, 2.8366220e-01, 9.9999988e-01,
0.0000000e+00
],
[
-2.7941549e-01, 5.9999997e-04, 9.6017027e-01, 9.9999982e-01,
0.0000000e+00
],
[
6.5698659e-01, 6.9999992e-04, 7.5390226e-01, 9.9999976e-01,
0.0000000e+00
],
[
9.8935825e-01, 7.9999992e-04, -1.4550003e-01, 9.9999970e-01,
0.0000000e+00
],
[
4.1211849e-01, 8.9999987e-04, -9.1113025e-01, 9.9999958e-01,
0.0000000e+00
],
]
np.testing.assert_array_almost_equal(result, expected)
def test_sinusoidal_raises_exception_for_incorrect_shape(self):
"""Tests that we get a ValueError if max_len does not match the shape."""
# TODO: Remove this test once max_len is removed from sinusoidal.
max_len = 10
init_fn = initializers.sinusoidal()
rng = jax.random.PRNGKey(0)
features = 5
shape = (max_len, features, 1) # Adding an extra dimension triggers error.
with self.assertRaises(ValueError):
init_fn(rng, shape, jnp.float32)
def test_truncated_normal_returns_correct_shape(self):
"""Tests that truncated normal returns the expected shape."""
init_fn = initializers.truncated_normal(stddev=1.0, dtype=jnp.float32)
rng = jax.random.PRNGKey(0)
num_embeddings = 10
features = 5
shape = (num_embeddings, features)
result = init_fn(rng, shape, jnp.float32)
self.assertEqual(result.shape, shape)
def test_truncated_normal_has_deterministic_outputs(self):
"""Tests that truncated normal returns the same outputs with fixed RNG."""
init_fn = initializers.truncated_normal(stddev=1.0, dtype=jnp.float32)
rng = jax.random.PRNGKey(0)
num_embeddings = 10
features = 5
shape = (num_embeddings, features)
result = init_fn(rng, shape, jnp.float32)
expected = [
[
-1.85987040e-01, -1.13764632e+00, -8.63419712e-01, 1.38558254e-01,
1.73241040e-03
],
[
-7.08928108e-01, -7.57956028e-01, 1.16832398e-01, -1.67809799e-01,
9.89689469e-01
],
[
-2.33572051e-01, -7.33788013e-01, 3.87833655e-01, 5.08138120e-01,
-1.24911046e+00
],
[
-1.79018974e+00, -2.59301901e-01, 1.25438678e+00, 2.95449466e-01,
3.30820709e-01
],
[
-3.51254076e-01, 2.62031645e-01, 1.12873232e+00, -2.84244955e-01,
-1.55112541e+00
],
[
1.40685475e+00, -5.01563549e-01, 1.24033138e-01, -1.18946660e+00,
-1.26286268e+00
],
[
5.54490983e-01, 4.36401725e-01, 3.97840403e-02, -5.70072941e-02,
2.93129623e-01
],
[
1.57007650e-01, -5.00848331e-02, 1.08628595e+00, 1.52689147e+00,
3.50468487e-01
],
[
-8.84301245e-01, -1.06949806e-01, 6.85548604e-01, 8.57080519e-01,
-9.17811871e-01
],
[
-3.04965496e-01, -1.29926765e+00, -9.85570103e-02, -8.27740490e-01,
-3.74757677e-01
],
]
np.testing.assert_array_almost_equal(result, expected)
def test_truncated_normal_returns_correct_mean(self):
"""Tests that truncated normal returns the requested mean."""
mean = 2.0
stddev = 1.0
init_fn = initializers.truncated_normal(
mean=mean, stddev=stddev, dtype=jnp.float32)
rng = jax.random.PRNGKey(0)
num_embeddings = 100
features = 100
shape = (num_embeddings, features)
result = init_fn(rng, shape, jnp.float32)
np.testing.assert_allclose(np.mean(result), mean, atol=0.01)
def test_truncated_normal_returns_correct_stddev(self):
"""Tests that truncated normal returns the requested stddev."""
mean = 0.0
stddev = 0.05
init_fn = initializers.truncated_normal(
mean=mean, stddev=stddev, dtype=jnp.float32)
rng = jax.random.PRNGKey(0)
num_embeddings = 100
features = 100
shape = (num_embeddings, features)
result = init_fn(rng, shape, jnp.float32)
np.testing.assert_allclose(np.std(result), stddev, atol=0.01)
if __name__ == '__main__':
absltest.main()
| 6,579 | 32.74359 | 79 | py |
flaxformer | flaxformer-main/flaxformer/components/convolution.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolution functions and classes."""
# pylint: disable=attribute-defined-outside-init,g-bare-generic,g-multiple-import
from typing import Any, Optional, Sequence
from flax import linen as nn
from flax.linen import partitioning as flax_partitioning
from flax.training import common_utils
from jax import lax
import jax.numpy as jnp
from flaxformer.types import Array
def constant_init(value):
def _my_fn(key, shape, dtype=jnp.float32):
del key
return jnp.full(shape, value, dtype)
return _my_fn
def roll_with_zeros(x, shift):
"""Version of jax.numpy.roll that zeros out wraparound values.
Args:
x: input tensor
shift: a list with length equal to the rank of x.
Returns:
A tensor with the same shape as x.
"""
if len(shift) != len(x.shape):
raise ValueError('shift must have same length as x.shape got %s %s' %
(x, shift))
start_indices = []
limit_indices = []
padding = []
for dimsize, s in zip(x.shape, shift):
start_indices.append(max(0, -s))
limit_indices.append(min(dimsize, dimsize - s))
padding.append((max(0, s), max(0, -s)))
return jnp.pad(lax.slice(x, start_indices, limit_indices), padding)
def roll_with_zeros_along_axis(x, distance, axis):
shape = x.shape
rank = len(shape)
if axis < 0:
axis += rank
shift = [0] * rank
shift[axis] = distance
return roll_with_zeros(x, shift)
class Depthwise1dConv(nn.Module):
"""One-dimensional depthwise convolution.
If autoregressive=True, then position `i` receives information from positions
in the interval `[i-radius, i]`.
If autoregressive=False, then position `i` receives information from positions
in the interval `[i-radius, i+radius]`.
Attributes:
radius: Maximum distance to move information.
autoregressive: Whether to only look left.
dtype: the dtype of the computation (default: float32).
"""
axis_names: Sequence[str]
radius: int = 2
autoregressive: bool = True
dtype: Any = jnp.float32
length_dim: int = 1
num_feature_dims: int = 1
use_in_mlp_parallel_fused_layer: bool = False
@nn.compact
def __call__(self,
x: Array,
decode: bool = False,
prefill: bool = False,
prefill_lengths: Optional[Array] = None) -> Array:
"""Apply depthwise convolution to the input.
Args:
x: the inputs
decode: Whether to prepare and use an autoregressive cache.
prefill: Whether to run a partial sequence to prefill the cache.
prefill_lengths: The length of each partial sequence we are filling in the
cache
Returns:
Normalized inputs (the same shape as inputs).
"""
x = jnp.asarray(x, jnp.float32)
features = tuple(x.shape[-self.num_feature_dims:])
kernel_size = 1 + self.radius * (1 if self.autoregressive else 2)
def _make_scale_variable(shift_distance):
init_value = 0.5 if shift_distance == 0 else 0.5 / kernel_size
if shift_distance < 0:
name = 'conv_m%d' % -shift_distance
else:
name = 'conv_%d' % shift_distance
return flax_partitioning.param_with_axes(
name,
constant_init(init_value),
features,
jnp.float32,
axes=tuple(self.axis_names),
)
if prefill and decode:
raise ValueError('prefill and decode cannot both be true at the same'
'time. If you are using a prefix LM with bidirectional '
'attention on the inputs, please make a call with '
'prefill=True that includes an attention mask that '
'covers your inputs first and then make your decoding '
'calls.')
# During fast autoregressive decoding, we process one position at a time,
# and cache the few past activations we need.
if decode or prefill:
if not self.autoregressive:
raise ValueError(
'decode flag should never be set for non-autoregressive conv')
is_initialized = self.has_variable('cache', 'cached_x_0')
x_shape = list(x.shape)
if is_initialized and decode:
# actual incremental decoding
if x.shape[self.length_dim] != 1:
raise ValueError('penultimate dimension (length) must be 1 - got %s' %
(x.shape,))
else:
# Not actually decoding - just setting up loop vars
x_shape = (
x_shape[:self.length_dim] + [1] + x_shape[self.length_dim + 1:])
cached_x = []
for shift_distance in range(0, self.radius):
cached_x.append(
self.variable('cache', 'cached_x_%d' % shift_distance, jnp.zeros,
x_shape, x.dtype))
if is_initialized and decode:
values = [x] + [v.value for v in cached_x]
ret = sum([
v * _make_scale_variable(shift_distance)
for shift_distance, v in enumerate(values)
])
for shift_distance in range(0, self.radius):
cached_x[shift_distance].value = values[shift_distance]
return ret
elif prefill:
if prefill_lengths is None:
raise NotImplementedError(
'We need prefill lengths when prefill is set')
for shift_distance in range(0, self.radius):
length = x.shape[self.length_dim]
position = prefill_lengths - (1 + shift_distance)
onehot = common_utils.onehot(position, num_classes=length)
if len(x.shape) == 4 or self.use_in_mlp_parallel_fused_layer:
selected = jnp.einsum('...l, ...lmd->...md', onehot, x)
else:
selected = jnp.einsum('...l, ...ld->...d', onehot, x)
selected = jnp.expand_dims(selected, 1)
cached_x[shift_distance].value = selected
ret = x * _make_scale_variable(0)
x_shifted = x
for shift_distance in range(1, self.radius + 1):
# TODO: mask between packed sequences
x_shifted = roll_with_zeros_along_axis(x_shifted, 1, axis=self.length_dim)
ret += x_shifted * _make_scale_variable(shift_distance)
if not self.autoregressive:
x_shifted = x
for shift_distance in range(1, self.radius + 1):
# TODO: mask between packed sequences
x_shifted = roll_with_zeros_along_axis(
x_shifted, -1, axis=self.length_dim)
ret += x_shifted * _make_scale_variable(-shift_distance)
return ret
| 7,008 | 34.57868 | 81 | py |
flaxformer | flaxformer-main/flaxformer/components/relative_position_biases_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for relative_position_biases.
"""
from absl.testing import absltest
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
from flaxformer import sharding
from flaxformer import testing_utils
from flaxformer.components import relative_position_biases
expected_files = testing_utils.ExpectedJsonFiles(
'flaxformer/components/testdata')
class RelativePositionBiasesTest(absltest.TestCase):
  """Unit and regression tests for RelativePositionBiases."""

  def setUp(self):
    # Shared small configuration used by most tests below.
    self.num_heads = 3
    self.query_len = 5
    self.key_len = 7
    self.relative_attention = relative_position_biases.RelativePositionBiases(
        num_buckets=12,
        max_distance=10,
        num_heads=3,
        dtype=jnp.float32,
    )
    super().setUp()

  def test_relative_attention_renamed_head_axis(self):
    """Tests that the head axis renaming is as expected."""
    self.relative_attention = relative_position_biases.RelativePositionBiases(
        num_buckets=12,
        max_distance=10,
        num_heads=3,
        dtype=jnp.float32,
        head_axis_name='relpos_heads')
    variables = self.relative_attention.init(
        random.PRNGKey(0), self.query_len, self.key_len)
    sharding.check_params_and_axis_names_match(variables)
    # Every partitioning axis name must come from the renamed set.
    for axis_names in jax.tree_leaves(sharding.get_axis_names(variables)):
      for axis_name in axis_names:
        self.assertIn(axis_name, {'relpos_heads', 'relpos_buckets'})
    expected_files.check_params_and_axes(variables['params'],
                                         variables['params_axes'],
                                         'relpos_bias_renamed_head_axis.json')

  def test_relative_attention_bidirectional_params(self):
    """Tests that bidirectional relative position biases have expected params."""
    params = self.relative_attention.init(
        random.PRNGKey(0),
        self.query_len,
        self.key_len,
        bidirectional=True,
        mutable=['params'])
    param_shapes = jax.tree_map(lambda x: x.shape, params)
    # Single (num_heads, num_buckets) embedding table.
    self.assertEqual(param_shapes, {
        'params': {
            'rel_embedding': (3, 12),
        },
    })

  def test_regression_relative_attention_bidirectional_values(self):
    """Tests that bidirectional relative position biases match expected values."""
    outputs, unused_params = self.relative_attention.init_with_output(
        random.PRNGKey(0), self.query_len, self.key_len, bidirectional=True)
    self.assertEqual(outputs.shape,
                     (1, self.num_heads, self.query_len, self.key_len))
    # Golden values produced with PRNGKey(0).
    self.assertAlmostEqual(outputs[0, 0, 0, 0], -0.10940, places=5)
    self.assertAlmostEqual(outputs[0, 1, 2, 1], -0.22087, places=5)
    self.assertAlmostEqual(outputs[0, 1, 4, 6], 0.27360, places=5)
    self.assertAlmostEqual(outputs[0, 2, 4, 6], -0.31798, places=5)

  def test_relative_attention_unidirectional_params(self):
    """Tests that unidirectional relative position biases have expected params."""
    params = self.relative_attention.init(
        random.PRNGKey(0),
        self.query_len,
        self.key_len,
        bidirectional=False,
        mutable=['params'])
    param_shapes = jax.tree_map(lambda x: x.shape, params)
    self.assertEqual(param_shapes, {
        'params': {
            'rel_embedding': (3, 12),
        },
    })

  def test_regression_relative_attention_unidirectional_values(self):
    """Tests that unidirectional relative position biases match expected values.
    """
    outputs, unused_params = self.relative_attention.init_with_output(
        random.PRNGKey(0), self.query_len, self.key_len, bidirectional=False)
    self.assertEqual(outputs.shape,
                     (1, self.num_heads, self.query_len, self.key_len))
    # The first two values coincide with the bidirectional case (past-looking
    # positions); the future-looking entries differ.
    self.assertAlmostEqual(outputs[0, 0, 0, 0], -0.10940, places=5)
    self.assertAlmostEqual(outputs[0, 1, 2, 1], -0.22087, places=5)
    self.assertAlmostEqual(outputs[0, 1, 4, 6], -0.18996, places=5)
    self.assertAlmostEqual(outputs[0, 2, 4, 6], 0.3660492, places=5)

  def test_relative_attention_decode_cache_error_with_init(self):
    """Tests that relative embedding init fails with decode == True."""
    with self.assertRaisesRegex(
        ValueError,
        'decode-mode cannot be enabled during init. use model.apply to '
        'initialize the decoding cache.'):
      self.relative_attention.init(
          jax.random.PRNGKey(0),
          self.query_len,
          self.key_len,
          bidirectional=False,
          decode=True)

  def test_relative_attention_decode_cache_errror_with_bidirectional(self):
    """Tests that bidirectional relative embeddings fails when decoding."""
    params = self.relative_attention.init(
        jax.random.PRNGKey(0),
        self.query_len,
        self.key_len,
        bidirectional=False,
        decode=False)
    with self.assertRaisesRegex(
        ValueError,
        'bidirectional RelativePositionBiases are not supported when decode=True.'
    ):
      self.relative_attention.apply(
          params,
          self.query_len,
          self.key_len,
          bidirectional=True,
          decode=True,
          mutable=['cache'])

  def test_relative_attention_decode_cache(self):
    """Tests that relative embeddings are correctly cached when decode=True."""
    params = self.relative_attention.init(
        jax.random.PRNGKey(0),
        self.query_len,
        self.key_len,
        bidirectional=False,
        decode=False)
    # during init, cache is not actually initialized.
    self.assertNotIn('cache', params)
    outputs, state = self.relative_attention.apply(
        params,
        self.query_len,
        self.key_len,
        bidirectional=False,
        decode=True,
        mutable=['cache'])
    self.assertEqual(outputs.shape,
                     (1, self.num_heads, self.query_len, self.key_len))
    self.assertIn('cached_bias', state['cache'])
    cached_bias = state['cache']['cached_bias']
    # First decode call computes the bias (same golden values as above) and
    # stores it in the cache verbatim.
    self.assertAlmostEqual(outputs[0, 0, 0, 0], -0.10940, places=5)
    self.assertAlmostEqual(outputs[0, 1, 2, 1], -0.22087, places=5)
    self.assertAlmostEqual(outputs[0, 1, 4, 6], -0.18996, places=5)
    self.assertAlmostEqual(outputs[0, 2, 4, 6], 0.3660492, places=5)
    np.testing.assert_array_equal(outputs, state['cache']['cached_bias'])
    params_with_cache = {
        **params,
        **state,
    }
    # Second decode call must reuse the cached bias unchanged.
    outputs, state = self.relative_attention.apply(
        params_with_cache,
        self.query_len,
        self.key_len,
        bidirectional=False,
        decode=True,
        mutable=['cache'])
    np.testing.assert_array_equal(cached_bias, state['cache']['cached_bias'])
if __name__ == '__main__':
absltest.main()
| 7,197 | 33.605769 | 82 | py |
flaxformer | flaxformer-main/flaxformer/components/relative_position_biases.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for relative position biases.
T5 uses a form of relative attention which biases the attention matrix, so each
head effectively attends to things at different scales, irrespective of the
contents of keys and queries.
In the future, this class may be unified with classes which take into account
key and query contents, like the original relative position embeddings of Shaw
et al. and new proposals. However, this will rely on XLA to recover efficiency
for this class (especially when, as in the original T5, the same bias matrix
is shared for all layers).
"""
import abc
from typing import Any, Callable, Sequence
from flax import linen as nn
from flax.linen import partitioning
from jax import lax
import jax.numpy as jnp
import numpy as np
from flaxformer.types import Array
class RelativeAttentionAPI(metaclass=abc.ABCMeta):
"""Interface for relative attention APIs."""
@abc.abstractmethod
def __call__(self, qlen: int, klen: int, bidirectional: bool, decode: bool):
"""Produces relative position embedding attention biases.
This method should return position biases of shape `(1, num_heads, q_len,
k_len)`.
Args:
qlen: Attention query length.
klen: Attention key length.
bidirectional: Whether to allow positive memory-query relative position
embeddings.
decode: Whether to cache relative position bias during autoregressive
decoding.
"""
raise NotImplementedError()
class RelativePositionBiases(nn.Module, RelativeAttentionAPI):
  """Adds T5-style relative positional embeddings to the attention logits.

  Attributes:
    num_buckets: Number of buckets to bucket distances between key and query
      positions into.
    max_distance: Maximum distance before everything is lumped into the last
      distance bucket.
    num_heads: Number of heads in the attention layer. Each head will get a
      different relative position weighting.
    dtype: Type of arrays through this module.
    embedding_init: initializer for relative embedding table.
    head_axis_name: Axis to partition the relpos bias heads on. Setting this
      field trades training performance for unbounded parallelism in mixed
      models.
    on_device_computation: Whether to compute "relative_position" on devices.
      When turned off, all computation will be done with numpy and gets folded
      into program constants. When turned on, computation will happen on
      devices in runtime. The option is generally useful when exporting large
      models with relatively giant (qlen,klen) pairs, so that the giant
      constants will not be embedded into the program.
  """
  num_buckets: int
  max_distance: int
  num_heads: int
  dtype: Any
  embedding_init: Callable[..., Array] = nn.linear.default_embed_init
  head_axis_name: str = 'heads'
  on_device_computation: bool = False

  @staticmethod
  def _relative_position_bucket(relative_position,
                                bidirectional=True,
                                num_buckets=32,
                                max_distance=128,
                                computation_module=np):
    """Translate relative position to a bucket number for relative attention.

    The relative position is defined as memory_position - query_position, i.e.
    the distance in tokens from the attending position to the attended-to
    position. If bidirectional=False, then positive relative positions are
    invalid.

    We use smaller buckets for small absolute relative_position and larger
    buckets for larger absolute relative_positions. All relative
    positions >=max_distance map to the same bucket. All relative
    positions <=-max_distance map to the same bucket. This should allow for
    more graceful generalization to longer sequences than the model has been
    trained on.

    Args:
      relative_position: an int32 array
      bidirectional: a boolean - whether the attention is bidirectional
      num_buckets: an integer
      max_distance: an integer
      computation_module: The module, i.e., numpy or jax.numpy to use when
        conducting computation. Please refer to "on_device_computation" for
        more information.

    Returns:
      a Tensor with the same shape as relative_position, containing int32
      values in the range [0, num_buckets)
    """
    ret = 0
    n = -relative_position
    if bidirectional:
      # Half the buckets cover positive relative positions, half negative;
      # the sign is encoded by an offset of num_buckets//2.
      num_buckets //= 2
      ret += (n < 0).astype(computation_module.int32) * num_buckets
      n = computation_module.abs(n)
    else:
      n = computation_module.maximum(n, 0)
    # now n is in the range [0, inf)
    # Buckets below max_exact are exact (one position per bucket); beyond
    # that, positions map logarithmically up to max_distance.
    max_exact = num_buckets // 2
    is_small = (n < max_exact)
    val_if_large = max_exact + (
        computation_module.log(
            n.astype(computation_module.float32) / max_exact +
            computation_module.finfo(computation_module.float32).eps) /
        computation_module.log(max_distance / max_exact) *
        (num_buckets - max_exact)).astype(computation_module.int32)
    val_if_large = computation_module.minimum(val_if_large, num_buckets - 1)
    ret += computation_module.where(is_small, n, val_if_large)
    return ret

  @nn.compact
  def __call__(self, qlen, klen, bidirectional=True, decode=False):
    """Produce relative position embedding attention biases.

    Args:
      qlen: attention query length.
      klen: attention key length.
      bidirectional: whether to allow positive memory-query relative position
        embeddings.
      decode: whether to cache relative position bias during autoregressive
        decoding.

    Returns:
      output: `(1, num_heads, q_len, k_len)` attention bias
    """
    # bidirectional embeddings don't make sense when decoding (and break cache).
    if decode and bidirectional:
      raise ValueError(
          'bidirectional RelativePositionBiases are not supported when decode=True.'
      )

    # We only cache the bias if the model was already initialized, i.e. if this
    # module is called with model.apply and decode = True. We raise an error if
    # called with model.init and decode = True, since this can cache incorrect
    # positional embeddings produced by random parameters.
    is_initialized = self.has_variable('params', 'rel_embedding')
    if decode and not is_initialized:
      raise ValueError(
          'decode-mode cannot be enabled during init. use model.apply to '
          'initialize the decoding cache.')

    # Return pre-computed relative position bias in cache during decode steps.
    if decode and self.has_variable('cache', 'cached_bias'):
      cached_bias = self.get_variable('cache', 'cached_bias')
      expected_bias_shape = (1, self.num_heads, qlen, klen)
      if cached_bias.shape != expected_bias_shape:
        raise ValueError(f'The cached relative position attention bias was '
                         f'expected to have shape {expected_bias_shape} but '
                         f'instead has the shape {cached_bias.shape}.')
      return cached_bias

    # Choose numpy (constant-folded at trace time) or jnp (runtime on device).
    computation_module = jnp if self.on_device_computation else np
    context_position = computation_module.arange(qlen, dtype=jnp.int32)[:, None]
    memory_position = computation_module.arange(klen, dtype=jnp.int32)[None, :]
    relative_position = memory_position - context_position  # shape (qlen, klen)
    rp_bucket = self._relative_position_bucket(
        relative_position,
        bidirectional=bidirectional,
        num_buckets=self.num_buckets,
        max_distance=self.max_distance,
        computation_module=computation_module)
    relative_attention_bias = partitioning.param_with_axes(
        'rel_embedding',
        self.embedding_init, (self.num_heads, self.num_buckets),
        jnp.float32,
        axes=(self.head_axis_name, 'relpos_buckets'))
    relative_attention_bias = jnp.asarray(relative_attention_bias, self.dtype)
    # Instead of using a slow gather, we create a leading-dimension one-hot
    # array from rp_bucket and use it to perform the gather-equivalent via a
    # contraction, i.e.:
    # (num_head, num_buckets) x (num_buckets one-hot, qlen, klen).
    # This is equivalent to relative_attention_bias[:, rp_bucket]
    bcast_iota = lax.broadcasted_iota(jnp.int32, (self.num_buckets, 1, 1), 0)
    rp_bucket_one_hot = jnp.array(
        rp_bucket[jnp.newaxis, ...] == bcast_iota, dtype=self.dtype)
    # --> shape (qlen, klen, num_heads)
    values = lax.dot_general(
        relative_attention_bias,
        rp_bucket_one_hot,
        (
            ((1,), (0,)),  # rhs, lhs contracting dims
            ((), ())))  # no batched dims
    # Add a singleton batch dimension.
    # --> shape (1, num_heads, qlen, klen)
    out = values[jnp.newaxis, ...]

    # Store computed relative position bias in cache after first calculation.
    if decode:
      _ = self.variable('cache', 'cached_bias', lambda: out)

    return out
| 9,449 | 40.814159 | 84 | py |
flaxformer | flaxformer-main/flaxformer/components/layer_norm.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T5 layer norm, which omits subtraction of mean or bias."""
from typing import Optional
from flax import linen as nn
from flax.linen import partitioning
from jax import lax
from jax import numpy as jnp
from flaxformer.architectures.common import param_remapping
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
class T5LayerNorm(nn.Module, param_remapping.ParameterRemappable):
  """T5 Layer normalization.

  Operates on the last axis of the input data. Unlike standard layer norm,
  no mean is subtracted and no bias is added (RMS-style normalization: the
  input is divided by sqrt(mean(x^2)) only).

  Attributes:
    epsilon: A small float added to variance to avoid dividing by zero.
    dtype: the dtype of the computation (default: float32).
    scale_init: Initializer for scale, by default, one.
    use_scale: boolean - whether to scale by a learned per-channel value
    conv: optional convolution to happen after layer norm
    center_scale_at_zero: boolean - If True, the scale weights will be
      initialized to 0.0 and a constant 1.0 will be added before they are
      applied. Preferable if you use weight decay in the optimizer.
    scale_axis_name: Axis name of the scale variable.
  """
  epsilon: float = 1e-6
  dtype: DType = jnp.float32
  scale_init: Initializer = nn.initializers.ones
  use_scale: bool = True
  conv: Optional[nn.Module] = None
  center_scale_at_zero: bool = False
  scale_axis_name: str = 'embed'

  @nn.compact
  def __call__(self,
               x: Array,
               decode: bool = False,
               prefill: bool = False,
               prefill_lengths: Optional[Array] = None) -> Array:
    """Applies layer normalization on the input.

    Args:
      x: the inputs
      decode: Passed through to optional convolution. Whether to prepare and
        use an autoregressive cache.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in
        the cache.

    Returns:
      Normalized inputs (the same shape as inputs).
    """
    # Statistics are always computed in float32 for numerical stability,
    # then the result is cast back to self.dtype.
    x = jnp.asarray(x, jnp.float32)
    features = x.shape[-1]
    mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True)
    y = jnp.asarray(x * lax.rsqrt(mean2 + self.epsilon), self.dtype)
    if self.conv is not None:
      y = self.conv(  # pylint: disable=not-callable
          y,
          decode=decode,
          prefill=prefill,
          prefill_lengths=prefill_lengths)
    if not self.use_scale:
      return y
    if self.center_scale_at_zero:
      # use zero initialization
      scale = partitioning.param_with_axes(
          'scale',
          nn.initializers.zeros, (features,),
          jnp.float32,
          axes=(self.scale_axis_name,))
      # Recenter so the effective initial scale is 1.0 while the stored
      # parameter stays at 0.0 (friendlier to weight decay).
      scale += 1.0
      scale = jnp.asarray(scale, self.dtype)
      return y * scale
    else:
      scale = partitioning.param_with_axes(
          'scale',
          self.scale_init, (features,),
          jnp.float32,
          axes=(self.scale_axis_name,))
      scale = jnp.asarray(scale, self.dtype)
      return y * scale
| 3,570 | 33.669903 | 80 | py |
flaxformer | flaxformer-main/flaxformer/components/transforms.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for jax transforms used by flaxformer."""
import dataclasses
import functools
import inspect
import flax
from flax import linen as nn
from flax.core.lift import Out as ScanOut # pylint: disable=unused-import
from flax.linen import partitioning
import jax
from jax.experimental.pjit import with_sharding_constraint as jax_pjit_wsc
# TODO: this file contains JAX transform workarounds to fix/move
# upstream, primarily concerning the JAX checkpoint/remat transform and
# workarounds for issues with XLA SPMD and JAX scan transform.
# Workaround a scan(remat(...)) abstraction bug by manually implementing a
# static_argnums behavior for flax remat via closure before applying jax remat.
ScanIn = partitioning.ScanIn # used in t5_architecture.py
def core_remat_static(fn,
                      variables=True,
                      rngs=True,
                      prevent_cse=True,
                      static_argnums=(),
                      policy=None):
  """Flax functional core remat version with static_argnums.

  Wraps `fn` (called as `fn(scope, *args)`) with `jax.remat`, but the
  arguments whose indices appear in `static_argnums` are captured by closure
  instead of being traced through the remat boundary, emulating a
  `static_argnums` behavior.

  Args:
    fn: Scope-taking function to rematerialize.
    variables: Flax lift spec for which variable collections to thread.
    rngs: Flax lift spec for which RNG groups to thread.
    prevent_cse: Forwarded to `jax.remat`.
    static_argnums: Indices (into `fn`'s post-scope args) to treat as static.
    policy: Optional checkpointing policy forwarded to `jax.remat`.

  Returns:
    A packed flax-core transform (see `flax.core.lift.pack`).
  """
  static_argnums = tuple(sorted(static_argnums))

  def _repack_remat_args(dyn_args, static_args):
    """Remake arg list from static and dynamic args given static_argnums."""
    args = []
    s_cnt, d_cnt = 0, 0
    for i in range(len(dyn_args) + len(static_args)):
      if i in static_argnums:
        args.append(static_args[s_cnt])
        s_cnt += 1
      else:
        args.append(dyn_args[d_cnt])
        d_cnt += 1
    return tuple(args)

  def inner(scope_fn, repack_fn, variable_groups, rng_groups, *args):
    # Partition args: static ones are closed over, dynamic ones flow through
    # the rematted function as traced arguments.
    static_args = tuple(x for i, x in enumerate(args) if i in static_argnums)
    dyn_args = tuple(x for i, x in enumerate(args) if i not in static_argnums)

    @functools.partial(jax.remat, prevent_cse=prevent_cse, policy=policy)
    @functools.wraps(fn)
    def rematted(variable_groups, rng_groups, *dyn_args):
      # Reassemble the full positional argument list before calling fn.
      args = _repack_remat_args(dyn_args, static_args)
      scope = scope_fn(variable_groups, rng_groups)
      y = fn(scope, *args)
      return y, repack_fn(scope)

    return rematted(variable_groups, rng_groups, *dyn_args)

  return flax.core.lift.pack(
      inner, (variables,), (variables,), (rngs,), name='remat')
def remat(target,
          variables=True,
          rngs=True,
          prevent_cse=True,
          static_argnums=(),
          policy=None,
          methods=None):
  """Flax lifted remat that supports static_argnums.

  Thin wrapper that lifts `core_remat_static` onto a linen module or method
  via `flax.linen.transforms.lift_transform`, forwarding all options.
  """
  lift_options = dict(
      variables=variables,
      rngs=rngs,
      prevent_cse=prevent_cse,
      static_argnums=static_argnums,
      policy=policy,
      methods=methods)
  return flax.linen.transforms.lift_transform(
      core_remat_static, target, **lift_options)
# Allow use of scan/remat on factory functions that return module instances.
# Flaxformer uses keyword-only arguments in its methods, which
# aren't natively supported by most JAX transforms. We use canonicalizing
# method wrappers to present to jax a pure-positional version of the function.
def canonicalize_arguments(orig_fn):
  """Convert function to use positional arguments only.

  Args:
    orig_fn: callable whose signature may mix positional, keyword-only and
      variadic parameters.

  Returns:
    A pair `(positional_fn, convert_to_args)`: `positional_fn` accepts exactly
    one positional value per parameter of `orig_fn` (variadic parameters
    receive their collections as a single value) and dispatches them back with
    the right positional/keyword form; `convert_to_args` binds a mixed
    arg/kwarg call against `orig_fn`'s signature, applies defaults, and
    returns the flat positional tuple suitable for `positional_fn`.
  """
  signature = inspect.signature(orig_fn)

  def positional_fn(*flat_args):
    parameters = signature.parameters
    if len(flat_args) != len(parameters):
      raise ValueError(f'Incorrect number of arguments: '
                       f'got {len(flat_args)}, expected {len(parameters)}.')
    call_args = []
    call_kwargs = {}
    # Route each flat value back to the positional or keyword slot required
    # by the corresponding parameter of orig_fn.
    for value, param in zip(flat_args, parameters.values()):
      if param.kind in (param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD):
        call_args.append(value)
      elif param.kind is param.KEYWORD_ONLY:
        call_kwargs[param.name] = value
      elif param.kind is param.VAR_POSITIONAL:
        call_args.extend(value)
      elif param.kind is param.VAR_KEYWORD:
        call_kwargs.update(value)
      else:
        raise ValueError('Unknown signature parameter type.')
    return orig_fn(*call_args, **call_kwargs)

  # functools.wraps would change the signature; copy function properties only.
  positional_fn.__dict__.update(orig_fn.__dict__)

  def convert_to_args(*args, **kwargs):
    bound = signature.bind(*args, **kwargs)
    bound.apply_defaults()
    return tuple(bound.arguments.values())

  return positional_fn, convert_to_args
def canonicalized_class_transform(trafo, clz, *t_args, **t_kwargs):
  """Applies kwarg canonicalization with flax transform to module class clz.

  NB: This function only handles transforming the __call__ method.

  Args:
    trafo: flax lifted transform (e.g. nn.scan, nn.remat)
    clz: nn.Module class to transform
    *t_args: transform arguments
    **t_kwargs: transform keyword-arguments

  Returns:
    A transformed version of clz whose __call__ function has been transformed,
    additionally handling canonicalization of __call__'s signature to a purely
    positional function before applying the transform.
  """
  # Transform postitional only __call__ form of clz.
  dekwarged_fn, convert_to_args = canonicalize_arguments(clz.__call__)
  trafo_fn = trafo(dekwarged_fn, *t_args, **t_kwargs)

  # Re-expose the original mixed positional/keyword signature, converting
  # callers' arguments to the flat positional form the transform expects.
  @functools.wraps(clz.__call__)
  def post_fn(self, *args, **kwargs):
    return trafo_fn(*convert_to_args(self, *args, **kwargs))

  # Subclass clz so everything except __call__ is inherited unchanged; the
  # new class name records which transform was applied (e.g. "ScanFoo").
  return type(trafo.__name__.capitalize() + clz.__name__, (clz,),
              {'__call__': post_fn})
# Flaxformer uses factory functions instead of partial constructors, we need
# to add a explicit handler for dealing with this case as our usual lifting
# API has no way to distinguish a factory function from a class method.
def apply_transform_to_module_factory(trafo, factory, *args, **kwargs):
  """Fix to apply flax transforms to a module factories via re-instantiation.

  Args:
    trafo: flax lifted transform (e.g. partitioning.remat, nn.vmap).
    factory: zero-argument callable returning an nn.Module instance.
    *args: transform arguments forwarded to `canonicalized_class_transform`.
    **kwargs: transform keyword-arguments, forwarded likewise.

  Returns:
    A new zero-argument factory that produces the transformed module.
  """

  def new_factory():
    # Create the Module instance from the factory in a disconnected dynamic
    # context solely to collect the construction arguments and class.
    nn.module._context.module_stack.append(None)  # pylint: disable=protected-access
    try:
      inst = factory()
      # Snapshot dataclass constructor fields, excluding linen's bookkeeping
      # attributes, so the class can be re-instantiated later.
      ctor_args = {
          f.name: object.__getattribute__(inst, f.name)
          for f in dataclasses.fields(inst)
          if f.name not in ('parent', 'name')
      }
    finally:
      nn.module._context.module_stack.pop()  # pylint: disable=protected-access
    # Instantiate the transformed module class with gathered construction args
    # in the current dynamic context.
    return canonicalized_class_transform(trafo, inst.__class__, *args,
                                         **kwargs)(**ctor_args)

  return new_factory
# Convenience partials: each takes a module factory (plus transform args) and
# returns a new factory producing the correspondingly transformed module.
factory_remat = functools.partial(apply_transform_to_module_factory,
                                  partitioning.remat)
factory_scan = functools.partial(apply_transform_to_module_factory,
                                 partitioning.scan_with_axes)
factory_vmap = functools.partial(apply_transform_to_module_factory, nn.vmap)
# Scan inner-function SPMD re-annotation.
# The XLA SPMD subsystem currently "loses" annotations on parameter trees that
# pass through an XLA while loop. We should investigate fixing this at the XLA
# level, but the workaround for now is to re-apply the known sharding
# information for the scanned layer -inside- the functionalized scan body
# function using pjit's with_sharding_constraint.
def global_mesh_defined():
  """Checks whether a global xmap/pjit mesh resource environment is active."""
  # An empty device-array shape means no physical mesh has been installed.
  device_shape = (
      jax.experimental.maps.thread_resources.env.physical_mesh.devices.shape)
  return device_shape != ()  # pylint: disable=g-explicit-bool-comparison
def with_sharding_constraint(x, axis_resources):
  """Applies a pjit sharding constraint; no-op on CPU or outside a mesh."""
  running_on_cpu = jax.devices()[0].platform == 'cpu'
  if running_on_cpu or not global_mesh_defined():
    # Annotation would be meaningless (or unsupported) here; pass through.
    return x
  return jax_pjit_wsc(x, axis_resources)
def inner_scan_spmd(annotation_tree, scan_axis):
  """Workaround to apply a sharding annotation pytree inside a scan body fn.

  This creates a function to be passed to nn.scan's "data_transform" kwarg.

  Args:
    annotation_tree: pytree of PartitionSpecs
    scan_axis: The axis along which layer parameters were scanned.

  Returns:
    A function to be used with nn.scan's data_transform kwarg to apply the
    SPMD annotations on the inner scan body function. Returns None if
    annotation_tree is None.
  """
  if annotation_tree is None:
    return None

  # The annotation tree fed through the model is the scan-expanded one,
  # we need to remove the scan axis from these PartitionSpecs.
  def del_axis(x):
    tmp = list(x)
    tmp.pop(scan_axis)
    return type(x)(*tmp)

  annotation_tree = jax.tree_map(del_axis, annotation_tree)

  def annotate_fn(variable_groups, rng_groups):
    # Only the scanned variable groups get re-annotated; broadcast and carry
    # groups pass through untouched.
    broadcast_vars, carry_vars, *scan_variable_groups = variable_groups

    def maybe_annotate_group(x):
      # Only annotate groups containing exactly the 'params' collection;
      # other collections are left as-is.
      if tuple(x[0].keys()) == ('params',):
        return ({
            'params': with_sharding_constraint(x[0]['params'], annotation_tree)
        },)
      else:
        return x

    scan_variable_groups = tuple(
        map(maybe_annotate_group, scan_variable_groups))
    variable_groups = (broadcast_vars, carry_vars) + scan_variable_groups
    return variable_groups, rng_groups

  return annotate_fn
| 10,102 | 35.082143 | 97 | py |
flaxformer | flaxformer-main/flaxformer/components/attention/memory_efficient_attention_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for attention classes."""
import dataclasses
import functools
from typing import Any, Callable, Optional
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
from flax.core import freeze
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
from flaxformer.components import dense
from flaxformer.components.attention import dense_attention
from flaxformer.components.attention import memory_efficient_attention
from flaxformer.types import Array
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
class MultiQueryDotProductAttention(
    dense_attention.MultiQueryDotProductAttention
):
  """Memory-efficient multi-query dot-product attention.

  Identical to the base class except that the attention computation is
  overridden to use the memory-efficient implementation.
  """

  # Override of the base-class field: route attention through the
  # memory-efficient multiquery kernel.
  attention_fn: Callable[[Array, Array, Array], Array] = (
      memory_efficient_attention.dot_product_attention_multiquery
  )
@dataclasses.dataclass(frozen=True)
class SelfAttentionArgs:
  """Bundle of constructor and apply arguments for self-attention tests."""

  num_heads: int = 1
  batch_size: int = 2
  qkv_features: int = 8
  out_features: int = 4
  q_len: int = 5
  features: int = 6
  broadcast_dropout: bool = True
  dropout_rate: float = 0.1
  enable_dropout: bool = True
  use_bias: bool = True
  rescale_logits: bool = True
  decode: bool = False
  float32_logits: bool = False
  use_rotary_embedding: bool = False

  def __post_init__(self):
    # Autoregressive decoding feeds one position at a time, so decode=True
    # requires a length-1 query.
    assert not self.decode or self.q_len == 1

  def init_args(self):
    """Keyword arguments for the attention module constructor."""
    constructor_keys = (
        'num_heads',
        'qkv_features',
        'out_features',
        'broadcast_dropout',
        'dropout_rate',
        'use_bias',
        'rescale_logits',
        'float32_logits',
        'use_rotary_embedding',
    )
    return {key: getattr(self, key) for key in constructor_keys}

  def apply_args(self, dtype=jnp.float32):
    """Keyword arguments (inputs, mask, bias) for applying the module."""
    query_shape = (self.batch_size, self.q_len, self.features)
    attn_shape = (self.batch_size, self.num_heads, self.q_len, self.q_len)
    return {
        'inputs_q': jnp.ones(query_shape, dtype=dtype),
        'mask': jnp.ones(attn_shape, dtype=dtype),
        'bias': jnp.ones(attn_shape, dtype=dtype),
        'enable_dropout': self.enable_dropout,
    }
class AttentionTest(parameterized.TestCase):
  """Tests for the memory-efficient multi-query attention implementation."""

  def test_memory_efficient_attention_shape(self):
    # This test only checks for shape but tries to make sure all code paths are
    # reached.
    dropout_rng = random.PRNGKey(0)
    batch_size, num_heads, q_len, kv_len, qk_depth, v_depth = 1, 2, 3, 4, 5, 6

    query = jnp.ones((batch_size, q_len, num_heads, qk_depth))
    key = jnp.ones((batch_size, kv_len, qk_depth))
    value = jnp.ones((batch_size, kv_len, v_depth))
    bias = jnp.ones((batch_size, num_heads, q_len, kv_len))

    args = dict(
        query=query,
        key=key,
        value=value,
        bias=bias,
        rescale_logits=True,
        dropout_rng=dropout_rng,
        dropout_rate=0.5,
        enable_dropout=True,
    )

    output = memory_efficient_attention.dot_product_attention_multiquery(
        **args, broadcast_dropout=True
    )
    self.assertEqual(output.shape, (batch_size, q_len, num_heads, v_depth))

    # Make sure we also reach the code path where we don't broadcast dropout.
    output = memory_efficient_attention.dot_product_attention_multiquery(
        **args, broadcast_dropout=False
    )
    self.assertEqual(output.shape, (batch_size, q_len, num_heads, v_depth))

  @parameterized.parameters({'f': 20}, {'f': 22})
  def test_multiquery_dot_product_attention(self, f):
    # Checks the module output against a hand-written einsum reference.
    # b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
    b, q, h, d, k = 2, 3, 4, 5, 6

    base_args = SelfAttentionArgs(
        num_heads=h,
        qkv_features=f,
        out_features=f,
        dropout_rate=0,
        rescale_logits=False,
        use_bias=False,
    )
    args = base_args.init_args()

    if f != h * d:
      args['head_dim'] = d

    np.random.seed(0)
    inputs_q = np.random.randn(b, q, f)
    inputs_kv = np.random.randn(b, k, f)

    # Projection: [b, q, f] -> [b, q, h, d]
    # So the query kernel has to be [f, h, d]
    query_kernel = np.random.randn(f, h, d)
    # In multi-query attention, key and value are shared across heads,
    # so their kernels project to a single head: [f, d].
    key_kernel = np.random.randn(f, d)
    value_kernel = np.random.randn(f, d)
    # `out` calculation: [b, q, h, d] -> [b, q, f]
    # So kernel has to be [h, d, f]
    out_kernel = np.random.randn(h, d, f)

    params = {
        'query': {'kernel': query_kernel.reshape(f, -1)},
        'key': {'kernel': key_kernel},
        'value': {'kernel': value_kernel},
        'out': {'kernel': out_kernel.reshape(-1, f)},
    }
    y = MultiQueryDotProductAttention(**args).apply(
        {'params': freeze(params)}, inputs_q, inputs_kv
    )

    # Reference computation with plain numpy einsums.
    query = np.einsum('bqf,fhd->bqhd', inputs_q, query_kernel)
    key = np.einsum('bkf,fd->bkd', inputs_kv, key_kernel)
    value = np.einsum('bkf,fd->bkd', inputs_kv, value_kernel)
    logits = np.einsum('bqhd,bkd->bhqk', query, key)
    weights = nn.softmax(logits, axis=-1)
    combined_value = np.einsum('bhqk,bkd->bqhd', weights, value)
    y_expected = np.einsum('bqhd,hdf->bqf', combined_value, out_kernel)
    np.testing.assert_allclose(y, y_expected, atol=2e-4, rtol=1e-4)

  def test_multiquery_dot_product_attention_caching(self):
    # Verifies that decode=True writes key/value into the autoregressive cache.
    # b: batch, f: qkv_features, k: kv_len, h: num_head, d: head_dim
    b, h, d, k = 2, 3, 4, 5
    f = h * d

    base_args = SelfAttentionArgs(
        num_heads=h, qkv_features=f, out_features=f, dropout_rate=0
    )
    args = base_args.init_args()

    cache = {
        'cached_key': np.zeros((b, d, k)),
        'cached_value': np.zeros((b, d, k)),
        'cache_index': np.array(0),
    }
    inputs_q = np.random.randn(b, 1, f)
    inputs_kv = np.random.randn(b, 1, f)

    # Mock out the projection layers so cache contents are predictable.
    def mock_dense_general(self, x, **kwargs):  # pylint: disable=unused-argument
      # For q, replace the projection with simple reshaping.
      if x is inputs_q:
        return x.reshape(b, -1, h, d)
      # For k and v, the feature dim is sliced to mimic down-projection.
      elif x is inputs_kv:
        return x[:, :, :d]

    with mock.patch.object(
        dense.DenseGeneral, '__call__', new=mock_dense_general
    ):
      _, mutated = MultiQueryDotProductAttention(**args).apply(
          {'cache': freeze(cache)},
          inputs_q,
          inputs_kv,
          decode=True,
          mutable=['cache'],
      )
      updated_cache = mutated['cache']

    # Perform the same mocked projection to generate the expected cache.
    # (key|value): [b, 1, d]
    key = mock_dense_general(None, inputs_kv)
    value = mock_dense_general(None, inputs_kv)

    # cached_(key|value): [b, d, k]
    cache['cached_key'][:, :, 0] = key[:, 0, :]
    cache['cached_value'][:, :, 0] = value[:, 0, :]
    cache['cache_index'] = np.array(1)
    for name, array in cache.items():
      np.testing.assert_allclose(array, updated_cache[name])

  @parameterized.parameters(
      {'bias_gen': lambda *args: np.zeros(args)}, {'bias_gen': np.random.randn}
  )
  def test_dot_product_attention_multiquery(self, bias_gen: ...):
    # Compares the attention function directly to a softmax/einsum reference,
    # with both zero and random biases.
    # b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
    b, q, h, d, k = 2, 3, 4, 5, 6
    np.random.seed(0)
    query = np.random.randn(b, q, h, d)
    key = np.random.randn(b, k, d)
    value = np.random.randn(b, k, d)
    bias = bias_gen(b, h, q, k)
    attn_out = memory_efficient_attention.dot_product_attention_multiquery(
        query, key, value, bias=bias
    )
    logits = np.einsum('bqhd,bkd->bhqk', query, key)
    weights = jax.nn.softmax(logits + bias, axis=-1)
    expected_attn_out = np.einsum('bhqk,bkd->bqhd', weights, value)
    np.testing.assert_allclose(attn_out, expected_attn_out, atol=1e-6)

  @parameterized.parameters(
      {},  # defaults
      {'use_extra_logit': True},
      {
          'key_chunk_size': 1,
      },
      {
          'query_chunk_size': 1,
      },
      {'key_chunk_size': 2, 'k': 12},
      {'key_chunk_size': 2, 'k': 12, 'use_extra_logit': True},
      {'query_chunk_size': 2, 'q': 6},
      {'init_fn': lambda *args: np.zeros(args)},
      {'causal_mask': True, 'b': 1, 'k': 2, 'q': 2, 'h': 1, 'd': 2},
      {'causal_mask': True, 'k': 8, 'q': 8},
      # Trigger the code path where some chunks are skipped.
      {
          'causal_mask': True,
          'b': 1,
          'k': 8,
          'q': 8,
          'h': 1,
          'd': 7,
          'key_chunk_size': 2,
          'query_chunk_size': 2,
      },
      {'use_bias': False},
      {'b': 1, 'k': 8192, 'q': 2048, 'h': 1, 'd': 128, 'use_extra_logit': True},
  )
  def test_memory_efficient_same_as_default(
      self,
      use_extra_logit: bool = False,
      b: int = 2,
      q: int = 3,
      h: int = 5,
      d: int = 7,
      k: int = 11,
      key_chunk_size: Optional[int] = None,
      query_chunk_size: Optional[int] = None,
      init_fn: Callable[[...], Any] = np.random.randn,
      use_bias: bool = True,
      causal_mask: bool = False,
  ):
    # The chunked memory-efficient kernel must agree numerically with the
    # default (unchunked) dense attention implementation.
    # b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
    np.random.seed(0)
    query = init_fn(b, q, h, d)
    key = init_fn(b, k, d)
    value = init_fn(b, k, d)
    bias = None
    if use_bias:
      bias = init_fn(b, h, q, k)

    # Reference path folds the causal mask into the bias explicitly.
    attn_bias = bias
    if causal_mask:
      attn_bias += memory_efficient_attention._causal_bias(q, k, 0)
    attn = dense_attention.dot_product_attention_multiquery(
        query,
        key,
        value,
        bias=attn_bias,
        use_extra_logit=use_extra_logit,
    )

    m_attn_fn = memory_efficient_attention.dot_product_attention_multiquery
    if key_chunk_size is not None:
      m_attn_fn = functools.partial(m_attn_fn, key_chunk_size=key_chunk_size)
    if query_chunk_size is not None:
      m_attn_fn = functools.partial(
          m_attn_fn, query_chunk_size=query_chunk_size
      )
    m_attn = m_attn_fn(
        query,
        key,
        value,
        # We use the bias version WITHOUT the causal mask, to test the
        # causal_mask flag.
        bias=bias,
        use_extra_logit=use_extra_logit,
        causal_mask=causal_mask,
    )
    np.testing.assert_allclose(attn, m_attn, atol=1e-5, rtol=1e-2)

  @parameterized.parameters(
      {'dropout_rate': 0.00001},
      {'dropout_rate': 0.5},
      {'dropout_rate': 1.0},
      {'key_chunk_size': 2, 'k': 4, 'dropout_rate': 0.5},
      {'query_chunk_size': 2, 'q': 4, 'dropout_rate': 0.5},
  )
  def test_dropout(
      self,
      dropout_rate: float,
      b=2,
      q=3,
      h=5,
      d=3,
      k=5,
      key_chunk_size=None,
      query_chunk_size=None,
      causal_mask: bool = False,
      use_bias: bool = True,
  ):
    # smoketest only
    dropout_rng = jax.random.PRNGKey(0)
    np.random.seed(0)
    query = np.random.randn(b, q, h, d)
    key = np.random.randn(b, k, d)
    value = np.random.randn(b, k, d)
    bias = None
    if use_bias:
      bias = np.random.randn(b, h, q, k)

    m_attn_fn = memory_efficient_attention.dot_product_attention_multiquery
    if key_chunk_size is not None:
      m_attn_fn = functools.partial(m_attn_fn, key_chunk_size=key_chunk_size)
    if query_chunk_size is not None:
      m_attn_fn = functools.partial(
          m_attn_fn, query_chunk_size=query_chunk_size
      )
    m_attn_fn = functools.partial(
        m_attn_fn,
        query,
        key,
        value,
        bias=bias,
        causal_mask=causal_mask,
        dropout_rate=dropout_rate,
    )
    m_attn = m_attn_fn(dropout_rng=dropout_rng)

    # For intermediate dropout rates, different RNGs should yield different
    # outputs; identical outputs would indicate dropout was not applied.
    if dropout_rate > 0.1 and dropout_rate < 0.9:
      alt_dropout_rng = jax.random.PRNGKey(1)
      alt_m_attn = m_attn_fn(dropout_rng=alt_dropout_rng)
      if np.allclose(m_attn, alt_m_attn, atol=1e-6):
        self.fail(
            f'm_attn and alt_m_attn should differ:\n{m_attn=}\n{alt_m_attn=}'
        )
class SelfAttention(dense_attention.MultiHeadDotProductAttention):
  """Self-attention special case of multi-head dot-product attention.

  Feeds the query input as both query and key/value to the parent class and
  routes attention through the memory-efficient multihead implementation.
  """

  # Override of the base-class field: use the memory-efficient kernel.
  attention_fn: Callable[[Array, Array, Array], Array] = (
      memory_efficient_attention.dot_product_attention_multihead
  )

  @nn.compact
  def __call__(
      self,
      inputs_q: Array,
      mask: Optional[Array] = None,
      bias: Optional[Array] = None,
      enable_dropout: bool = True,
  ):
    # inputs_q serves as both query and key/value input (self-attention).
    return super().__call__(
        inputs_q, inputs_q, mask, bias, enable_dropout=enable_dropout
    )
class MHAttentionTest(parameterized.TestCase):
  def test_dot_product_attention_shape(self):
    # This test only checks for shape but tries to make sure all code paths are
    # reached.
    dropout_rng = random.PRNGKey(0)
    batch_size, num_heads, q_len, kv_len, qk_depth, v_depth = 1, 2, 3, 4, 5, 6

    # Multihead variant: key and value carry a per-head dimension.
    query = jnp.ones((batch_size, q_len, num_heads, qk_depth))
    key = jnp.ones((batch_size, kv_len, num_heads, qk_depth))
    value = jnp.ones((batch_size, kv_len, num_heads, v_depth))
    bias = jnp.ones((batch_size, num_heads, q_len, kv_len))

    args = dict(
        query=query,
        key=key,
        value=value,
        bias=bias,
        rescale_logits=True,
        dropout_rng=dropout_rng,
        dropout_rate=0.5,
        enable_dropout=True,
    )

    output = memory_efficient_attention.dot_product_attention_multihead(
        **args, broadcast_dropout=True
    )
    self.assertEqual(output.shape, (batch_size, q_len, num_heads, v_depth))

    # Make sure we also reach the code path where we don't broadcast dropout.
    output = memory_efficient_attention.dot_product_attention_multihead(
        **args, broadcast_dropout=False
    )
    self.assertEqual(output.shape, (batch_size, q_len, num_heads, v_depth))
  def test_dot_product_attention_no_batch_dim(self):
    # The multihead attention function should also accept unbatched inputs.
    num_heads, q_len, kv_len, qk_depth, v_depth = 1, 2, 3, 4, 5
    query = jnp.ones((q_len, num_heads, qk_depth))
    key = jnp.ones((kv_len, num_heads, qk_depth))
    value = jnp.ones((kv_len, num_heads, v_depth))
    output = memory_efficient_attention.dot_product_attention_multihead(
        query, key, value
    )
    self.assertEqual(output.shape, (q_len, num_heads, v_depth))
def test_self_attention(self):
# We only test MultiHeadDotProductAttention through SelfAttention because
# we are only shape checking anyway.
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs()
model = SelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_self_attention_cast_logits_float32(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs(float32_logits=True)
model = SelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_self_attention_no_rescale_logits(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs(rescale_logits=False)
model = SelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_self_attention_no_out_features(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs(out_features=None)
model = SelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.features))
def test_self_attention_no_masking(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs()
model = SelfAttention(**args.init_args())
apply_args = args.apply_args()
apply_args['mask'] = None
y, _ = model.init_with_output(rngs, **apply_args)
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_self_attention_with_decoding(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs(decode=True, q_len=1)
model = SelfAttention(**args.init_args())
apply_args = args.apply_args()
apply_args['mask'] = None
apply_args['bias'] = None
params = model.init(rngs, **apply_args)
y, _ = model.apply(
params,
**apply_args,
mutable=['cache'],
rngs={'dropout': random.PRNGKey(2)},
)
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
@parameterized.parameters({'f': 20}, {'f': 22})
def test_multihead_dot_product_attention(self, f):
# b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
b, q, h, d, k = 2, 3, 4, 5, 6
base_args = SelfAttentionArgs(
num_heads=h,
qkv_features=f,
out_features=f,
dropout_rate=0,
rescale_logits=False,
use_bias=False,
)
args = base_args.init_args()
if f != h * d:
args['head_dim'] = d
np.random.seed(0)
inputs_q = np.random.randn(b, q, f)
inputs_kv = np.random.randn(b, k, f)
# Projection: [b, q, f] -> [b, q, h, d]
# So the kernels have to be [f, h, d]
query_kernel = np.random.randn(f, h, d)
key_kernel = np.random.randn(f, h, d)
value_kernel = np.random.randn(f, h, d)
# `out` calculation: [b, q, h, d] -> [b, q, f]
# So kernel has to be [h, d, f]
out_kernel = np.random.randn(h, d, f)
params = {
'query': {'kernel': query_kernel.reshape(f, -1)},
'key': {'kernel': key_kernel.reshape(f, -1)},
'value': {'kernel': value_kernel.reshape(f, -1)},
'out': {'kernel': out_kernel.reshape(-1, f)},
}
y = dense_attention.MultiHeadDotProductAttention(**args).apply(
{'params': freeze(params)}, inputs_q, inputs_kv
)
query = np.einsum('bqf,fhd->bqhd', inputs_q, query_kernel)
key = np.einsum('bkf,fhd->bkhd', inputs_kv, key_kernel)
value = np.einsum('bkf,fhd->bkhd', inputs_kv, value_kernel)
logits = np.einsum('bqhd,bkhd->bhqk', query, key)
weights = nn.softmax(logits, axis=-1)
combined_value = np.einsum('bhqk,bkhd->bqhd', weights, value)
y_expected = np.einsum('bqhd,hdf->bqf', combined_value, out_kernel)
np.testing.assert_allclose(y, y_expected, rtol=1e-5, atol=1e-5)
def test_dot_product_attention(self):
# b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
b, q, h, d, k = 2, 3, 4, 5, 6
np.random.seed(0)
query = np.random.randn(b, q, h, d)
key = np.random.randn(b, k, h, d)
value = np.random.randn(b, k, h, d)
bias = np.random.randn(b, h, q, k)
attn_out = memory_efficient_attention.dot_product_attention_multihead(
query, key, value, bias=bias
)
logits = np.einsum('bqhd,bkhd->bhqk', query, key)
weights = jax.nn.softmax(logits + bias, axis=-1)
expected = np.einsum('bhqk,bkhd->bqhd', weights, value)
np.testing.assert_allclose(attn_out, expected, atol=1e-6)
def test_with_rope_and_bfloat16(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs()
init_args = args.init_args()
init_args.update({'dtype': 'bfloat16', 'use_rotary_embedding': True})
model = SelfAttention(**init_args)
y, _ = model.init_with_output(rngs, **args.apply_args(dtype='bfloat16'))
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
# Standard absl test entry point when the file is run directly.
if __name__ == '__main__':
  absltest.main()
| 20,330 | 33.401015 | 81 | py |
flaxformer | flaxformer-main/flaxformer/components/attention/memory_efficient_attention.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of memory-efficient attention.
Original version published here: https://arxiv.org/abs/2112.05682
Also known as Flash Attention: https://arxiv.org/abs/2205.14135
"""
import functools
from typing import Callable, NamedTuple, Optional
import jax
from jax import lax
from jax import numpy as jnp
from jax import random
from flaxformer.types import DType
from flaxformer.types import PRNGKey
Array = jax.Array
def _causal_bias(
    q_len: int,
    k_len: int,
    offset: Optional[int] = None,
    mask_to_bias_factor: float = 1e6,
) -> Array:
  """Builds an additive causal-attention bias of shape `(q_len, k_len)`.

  Entries whose (optionally offset) query index precedes the key index
  receive a large negative bias; all other entries are zero.

  Args:
    q_len: Number of query positions.
    k_len: Number of key positions.
    offset: Optional amount added to every query index before the comparison.
    mask_to_bias_factor: Magnitude of the negative bias used for masking.

  Returns:
    A `(q_len, k_len)` array with `-mask_to_bias_factor` at masked positions
    and zero elsewhere.
  """
  query_positions = lax.broadcasted_iota(
      dtype=jnp.int32, shape=(q_len, 1), dimension=0
  )
  key_positions = lax.broadcasted_iota(
      dtype=jnp.int32, shape=(1, k_len), dimension=1
  )
  if offset is not None:
    query_positions = query_positions + offset
  # Broadcasts to (q_len, k_len); True marks positions that must be masked.
  future_positions = query_positions < key_positions
  return future_positions * (-1 * mask_to_bias_factor)
def _local_causal_bias(
    q_len: int,
    k_len: int,
    query_offset: int,
    key_offset: int,
) -> Array:
  """Causal bias for one (query chunk, key chunk) pair at global offsets.

  The chunks' absolute positions are reduced to a single relative offset so
  that `_causal_bias` can compare global query/key indices.
  """
  relative_offset = query_offset - key_offset
  return _causal_bias(q_len, k_len, offset=relative_offset)
class _AttentionSummary(NamedTuple):
  """The summary of the attention over a segment of keys and values.

  Together these three arrays are sufficient to resume the online-softmax
  accumulation over subsequent key/value chunks (see `_summarize_chunk`).
  """

  # Sum of the values weighted by the exponentiated scores. Array of shape
  # `[batch, queries, heads, queries_per_head, value_features]`.
  numerator: Array
  # Sum of the exponentiated scores per query. Array of shape
  # `[batch, queries, heads, queries_per_head]`.
  denominator: Array
  # Maximum score encountered per query; used to keep the exponentials
  # numerically stable. Array of shape
  # `[batch, queries, heads, queries_per_head]`.
  max_so_far: Array
def _summarize_chunk(
    query: Array,
    key: Array,
    value: Array,
    current_summary: _AttentionSummary,
    bias: Optional[Array],
    precision=None,
) -> _AttentionSummary:
  """Attention for a segment of queries, keys, and values.

  This is one step of the online-softmax accumulation: the exponentiated
  scores of this key/value chunk are folded into the running numerator and
  denominator, with both rescaled whenever the running maximum increases.

  Args:
    query: An array of shape `[batch, q_length, heads, queries_per_head,
      qk_depth_per_head]`.
    key: An array of shape `[batch, kv_length, heads, qk_depth_per_head]`.
    value: An array of shape `[batch, kv_length, heads, v_depth_per_head]`.
    current_summary: The partially summarized queries so far, before adding the
      summarization of this kv chunk.
    bias: bias for the attention weights. This should be broadcastable to the
      shape `[batch, heads, queries_per_head, q_length, kv_length]` This can be
      used for incorporating causal masks, padding masks, proximity bias, etc.
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.

  Returns:
    The summary for this segment, consisting of sum of the sum of the
    values weighted by their exponentiated attention scores, the exponentiated
    attention scores, and the maximum score of this segment.
  """
  batch, q_len, q_heads, queries_per_head, q_feat = query.shape
  del q_feat
  _, kv_len, _, v_feat = value.shape
  (numerator, denominator, max_so_far) = current_summary
  # Raw attention scores for this chunk:
  # [batch, q_length, heads, queries_per_head, kv_length].
  attn_weights = jnp.einsum(
      'bqhnd,bkhd->bqhnk', query, key, precision=precision
  )
  if bias is not None:
    bias = jnp.moveaxis(bias, -2, 1)  # move sequence length outside
    attn_weights += bias
  previous_max = max_so_far
  assert previous_max.shape == (batch, q_len, q_heads, queries_per_head)
  chunk_maxima = jnp.max(attn_weights, axis=-1)
  assert chunk_maxima.shape == (batch, q_len, q_heads, queries_per_head)
  max_so_far = jnp.maximum(max_so_far, chunk_maxima)
  # The max is only a numerical-stability shift, so no gradient through it.
  max_so_far = jax.lax.stop_gradient(max_so_far)
  # Rescaling factor for the previously accumulated sums, needed whenever the
  # running maximum has increased (online softmax).
  correction = jnp.exp(previous_max - max_so_far)
  assert correction.shape == (batch, q_len, q_heads, queries_per_head)
  corrected_weights = jnp.exp(
      attn_weights - max_so_far[:, :, :, :, jnp.newaxis]
  )
  assert corrected_weights.shape == (
      batch,
      q_len,
      q_heads,
      queries_per_head,
      kv_len,
  )
  expected_numerator_shape = (batch, q_len, q_heads, queries_per_head, v_feat)
  assert numerator.shape == expected_numerator_shape, (
      f'numerator.shape is {numerator.shape}, but expected'
      f' {expected_numerator_shape}.'
  )
  # Rescale the accumulated weighted values, then add this chunk's
  # contribution.
  numerator = numerator * correction[:, :, :, :, jnp.newaxis]
  numerator = numerator + jnp.einsum(
      'bqhnv,bvhf->bqhnf', corrected_weights, value, precision=precision
  )
  assert denominator.shape == (batch, q_len, q_heads, queries_per_head)
  # Same rescale-and-accumulate for the softmax normalizer.
  denominator = denominator * correction
  denominator = denominator + corrected_weights.sum(axis=-1)
  return _AttentionSummary(
      numerator,
      denominator,
      max_so_far,
  )
def _memory_efficient_attention(
    query,
    key,
    value,
    bias_fn: Callable[[int, int], Array],
    query_chunk_size: int,
    key_chunk_size: int,
    precision=None,
    dtype=jnp.float32,
    use_extra_logit: bool = False,
    causal_mask: bool = False,
):
  """Computes dot-product multiquery-attention given query, key, and value.

  Queries are processed chunk-by-chunk with `lax.map`; for each query chunk
  the key/value chunks are folded in with `lax.scan` using the online-softmax
  summaries from `_summarize_chunk`, so only one (query_chunk, key_chunk)
  score block exists at a time. `bias_fn(query_chunk_idx, key_chunk_idx)`
  supplies the additive bias for each block. With `causal_mask=True`, blocks
  that lie entirely in the future (query chunk before key chunk) are skipped
  via `lax.cond`.
  """
  batch, num_q, heads, queries_per_head, q_feat = query.shape
  batch, num_kv, heads, k_features = key.shape
  batch, num_kv, heads, v_features = value.shape
  num_q_chunks = num_q // query_chunk_size
  num_kv_chunks = num_kv // key_chunk_size
  # Split the sequence axes into (num_chunks, chunk_size).
  query = query.reshape(
      (batch, num_q_chunks, query_chunk_size, heads, queries_per_head, q_feat)
  )
  key = key.reshape((batch, num_kv_chunks, key_chunk_size, heads, k_features))
  value = value.reshape(
      (batch, num_kv_chunks, key_chunk_size, heads, v_features)
  )
  # We move the chunk_idx axis to the front to iterate over it with lax.map.
  query = jnp.moveaxis(query, 1, 0)
  key = jnp.moveaxis(key, 1, 0)
  value = jnp.moveaxis(value, 1, 0)
  # The zero_chunk is the output of _summarize_chunk when the inputs are zeros.
  # We define the zero_chunk outside of the loops to prevent the compiler from
  # re-creating these arrays in every loop iteration.
  zero_chunk = _AttentionSummary(
      # numerator
      jnp.zeros(
          (batch, query_chunk_size, heads, queries_per_head, v_features),
          dtype=dtype,
      ),
      # denominator
      jnp.zeros(
          (batch, query_chunk_size, heads, queries_per_head), dtype=dtype
      ),
      # max_so_far (start at -inf so any real score becomes the maximum)
      (-jnp.inf)
      * jnp.ones(
          (batch, query_chunk_size, heads, queries_per_head), dtype=dtype
      ),
  )

  def _query_chunk_attention(args):
    # Full attention for one query chunk, scanning over all kv chunks.
    query_chunk, query_chunk_idx = args

    # Rematerialize inside the scan to keep activation memory low.
    @functools.partial(jax.checkpoint, prevent_cse=False)
    def conditional_summarize_fn(carry, args):
      key_chunk, value_chunk, key_chunk_idx = args
      skip_block = jnp.array(False)
      if causal_mask:
        # Entire block is masked when the query chunk precedes the key chunk.
        skip_block = query_chunk_idx < key_chunk_idx

      def cond_fn(query, key, value, carry, key_chunk_idx):
        with jax.named_scope('compute_bias'):
          chunk_bias = bias_fn(query_chunk_idx, key_chunk_idx)
        return (
            _summarize_chunk(
                query, key, value, carry, chunk_bias, precision=precision
            ),
            None,
        )

      # Either skip the block (pass the carry through) or summarize it.
      return jax.lax.cond(
          skip_block,
          lambda a, b, c, carry, d: (carry, None),
          cond_fn,
          query_chunk,
          key_chunk,
          value_chunk,
          carry,
          key_chunk_idx,
      )

    (numerator, denominator, max_so_far), _ = jax.lax.scan(
        conditional_summarize_fn,
        zero_chunk,
        xs=(key, value, jnp.arange(0, num_kv_chunks)),
    )
    if use_extra_logit:
      # Virtual extra logit of zero contributes exp(0 - max) to the
      # normalizer.
      denominator += jnp.exp(-max_so_far)
    return numerator / denominator[:, :, :, :, jnp.newaxis]

  res = lax.map(_query_chunk_attention, xs=(query, jnp.arange(0, num_q_chunks)))
  expected_res_shape = (
      num_q_chunks,
      batch,
      query_chunk_size,
      heads,
      queries_per_head,
      v_features,
  )
  assert (
      res.shape == expected_res_shape
  ), f'res.shape is {res.shape}, but expected {expected_res_shape}.'
  # Undo the chunking: move the chunk axis back and merge it with chunk_size.
  res = jnp.moveaxis(res, 0, 1)
  return res.reshape(batch, num_q, heads, queries_per_head, value.shape[-1])
def dot_product_attention_queries_per_head(
    query: Array,
    key: Array,
    value: Array,
    bias: Optional[Array] = None,
    broadcast_dropout: bool = True,
    rescale_logits: bool = False,
    dropout_rng: Optional[PRNGKey] = None,
    dropout_rate: float = 0.0,
    enable_dropout: bool = True,
    dtype: DType = jnp.float32,
    precision: Optional[lax.Precision] = None,
    use_extra_logit: bool = False,
    float32_logits: bool = False,
    causal_mask: bool = False,
    query_chunk_size: int = 1024,
    key_chunk_size: int = 2048,
) -> Array:
  """Computes dot-product attention given query, key, and value.

  This is a variant of attention that generalizes both multi-head and
  multi-query attention. It features an extra dimension for the query array,
  that specifies the number of queries per head.

  This function is improved by the memory-efficient attention algorithm
  (https://arxiv.org/abs/2112.05682), which is also called FlashAttention
  (https://arxiv.org/abs/2205.14135).

  Note: query, key, value needn't have any batch dimensions.

  Args:
    query: queries for calculating attention with shape of `[batch..., q_length,
      num_heads, queries_per_head, qk_depth_per_head]`.
    key: keys for calculating attention with shape of `[batch..., kv_length,
      num_heads, qk_depth_per_head]`.
    value: values to be used in attention with shape of `[batch..., kv_length,
      num_heads, v_depth_per_head]`.
    bias: bias for the attention weights. This should be broadcastable to the
      shape `[batch..., num_heads, queries_per_head, q_length, kv_length]` This
      can be used for incorporating causal masks, padding masks, proximity bias,
      etc.
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    rescale_logits: bool. Whether to rescale `query` logits by 1/sqrt(depth_kq).
    dropout_rng: JAX PRNGKey: to be used for dropout
    dropout_rate: dropout rate
    enable_dropout: bool, whether to apply dropout
    dtype: the dtype of the computation (default: float32)
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    use_extra_logit: whether to include a virtual extra logit equal to zero.
    float32_logits: bool, if True then compute logits in float32 to avoid
      numerical issues with bfloat16.
    causal_mask: Apply a causal mask. This can be used alternatively or in
      addition to the given bias.
    query_chunk_size: Positive integer to control the size of the query chunks.
    key_chunk_size: Positive integer to control the size of the key chunks.

  Returns:
    Output of shape `[batch..., length, num_heads, queries_per_head,
      v_depth_per_head]`.
  """
  assert (
      key.ndim == value.ndim
  ), f'k, v must have same rank. key: {key.shape}, value: {value.shape}'
  assert (
      query.shape[:-4] == key.shape[:-3] == value.shape[:-3]
  ), f'q, k, v batch dim must match. query: {query.shape}'
  assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.'
  assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'

  # Ensure that we have exactly one batch dimension (collapse any leading
  # batch dims; they are restored at the end).
  orig_batch_dims = query.shape[:-4]
  query = query.reshape(-1, *query.shape[-4:])
  key = key.reshape(-1, *key.shape[-3:])
  value = value.reshape(-1, *value.shape[-3:])
  if bias is not None:
    bias = bias.reshape(-1, *bias.shape[-4:])
  batch_size, query_length, heads, queries_per_head, _ = query.shape
  _, key_length, _, _ = key.shape

  # The chunked loop requires whole chunks (or a single short sequence).
  # TODO: Consider automatic padding to remove this constraint.
  if query_length % query_chunk_size != 0 and query_length > query_chunk_size:
    raise ValueError(
        'Sequence length of the query vector %d needs to be less '
        'than, or a multiple of the query_chunk_size %d.'
        % (query_length, query_chunk_size)
    )
  if key_length % key_chunk_size != 0 and key_length > key_chunk_size:
    raise ValueError(
        'Sequence length of the key/value vector %d needs to be less '
        'than, or a multiple of the key_chunk_size %d.'
        % (key_length, key_chunk_size)
    )
  # Short sequences simply use one chunk covering the whole length.
  query_chunk_size = min(query_chunk_size, query_length)
  key_chunk_size = min(key_chunk_size, key_length)
  if bias is not None:
    broadcastable_to = (
        batch_size,
        heads,
        queries_per_head,
        query_length,
        key_length,
    )
    # Check that bias is broadcastable as expected:
    for bias_dim, broadcast_dim in zip(bias.shape, broadcastable_to):
      if bias_dim not in [1, broadcast_dim]:
        raise ValueError(
            f'Expected bias dimensions {bias.shape} to be broadcastable to'
            f' {broadcastable_to}.'
        )
  if enable_dropout and dropout_rate > 0.0:
    # Precompute dropout for the whole score matrix; chunks are sliced from it
    # inside bias_fn.
    drop_shape = [batch_size, heads, queries_per_head, query_length, key_length]
    if broadcast_dropout:
      # We mimick the semantics of T5 and broadcast along the "length" dim.
      drop_shape[-2] = 1  # query_length dim
    precomputed_dropout = random.bernoulli(
        dropout_rng, dropout_rate, drop_shape
    )

  def bias_fn(
      query_chunk_idx: int,
      key_chunk_idx: int,
  ) -> Array:
    # Assembles the additive bias (user bias + causal mask + dropout) for one
    # (query chunk, key chunk) block.
    query_offset = query_chunk_idx * query_chunk_size
    key_offset = key_chunk_idx * key_chunk_size
    local_bias = jnp.zeros((1, 1, 1, 1, 1))
    if bias is not None:
      # If bias is not broadcasted yet, dynamic slice would fail with full slice
      # size. In this case we keep the bias unbroadcasted.
      slice_q_len = min(bias.shape[-2], query_chunk_size)
      slice_k_len = min(bias.shape[-1], key_chunk_size)
      local_bias = lax.dynamic_slice(
          bias,
          # query_offset and key_offset might be > 1 but bias dims might
          # not yet be broadcasted. We rely on the protection against
          # out-of-bounds array accesses built into dynamic_slice.
          start_indices=(0, 0, 0, query_offset, key_offset),
          slice_sizes=(*bias.shape[:3], slice_q_len, slice_k_len),
      )
    if causal_mask:
      causal = _local_causal_bias(
          query_chunk_size, key_chunk_size, query_offset, key_offset
      )
      # add batch, head, and queries_per_head dims
      local_bias += causal.reshape(1, 1, 1, *causal.shape)
    # We implement dropout as part of the bias, which is additive to the
    # attention scores. In some other implementations it is treated as a
    # multiplicative factor applied to the probabilities after softmax.
    if enable_dropout and dropout_rate > 0.0:
      with jax.named_scope('dropout'):
        # If dropout is not broadcasted yet, we need the collapsed dims.
        slice_q_len = min(precomputed_dropout.shape[-2], query_chunk_size)
        slice_k_len = min(precomputed_dropout.shape[-1], key_chunk_size)
        dropout_slice = lax.dynamic_slice(
            precomputed_dropout,
            # query_offset and key_offset might be > 1 but dropout dims might
            # not yet be broadcasted. We rely on the protection against
            # out-of-bounds array accesses built into dynamic_slice.
            start_indices=(0, 0, 0, query_offset, key_offset),
            slice_sizes=(
                *precomputed_dropout.shape[:3],
                slice_q_len,
                slice_k_len,
            ),
        )
        # A large negative bias effectively zeroes the dropped positions.
        local_bias -= dropout_slice * 1e6
    return local_bias

  # NOTE: T5 does not explicitly rescale the attention logits by
  # 1/sqrt(depth_kq)! This is folded into the initializers of the
  # linear transformations, which is equivalent under Adafactor.
  if rescale_logits:
    depth = query.shape[-1]
    query = query / jnp.sqrt(depth).astype(dtype)

  # Casting logits and softmax computation for float32 for model stability.
  if float32_logits:
    query = query.astype(jnp.float32)
    key = key.astype(jnp.float32)

  result = _memory_efficient_attention(
      query,
      key,
      value,
      bias_fn,
      query_chunk_size=query_chunk_size,
      key_chunk_size=key_chunk_size,
      precision=precision,
      dtype=dtype,
      use_extra_logit=use_extra_logit,
      causal_mask=causal_mask,
  )
  # Restore the original leading batch dimensions.
  result = result.reshape(*orig_batch_dims, *result.shape[1:])
  return result
def dot_product_attention_multiquery(
    query: Array,
    key: Array,
    value: Array,
    bias: Optional[Array] = None,
    broadcast_dropout: bool = True,
    rescale_logits: bool = False,
    dropout_rng: Optional[PRNGKey] = None,
    dropout_rate: float = 0.0,
    enable_dropout: bool = True,
    dtype: DType = jnp.float32,
    precision: Optional[lax.Precision] = None,
    use_extra_logit: bool = False,
    float32_logits: bool = False,
    causal_mask: bool = False,
    query_chunk_size: int = 1024,
    key_chunk_size: int = 2048,
) -> Array:
  """Memory-efficient multi-query attention (shared key/value across heads).

  Multi-query attention is the variant of multi-head dot-product attention
  (https://arxiv.org/abs/1706.03762) where key and value have a single head
  while the query keeps one or more heads. This wrapper reduces the problem
  to `dot_product_attention_queries_per_head` and is equivalent to
  `dense_attention.dot_product_attention_multiquery`, but uses the
  memory-efficient attention algorithm (https://arxiv.org/abs/2112.05682),
  also known as FlashAttention (https://arxiv.org/abs/2205.14135).

  Note: query, key, value needn't have any batch dimensions.

  Args:
    query: queries with shape `[batch..., q_length, num_heads,
      qk_depth_per_head]`.
    key: keys with shape `[batch..., kv_length, qk_depth_per_head]`.
    value: values with shape `[batch..., kv_length, v_depth_per_head]`.
    bias: additive bias for the attention weights, broadcastable to
      `[batch..., num_heads, q_length, kv_length]`; used for causal masks,
      padding masks, proximity bias, etc.
    broadcast_dropout: use a broadcasted dropout along batch dims.
    rescale_logits: whether to rescale `query` logits by 1/sqrt(depth_kq).
    dropout_rng: JAX PRNGKey to be used for dropout.
    dropout_rate: dropout rate.
    enable_dropout: whether to apply dropout.
    dtype: the dtype of the computation (default: float32).
    precision: numerical precision of the computation, see `jax.lax.Precision`
      for details.
    use_extra_logit: whether to include a virtual extra logit equal to zero.
    float32_logits: if True, compute logits in float32 to avoid numerical
      issues with bfloat16.
    causal_mask: apply a causal mask, alternatively or in addition to `bias`.
    query_chunk_size: positive integer controlling the query chunk size.
    key_chunk_size: positive integer controlling the key chunk size.

  Returns:
    Output of shape `[batch..., length, num_heads, v_depth_per_head]`.
  """
  # Reinterpret the inputs in the generalized queries-per-head layout: the
  # query's num_heads axis becomes queries_per_head under a single real head,
  # while key/value (and bias) gain a singleton num_heads axis.
  reshaped_query = jnp.expand_dims(query, axis=-3)
  reshaped_key = jnp.expand_dims(key, axis=-2)
  reshaped_value = jnp.expand_dims(value, axis=-2)
  reshaped_bias = None if bias is None else jnp.expand_dims(bias, axis=-4)
  attended = dot_product_attention_queries_per_head(
      reshaped_query,
      reshaped_key,
      reshaped_value,
      reshaped_bias,
      broadcast_dropout=broadcast_dropout,
      rescale_logits=rescale_logits,
      dropout_rng=dropout_rng,
      dropout_rate=dropout_rate,
      enable_dropout=enable_dropout,
      dtype=dtype,
      precision=precision,
      use_extra_logit=use_extra_logit,
      float32_logits=float32_logits,
      causal_mask=causal_mask,
      query_chunk_size=query_chunk_size,
      key_chunk_size=key_chunk_size,
  )
  # Strip the singleton num_heads axis to restore the multi-query layout.
  return jnp.squeeze(attended, axis=-3)
def dot_product_attention_multihead(
    query: Array,
    key: Array,
    value: Array,
    bias: Optional[Array] = None,
    broadcast_dropout: bool = True,
    rescale_logits: bool = False,
    dropout_rng: Optional[PRNGKey] = None,
    dropout_rate: float = 0.0,
    enable_dropout: bool = True,
    dtype: DType = jnp.float32,
    precision: Optional[lax.Precision] = None,
    use_extra_logit: bool = False,
    float32_logits: bool = False,
    causal_mask: bool = False,
    query_chunk_size: int = 1024,
    key_chunk_size: int = 2048,
) -> Array:
  """Memory-efficient multi-head dot-product attention.

  Equivalent to `dense_attention.dot_product_attention` but implemented on
  top of the chunked `dot_product_attention_queries_per_head` kernel, which
  avoids materializing the full attention matrix.

  Note: query, key, value needn't have any batch dimensions.

  Args:
    query: queries with shape `[batch..., q_length, num_heads,
      qk_depth_per_head]`.
    key: keys with shape `[batch..., kv_length, num_heads,
      qk_depth_per_head]`.
    value: values with shape `[batch..., kv_length, num_heads,
      v_depth_per_head]`.
    bias: additive bias for the attention weights, broadcastable to
      `[batch..., num_heads, q_length, kv_length]`; used for causal masks,
      padding masks, proximity bias, etc.
    broadcast_dropout: use a broadcasted dropout along batch dims.
    rescale_logits: whether to rescale `query` logits by 1/sqrt(depth_kq).
    dropout_rng: JAX PRNGKey to be used for dropout.
    dropout_rate: dropout rate.
    enable_dropout: whether to apply dropout.
    dtype: the dtype of the computation (default: float32).
    precision: numerical precision of the computation, see `jax.lax.Precision`
      for details.
    use_extra_logit: whether to include a virtual extra logit equal to zero.
    float32_logits: if True, compute logits in float32 to avoid numerical
      issues with bfloat16.
    causal_mask: apply a causal mask, alternatively or in addition to `bias`.
    query_chunk_size: positive integer controlling the query chunk size.
    key_chunk_size: positive integer controlling the key chunk size.

  Returns:
    Output of shape `[batch..., length, num_heads, v_depth_per_head]`.
  """
  # Insert a singleton queries_per_head axis:
  # query [..., q, h, d] -> [..., q, h, 1, d]; bias [..., h, q, k] ->
  # [..., h, 1, q, k].
  expanded_query = jnp.expand_dims(query, axis=-2)
  expanded_bias = None if bias is None else jnp.expand_dims(bias, axis=-3)
  attended = dot_product_attention_queries_per_head(
      expanded_query,
      key,
      value,
      expanded_bias,
      broadcast_dropout=broadcast_dropout,
      rescale_logits=rescale_logits,
      dropout_rng=dropout_rng,
      dropout_rate=dropout_rate,
      enable_dropout=enable_dropout,
      dtype=dtype,
      precision=precision,
      use_extra_logit=use_extra_logit,
      float32_logits=float32_logits,
      causal_mask=causal_mask,
      query_chunk_size=query_chunk_size,
      key_chunk_size=key_chunk_size,
  )
  # Drop the singleton queries_per_head axis again.
  return jnp.squeeze(attended, axis=-2)
| 23,576 | 35.667185 | 80 | py |
flaxformer | flaxformer-main/flaxformer/components/attention/dense_attention.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dense attention classes and mask/weighting functions."""
# pylint: disable=attribute-defined-outside-init,g-bare-generic
import abc
import functools
from typing import Callable, Optional, Tuple, Union
from aqt.jax_legacy.jax import flax_layers as aqt_flax_layers
from aqt.jax_legacy.jax import quant_config as aqt_config
from aqt.jax_legacy.jax import quantization as aqt
import chex
from flax import linen as nn
from flax.core import variables
from flax.linen import initializers
from flax.linen import partitioning as flax_partitioning
from flax.linen.linear import default_kernel_init
from flax.training import common_utils
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
from flaxformer import activation_partitioning
from flaxformer.components import dense
from flaxformer.components import embedding
from flaxformer.types import Array
from flaxformer.types import DType
from flaxformer.types import Initializer
from flaxformer.types import PRNGKey
RulesFallback = flax_partitioning.RulesFallback
def _softmax_with_extra_logit(
    x: Array,
    axis: Optional[Union[int, Tuple[int, ...]]] = -1,
) -> Array:
  """Softmax function with an additional virtual logit equal to zero.

  For compatibility with some previously trained models. Equivalent to
  adding one to the softmax denominator; in the context of attention it
  allows attending to nothing.

  Args:
    x: input to softmax
    axis: the axis or axes along which the softmax should be computed. Either
      an integer or a tuple of integers.

  Returns:
    A tensor with the same shape as x.
  """
  # Shift by max(logits, 0) for numerical stability; the shift is clamped at
  # zero so the virtual logit (which equals zero) never overflows either.
  shift = jnp.maximum(lax.stop_gradient(x.max(axis, keepdims=True)), 0)
  exp_shifted = jnp.exp(x - shift)
  # After the shift the virtual logit becomes -shift, contributing
  # exp(-shift) to the denominator.
  normalizer = exp_shifted.sum(axis, keepdims=True) + jnp.exp(-shift)
  return exp_shifted / normalizer
# ------------------------------------------------------------------------------
# Fast attention layers.
# ------------------------------------------------------------------------------
def dot_product_attention_weights(
    query: Array,
    key: Array,
    bias: Optional[Array] = None,
    broadcast_dropout: bool = True,
    rescale_logits: bool = False,
    dropout_rng: Optional[PRNGKey] = None,
    dropout_rate: float = 0.0,
    enable_dropout: bool = True,
    dtype: DType = jnp.float32,
    precision: Optional[lax.Precision] = None,
    use_extra_logit: bool = False,
    float32_logits: bool = False,
) -> Array:
  """Computes softmax-normalized attention weights for query/key pairs.

  Implements the weight-computation half of dot-product attention
  (https://arxiv.org/abs/1706.03762): logits are formed from query-key inner
  products, optionally rescaled and biased, normalized with a softmax, and
  then attention dropout is optionally applied.

  Note: query and key needn't have any batch dimensions.

  Args:
    query: queries with shape `[batch..., q_length, num_heads,
      qk_depth_per_head]`.
    key: keys with shape `[batch..., kv_length, num_heads,
      qk_depth_per_head]`.
    bias: additive bias for the attention weights, broadcastable to
      `[batch..., num_heads, q_length, kv_length]`; used for causal masks,
      padding masks, proximity bias, etc.
    broadcast_dropout: use a broadcasted dropout along batch dims.
    rescale_logits: whether to rescale `query` logits by 1/sqrt(depth_kq).
    dropout_rng: JAX PRNGKey to be used for dropout.
    dropout_rate: dropout rate.
    enable_dropout: whether to apply dropout.
    dtype: the dtype of the computation (default: float32).
    precision: numerical precision of the computation, see `jax.lax.Precision`
      for details.
    use_extra_logit: whether to include a virtual extra logit equal to zero.
    float32_logits: if True, compute logits in float32 to avoid numerical
      issues with bfloat16.

  Returns:
    Attention weights of shape `[batch..., num_heads, q_length, kv_length]`.
  """
  assert query.ndim == key.ndim, 'q, k must have same rank.'
  assert query.shape[:-3] == key.shape[:-3], 'q, k batch dims must match.'
  assert query.shape[-2] == key.shape[-2], 'q, k num_heads must match.'
  assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'

  # NOTE: T5 folds the 1/sqrt(depth_kq) rescaling into the initializers of the
  # linear transformations (equivalent under Adafactor), so it is opt-in here.
  if rescale_logits:
    head_depth = query.shape[-1]
    query = query / jnp.sqrt(head_depth).astype(dtype)

  # Optionally run the logits and softmax in float32 for bfloat16 stability.
  if float32_logits:
    query = query.astype(jnp.float32)
    key = key.astype(jnp.float32)

  # Logits have shape (batch..., num_heads, q_length, kv_length).
  logits = jnp.einsum('...qhd,...khd->...hqk', query, key, precision=precision)

  # Apply attention bias: masking, dropout, proximity bias, etc.
  if bias is not None:
    logits = logits + bias.astype(logits.dtype)

  # Normalize; the extra-logit variant adds a virtual zero logit.
  softmax_fn = _softmax_with_extra_logit if use_extra_logit else jax.nn.softmax
  weights = softmax_fn(logits).astype(dtype)

  # Apply attention dropout.
  if enable_dropout and dropout_rate > 0.0:
    keep_prob = 1.0 - dropout_rate
    if broadcast_dropout:
      # Mirror T5 and broadcast the dropout mask along the query-length dim.
      keep_shape = list(weights.shape)
      keep_shape[-2] = 1
      keep = random.bernoulli(dropout_rng, keep_prob, keep_shape)
      keep = jnp.broadcast_to(keep, weights.shape)
    else:
      keep = random.bernoulli(dropout_rng, keep_prob, weights.shape)
    scale = keep.astype(weights.dtype) / jnp.asarray(keep_prob, dtype=dtype)
    weights = weights * scale
  return weights
def apply_dot_product_attention_weights_to_values(
    attention_weights: Array,
    value: Array,
    precision: Optional[lax.Precision] = None,
) -> Array:
  """Contracts attention weights with values along the key/value axis.

  Args:
    attention_weights: Attention weights of shape
      `[batch..., num_heads, q_length, kv_length]`, e.g. as produced by
      `dot_product_attention_weights`.
    value: Values of shape `[batch..., kv_length, num_heads, v_depth]`.
    precision: Numerical precision of the einsum; see `jax.lax.Precision`
      for details.

  Returns:
    The attention-weighted sum over values for each query position, of shape
    `[batch..., q_length, num_heads, v_depth]`.
  """
  weighted_values = jnp.einsum(
      '...hqk,...khd->...qhd',
      attention_weights,
      value,
      precision=precision,
  )
  return weighted_values
def dot_product_attention(
    query: Array,
    key: Array,
    value: Array,
    bias: Optional[Array] = None,
    broadcast_dropout: bool = True,
    rescale_logits: bool = False,
    dropout_rng: Optional[PRNGKey] = None,
    dropout_rate: float = 0.0,
    enable_dropout: bool = True,
    dtype: DType = jnp.float32,
    precision: Optional[lax.Precision] = None,
    use_extra_logit: bool = False,
    float32_logits: bool = False,
):
  """Multi-head dot-product attention (https://arxiv.org/abs/1706.03762).

  Computes attention weights from `query` and `key`, then combines `value`
  using those weights. None of the inputs require batch dimensions.

  Args:
    query: queries of shape `[batch..., q_length, num_heads,
      qk_depth_per_head]`.
    key: keys of shape `[batch..., kv_length, num_heads, qk_depth_per_head]`.
    value: values of shape `[batch..., kv_length, num_heads,
      v_depth_per_head]`.
    bias: optional bias broadcastable to `[batch..., num_heads, q_length,
      kv_length]`; used for causal masks, padding masks, proximity bias, etc.
    broadcast_dropout: use a broadcasted dropout along batch dims.
    rescale_logits: whether to rescale `query` logits by 1/sqrt(depth_kq).
    dropout_rng: JAX PRNGKey used for dropout.
    dropout_rate: dropout rate.
    enable_dropout: whether to apply dropout.
    dtype: the dtype of the computation (default: float32).
    precision: numerical precision of the computation; see
      `jax.lax.Precision` for details.
    use_extra_logit: whether to include a virtual extra logit equal to zero.
    float32_logits: if True, compute logits in float32 to avoid numerical
      issues with bfloat16.

  Returns:
    Output of shape `[batch..., length, num_heads, v_depth_per_head]`.
  """
  # Validate that all three inputs agree on rank, batch dims, heads, and
  # lengths/depths before doing any work.
  assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'
  assert (
      query.shape[:-3] == key.shape[:-3] == value.shape[:-3]
  ), 'q, k, v batch dims must match.'
  assert (
      query.shape[-2] == key.shape[-2] == value.shape[-2]
  ), 'q, k, v num_heads must match.'
  assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.'
  assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'

  # Delegate the two halves of attention to their dedicated helpers.
  weights = dot_product_attention_weights(
      query,
      key,
      bias=bias,
      broadcast_dropout=broadcast_dropout,
      rescale_logits=rescale_logits,
      dropout_rng=dropout_rng,
      dropout_rate=dropout_rate,
      enable_dropout=enable_dropout,
      dtype=dtype,
      precision=precision,
      use_extra_logit=use_extra_logit,
      float32_logits=float32_logits,
  )
  return apply_dot_product_attention_weights_to_values(
      weights, value, precision=precision
  )
def dot_product_attention_multiquery(
    query: Array,
    key: Array,
    value: Array,
    bias: Optional[Array] = None,
    broadcast_dropout: bool = True,
    rescale_logits: bool = False,
    dropout_rng: Optional[PRNGKey] = None,
    dropout_rate: float = 0.0,
    enable_dropout: bool = True,
    dtype: DType = jnp.float32,
    precision: Optional[lax.Precision] = None,
    use_extra_logit: bool = False,
    float32_logits: bool = False,
) -> Array:
  """Dot-product attention with a single shared key/value head.

  "Multi-query" attention (Shazeer 2019, https://arxiv.org/abs/1911.02150):
  the query has one or more heads while key and value each have a single
  head shared across all query heads. Inputs need no batch dimensions.

  Args:
    query: queries of shape `[batch..., q_length, num_heads,
      qk_depth_per_head]`.
    key: keys of shape `[batch..., kv_length, qk_depth_per_head]`.
    value: values of shape `[batch..., kv_length, v_depth_per_head]`.
    bias: optional bias broadcastable to `[batch..., num_heads, q_length,
      kv_length]`; used for causal masks, padding masks, proximity bias, etc.
    broadcast_dropout: use a broadcasted dropout along batch dims.
    rescale_logits: whether to rescale `query` logits by 1/sqrt(depth_kq).
    dropout_rng: JAX PRNGKey used for dropout.
    dropout_rate: dropout rate.
    enable_dropout: whether to apply dropout.
    dtype: the dtype of the computation (default: float32).
    precision: numerical precision of the computation; see
      `jax.lax.Precision` for details.
    use_extra_logit: whether to include a virtual extra logit equal to zero.
    float32_logits: if True, compute logits in float32 to avoid numerical
      issues with bfloat16.

  Returns:
    Output of shape `[batch..., length, num_heads, v_depth_per_head]`.
  """
  assert (
      key.ndim == value.ndim
  ), f'k, v must have same rank. key: {key.shape}, value: {value.shape}'
  assert (
      query.shape[:-3] == key.shape[:-2] == value.shape[:-2]
  ), f'q, k, v batch dims must match. query: {query.shape}'
  assert key.shape[-2] == value.shape[-2], 'k, v lengths must match.'
  assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'

  # Optional explicit 1/sqrt(depth) rescaling; T5 normally folds this into
  # the query kernel initializer, which is equivalent under Adafactor.
  if rescale_logits:
    query = query / jnp.sqrt(query.shape[-1]).astype(dtype)
  # Compute the logits (and softmax) in float32 for stability if requested.
  if float32_logits:
    query = query.astype(jnp.float32)
    key = key.astype(jnp.float32)

  # Logits of shape [batch..., num_heads, q_length, kv_length]; the single
  # key head ('...kd') is shared by every query head.
  logits = jnp.einsum('...qhd,...kd->...hqk', query, key, precision=precision)

  # Fold in masking / proximity / positional biases.
  if bias is not None:
    logits = logits + bias.astype(logits.dtype)

  # Normalize, optionally with the extra zero logit.
  softmax_fn = _softmax_with_extra_logit if use_extra_logit else jax.nn.softmax
  weights = softmax_fn(logits).astype(dtype)

  # Attention dropout.
  if enable_dropout and dropout_rate > 0.0:
    keep_prob = 1.0 - dropout_rate
    if broadcast_dropout:
      # Broadcast the dropout mask along the query-length dimension
      # (matching the T5 convention of broadcasting along "length").
      mask_shape = list(weights.shape)
      mask_shape[-2] = 1
      keep = random.bernoulli(dropout_rng, keep_prob, mask_shape)
      keep = jnp.broadcast_to(keep, weights.shape)
    else:
      keep = random.bernoulli(dropout_rng, keep_prob, weights.shape)
    weights = weights * (
        keep.astype(weights.dtype) / jnp.asarray(keep_prob, dtype=dtype)
    )

  # Weighted sum over values for each query position.
  return jnp.einsum(
      '...hqk,...kd->...qhd', weights, value, precision=precision
  )
class DenseAttention(metaclass=abc.ABCMeta):
  """Interface for attention layers that materialize a full QK matrix.

  Implementations compute the complete key-query attention matrix, which
  allows masking or re-weighting the attention between arbitrary key/query
  pairs via 2D `mask` and `bias` arguments.
  """

  @abc.abstractmethod
  def __call__(
      self,
      inputs_q: Array,
      inputs_kv: Array,
      mask: Optional[Array] = None,
      bias: Optional[Array] = None,
      *,
      precomputed_qkv: Optional[Array] = None,
      decode: bool = False,
      enable_dropout: bool = True,
  ) -> Array:
    """Runs attention over the input data.

    Args:
      inputs_q: input queries of shape
        `[batch_sizes..., q_length, q_features]`.
      inputs_kv: key/values of shape
        `[batch_sizes..., kv_length, kv_features]`.
      mask: attention mask of shape `[batch_sizes..., num_heads, q_length,
        kv_length]`.
      bias: attention bias of shape `[batch_sizes..., num_heads, q_length,
        kv_length]`.
      precomputed_qkv: when using fused implementations, QKVO are defined
        outside this module and the module only runs computations.
      decode: whether to prepare and use an autoregressive cache.
      enable_dropout: enables dropout if set to True.

    Returns:
      Output of shape `[batch_sizes..., length, features]`.
    """
class MultiHeadDotProductAttention(nn.Module, DenseAttention):
  """Multi-head dot-product attention.
  Attributes:
    num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
      should be divisible by the number of heads.
    use_bias: bool: whether pointwise QKVO dense transforms use bias.
    dtype: the dtype of the computation (default: float32)
    qkv_features: dimension of the key, query, and value.
    head_dim: dimension of each head. If unspecified, it defaults to
      qkv_features // num_heads.
    out_features: dimension of the last projection
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    dropout_rate: dropout rate
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    kernel_init: initializer for the kernel of the Dense layers.
    qkv_kernel_init: optional initializer for the fused qkv kernel. If None,
      kernel_init will be used instead.
    kv_kernel_init: optional initializer for the fused kv kernel. If None,
      kernel_init will be used instead.
    q_kernel_init: optional initializer for the query (q) kernel. If None,
      kernel_init will be used instead.
    bias_init: initializer for the bias of the Dense layers.
    rescale_logits: whether to explicitly rescale `query` logits inside the
      attention function. If False, the 1/sqrt(head_dim) scaling is folded
      into the query kernel initializer instead.
    attention_fn: dot_product_attention or compatible function. Accepts query,
      key, value, and returns output of shape `[bs, dim1, dim2, ..., dimN,,
      num_heads, value_channels]``
    use_extra_logit: whether to include a virtual extra logit equal to zero.
    float32_logits: bool, if True then compute logits in float32 to avoid
      numerical issues with bfloat16.
    output_projection: Project the output of `attention_fn` to `out_features`.
      If False, returns the output of `attention_fn` without a projection.
    sow_intermediates: whether to track intermediates using Module.sow.
    split_head_kernel: whether to store QKVO variables with a split head
      dimension.
    kernels_to_fuse: Which kernels to fuse, if any.
    use_rotary_embedding: whether to use rotary embeddings.
    rotary_embedding_max_timescale: maximum timescale used when generating
      the fixed positional embeddings for the rotary embedding.
    sharding_over_head_dimension: whether to shard activations over the head
      dimension; set to False when num_heads is not divisible by the number
      of activation partitions.
    q_conv: optional module applied to the query projection (Multi-DConv-Head
      Attention).
    k_conv: optional module applied to the key projection.
    v_conv: optional module applied to the value projection.
    dense_general_factory: factory used to construct the Q/K/V/O dense
      projection modules.
  """
  num_heads: int
  use_bias: bool
  dtype: DType = jnp.float32
  qkv_features: Optional[int] = None
  head_dim: Optional[int] = None
  out_features: Optional[int] = None
  broadcast_dropout: bool = True
  dropout_rate: float = 0.0
  precision: Optional[lax.Precision] = None
  kernel_init: Initializer = default_kernel_init # pytype: disable=annotation-type-mismatch # jax-types
  qkv_kernel_init: Optional[Initializer] = None
  kv_kernel_init: Optional[Initializer] = None
  q_kernel_init: Optional[Initializer] = None
  bias_init: Initializer = initializers.zeros
  rescale_logits: bool = False
  attention_fn: Callable[[Array, Array, Array], Array] = staticmethod(
      dot_product_attention
  )
  use_extra_logit: bool = False
  float32_logits: bool = False
  output_projection: bool = True
  # TODO: Remove out_features and output_projection.
  sow_intermediates: bool = False
  split_head_kernel: bool = False
  kernels_to_fuse: Optional[str] = None
  use_rotary_embedding: bool = False
  rotary_embedding_max_timescale: float = 1e4
  # Whether to shard over the head dimension, setting this to False when the
  # number of heads is not divisible your activation num_partitions
  sharding_over_head_dimension: bool = True
  q_conv: Optional[nn.Module] = None
  k_conv: Optional[nn.Module] = None
  v_conv: Optional[nn.Module] = None
  dense_general_factory: Callable[..., nn.Module] = dense.DenseGeneral
  def update_cache_prefill(
      self,
      key: Array,
      value: Array,
      cached_key: variables.Variable,
      cached_value: variables.Variable,
      cache_index: variables.Variable,
      prefill_lengths: Array,
  ) -> Tuple[Array, Array, Array, Array, Array, Array]:
    """Update the autoregressive cache for multiple timesteps at once.
    This is useful for things like a prefix-lm where the encoder section of the
    input is visible bidirectionally. The key and value for this section need to
    be computed in a single shot, as a step by step approach would result in
    causal attention.
    Args:
      key: The calculated key used in attention. [batch..., length, num_heads,
        features_per_head]
      value: The calculated value used in attention. [batch..., length,
        num_heads, features_per_head]
      cached_key: The cache of previous keys. [batch..., num_heads,
        features_per_head, length]
      cached_value: The cache of previous values. [batch..., num_heads,
        features_per_head, length]
      cache_index: The timestep that we are currently calculating the key and
        value for. [batch]
      prefill_lengths: The number of timesteps we should fill in the cache.
        [batch]
    Returns:
      The key, value, and the last timestep we just filled in the cache.
      We also return the new cache values for now because assigning to a
      variable inside of a method doesn't work. These returns will be removed
      eventually.
    """
    # Make a reference to the data underlaying the variable for ease of
    # use.
    cache_index.value = prefill_lengths
    # Note, the cache index is now a vector
    # of batch size so that each example can start just after it's
    # prefix which can be different lengths for different examples.
    cur_index = cache_index.value
    # Move the sequence dimension to the end to match the cache shapes.
    key_cached = jnp.moveaxis(key, -3, -1)
    value_cached = jnp.moveaxis(value, -3, -1)
    # Reshape the index so the batch is at the beginning, default
    # broadcasting behavior is to add singleton dims to the front but
    # we need them at the end.
    batch_first_index = jnp.reshape(
        cur_index, (-1,) + tuple(1 for _ in range(cached_key.value.ndim - 1))
    )
    # Calculate a mask that will set any position past the prefix to zero
    # when applied to the key.
    key_mask = (
        lax.broadcasted_iota(
            jnp.int32, cached_key.value.shape, cached_key.value.ndim - 1
        )
        < batch_first_index
    )
    value_mask = (
        lax.broadcasted_iota(
            jnp.int32, cached_value.value.shape, cached_value.value.ndim - 1
        )
        < batch_first_index
    )
    # Set the caches with the calculated key and values but hide anything
    # past the prefix.
    cached_key_value = key_cached * key_mask
    cached_value_value = value_cached * value_mask
    return (
        key,
        value,
        cur_index,
        cached_key_value,
        cached_value_value,
        prefill_lengths,
    )
  def update_cache_decode(
      self,
      key: Array,
      value: Array,
      cached_key: variables.Variable,
      cached_value: variables.Variable,
      cache_index: variables.Variable,
  ) -> Tuple[Array, Array, Array, Array, Array, Array]:
    """Update the next timestep in the autoregressive cache.
    This is used during step by step decoding where each key and value we get
    are a single (the next) timestep.
    Args:
      key: The calculated key used in attention. [batch..., 1, num_heads,
        features_per_head]
      value: The calculated value used in attention. [batch..., 1, num_heads,
        features_per_head]
      cached_key: The cache of previous keys. [batch..., num_heads,
        features_per_head, length]
      cached_value: The cache of previous values. [batch..., num_heads,
        features_per_head, length]
      cache_index: The timestep that we are currently calculating the key and
        value for. [batch] if we are decoding after doing a prefill or [1] if we
        are starting with step-by-step decoding.
    Returns:
      The key, value, and the last timestep we just filled in the cache. Note:
      this index is the last timestep we just fill, the actual value of the
      `cache_index` is already increased to point to the next timestep to fill.
      We also return the new cache values for now because assigning to a
      variable inside of a method doesn't work. These returns will be removed
      eventually.
    """
    cache_length = cached_key.value.shape[-1]
    # Create a OHE of the current index. NOTE: the index is increased
    # below.
    # Note: We reshape the index into a column vector so that it will work
    # if the index is a scalar or a vector with different cache positions
    # from different elements in a batch.
    cur_index = jnp.reshape(cache_index.value, (-1,))
    one_hot_indices = jax.nn.one_hot(cur_index, cache_length, dtype=key.dtype)
    # In order to update the key, value caches with the current key and
    # value, we move the length axis to the back, similar to what we did
    # for the cached ones above.
    # Note these are currently the key and value of a single position,
    # since we feed one position at a time.
    one_token_key = jnp.moveaxis(key, -3, -1)
    one_token_value = jnp.moveaxis(value, -3, -1)
    # The one hot indices are now either [1, length] for a scalar index or
    # [batch size, length] for examples where there are different lengths
    # of prefixes. We need to add dims for num_heads and num_features as
    # broadcasting doesn't work for the batched version.
    one_hot_indices = jnp.expand_dims(
        jnp.expand_dims(one_hot_indices, axis=1), axis=1
    )
    # Update key, value caches with our new 1d spatial slices.
    # We implement an efficient scatter into the cache via one-hot
    # broadcast and addition.
    # Key/Value have seq lengths of 1 while one_hot has a seq_length
    # of length. key/value will broadcast their value to each timestep
    # and the onehot will mask all but the correct timesteps.
    key = cached_key.value + one_token_key * one_hot_indices
    value = cached_value.value + one_token_value * one_hot_indices
    cached_key_value = key
    cached_value_value = value
    cache_index_value = cache_index.value + 1
    # Move the keys and values back to their original shapes.
    key = jnp.moveaxis(key, -1, -3)
    value = jnp.moveaxis(value, -1, -3)
    return (
        key,
        value,
        cur_index,
        cached_key_value,
        cached_value_value,
        cache_index_value,
    )
  @nn.compact
  def __call__(
      self,
      inputs_q: Array,
      inputs_kv: Array,
      mask: Optional[Array] = None,
      bias: Optional[Array] = None,
      *,
      precomputed_qkv: Optional[Array] = None,
      decode: bool = False,
      enable_dropout: bool = True,
      prefill: bool = False,
      prefill_lengths: Optional[Array] = None,
  ) -> Array:
    """Applies multi-head dot product attention on the input data.
    Projects the inputs into multi-headed query, key, and value vectors,
    applies dot-product attention and project the results to an output vector.
    There are two modes: decoding and non-decoding (e.g., training). The mode is
    determined by `decode`.
    During decoding mode, this method is called twice, by `init` and
    `apply`. In the former, inputs_q: [batch..., length, qkv_features] and
    inputs_kv: [batch..., length, qkv_features]
    During apply, query, key and value all have the shape: [batch * beam, 1,
    qkv_features] where the batch dimension is added to include multiple beams.
    Note that the batch dimension is different during the init and apply calls.
    This is because the cached variables are directly passed-in during `apply`
    method. In other words, the cache variables such as `cached_key` are
    initialized with `batch` dim, expanded by tiling in the beam search function
    to `batch * beam` dimension, and passed to the `apply` method as part of a
    variable dict.
    Args:
      inputs_q: input queries of shape `[batch_sizes..., q_length, q_features]`.
      inputs_kv: key/values of shape `[batch_sizes..., kv_length, kv_features]`.
      mask: attention mask of shape `[batch_sizes..., num_heads, q_length,
        kv_length]`.
      bias: attention bias of shape `[batch_sizes..., num_heads, q_length,
        kv_length]`.
      precomputed_qkv: when using fused implementations QKVO are defined outside
        this module and we only use the module to run computations.
      decode: Whether to prepare and use an autoregressive cache.
      enable_dropout: Enables dropout if set to True.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.
    Returns:
      If output_projection is True, then output of shape
      `[batch_sizes..., length, out_features]`, where out_features is set to
      features if not provided. If output_projection is False, then output of
      shape `[batch_sizes..., length, num_heads, head_dim]`.
    """
    validate_dense_attention_call_parameter_shapes(
        inputs_q, inputs_kv, mask, bias, self.num_heads
    )
    # Fall back to the generic kernel initializer for any per-projection
    # initializers that were not explicitly provided.
    qkv_kernel_init = (
        self.qkv_kernel_init
        if self.qkv_kernel_init is not None
        else self.kernel_init
    )
    kv_kernel_init = (
        self.kv_kernel_init
        if self.kv_kernel_init is not None
        else self.kernel_init
    )
    q_kernel_init = (
        self.q_kernel_init
        if self.q_kernel_init is not None
        else self.kernel_init
    )
    if precomputed_qkv is not None:
      raise ValueError('Support for precomputed QKVO not implemented.')
    # rotary_index stays None unless a decode cache exists (set below).
    rotary_index = None
    features = self.out_features or inputs_q.shape[-1]
    qkv_features = self.qkv_features or inputs_q.shape[-1]
    if self.head_dim is None:
      head_dim = qkv_features // self.num_heads
    else:
      head_dim = self.head_dim
    if self.kernels_to_fuse and not self.split_head_kernel:
      raise ValueError(
          'Un-reshaped kernels are required when using QKV fused '
          'kernel optimization.'
      )
    # Is attention logit rescaling explicit or folded into initializer?
    if self.rescale_logits:
      query_init = q_kernel_init
    else:
      if self.kernels_to_fuse:
        raise ValueError(
            'Cannot fold in logit normalization to query '
            'initializer when using fused kernels.'
        )
      depth_scaling = jnp.sqrt(head_dim).astype(self.dtype)
      query_init = lambda *args: q_kernel_init(*args) / depth_scaling
    make_dense = functools.partial(
        self.dense_general_factory,
        axis=-1,
        bias_init=self.bias_init,
        use_bias=self.use_bias,
        dtype=self.dtype,
        precision=self.precision,
        reshape_kernel=not self.split_head_kernel,
    )
    # Project inputs_q to multi-headed q/k/v
    # dimensions are then [batch..., length, num_heads, features_per_head]
    if self.kernels_to_fuse is None:
      query = make_dense(
          kernel_init=query_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='query',
      )(inputs_q)
      key = make_dense(
          kernel_init=self.kernel_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='key',
      )(inputs_kv)
      value = make_dense(
          kernel_init=self.kernel_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='value',
      )(inputs_kv)
    # TODO: should we fuse/slice along depth or head dim?
    elif self.kernels_to_fuse == 'qkv':
      if inputs_q is not inputs_kv:
        raise ValueError(
            'qkv fusion is only supported in self-attention mode '
            '(when inputs_q is inputs_kv).'
        )
      # 'qkv' fusion mode implies self-attention
      qkv = make_dense(
          kernel_init=qkv_kernel_init,
          features=(3, self.num_heads, head_dim),
          kernel_axis_names=['embed', 'stack', 'heads', 'kv'],
          name='qkv_fused',
      )(inputs_q)
      query = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 0, 1, -3), -3)
      key = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 1, 1, -3), -3)
      value = jnp.squeeze(lax.dynamic_slice_in_dim(qkv, 2, 1, -3), -3)
    elif self.kernels_to_fuse == 'kv':
      query = make_dense(
          kernel_init=query_init,
          features=(self.num_heads, head_dim),
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='query',
      )(inputs_q)
      kv = make_dense(
          kernel_init=kv_kernel_init,
          features=(2, self.num_heads, head_dim),
          kernel_axis_names=['embed', 'stack', 'heads', 'kv'],
          name='kv_fused',
      )(inputs_kv)
      key = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 0, 1, -3), -3)
      value = jnp.squeeze(lax.dynamic_slice_in_dim(kv, 1, 1, -3), -3)
    else:
      raise ValueError('Incorrect kernel fusion mode specified.')
    # Multi Dconv Head Attention options:
    if self.q_conv is not None:
      query = self.q_conv( # pylint: disable=not-callable
          query, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths
      )
    if self.k_conv is not None:
      key = self.k_conv( # pylint: disable=not-callable
          key, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths
      )
    if self.v_conv is not None:
      value = self.v_conv( # pylint: disable=not-callable
          value, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths
      )
    if self.sharding_over_head_dimension:
      # Note: We don't use `activation_partitioning.with_sharding_migration`
      # here because we do often want this 2D sharded. However, if rules are
      # valid, they should result in 2D sharding. We don't need to raise errors
      # if both result in 2D sharding (which with_sharding_migration does).
      if flax_partitioning.get_axis_rules():
        query = flax_partitioning.with_sharding_constraint(
            query, ('batch', 'length', 'heads', 'kv')
        )
        key = flax_partitioning.with_sharding_constraint(
            key, ('batch', 'length', 'heads', 'kv')
        )
        value = flax_partitioning.with_sharding_constraint(
            value, ('batch', 'length', 'heads', 'kv')
        )
      else:
        query = activation_partitioning.with_sharding(query, 2)
        key = activation_partitioning.with_sharding(key, 2)
        value = activation_partitioning.with_sharding(value, 2)
    query: Array = query # hint to quiet pytype.
    key: Array = key
    value: Array = value
    if prefill and decode:
      raise ValueError(
          'prefill and decode cannot both be true at the same'
          'time. If you are using a prefix LM with bidirectional '
          'attention on the inputs, please make a call with '
          'prefill=True that includes an attention mask that '
          'covers your inputs first and then make your decoding '
          'calls.'
      )
    if prefill or decode:
      # Detect if we're initializing by absence of existing cache data.
      is_initialized = self.has_variable('cache', 'cached_key')
      # The key and value have dimension
      # [batch..., length, num_heads, features_per_head], but we cache them as
      # [batch..., num_heads, features_per_head, length] as a TPU fusion
      # optimization. This also enable the "scatter via one-hot broadcast"
      # trick, which means we do a one-hot broadcast instead of a scatter/gather
      # operations, which gives a 3-4x speedup in practice.
      swap_dims = lambda x: x[:-3] + tuple(x[i] for i in [-2, -1, -3])
      cached_key = self.variable(
          'cache', 'cached_key', jnp.zeros, swap_dims(key.shape), key.dtype
      )
      cached_value = self.variable(
          'cache',
          'cached_value',
          jnp.zeros,
          swap_dims(value.shape),
          value.dtype,
      )
      cache_index = self.variable(
          'cache', 'cache_index', lambda: jnp.array(0, dtype=jnp.int32)
      )
      rotary_index = cache_index.value
      if is_initialized:
        # Here we are in "apply()".
        *batch_dims, num_heads, features_per_head, length = (
            cached_key.value.shape
        )
        if prefill:
          if prefill_lengths is None:
            # Figure out how far each element in the batch fills the cache based
            # on the mask. We index each element in the batch, the first head
            # dim (because this is always set to one), and the first query
            # vector. If there is any prefix at all, the first element in the
            # prefix would be part of it.
            prefill_lengths = jnp.sum(mask[:, 0, 0, :], axis=-1).astype(
                cache_index.value.dtype
            )
          (
              key,
              value,
              cur_index,
              cached_key_value,
              cached_value_value,
              cache_index_value,
          ) = self.update_cache_prefill(
              key, value, cached_key, cached_value, cache_index, prefill_lengths
          )
        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        elif decode:
          # Check the shape of the cached key against the input query.
          expected_shape = tuple(batch_dims) + (1, num_heads, features_per_head)
          if expected_shape != query.shape:
            raise ValueError(
                'Autoregressive cache shape error, '
                'expected query shape %s instead got %s.'
                % (expected_shape, query.shape)
            )
          (
              key,
              value,
              cur_index,
              cached_key_value,
              cached_value_value,
              cache_index_value,
          ) = self.update_cache_decode(
              key, value, cached_key, cached_value, cache_index
          )
          # Enforcing the Causal mask over previous positions and selecting only
          # the bias value for the current index is only needed during decode
          # mode where a single example is feed at a time. In prefill mode we
          # uses these as provided, that same way it is done in a normal forward
          # pass, like when computing logits during training.
          # Causal mask for cached decoder self-attention: our single query
          # position should only attend to those key positions that have already
          # been generated and cached, not the remaining zero elements.
          # (1, 1, length) represent (head dim, query length, key length)
          # query length is 1 because during decoding we deal with one
          # index.
          # The same mask is applied to all batch elements and heads.
          #
          # Add trailing dims to the current index so it can either
          # broadcast over the batch dim or it can just be batch size.
          mask = combine_masks(
              mask,
              jnp.broadcast_to(
                  jnp.arange(length), tuple(batch_dims) + (1, 1, length)
              )
              <= jnp.reshape(cur_index, (-1, 1, 1, 1)),
          )
          # Grab the correct relative attention bias during decoding. This is
          # only required during single step decoding.
          if bias is not None:
            # The bias is a full attention matrix, but during decoding we only
            # have to take a slice of it.
            # This is equivalent to bias[..., cur_index:cur_index+1, :].
            # If we are doing prefix decoding where cur index is a vector the
            # result will be [batch, heads, 1, :]. If cur_index is a scalar
            # like in encdec decoding, the result will be [1, heads, 1, :].
            # We use a one-hot einsum rather than a slice to avoid introducing
            # a Gather op that is currently lowered poorly by SPMD passes,
            # adding expensive all-reduce and all-gather operations.
            bias = jnp.einsum(
                'bq, bhqk->bhk',
                common_utils.onehot(cur_index, num_classes=length),
                bias,
            )
            bias = jnp.expand_dims(bias, 2)
        # Currently, updating a variable inside of a method is not handled
        # in flax, so we return the actual values and assign them in the main
        # compacted call for now.
        # TODO: Move variable assignment inside of the
        # cache update functions once variable references are tracked across
        # transform boundaries.
        cache_index.value = cache_index_value
        cached_key.value = cached_key_value
        cached_value.value = cached_value_value
    # Convert the boolean attention mask to an attention bias.
    if mask is not None:
      # attention mask in the form of attention bias
      attention_bias = lax.select(
          mask > 0,
          jnp.full(mask.shape, 0.0).astype(self.dtype),
          jnp.full(mask.shape, -1e10).astype(self.dtype),
      )
    else:
      attention_bias = None
    # Add provided bias term (e.g. relative position embedding).
    if bias is not None:
      attention_bias = combine_biases(attention_bias, bias)
    dropout_rng = None
    if enable_dropout and self.dropout_rate > 0.0:
      dropout_rng = self.make_rng('dropout')
    if self.use_rotary_embedding:
      # use rotary embeddings before attention
      # https://arxiv.org/abs/2104.09864
      # TODO: Put it in a new class
      dim = query.shape[-1]
      max_length = max(query.shape[1], key.shape[1])
      sin, cos = embedding.generate_fixed_pos_embedding(
          dim, max_length, max_timescale=self.rotary_embedding_max_timescale
      )
      sin = sin.astype(self.dtype)
      cos = cos.astype(self.dtype)
      query, key = embedding.apply_rotary_embedding(
          query, key, cos, sin, decode=decode, rotary_index=rotary_index
      )
    # Compute attention.
    x = self.attention_fn(
        query,
        key,
        value,
        bias=attention_bias,
        broadcast_dropout=self.broadcast_dropout,
        rescale_logits=self.rescale_logits,
        dropout_rng=dropout_rng,
        dropout_rate=self.dropout_rate,
        enable_dropout=enable_dropout,
        dtype=self.dtype,
        precision=self.precision,
        use_extra_logit=self.use_extra_logit,
        float32_logits=self.float32_logits,
    ) # pytype: disable=wrong-keyword-args
    # Without an output projection, return the raw per-head attention output.
    if not self.output_projection:
      return x
    # Back to the original inputs dimensions.
    out = self.dense_general_factory(
        features=features,
        axis=(-2, -1),
        kernel_init=self.kernel_init,
        bias_init=self.bias_init,
        use_bias=self.use_bias,
        dtype=self.dtype,
        precision=self.precision,
        reshape_kernel=not self.split_head_kernel,
        kernel_axis_names=['heads', 'kv', 'embed'],
        name='out',
    )(
        x
    ) # pytype: disable=wrong-keyword-args
    return out
class MultiQueryDotProductAttention(nn.Module, DenseAttention):
  """Multi-query dot-product attention.

  This is a variant of the MultiHeadDotProductAttention. The key and the value
  have 1 head whereas query has 1 or more heads. This variant, called
  "multi-query" attention, was introduced in Shazeer 2019
  (https://arxiv.org/abs/1911.02150).

  Attributes:
    num_heads: number of attention heads for query. Features (i.e.
      inputs_q.shape[-1]) should be divisible by the number of heads.
    use_bias: bool: whether pointwise QKVO dense transforms use bias.
    dtype: the dtype of the computation (default: float32)
    qkv_features: dimension of the key, query, and value.
    head_dim: dimension of each head. If unspecified, it defaults to
      qkv_features // num_heads.
    out_features: dimension of the last projection
    broadcast_dropout: bool: use a broadcasted dropout along batch dims.
    dropout_rate: dropout rate
    precision: numerical precision of the computation see `jax.lax.Precision`
      for details.
    kernel_init: initializer for the kernel of the Dense layers.
    q_kernel_init: optional initializer for the query (q) kernel. If None,
      kernel_init will be used instead.
    bias_init: initializer for the bias of the Dense layers.
    rescale_logits: whether attention logits are rescaled explicitly by
      `attention_fn` (True), or the 1/sqrt(head_dim) scaling is folded into
      the query kernel initializer instead (False).
    attention_fn: dot_product_attention or compatible function. Accepts query,
      key, value, and returns output of shape `[bs, dim1, dim2, ..., dimN,
      num_heads, value_channels]`
    use_extra_logit: whether to use a virtual extra logit equal to zero.
    float32_logits: bool, if True then compute logits in float32 to avoid
      numerical issues with bfloat16.
    use_rotary_embedding: whether to use RoPE embeddings.
    rotary_embedding_max_timescale: maximum timescale of the fixed sinusoidal
      position embedding used when `use_rotary_embedding` is True.
    split_head_kernel: whether to store QKVO projection kernels with a split
      (heads, kv) head dimension rather than a single reshaped dimension.
    q_conv: optional module applied to the projected query (Multi-Dconv-Head
      Attention style); presumably a depthwise conv — see usage below.
    k_conv: optional module applied to the projected key.
    v_conv: optional module applied to the projected value.
    use_aqt: whether to use aqt quantization.
    weight_params: Parameters for weight quantization.
    act_params: Parameters for activation quantization.
    possibly_use_quantized_vars: forwarded to the AQT layers; whether they may
      create quantized variables in place of float ones.
  """

  num_heads: int
  use_bias: bool
  dtype: DType = jnp.float32
  qkv_features: Optional[int] = None
  head_dim: Optional[int] = None
  out_features: Optional[int] = None
  broadcast_dropout: bool = True
  dropout_rate: float = 0.0
  precision: Optional[lax.Precision] = None
  kernel_init: Initializer = default_kernel_init  # pytype: disable=annotation-type-mismatch  # jax-types
  q_kernel_init: Optional[Initializer] = None
  bias_init: Initializer = initializers.zeros
  rescale_logits: bool = False
  attention_fn: Callable[[Array, Array, Array], Array] = staticmethod(
      dot_product_attention_multiquery
  )
  use_extra_logit: bool = False
  float32_logits: bool = False
  use_rotary_embedding: bool = False
  rotary_embedding_max_timescale: float = 1e4
  split_head_kernel: bool = False
  q_conv: Optional[nn.Module] = None
  k_conv: Optional[nn.Module] = None
  v_conv: Optional[nn.Module] = None
  use_aqt: Optional[bool] = False
  weight_params: Optional[aqt.QuantOps.WeightParams] = None
  act_params: Optional[aqt.QuantOps.ActHParams] = None
  possibly_use_quantized_vars: bool = False

  def update_cache_prefill(
      self,
      key: Array,
      value: Array,
      cached_key: variables.Variable,
      cached_value: variables.Variable,
      cache_index: variables.Variable,
      prefill_lengths: Array,
  ) -> Tuple[
      Array,
      Array,
      Array,
      variables.Variable,
      variables.Variable,
      variables.Variable,
  ]:
    """Update the autoregressive cache for multiple timesteps at once.

    This is useful for things like a prefix-lm where the encoder section of the
    input is visible bidirectionally. The key and value for this section need to
    be computed in a single shot, as a step by step approach would result in
    causal attention.

    Args:
      key: The calculated key used in attention. [batch..., length,
        features_per_head]
      value: The calculated value used in attention. [batch..., length,
        features_per_head]
      cached_key: The cache of previous keys. [batch..., features_per_head,
        length]
      cached_value: The cache of previous values. [batch..., features_per_head,
        length]
      cache_index: The timestep that we are currently calculating the key and
        value for. [batch]
      prefill_lengths: The number of timesteps we should fill in the cache.
        [batch]

    Returns:
      The key, value, and the last timestep we just filled in the cache.
    """
    # Start the per-example index at the prefix length so decoding continues
    # right after the prefilled section.
    cache_index.value = prefill_lengths
    # Make a reference to the data underlaying the variable for ease of
    # use.
    cur_index = cache_index.value
    # Move the sequence dimension to the end to match the cache shapes.
    key_cached = jnp.moveaxis(key, -2, -1)
    value_cached = jnp.moveaxis(value, -2, -1)
    # Reshape the index so the batch is at the beginning, default
    # broadcasting behavior is to add singleton dims to the front but
    # we need them at the end.
    batch_first_index = jnp.reshape(
        cur_index, (-1,) + tuple(1 for _ in range(cached_key.value.ndim - 1))
    )
    # Calculate a mask that will set any position past the prefix to zero
    # when applied to the key.
    key_mask = (
        lax.broadcasted_iota(
            jnp.int32, cached_key.value.shape, cached_key.value.ndim - 1
        )
        < batch_first_index
    )
    value_mask = (
        lax.broadcasted_iota(
            jnp.int32, cached_value.value.shape, cached_value.value.ndim - 1
        )
        < batch_first_index
    )
    # Set the caches with the calculated key and values but hide anything
    # past the prefix.
    cached_key_value = key_cached * key_mask
    cached_value_value = value_cached * value_mask
    return (  # pytype: disable=bad-return-type  # jax-ndarray
        key,
        value,
        cur_index,
        cached_key_value,
        cached_value_value,
        prefill_lengths,
    )

  def update_cache_decode(
      self,
      key: Array,
      value: Array,
      cached_key: variables.Variable,
      cached_value: variables.Variable,
      cache_index: variables.Variable,
  ) -> Tuple[
      Array,
      Array,
      Array,
      variables.Variable,
      variables.Variable,
      variables.Variable,
  ]:
    """Update the next timestep in the autoregressive cache.

    This is used during step by step decoding where each key and value we get
    are a single (the next) timestep.

    Args:
      key: The calculated key used in attention. [batch..., 1,
        features_per_head]
      value: The calculated value used in attention. [batch..., 1,
        features_per_head]
      cached_key: The cache of previous keys. [batch..., features_per_head,
        length]
      cached_value: The cache of previous values. [batch..., features_per_head,
        length]
      cache_index: The timestep that we are currently calculating the key and
        value for. [batch]

    Returns:
      The key, value, and the last timestep we just filled in the cache. Note:
      this index is the last timestep we just fill, the actual value of the
      `cache_index` is already increased to point to the next timestep to fill.
    """
    cache_length = cached_key.value.shape[-1]
    # Create a OHE of the current index.
    # NOTE: the index is increased below.
    cur_index = jnp.reshape(cache_index.value, (-1,))
    one_hot_indices = jax.nn.one_hot(cur_index, cache_length, dtype=key.dtype)
    # In order to update the key, value caches with the current key and
    # value, we move the length axis to the back, similar to what we did
    # for the cached ones above.
    # Note these are currently the key and value of a single position,
    # since we feed one position at a time.
    # [batch..., length, features_per_head] -> [batch...,
    # features_per_head, length]
    one_token_key = jnp.moveaxis(key, -2, -1)
    one_token_value = jnp.moveaxis(value, -2, -1)
    # The one hot indices are now either [1, length] for a scalar index or
    # [batch size, length] for examples where there are different lengths
    # of prefixes. We need to add dims for and num_features as
    # broadcasting doesn't work for the batched version.
    one_hot_indices = jnp.expand_dims(one_hot_indices, axis=1)
    # Update key, value caches with our new 1d spatial slices.
    # We implement an efficient scatter into the cache via one-hot
    # broadcast and addition. The cache slot being written is assumed to
    # still hold its zero initialization, so addition acts as assignment.
    key = cached_key.value + one_token_key * one_hot_indices
    value = cached_value.value + one_token_value * one_hot_indices
    cached_key_value = key
    cached_value_value = value
    cache_index_value = cache_index.value + 1
    # Move the keys and values back to their original shapes.
    key = jnp.moveaxis(key, -1, -2)
    value = jnp.moveaxis(value, -1, -2)
    return (
        key,
        value,
        cur_index,
        cached_key_value,
        cached_value_value,
        cache_index_value,
    )

  @nn.compact
  def __call__(
      self,
      inputs_q: Array,
      inputs_kv: Array,
      mask: Optional[Array] = None,
      bias: Optional[Array] = None,
      *,
      precomputed_qkv=None,
      decode: bool = False,
      enable_dropout: bool = True,
      prefill: bool = False,
      prefill_lengths: Optional[Array] = None,
  ) -> Array:
    """Applies multi-query dot product attention on the input data.

    Projects the inputs into multi-headed query and single-headed key and value
    vectors, applies dot-product attention and project the results to an output
    vector.

    Args:
      inputs_q: input queries of shape `[batch_sizes..., q_length, q_features]`.
      inputs_kv: key/values of shape `[batch_sizes..., kv_length, kv_features]`.
      mask: attention mask of shape `[batch_sizes..., num_heads, q_length,
        kv_length]`.
      bias: attention bias of shape `[batch_sizes..., num_heads, q_length,
        kv_length]`.
      precomputed_qkv: 3-tuple of precomputed query, key, value arrays, only
        used for parallel, fused-parameter optimizations.
      decode: Whether to prepare and use an autoregressive cache.
      enable_dropout: Enables dropout if set to True.
      prefill: Whether to run a partial sequence to prefill the cache.
      prefill_lengths: The length of each partial sequence we are filling in the
        cache, lengths are inferred from the mask if not provided.

    Returns:
      output of shape `[batch_sizes..., length, features]`.
    """
    validate_dense_attention_call_parameter_shapes(
        inputs_q, inputs_kv, mask, bias, self.num_heads
    )
    q_kernel_init = (
        self.q_kernel_init
        if self.q_kernel_init is not None
        else self.kernel_init
    )
    rotary_index = None
    features = self.out_features or inputs_q.shape[-1]
    qkv_features = self.qkv_features or inputs_q.shape[-1]
    if self.head_dim is None:
      head_dim = qkv_features // self.num_heads
    else:
      head_dim = self.head_dim
    # Is attention logit rescaling explicit or folded into initializer?
    if self.rescale_logits:
      query_init = q_kernel_init
    else:
      depth_scaling = jnp.sqrt(head_dim).astype(self.dtype)
      query_init = lambda *args: q_kernel_init(*args) / depth_scaling

    def dense_output(
        features,
        axis,
        kernel_init,
        kernel_axis_names,
        name,
        inputs,
        reshape_kernel=True,
    ):
      # Projection helper: dispatches between the AQT-quantized dense layer
      # and the regular DenseGeneral based on `self.use_aqt`.
      if self.use_aqt:
        if self.weight_params is None and self.act_params is None:
          raise ValueError(
              'If use_aqt is True, either of weights or acts quantization need '
              'to be specified using arguments `weight_params` or `act_params`.'
          )
        # TODO: Push the "quantized vs not" decision down into
        # the AQT library. Currently we make that decision here, because the AQT
        # library doesn't support DenseGeneral.
        aqt_context = aqt_config.DynamicContext(
            update_bounds=False, collect_acts_stats=False
        )
        weight_prec = self.weight_params.prec if self.weight_params else None
        half_shift = (
            self.weight_params.half_shift if self.weight_params else False
        )
        aqt_hparams = aqt_flax_layers.DenseAqt.HParams(
            weight_prec=weight_prec,
            weight_half_shift=half_shift,
            quant_act=self.act_params,  # currently supports fixed bounds only.
            quant_type=aqt.QuantType.AQT,
            weight_quant_granularity=aqt_config.QuantGranularity.PER_CHANNEL,
        )
        return aqt_flax_layers.DenseAqt(
            features=features,
            hparams=aqt_hparams,
            train=enable_dropout,
            dynamic_context=aqt_context,
            paxis_name=None,
            # No "cross-replica" reduction expressed in the XLA graph at this
            # stage. Will be imposed later, automatically, by XLA SPMD.
            use_bias=self.use_bias,
            kernel_init=self.kernel_init,
            bias_init=self.bias_init,
            dtype=self.dtype,
            kernel_axis_names=kernel_axis_names,
            # we do not have reshape kernel option here but we explicitly
            # reshape kernel.
            precision=self.precision,
            possibly_use_quantized_vars=self.possibly_use_quantized_vars,
            name=name,
        )(inputs, padding_mask=None)
      else:
        return dense.DenseGeneral(
            axis=axis,
            features=features,
            bias_init=self.bias_init,
            use_bias=self.use_bias,
            dtype=self.dtype,
            kernel_init=kernel_init,
            precision=self.precision,
            kernel_axis_names=kernel_axis_names,
            reshape_kernel=reshape_kernel,
            name=name,
        )(inputs)

    # Project inputs_q to multi-headed q and single-headed k and v
    # query dimension is then [batch..., length, num_heads, features_per_head]
    # key and value dimensions are [batch..., length, features_per_head].
    if precomputed_qkv is None:
      query = dense_output(
          features=(self.num_heads, head_dim),
          axis=-1,
          kernel_init=query_init,
          kernel_axis_names=['embed', 'heads', 'kv'],
          name='query',
          inputs=inputs_q,
          reshape_kernel=not self.split_head_kernel,
      )
      key = dense_output(
          features=head_dim,
          axis=-1,
          kernel_init=self.kernel_init,
          kernel_axis_names=['embed', 'kv'],
          name='key',
          inputs=inputs_kv,
      )
      value = dense_output(
          features=head_dim,
          axis=-1,
          kernel_init=self.kernel_init,
          kernel_axis_names=['embed', 'kv'],
          name='value',
          inputs=inputs_kv,
      )
    else:
      query, key, value = precomputed_qkv
    # Multi Dconv Head Attention options:
    if self.q_conv is not None:
      query = self.q_conv(  # pylint: disable=not-callable
          query, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths
      )
    if self.k_conv is not None:
      key = self.k_conv(  # pylint: disable=not-callable
          key, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths
      )
    if self.v_conv is not None:
      value = self.v_conv(  # pylint: disable=not-callable
          value, decode=decode, prefill=prefill, prefill_lengths=prefill_lengths
      )

    sharding_prefix = 'attn_decode' if decode else 'attn_encode'
    bias_sharding = (
        f'{sharding_prefix}_batch',
        f'{sharding_prefix}_heads',
        f'{sharding_prefix}_q_length',
        f'{sharding_prefix}_kv_length',
    )
    # Note: We don't use `activation_partitioning.with_sharding_migration` here
    # because we do often want this 2D sharded. However, if rules are valid,
    # they should result in 2D sharding. We don't need to raise errors if both
    # result in 2D sharding (which with_sharding_migration does).
    if flax_partitioning.get_axis_rules():
      query = flax_partitioning.with_sharding_constraint(
          query, ('batch', 'length', 'heads', 'kv')
      )
    else:
      query = activation_partitioning.with_sharding(query, 2)

    if prefill and decode:
      raise ValueError(
          'prefill and decode cannot both be true at the same'
          'time. If you are using a prefix LM with bidirectional '
          'attention on the inputs, please make a call with '
          'prefill=True that includes an attention mask that '
          'covers your inputs first and then make your decoding '
          'calls.'
      )
    # During fast autoregressive decoding, we feed one position at a time,
    # and cache the keys and values step by step.
    if prefill or decode:
      # Detect if we're initializing by absence of existing cache data.
      is_initialized = self.has_variable('cache', 'cached_key')
      # The key and value have dimension
      # [batch..., length, features_per_head], but we cache them as
      # [batch..., features_per_head, length] as a TPU fusion
      # optimization. This also enable the "scatter via one-hot broadcast"
      # trick, which means we do a one-hot broadcast instead of a scatter/gather
      # operations, which gives a 3-4x speedup in practice.
      swap_dims = lambda x: x[:-2] + tuple(x[i] for i in [-1, -2])
      cached_key = flax_partitioning.variable_with_axes(
          'cache',
          'cached_key',
          jnp.zeros,
          swap_dims(key.shape),
          key.dtype,
          axes=('cache_batch', 'cache_kv', 'cache_length'),
          fallback=RulesFallback.NO_CONSTRAINT,
      )
      cached_value = flax_partitioning.variable_with_axes(
          'cache',
          'cached_value',
          jnp.zeros,
          swap_dims(value.shape),
          value.dtype,
          axes=('cache_batch', 'cache_kv', 'cache_length'),
          fallback=RulesFallback.NO_CONSTRAINT,
      )
      cache_index = flax_partitioning.variable_with_axes(
          'cache',
          'cache_index',
          jnp.zeros,
          query.shape[0],
          jnp.int32,
          axes=('cache_batch',),
          fallback=RulesFallback.NO_CONSTRAINT,
      )
      rotary_index = cache_index.value
      if is_initialized:
        # Here we are in "apply()".
        *batch_dims, features_per_head, length = cached_key.value.shape
        if prefill:
          # Figure out how far each element in the batch fills the cache based
          # on the mask. We index each element in the batch, the first head
          # dim (because this is always set to one), and the first query
          # vector. If there is any prefix at all, the first element in the
          # prefix would be part of it. Note, the cache index is now a vector
          # of batch size so that each example can start just after it's
          # prefix which can be different lengths for different examples.
          if prefill_lengths is None:
            prefill_lengths = jnp.sum(mask[:, 0, 0, :], axis=-1).astype(
                cache_index.value.dtype
            )
          (
              key,
              value,
              cur_index,
              cached_key_value,
              cached_value_value,
              cache_index_value,
          ) = self.update_cache_prefill(
              key, value, cached_key, cached_value, cache_index, prefill_lengths
          )
        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        elif decode:
          # Check the shape of the cached key against the input query.
          expected_query_shape = tuple(batch_dims) + (
              1,
              self.num_heads,
              features_per_head,
          )
          if expected_query_shape != query.shape:
            raise ValueError(
                'Autoregressive cache shape error, '
                'expected query shape %s instead got %s.'
                % (expected_query_shape, query.shape)
            )
          expected_key_shape = tuple(batch_dims) + (1, features_per_head)
          if expected_key_shape != key.shape:
            raise ValueError(
                'Autoregressive cache shape error, '
                'expected key shape %s instead got %s.'
                % (expected_key_shape, key.shape)
            )
          # value and key should have the same shape.
          if expected_key_shape != value.shape:
            raise ValueError(
                'Autoregressive cache shape error, '
                'expected value shape %s instead got %s.'
                % (expected_key_shape, value.shape)
            )
          (
              key,
              value,
              cur_index,
              cached_key_value,
              cached_value_value,
              cache_index_value,
          ) = self.update_cache_decode(
              key, value, cached_key, cached_value, cache_index
          )
          # Enforcing the Causal mask over previous positions and selecting only
          # the bias value for the current index is only needed during decode
          # mode where a single example is feed at a time. In prefill mode we
          # uses these as provided, that same way it is done in a normal forward
          # pass, like when computing logits during training.
          # Causal mask for cached decoder self-attention: our single query
          # position should only attend to those key positions that have already
          # been generated and cached, not the remaining zero elements.
          #
          # (1, 1, length) represent (head dim, query length, key length)
          # query length is 1 because during decoding we deal with one
          # index.
          # The same mask is applied to all batch elements and heads.
          #
          # Add trailing dims to the current index so it can either
          # broadcast over the batch dim or it can just be batch size.
          mask = combine_masks(
              mask,
              jnp.broadcast_to(
                  jnp.arange(length), tuple(batch_dims) + (1, 1, length)
              )
              <= jnp.reshape(cur_index, (-1, 1, 1, 1)),
          )
          mask = flax_partitioning.with_sharding_constraint(
              mask,
              (f'{sharding_prefix}_batch', None, None, None),
              fallback=RulesFallback.NO_CONSTRAINT,
          )
          # Grab the correct relative attention bias during decoding.
          if bias is not None:
            # The bias is a full attention matrix, but during decoding we only
            # have to take a slice of it.
            # This is equivalent to bias[..., cur_index:cur_index+1, :].
            # If we are doing prefix decoding where cur index is a vector the
            # result will be [batch, heads, 1, :]. If cur_index is a scalar
            # like in encdec decoding, the result will be [1, heads, 1, :]
            # We use a one-hot einsum rather than a slice to avoid introducing
            # a Gather op that is currently lowered poorly by SPMD passes,
            # adding expensive all-reduce and all-gather operations.
            bias = jnp.einsum(
                'bq, bhqk->bhk',
                common_utils.onehot(cur_index, num_classes=length),
                bias,
            )
            bias = jnp.expand_dims(bias, 2)
            bias = flax_partitioning.with_sharding_constraint(
                bias, bias_sharding, fallback=RulesFallback.NO_CONSTRAINT
            )
        # Currently, updating a variable inside of a method is not handled
        # in flax, so we return the actual values and assign them in the main
        # compacted call for now.
        # TODO: Move variable assignment inside of the
        # cache update functions once variable references are tracked across
        # transform boundaries.
        cache_index.value = cache_index_value
        cached_key.value = cached_key_value
        cached_value.value = cached_value_value

    # Convert the boolean attention mask to an attention bias.
    if mask is not None:
      # attention mask in the form of attention bias
      attention_bias = lax.select(
          mask > 0,
          jnp.full(mask.shape, 0.0).astype(self.dtype),
          jnp.full(mask.shape, -1e10).astype(self.dtype),
      )
      attention_bias = flax_partitioning.with_sharding_constraint(
          attention_bias, bias_sharding, fallback=RulesFallback.NO_CONSTRAINT
      )
    else:
      attention_bias = None

    # Add provided bias term (e.g. relative position embedding).
    if bias is not None:
      attention_bias = combine_biases(attention_bias, bias)
      attention_bias = flax_partitioning.with_sharding_constraint(
          attention_bias, bias_sharding, fallback=RulesFallback.NO_CONSTRAINT
      )

    dropout_rng = None
    if enable_dropout and self.dropout_rate > 0.0:
      dropout_rng = self.make_rng('dropout')

    # During decode we typically want to reshard at this point from sharding by
    # by head to sharding by batch. Give new names to the sharding axes to allow
    # this reshard.
    query = flax_partitioning.with_sharding_constraint(
        query,
        (
            f'{sharding_prefix}_batch',
            f'{sharding_prefix}_q_length',
            f'{sharding_prefix}_heads',
            'kv',
        ),
        fallback=RulesFallback.NO_CONSTRAINT,
    )
    key = flax_partitioning.with_sharding_constraint(
        key,
        (f'{sharding_prefix}_batch', f'{sharding_prefix}_kv_length', 'kv'),
        fallback=RulesFallback.NO_CONSTRAINT,
    )
    value = flax_partitioning.with_sharding_constraint(
        value,
        (f'{sharding_prefix}_batch', f'{sharding_prefix}_kv_length', 'kv'),
        fallback=RulesFallback.NO_CONSTRAINT,
    )

    if self.use_rotary_embedding:
      # use rotary embeddings before attention
      # https://arxiv.org/abs/2104.09864
      # TODO: Figure out if this should be put in a new class.
      dim = query.shape[-1]
      max_length = max(query.shape[1], key.shape[1])
      sin, cos = embedding.generate_fixed_pos_embedding(
          dim, max_length, max_timescale=self.rotary_embedding_max_timescale
      )
      sin = sin.astype(self.dtype)
      cos = cos.astype(self.dtype)
      query, key = embedding.apply_rotary_embedding(
          query, key, cos, sin, decode=decode, rotary_index=rotary_index
      )

    # Apply attention.
    x = self.attention_fn(
        query,
        key,
        value,
        bias=attention_bias,
        broadcast_dropout=self.broadcast_dropout,
        rescale_logits=self.rescale_logits,
        dropout_rng=dropout_rng,
        dropout_rate=self.dropout_rate,
        enable_dropout=enable_dropout,
        dtype=self.dtype,
        precision=self.precision,
        use_extra_logit=self.use_extra_logit,
        float32_logits=self.float32_logits,
    )  # pytype: disable=wrong-keyword-args

    # During decode we typically want to reshard at this point from sharding by
    # batch to sharding by head. Return to the old names of the sharding axes to
    # allow this reshard.
    x = flax_partitioning.with_sharding_constraint(
        x,
        (
            f'{sharding_prefix}_batch',
            f'{sharding_prefix}_q_length',
            f'{sharding_prefix}_heads',
            'kv',
        ),
        fallback=RulesFallback.NO_CONSTRAINT,
    )
    x = flax_partitioning.with_sharding_constraint(
        x,
        ('batch', 'length', 'heads', 'kv'),
        fallback=RulesFallback.NO_CONSTRAINT,
    )

    if precomputed_qkv is None:
      kernel_axis_names = ['heads', 'kv', 'embed']
      # TODO: activation quantization support is unimplemented
      # here.
      if self.use_aqt and self.weight_params is not None:
        weight_prec = self.weight_params.prec if self.weight_params else None
        half_shift = (
            self.weight_params.half_shift if self.weight_params else False
        )
        aqt_hparams = aqt_flax_layers.DenseGeneralAqt.HParams(
            weight_prec=weight_prec,
            weight_half_shift=half_shift,
            quant_act=None,  # currently supports fixed bounds only.
            weight_quant_granularity=aqt_config.QuantGranularity.PER_CHANNEL,
        )
        out = aqt_flax_layers.DenseGeneralAqt(
            hparams=aqt_hparams,
            train=enable_dropout,
            possibly_use_quantized_vars=self.possibly_use_quantized_vars,
            features=features,
            axis=(-2, -1),
            kernel_init=self.kernel_init,
            bias_init=self.bias_init,
            use_bias=self.use_bias,
            dtype=self.dtype,
            precision=self.precision,
            kernel_axis_names=kernel_axis_names,
            reshape_kernel=not self.split_head_kernel,
            name='out',
        )(  # pytype: disable=wrong-arg-types
            x
        )
      else:
        # Back to the original inputs dimensions.
        out = dense.DenseGeneral(
            features=features,
            axis=(-2, -1),
            kernel_init=self.kernel_init,
            bias_init=self.bias_init,
            use_bias=self.use_bias,
            dtype=self.dtype,
            precision=self.precision,
            kernel_axis_names=kernel_axis_names,
            reshape_kernel=not self.split_head_kernel,
            name='out',
        )(  # pytype: disable=wrong-arg-types
            x
        )
    else:
      # in fused parallel layer, fused outer dense operation is external
      out = x
    return out
class LocalAttentionLayer(nn.Module, DenseAttention):
  """Performs attention on separate chunks and concatenates the results.

  This attention is local in that attention happens only within each chunk
  (as defined by the length of the strides).

  For example usage, see CANINE, that uses this to attend over character chunks.

  Attributes:
    localized_attention: The attention layer that will be applied to each chunk.
      Typically this is a `MultiHeadDotProductAttention`.
    q_chunk_width: The width of each chunk in `inputs_q`. A value of 128 is
      optimal for TPUs.
    q_chunk_stride: The number of elements to skip when moving to the next chunk
      in `inputs_q`. Typically this is equal to `q_chunk_width`.
    kv_chunk_width: The width of each chunk in `inputs_kv`. A value of 128 is
      optimal for TPUs.
    kv_chunk_stride: The number of elements to skip when moving to the next
      chunk in `inputs_kv`. Typically this is equal to `kv_chunk_width`.
    always_attend_to_first_position: Should all chunks be able to attend to the
      `inputs_kv`'s first position (e.g. a [CLS] position)?
    first_position_attends_to_all: Should the `inputs_q`'s first position be
      able to attend to all positions within the `inputs_q`?
  """

  localized_attention: DenseAttention
  q_chunk_width: int
  q_chunk_stride: int
  kv_chunk_width: int
  kv_chunk_stride: int
  always_attend_to_first_position: bool = False
  first_position_attends_to_all: bool = False

  def setup(self) -> None:
    # A stride wider than the chunk width would leave gaps between chunks and
    # silently drop positions, so reject such configurations eagerly.
    # NOTE: The q message previously read '`q_chunk_stride` < `q_chunk_width`',
    # which contradicted the guarded condition; it now matches the kv message.
    if self.q_chunk_stride > self.q_chunk_width:
      raise ValueError(
          '`q_chunk_stride` > `q_chunk_width` '
          'would cause `inputs_q` positions to get skipped.'
      )
    if self.kv_chunk_stride > self.kv_chunk_width:
      raise ValueError(
          '`kv_chunk_stride` > `kv_chunk_width` '
          'would cause `inputs_kv` positions to get skipped.'
      )

  def __call__(
      self,
      inputs_q: Array,
      inputs_kv: Array,
      mask: Optional[Array] = None,
      bias: Optional[Array] = None,
      *,
      decode: bool = False,
      enable_dropout: bool = True,
      precomputed_qkv: Optional[Array] = None,
  ) -> Array:
    """Applies local attention.

    Args:
      inputs_q: <float>[batch_sizes..., q_len, q_features].
      inputs_kv: <float>[batch_sizes..., kv_len, kv_features].
      mask: <int32>[batch_sizes..., num_heads, q_len, kv_len]. The values should
        be 1 or 0. The attention scores will effectively be set to -∞ for any
        positions in the mask that are 0, and will be unchanged for positions
        that are 1.
      bias: Unsupported.
      decode: Unsupported.
      enable_dropout: Enables dropout if set to True.
      precomputed_qkv: Precomputed QKV arrays. Not currently supported.

    Returns:
      An array with the same shape as would be expected from calling
      `localized_attention` directly.
    """
    chex.assert_shape(inputs_q, (..., None, None))
    num_batch_dims = inputs_q.ndim - 2
    batch_sizes = inputs_q.shape[:num_batch_dims]
    chex.assert_shape(inputs_kv, (*batch_sizes, None, None))
    q_len, _ = inputs_q.shape[num_batch_dims:]
    kv_len, _ = inputs_kv.shape[num_batch_dims:]
    if mask is not None:
      chex.assert_shape(mask, (*batch_sizes, None, q_len, kv_len))
    if bias is not None:
      # Each bias batch dim may either match the input batch dim or be a
      # broadcastable singleton, hence the {b, 1} "any-of" dim specs.
      chex.assert_shape(
          bias, (*({b, 1} for b in batch_sizes), None, q_len, kv_len)
      )
    if decode:
      raise ValueError(f'{type(self).__name__} does not support decoding mode')

    # Determine the chunks that we will attend *from*.
    q_chunks = []
    if self.first_position_attends_to_all:
      q_chunks.append((0, 1))
      # We must skip this first position so that our output sequence is the
      # correct length (this matters in the *from* sequence only).
      q_start = 1
    else:
      q_start = 0
    for chunk_start in range(q_start, q_len, self.q_chunk_stride):
      chunk_end = min(q_len, chunk_start + self.q_chunk_width)
      q_chunks.append((chunk_start, chunk_end))

    # Determine the chunks that we will attend *to*.
    kv_chunks = []
    if self.first_position_attends_to_all:
      kv_chunks.append((0, kv_len))
    for chunk_start in range(0, kv_len, self.kv_chunk_stride):
      chunk_end = min(kv_len, chunk_start + self.kv_chunk_width)
      kv_chunks.append((chunk_start, chunk_end))

    if len(q_chunks) != len(kv_chunks):
      raise ValueError(
          f'Expected to have same number of `q_chunks` ({q_chunks}) and '
          f'`kv_chunks` ({kv_chunks}). Check strides.'
      )

    # TODO: Can we save a bit of extra compute by slicing the Q/K/V
    # projected versions of these instead of recomputing those projections?
    # This only helps when the Q stride isn't the same as the K/V stride.

    # Length of `attention_output_chunks` and therefore `attention_output` is
    # determined by `q_chunks` to ensure correctness. The correspondence with
    # `kv_chunks` is somewhat best effort. We need to do more to enforce this.
    attention_output_chunks = []
    for q_chunk, kv_chunk in zip(q_chunks, kv_chunks):
      q_start, q_end = q_chunk
      kv_start, kv_end = kv_chunk
      inputs_q_chunk = inputs_q[..., q_start:q_end, :]
      inputs_kv_chunk = inputs_kv[..., kv_start:kv_end, :]
      if mask is not None:
        mask_chunk = mask[..., q_start:q_end, kv_start:kv_end]
      if bias is not None:
        bias_chunk = bias[..., q_start:q_end, kv_start:kv_end]
      if self.always_attend_to_first_position:
        # Prepend the first kv position (e.g. a [CLS] token) to this chunk's
        # keys/values, and extend the mask/bias accordingly.
        if mask is not None:
          cls_mask = mask[..., q_start:q_end, 0:1]
          mask_chunk = jnp.concatenate([cls_mask, mask_chunk], axis=-1)
        if bias is not None:
          cls_bias = bias[..., q_start:q_end, 0:1]
          bias_chunk = jnp.concatenate([cls_bias, bias_chunk], axis=-1)
        kv_cls = inputs_kv[..., 0:1, :]
        inputs_kv_chunk = jnp.concatenate([kv_cls, inputs_kv_chunk], axis=-2)
      attention_output_chunk = self.localized_attention(
          inputs_q=inputs_q_chunk,
          inputs_kv=inputs_kv_chunk,
          mask=mask_chunk if mask is not None else None,
          bias=bias_chunk if bias is not None else None,
          enable_dropout=enable_dropout,
      )
      chex.assert_shape(
          attention_output_chunk, (*inputs_q_chunk.shape[:-1], ...)
      )
      attention_output_chunks.append(attention_output_chunk)

    # Concatenate along the length dim (which directly follows the batch dims).
    return jnp.concatenate(attention_output_chunks, axis=num_batch_dims)
# ------------------------------------------------------------------------------
# Mask-making utility functions.
# ------------------------------------------------------------------------------
def make_attention_mask(
    query_input: Array,
    key_input: Array,
    pairwise_fn: Callable = jnp.multiply,
    extra_batch_dims: int = 0,
    dtype: DType = jnp.float32,
) -> Array:
  """Mask-making helper for attention weights.

  For 1d inputs (i.e., `[batch..., len_q]`, `[batch..., len_kv]`), the
  attention weights will be `[batch..., heads, len_q, len_kv]` and this
  function will produce `[batch..., 1, len_q, len_kv]`.

  Args:
    query_input: a batched, flat input of query_length size
    key_input: a batched, flat input of key_length size
    pairwise_fn: broadcasting elementwise comparison function
    extra_batch_dims: number of extra batch dims to add singleton axes for, none
      by default
    dtype: mask return dtype

  Returns:
    A `[batch..., 1, len_q, len_kv]` shaped mask for 1d attention.
  """
  # Compare every query position against every key position:
  # [batch..., len_q, 1] op [batch..., 1, len_kv] -> [batch..., len_q, len_kv].
  query_positions = jnp.expand_dims(query_input, axis=-1)
  key_positions = jnp.expand_dims(key_input, axis=-2)
  pairwise = pairwise_fn(query_positions, key_positions)
  # Insert a singleton head dimension: [batch..., 1, len_q, len_kv].
  headed = jnp.expand_dims(pairwise, axis=-3)
  # Prepend any requested extra singleton batch dimensions.
  headed = jnp.expand_dims(headed, axis=tuple(range(extra_batch_dims)))
  return headed.astype(dtype)
def make_causal_mask(
    x: Array, extra_batch_dims: int = 0, dtype: DType = jnp.float32
) -> Array:
  """Make a causal mask for self-attention.

  For 1d inputs (i.e., `[batch..., len]`), the self-attention weights will be
  `[batch..., heads, len, len]` and this function will produce a causal mask
  of shape `[batch..., 1, len, len]`.

  Note that a causal mask does not depend on the values of x; it only depends
  on the shape. If x has padding elements, they will not be treated in a
  special manner.

  Args:
    x: input array of shape `[batch..., len]`
    extra_batch_dims: number of batch dims to add singleton axes for, none by
      default
    dtype: mask return dtype

  Returns:
    A `[batch..., 1, len, len]` shaped causal mask for 1d attention.
  """
  # Position indices broadcast across the batch dims of x.
  positions = jnp.broadcast_to(jnp.arange(x.shape[-1], dtype=jnp.int32), x.shape)
  # Position i may attend to position j iff i >= j (lower-triangular mask).
  causal = jnp.greater_equal(
      jnp.expand_dims(positions, axis=-1), jnp.expand_dims(positions, axis=-2)
  )
  # Singleton head dim, then any extra leading singleton batch dims.
  causal = jnp.expand_dims(causal, axis=-3)
  causal = jnp.expand_dims(causal, axis=tuple(range(extra_batch_dims)))
  return causal.astype(dtype)
def combine_masks(*masks: Optional[Array], dtype: DType = jnp.float32):
  """Combine attention masks.

  Args:
    *masks: set of attention mask arguments to combine, some can be None.
    dtype: final mask dtype

  Returns:
    Combined mask, reduced by logical and, returns None if no masks given.
  """
  present = [m for m in masks if m is not None]
  if not present:
    return None
  first_rank = present[0].ndim
  assert all(
      m.ndim == first_rank for m in present
  ), f'masks must have same rank: {tuple(m.ndim for m in present)}'
  combined = present[0]
  for m in present[1:]:
    combined = jnp.logical_and(combined, m)
  return combined.astype(dtype)
def combine_biases(*masks: Optional[Array]):
  """Combine attention biases.

  Args:
    *masks: set of attention bias arguments to combine, some can be None.

  Returns:
    Combined mask, reduced by summation, returns None if no masks given.
  """
  present = [m for m in masks if m is not None]
  if not present:
    return None
  first_rank = present[0].ndim
  assert all(
      m.ndim == first_rank for m in present
  ), f'masks must have same rank: {tuple(m.ndim for m in present)}'
  total = present[0]
  for m in present[1:]:
    total = total + m
  return total
def make_decoder_mask(
    decoder_target_tokens: Array,
    dtype: DType,
    decoder_causal_attention: Optional[Array] = None,
    decoder_segment_ids: Optional[Array] = None,
) -> Array:
  """Compute the self-attention mask for a decoder.

  The result is the logical AND of up to three masks:

  1. A causal mask. When `decoder_causal_attention` is given, positions
     marked 1 there (the "inputs" portion of a prefix-LM sequence, e.g. the
     source side of a translation example) may additionally attend to each
     other bidirectionally, even at later time steps.
  2. A padding mask derived from `decoder_target_tokens > 0`.
  3. For packed examples, a mask restricting attention to positions sharing
     the same `decoder_segment_ids` value.

  Example with two packed examples (batch size 1):

    decoder_target_tokens = [[6, 7, 8, 3, 4, 5, 0]]
    decoder_segment_ids = [[1, 1, 1, 2, 2, 2, 0]]
    decoder_causal_attention = [[1, 1, 0, 1, 1, 0, 0]]

    mask = [[[[1, 1, 0, 0, 0, 0, 0],
              [1, 1, 0, 0, 0, 0, 0],
              [1, 1, 1, 0, 0, 0, 0],
              [0, 0, 0, 1, 1, 0, 0],
              [0, 0, 0, 1, 1, 0, 0],
              [0, 0, 0, 1, 1, 1, 0],
              [0, 0, 0, 0, 0, 0, 0]]]]

  The mask is square over [query length, key length]; `mask[b, 1, i, j] = 1`
  means the query at position i of example b may attend to the key at
  position j.

  Args:
    decoder_target_tokens: decoder output tokens. [batch..., length]
    dtype: dtype of the output mask.
    decoder_causal_attention: a binary mask where 1 marks positions that may
      attend bidirectionally; other positions attend only to earlier
      positions. [batch..., length]
    decoder_segment_ids: decoder segmentation info for packed examples.
      [batch..., length]

  Returns:
    The combined decoder mask of shape [batch..., 1, length, length].
  """
  # The same mask is applied to all attention heads: the head dimension is 1
  # and broadcasts.  [batch..., 1, length, length]
  causal_mask = make_causal_mask(decoder_target_tokens, dtype=dtype)
  if decoder_causal_attention is not None:
    # Positions flagged with 1 may attend to each other bidirectionally.
    bidirectional = make_attention_mask(
        decoder_causal_attention,
        decoder_causal_attention,
        jnp.logical_and,
        dtype=dtype,
    )
    attention_mask = jnp.logical_or(causal_mask, bidirectional).astype(dtype)
  else:
    attention_mask = causal_mask
  # Zero out attention to/from padding (token id 0) positions.
  nonpadding = decoder_target_tokens > 0
  padding_mask = make_attention_mask(nonpadding, nonpadding, dtype=dtype)
  if decoder_segment_ids is None:
    return combine_masks(attention_mask, padding_mask, dtype=dtype)  # pytype: disable=bad-return-type # jax-ndarray
  # Packing mask: attention stays within each packed segment.
  packing_mask = make_attention_mask(
      decoder_segment_ids, decoder_segment_ids, jnp.equal, dtype=dtype
  )
  return combine_masks(attention_mask, padding_mask, packing_mask, dtype=dtype)  # pytype: disable=bad-return-type # jax-ndarray
def validate_dense_attention_call_parameter_shapes(
    inputs_q: Array,
    inputs_kv: Array,
    mask: Optional[Array],
    bias: Optional[Array],
    num_heads: Optional[int],
):
  """Validates the shapes of parameters to DenseAttention call methods.

  Args:
    inputs_q: Query inputs of shape `[batch..., q_length, features]`.
    inputs_kv: Key/value inputs of shape `[batch..., kv_length, features]`.
    mask: Optional attention mask of shape
      `[batch..., num_heads or 1, q_length or 1, kv_length]`.
    bias: Optional attention bias of shape
      `[batch..., num_heads or 1, q_length, kv_length]`.
    num_heads: Expected number of attention heads, if already known.

  Raises:
    ValueError: If any rank or dimension is inconsistent.
  """
  if inputs_q.ndim != inputs_kv.ndim:
    raise ValueError(
        'Mismatched inputs rank: expected '
        f'inputs_q.ndim ({inputs_q.ndim}) == '
        f'inputs_kv.ndim ({inputs_kv.ndim})'
    )
  if inputs_q.ndim < 3:
    raise ValueError(f'Expected rank of inputs >= 3, was {inputs_q.ndim}')
  if inputs_q.shape[:-3] != inputs_kv.shape[:-3]:
    raise ValueError(
        'Mismatched batch dims: expected '
        f'inputs_q.shape[:-3] ({inputs_q.shape[:-3]}) == '
        f'inputs_kv.shape[:-3] ({inputs_kv.shape[:-3]})'
    )
  if mask is not None:
    # Masks carry an extra (heads) dimension relative to the inputs.
    if mask.ndim != inputs_q.ndim + 1:
      raise ValueError(
          'Mismatched ranks: expected '
          f'mask.ndim ({mask.ndim}) to be one more than '
          f'inputs_q.ndim ({inputs_q.ndim})'
      )
    if num_heads is not None:
      if mask.shape[-3] not in (1, num_heads):
        raise ValueError(
            'Mismatched num_heads: expected '
            f'mask.shape[-3] ({mask.shape[-3]}) == '
            f'num_heads ({num_heads}), or 1'
        )
    else:
      # Infer the head count from the mask for the bias checks below.
      num_heads = mask.shape[-3]
    if mask.shape[-2] not in (1, inputs_q.shape[-2]):
      raise ValueError(
          'Mismatched q_length: expected '
          f'mask.shape[-2] ({mask.shape[-2]}) == '
          f'inputs_q.shape[-2] ({inputs_q.shape[-2]}), or 1'
      )
    if mask.shape[-1] != inputs_kv.shape[-2]:
      raise ValueError(
          'Mismatched kv_length: expected '
          f'mask.shape[-1] ({mask.shape[-1]}) == '
          f'inputs_kv.shape[-2] ({inputs_kv.shape[-2]})'
      )
  if bias is not None:
    # Bias, like the mask, has an extra (heads) dimension.  The check is
    # `inputs_q.ndim + 1`, so the message says "one more" (previously it
    # incorrectly said "one less").
    if bias.ndim != inputs_q.ndim + 1:
      raise ValueError(
          'Mismatched ranks: expected '
          f'bias.ndim ({bias.ndim}) to be one more than '
          f'inputs_q.ndim ({inputs_q.ndim})'
      )
    if num_heads is not None:
      if bias.shape[-3] not in (1, num_heads):
        raise ValueError(
            'Mismatched num_heads: expected '
            f'bias.shape[-3] ({bias.shape[-3]}) == '
            f'num_heads ({num_heads}), or 1'
        )
    else:
      num_heads = bias.shape[-3]
    if bias.shape[-2] != inputs_q.shape[-2]:
      if inputs_q.shape[-2] != 1:  # TODO: Remove this exception?
        raise ValueError(
            'Mismatched q_length: expected '
            f'bias.shape[-2] ({bias.shape[-2]}) == '
            f'inputs_q.shape[-2] ({inputs_q.shape[-2]})'
        )
    if bias.shape[-1] != inputs_kv.shape[-2]:
      if inputs_kv.shape[-2] != 1:  # TODO: Remove this exception?
        raise ValueError(
            'Mismatched kv_length: expected '
            f'bias.shape[-1] ({bias.shape[-1]}) == '
            f'inputs_kv.shape[-2] ({inputs_kv.shape[-2]})'
        )
def shift_left(x, axis=-1):
  """Shift the input array to the left by one unit and pad by 0 to the right."""
  # Drop the first element along `axis`, then append a zero slot at the end.
  tail = jax.lax.slice_in_dim(x, 1, x.shape[axis], axis=axis)
  pad_shape = list(x.shape)
  pad_shape[axis] = 1
  zero_pad = jnp.zeros(pad_shape, dtype=x.dtype)
  return jnp.concatenate([tail, zero_pad], axis=axis)
def get_decoder_logit_mask(decoder_input_tokens, dtype):
  """Gets a mask that zeros-out the padding tokens in the attention logits.

  Padding tokens are normally excluded by adding a large negative number to
  their attention logits. When the magnitude of genuine logits approaches
  that constant, the attention pattern can be distorted; empirically this
  happens for large models, and multiplicatively masking the padding logits
  with this mask helps stabilize training.

  In T5 models the token id 0 serves both as padding and as the decoder BOS,
  which requires special handling of BOS positions: a position is kept if it
  holds a non-zero token, if it immediately precedes a non-zero token (a
  0-BOS), or if it is the first position of the sequence.

  TODO: Make T5 models use a separate BOS ID.

  ```
  Example (booleans shown as 1/0, batch dimension omitted):
  decoder_input_tokens     = [0, 3, 9, 0, 4, 8, 0, 0]
  ---------------------------------------------------
  decoder_input_tokens_0   = [1, 0, 0, 0, 0, 0, 0, 0]
  shifted > 0              = [1, 1, 0, 1, 1, 0, 0, 0]
  decoder_input_tokens > 0 = [0, 1, 1, 0, 1, 1, 0, 0]
  ---------------------------------------------------
  logit_mask               = [1, 1, 1, 1, 1, 1, 0, 0]
  ```

  NB: this function assumes each packed sequence in `decoder_input_tokens`
  has exactly one 0-BOS token.

  Args:
    decoder_input_tokens: input_tokens to the decoder with one 0-BOS id per
      sequence.
    dtype: output (logit mask) data type.

  Returns:
    logit mask with zeros and ones with a shape [batch, length, 1]
  """
  # True only at index 0 of the last axis; never mask the initial shifted
  # '0-BOS' logit of the decoder inputs.
  position_is_first = (
      jax.lax.broadcasted_iota(
          jnp.int32, decoder_input_tokens.shape, decoder_input_tokens.ndim - 1
      )
      == 0
  )
  keep = (
      (decoder_input_tokens > 0)
      | (shift_left(decoder_input_tokens) > 0)
      | position_is_first
  )
  return jnp.expand_dims(jnp.array(keep, dtype=dtype), axis=-1)
# ==== flaxformer/components/attention/dense_attention_test.py ====
# Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for attention classes."""
import dataclasses
import functools
import itertools
from typing import Optional
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from aqt.jax_legacy.jax import quantization as aqt
from flax import linen as nn
from flax.core import freeze
from flax.core import unfreeze
from flax.linen import partitioning as flax_partitioning
import jax
from jax import dtypes
from jax import random
import jax.numpy as jnp
import numpy as np
from flaxformer import testing_utils
from flaxformer.components import dense
from flaxformer.components.attention import dense_attention
from flaxformer.types import Array
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
AxisMetadata = flax_partitioning.AxisMetadata
class SelfAttention(dense_attention.MultiHeadDotProductAttention):
  """Self-attention special case of multi-head dot-product attention."""

  @nn.compact
  def __call__(
      self,
      inputs_q: Array,
      mask: Optional[Array] = None,
      bias: Optional[Array] = None,
      enable_dropout: bool = True,
  ):
    # Self-attention: queries, keys and values all come from `inputs_q`.
    return super().__call__(
        inputs_q,
        inputs_q,
        mask,
        bias,
        enable_dropout=enable_dropout,
    )
@dataclasses.dataclass(frozen=True)
class SelfAttentionArgs:
  """Bundle of constructor and call arguments for SelfAttention tests."""

  num_heads: int = 1
  batch_size: int = 2
  qkv_features: int = 3
  out_features: int = 4
  q_len: int = 5
  features: int = 6
  broadcast_dropout: bool = True
  dropout_rate: float = 0.1
  enable_dropout: bool = True
  use_bias: bool = True
  rescale_logits: bool = True
  decode: bool = False
  float32_logits: bool = False
  use_rotary_embedding: bool = False

  def __post_init__(self):
    # Autoregressive decoding feeds one position at a time, so the query
    # length must be 1 whenever decoding is enabled.
    assert not self.decode or self.q_len == 1

  def init_args(self):
    """Constructor kwargs for the attention module under test."""
    return {
        'num_heads': self.num_heads,
        'qkv_features': self.qkv_features,
        'out_features': self.out_features,
        'broadcast_dropout': self.broadcast_dropout,
        'dropout_rate': self.dropout_rate,
        'use_bias': self.use_bias,
        'rescale_logits': self.rescale_logits,
        'float32_logits': self.float32_logits,
        'use_rotary_embedding': self.use_rotary_embedding,
    }

  def apply_args(self):
    """Call-time kwargs: all-ones inputs, mask and bias of matching shapes."""
    attn_shape = (self.batch_size, self.num_heads, self.q_len, self.q_len)
    return {
        'inputs_q': jnp.ones((self.batch_size, self.q_len, self.features)),
        'mask': jnp.ones(attn_shape),
        'bias': jnp.ones(attn_shape),
        'enable_dropout': self.enable_dropout,
    }
class AttentionTest(parameterized.TestCase):
def _mock_initializer(self, key, shape, dtype=jnp.float_, val=1.0): # pylint: disable=unused-argument
return jnp.ones(shape, dtypes.canonicalize_dtype(dtype)) * val
def test_dot_product_attention_shape(self):
# This test only checks for shape but tries to make sure all code paths are
# reached.
dropout_rng = random.PRNGKey(0)
batch_size, num_heads, q_len, kv_len, qk_depth, v_depth = 1, 2, 3, 4, 5, 6
query = jnp.ones((batch_size, q_len, num_heads, qk_depth))
key = jnp.ones((batch_size, kv_len, num_heads, qk_depth))
value = jnp.ones((batch_size, kv_len, num_heads, v_depth))
bias = jnp.ones((batch_size, num_heads, q_len, kv_len))
args = dict(
query=query,
key=key,
value=value,
bias=bias,
rescale_logits=True,
dropout_rng=dropout_rng,
dropout_rate=0.5,
enable_dropout=True,
)
output = dense_attention.dot_product_attention(
**args, broadcast_dropout=True
)
self.assertEqual(output.shape, (batch_size, q_len, num_heads, v_depth))
# Make sure we also reach the code path where we don't broadcast dropout.
output = dense_attention.dot_product_attention(
**args, broadcast_dropout=False
)
self.assertEqual(output.shape, (batch_size, q_len, num_heads, v_depth))
def test_dot_product_attention_no_batch_dim(self):
num_heads, q_len, kv_len, qk_depth, v_depth = 1, 2, 3, 4, 5
query = jnp.ones((q_len, num_heads, qk_depth))
key = jnp.ones((kv_len, num_heads, qk_depth))
value = jnp.ones((kv_len, num_heads, v_depth))
output = dense_attention.dot_product_attention(query, key, value)
self.assertEqual(output.shape, (q_len, num_heads, v_depth))
def test_self_attention(self):
# We only test MultiHeadDotProductAttention through SelfAttention because
# we are only shape checking anyway.
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs()
model = SelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_self_attention_cast_logits_float32(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs(float32_logits=True)
model = SelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_self_attention_no_rescale_logits(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs(rescale_logits=False)
model = SelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_self_attention_no_out_features(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs(out_features=None)
model = SelfAttention(**args.init_args())
y, _ = model.init_with_output(rngs, **args.apply_args())
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.features))
def test_self_attention_no_masking(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs()
model = SelfAttention(**args.init_args())
apply_args = args.apply_args()
apply_args['mask'] = None
y, _ = model.init_with_output(rngs, **apply_args)
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_self_attention_with_decoding(self):
rngs = {'params': random.PRNGKey(0), 'dropout': random.PRNGKey(1)}
args = SelfAttentionArgs(decode=True, q_len=1)
model = SelfAttention(**args.init_args())
apply_args = args.apply_args()
apply_args['mask'] = None
apply_args['bias'] = None
params = model.init(rngs, **apply_args)
y, _ = model.apply(
params,
**apply_args,
mutable=['cache'],
rngs={'dropout': random.PRNGKey(2)},
)
self.assertEqual(y.shape, (args.batch_size, args.q_len, args.out_features))
def test_make_attention_mask_multiply_pairwise_fn(self):
decoder_target_tokens = jnp.array([[7, 0, 0], [8, 5, 0]])
attention_mask = dense_attention.make_attention_mask(
decoder_target_tokens > 0, decoder_target_tokens > 0, dtype=jnp.int32
)
expected0 = jnp.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
expected1 = jnp.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]])
self.assertEqual(attention_mask.shape, (2, 1, 3, 3))
np.testing.assert_array_equal(attention_mask[0, 0], expected0)
np.testing.assert_array_equal(attention_mask[1, 0], expected1)
  def test_make_attention_mask_equal_pairwise_fn(self):
    """Segment-id equality as pairwise_fn yields a block-diagonal mask."""
    segment_ids = jnp.array([[1, 1, 2, 2, 2, 0], [1, 1, 1, 2, 0, 0]])
    attention_mask = dense_attention.make_attention_mask(
        segment_ids, segment_ids, pairwise_fn=jnp.equal, dtype=jnp.int32
    )
    # Padding is not treated in a special way. So they need to be zeroed out
    # separately.
    expected0 = jnp.array([
        [1, 1, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0],
        [0, 0, 1, 1, 1, 0],
        [0, 0, 1, 1, 1, 0],
        [0, 0, 1, 1, 1, 0],
        [0, 0, 0, 0, 0, 1],
    ])
    expected1 = jnp.array([
        [1, 1, 1, 0, 0, 0],
        [1, 1, 1, 0, 0, 0],
        [1, 1, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 1, 1],
        [0, 0, 0, 0, 1, 1],
    ])
    self.assertEqual(attention_mask.shape, (2, 1, 6, 6))
    np.testing.assert_array_equal(attention_mask[0, 0], expected0)
    np.testing.assert_array_equal(attention_mask[1, 0], expected1)
def test_make_causal_mask_with_padding(self):
x = jnp.array([[7, 0, 0], [8, 5, 0]])
y = dense_attention.make_causal_mask(x)
self.assertEqual(y.shape, (2, 1, 3, 3))
# Padding is not treated in a special way. So they need to be zeroed out
# separately.
expected_y = jnp.array(
[[[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0]]], jnp.float32
)
np.testing.assert_allclose(y[0], expected_y)
np.testing.assert_allclose(y[1], expected_y)
def test_make_causal_mask_extra_batch_dims(self):
x = jnp.ones((3, 3, 5))
y = dense_attention.make_causal_mask(x, extra_batch_dims=2)
self.assertEqual(y.shape, (1, 1, 3, 3, 1, 5, 5))
def test_make_causal_mask(self):
x = jnp.ones((1, 3))
y = dense_attention.make_causal_mask(x)
self.assertEqual(y.shape, (1, 1, 3, 3))
expected_y = jnp.array(
[[[[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0]]]], jnp.float32
)
np.testing.assert_allclose(y, expected_y)
def test_combine_masks(self):
masks = [
jnp.array([0, 1, 0, 1], jnp.float32),
None,
jnp.array([1, 1, 1, 1], jnp.float32),
jnp.array([1, 1, 1, 0], jnp.float32),
]
y = dense_attention.combine_masks(*masks)
np.testing.assert_allclose(y, jnp.array([0, 1, 0, 0], jnp.float32))
def test_combine_biases(self):
masks = [
jnp.array([0, 1, 0, 1], jnp.float32),
None,
jnp.array([0, 1, 1, 1], jnp.float32),
jnp.array([0, 1, 1, 0], jnp.float32),
]
y = dense_attention.combine_biases(*masks)
np.testing.assert_allclose(y, jnp.array([0, 3, 2, 2], jnp.float32))
def test_make_decoder_mask_lm_unpacked(self):
decoder_target_tokens = jnp.array([6, 7, 3, 0])
mask = dense_attention.make_decoder_mask(
decoder_target_tokens=decoder_target_tokens, dtype=jnp.float32
)
expected_mask = jnp.array(
[[[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]]]
)
np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_lm_packed(self):
    """Packed LM: causal within each segment, zero across segments/padding."""
    decoder_target_tokens = jnp.array([[6, 7, 3, 4, 5, 0]])
    decoder_segment_ids = jnp.array([[1, 1, 1, 2, 2, 0]])
    mask = dense_attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_segment_ids=decoder_segment_ids,
    )
    expected_mask = jnp.array([[[
        [1, 0, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0],
        [0, 0, 0, 1, 1, 0],
        [0, 0, 0, 0, 0, 0],
    ]]])
    np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_prefix_lm_unpacked(self):
    """Prefix LM: the 'inputs' prefix attends bidirectionally."""
    decoder_target_tokens = jnp.array([[5, 6, 7, 3, 4, 0]])
    # First three positions are 'inputs' and may attend to each other.
    decoder_causal_attention = jnp.array([[1, 1, 1, 0, 0, 0]])
    mask = dense_attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention,
    )
    expected_mask = jnp.array(
        [[[
            [1, 1, 1, 0, 0, 0],
            [1, 1, 1, 0, 0, 0],
            [1, 1, 1, 0, 0, 0],
            [1, 1, 1, 1, 0, 0],
            [1, 1, 1, 1, 1, 0],
            [0, 0, 0, 0, 0, 0],
        ]]],
        dtype=jnp.float32,
    )
    np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_prefix_lm_packed(self):
    """Packed prefix LM: bidirectional prefixes, restricted per segment."""
    decoder_target_tokens = jnp.array([[5, 6, 7, 8, 3, 4, 0]])
    decoder_segment_ids = jnp.array([[1, 1, 1, 2, 2, 2, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 1, 1, 0, 0]])
    mask = dense_attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention,
        decoder_segment_ids=decoder_segment_ids,
    )
    expected_mask = jnp.array([[[
        [1, 1, 0, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [0, 0, 0, 1, 1, 0, 0],
        [0, 0, 0, 1, 1, 0, 0],
        [0, 0, 0, 1, 1, 1, 0],
        [0, 0, 0, 0, 0, 0, 0],
    ]]])
    np.testing.assert_array_equal(mask, expected_mask)
  def test_make_decoder_mask_prefix_lm_unpacked_multiple_elements(self):
    """Prefix LM with different prefix lengths per batch element."""
    decoder_target_tokens = jnp.array([[6, 7, 3, 0], [4, 5, 0, 0]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 0], [1, 0, 0, 0]])
    mask = dense_attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention,
    )
    expected_mask0 = jnp.array(
        [[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]]
    )
    expected_mask1 = jnp.array(
        [[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    )
    self.assertEqual(mask.shape, (2, 1, 4, 4))
    np.testing.assert_array_equal(mask[0, 0], expected_mask0)
    np.testing.assert_array_equal(mask[1, 0], expected_mask1)
  def test_make_decoder_mask_composite_causal_attention(self):
    """Non-contiguous 'inputs' positions attend to each other."""
    decoder_target_tokens = jnp.array([[6, 7, 3, 4, 8, 9, 0]])
    # 'Inputs' positions (0, 1, 4, 5) are split by 'targets' (2, 3).
    decoder_causal_attention = jnp.array([[1, 1, 0, 0, 1, 1, 0]])
    mask = dense_attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention,
    )
    expected_mask0 = jnp.array([
        [1, 1, 0, 0, 1, 1, 0],
        [1, 1, 0, 0, 1, 1, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [1, 1, 1, 1, 0, 0, 0],
        [1, 1, 1, 1, 1, 1, 0],
        [1, 1, 1, 1, 1, 1, 0],
        [0, 0, 0, 0, 0, 0, 0],
    ])
    self.assertEqual(mask.shape, (1, 1, 7, 7))
    np.testing.assert_array_equal(mask[0, 0], expected_mask0)
  def test_make_decoder_mask_composite_causal_attention_packed(self):
    """Composite causal attention stays confined to each packed segment."""
    decoder_target_tokens = jnp.array([[6, 7, 3, 4, 8, 9, 2, 3, 4]])
    decoder_segment_ids = jnp.array([[1, 1, 1, 1, 1, 1, 2, 2, 2]])
    decoder_causal_attention = jnp.array([[1, 1, 0, 0, 1, 1, 1, 1, 0]])
    mask = dense_attention.make_decoder_mask(
        decoder_target_tokens=decoder_target_tokens,
        dtype=jnp.float32,
        decoder_causal_attention=decoder_causal_attention,
        decoder_segment_ids=decoder_segment_ids,
    )
    expected_mask0 = jnp.array([
        [1, 1, 0, 0, 1, 1, 0, 0, 0],
        [1, 1, 0, 0, 1, 1, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0, 0, 0],
        [1, 1, 1, 1, 0, 0, 0, 0, 0],
        [1, 1, 1, 1, 1, 1, 0, 0, 0],
        [1, 1, 1, 1, 1, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 1, 1, 0],
        [0, 0, 0, 0, 0, 0, 1, 1, 0],
        [0, 0, 0, 0, 0, 0, 1, 1, 1],
    ])
    self.assertEqual(mask.shape, (1, 1, 9, 9))
    np.testing.assert_array_equal(mask[0, 0], expected_mask0)
  @parameterized.parameters({'f': 20}, {'f': 22})
  def test_multihead_dot_product_attention(self, f):
    """Compares module output against an explicit einsum reference."""
    # b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
    b, q, h, d, k = 2, 3, 4, 5, 6
    base_args = SelfAttentionArgs(
        num_heads=h,
        qkv_features=f,
        out_features=f,
        dropout_rate=0,
        rescale_logits=False,
        use_bias=False,
    )
    args = base_args.init_args()
    # When qkv_features is not divisible into num_heads * head_dim, the
    # head_dim must be given explicitly.
    if f != h * d:
      args['head_dim'] = d
    np.random.seed(0)
    inputs_q = np.random.randn(b, q, f)
    inputs_kv = np.random.randn(b, k, f)
    # Projection: [b, q, f] -> [b, q, h, d]
    # So the kernels have to be [f, h, d]
    query_kernel = np.random.randn(f, h, d)
    key_kernel = np.random.randn(f, h, d)
    value_kernel = np.random.randn(f, h, d)
    # `out` calculation: [b, q, h, d] -> [b, q, f]
    # So kernel has to be [h, d, f]
    out_kernel = np.random.randn(h, d, f)
    params = {
        'query': {'kernel': query_kernel.reshape(f, -1)},
        'key': {'kernel': key_kernel.reshape(f, -1)},
        'value': {'kernel': value_kernel.reshape(f, -1)},
        'out': {'kernel': out_kernel.reshape(-1, f)},
    }
    y = dense_attention.MultiHeadDotProductAttention(**args).apply(
        {'params': freeze(params)}, inputs_q, inputs_kv
    )
    # Reference computation with plain einsums and softmax.
    query = np.einsum('bqf,fhd->bqhd', inputs_q, query_kernel)
    key = np.einsum('bkf,fhd->bkhd', inputs_kv, key_kernel)
    value = np.einsum('bkf,fhd->bkhd', inputs_kv, value_kernel)
    logits = np.einsum('bqhd,bkhd->bhqk', query, key)
    weights = nn.softmax(logits, axis=-1)
    combined_value = np.einsum('bhqk,bkhd->bqhd', weights, value)
    y_expected = np.einsum('bqhd,hdf->bqf', combined_value, out_kernel)
    np.testing.assert_allclose(y, y_expected, rtol=1e-5, atol=1e-5)
  def test_multihead_dot_product_attention_prefill_caching(self):
    """Prefill must store keys/values up to each example's prefill length."""
    # b: batch, f: qkv_features, k: kv_len, h: num_head, d: head_dim
    b, h, d, k = 2, 3, 4, 5
    f = h * d
    # Per-example number of positions to prefill into the cache.
    prefill_lengths = np.array([3, 1])
    base_args = SelfAttentionArgs(
        num_heads=h, qkv_features=f, out_features=f, dropout_rate=0
    )
    args = base_args.init_args()
    cache = {
        'cached_key': np.zeros((b, h, d, k)),
        'cached_value': np.zeros((b, h, d, k)),
        'cache_index': np.array([0, 0]),
    }
    inputs_q = np.random.randn(b, k, f)
    inputs_kv = np.random.randn(b, k, f)
    # Mock dense general such that q, k, v projections are replaced by simple
    # reshaping.
    def mock_dense_general(self, x, **kwargs):  # pylint: disable=unused-argument
      return x.reshape(b, -1, h, d)
    with mock.patch.object(
        dense.DenseGeneral, '__call__', new=mock_dense_general
    ):
      _, mutated = dense_attention.MultiHeadDotProductAttention(**args).apply(
          {'cache': freeze(cache)},
          inputs_q,
          inputs_kv,
          decode=False,
          prefill=True,
          prefill_lengths=prefill_lengths,
          mutable=['cache'],
      )
      updated_cache = mutated['cache']
    # Perform the same mocked projection to generate the expected cache.
    # (key|value): [b, 1, h, d]
    key = mock_dense_general(None, inputs_kv)
    value = mock_dense_general(None, inputs_kv)
    # cached_(key|value): [b, h, d, k]
    # Update the our gold cache with the key and values that are part of the
    # prefix that we are prefilling the cache with. Explicit loops here avoid a
    # confusing transpose.
    for b, prefill_length in enumerate(prefill_lengths):
      for i in range(prefill_length):
        cache['cached_key'][b, :, :, i] = key[b, i, :, :]
        cache['cached_value'][b, :, :, i] = value[b, i, :, :]
        cache['cache_index'][b] = prefill_length
    for name, array in cache.items():
      np.testing.assert_allclose(array, updated_cache[name])
  def test_multihead_dot_product_attention_caching(self):
    """A single decode step must write the new key/value at cache slot 0."""
    # b: batch, f: qkv_features, k: kv_len, h: num_head, d: head_dim
    b, h, d, k = 2, 3, 4, 5
    f = h * d
    base_args = SelfAttentionArgs(
        num_heads=h, qkv_features=f, out_features=f, dropout_rate=0
    )
    args = base_args.init_args()
    cache = {
        'cached_key': np.zeros((b, h, d, k)),
        'cached_value': np.zeros((b, h, d, k)),
        'cache_index': np.array(0),
    }
    inputs_q = np.random.randn(b, 1, f)
    inputs_kv = np.random.randn(b, 1, f)
    # Mock dense general such that q, k, v projections are replaced by simple
    # reshaping.
    def mock_dense_general(self, x, **kwargs):  # pylint: disable=unused-argument
      return x.reshape(b, -1, h, d)
    with mock.patch.object(
        dense.DenseGeneral, '__call__', new=mock_dense_general
    ):
      _, mutated = dense_attention.MultiHeadDotProductAttention(**args).apply(
          {'cache': freeze(cache)},
          inputs_q,
          inputs_kv,
          decode=True,
          mutable=['cache'],
      )
      updated_cache = mutated['cache']
    # Perform the same mocked projection to generate the expected cache.
    # (key|value): [b, 1, h, d]
    key = mock_dense_general(None, inputs_kv)
    value = mock_dense_general(None, inputs_kv)
    # cached_(key|value): [b, h, d, k]
    cache['cached_key'][:, :, :, 0] = key[:, 0, :, :]
    cache['cached_value'][:, :, :, 0] = value[:, 0, :, :]
    # The index must advance by one after a single decode step.
    cache['cache_index'] = np.array(1)
    for name, array in cache.items():
      np.testing.assert_allclose(array, updated_cache[name])
def test_dot_product_attention(self):
# b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
b, q, h, d, k = 2, 3, 4, 5, 6
np.random.seed(0)
query = np.random.randn(b, q, h, d)
key = np.random.randn(b, k, h, d)
value = np.random.randn(b, k, h, d)
bias = np.random.randn(b, h, q, k)
attn_out = dense_attention.dot_product_attention(
query, key, value, bias=bias
)
logits = np.einsum('bqhd,bkhd->bhqk', query, key)
weights = jax.nn.softmax(logits + bias, axis=-1)
expected = np.einsum('bhqk,bkhd->bqhd', weights, value)
np.testing.assert_allclose(attn_out, expected, atol=1e-6)
  @parameterized.parameters({'f': 20}, {'f': 22})
  def test_multiquery_dot_product_attention(self, f):
    """Multi-query attention (shared key/value heads) vs einsum reference."""
    # b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
    b, q, h, d, k = 2, 3, 4, 5, 6
    base_args = SelfAttentionArgs(
        num_heads=h,
        qkv_features=f,
        out_features=f,
        dropout_rate=0,
        rescale_logits=False,
        use_bias=False,
    )
    args = base_args.init_args()
    # When qkv_features is not divisible into num_heads * head_dim, the
    # head_dim must be given explicitly.
    if f != h * d:
      args['head_dim'] = d
    np.random.seed(0)
    inputs_q = np.random.randn(b, q, f)
    inputs_kv = np.random.randn(b, k, f)
    # Projection: [b, q, f] -> [b, q, h, d]
    # So the query kernel has to be [f, h, d]
    query_kernel = np.random.randn(f, h, d)
    # Key/value are shared across heads in multi-query attention: [f, d].
    key_kernel = np.random.randn(f, d)
    value_kernel = np.random.randn(f, d)
    # `out` calculation: [b, q, h, d] -> [b, q, f]
    # So kernel has to be [h, d, f]
    out_kernel = np.random.randn(h, d, f)
    params = {
        'query': {'kernel': query_kernel.reshape(f, -1)},
        'key': {'kernel': key_kernel},
        'value': {'kernel': value_kernel},
        'out': {'kernel': out_kernel.reshape(-1, f)},
    }
    y = dense_attention.MultiQueryDotProductAttention(**args).apply(
        {'params': freeze(params)}, inputs_q, inputs_kv
    )
    query = np.einsum('bqf,fhd->bqhd', inputs_q, query_kernel)
    key = np.einsum('bkf,fd->bkd', inputs_kv, key_kernel)
    value = np.einsum('bkf,fd->bkd', inputs_kv, value_kernel)
    logits = np.einsum('bqhd,bkd->bhqk', query, key)
    weights = nn.softmax(logits, axis=-1)
    combined_value = np.einsum('bhqk,bkd->bqhd', weights, value)
    y_expected = np.einsum('bqhd,hdf->bqf', combined_value, out_kernel)
    np.testing.assert_allclose(y, y_expected, atol=2e-4, rtol=1e-4)
  @parameterized.named_parameters([
      dict(
          testcase_name='multi_head',
          attn_class=dense_attention.MultiHeadDotProductAttention,
      ),
      dict(
          testcase_name='multi_query',
          attn_class=dense_attention.MultiQueryDotProductAttention,
      ),
  ])
  def test_attention_prefill_logits_match_forward(self, attn_class):
    """Make sure values during a cache prefill match values from training."""
    # b: batch, k: kv_len, h: num_head, d: head_dim t: sequence length
    b, h, d, t = 2, 3, 5, 6
    # Per-example prefill (non-padding) lengths.
    ls = np.array([6, 4]).astype(np.int32)
    f = h * d
    base_args = SelfAttentionArgs(
        num_heads=h, qkv_features=f, out_features=f, dropout_rate=0
    )
    args = base_args.init_args()
    inputs_q = np.random.randn(b, t, f).astype(np.float32)
    inputs_kv = np.random.randn(b, t, f).astype(np.float32)
    bias = np.random.randn(1, h, t, t).astype(np.float32)
    mask = dense_attention.make_decoder_mask(
        (np.arange(t) < np.reshape(ls, (-1, 1))).astype(inputs_q.dtype),
        dtype=inputs_q.dtype,
    ).astype(np.float32)
    attn = attn_class(**args)
    params = attn.init(jax.random.PRNGKey(0), inputs_q, inputs_kv, mask, bias)[
        'params'
    ]
    # Calculate logits as done during training, no caching or anything.
    logits = attn.apply(
        {'params': params},
        inputs_q,
        inputs_kv,
        mask=mask,
        bias=bias,
        enable_dropout=False,
        decode=False,
        prefill=False,
    )
    # Initialize the cache.
    _, variables_with_cache = attn.apply(
        {'params': params},
        inputs_q,
        inputs_kv,
        mask=mask,
        bias=bias,
        decode=True,
        prefill=False,
        mutable=['cache'],
    )
    cache = variables_with_cache['cache']
    # Calculate the logits returned during the cache prefill step. Actions
    # taken to facilitate caching should not effect the output.
    prefill_logits, _ = attn.apply(
        {'params': params, 'cache': cache},
        inputs_q,
        inputs_kv,
        mask=mask,
        bias=bias,
        enable_dropout=False,
        decode=False,
        prefill=True,
        prefill_lengths=ls,
        mutable=['cache'],
    )
    np.testing.assert_allclose(
        prefill_logits, logits, err_msg='logits do not match.'
    )
@parameterized.named_parameters([
dict(
testcase_name='multi_head',
attn_class=dense_attention.MultiHeadDotProductAttention,
),
dict(
testcase_name='multi_query',
attn_class=dense_attention.MultiQueryDotProductAttention,
),
dict(
testcase_name='one_head',
attn_class=dense_attention.MultiHeadDotProductAttention,
num_heads=1,
),
])
def test_rotary_embedding_attention(self, attn_class, num_heads=3):
"""Makes sure enabling rotary embeddings works."""
# b: batch, k: kv_len, h: num_head, d: head_dim t: sequence length
b, h, d, t = 2, num_heads, 4, 8
ls = np.array([6, 4]).astype(np.int32)
f = h * d
base_args = SelfAttentionArgs(
num_heads=h,
qkv_features=f,
out_features=f,
dropout_rate=0,
use_rotary_embedding=True,
)
args = base_args.init_args()
inputs_q = np.random.randn(b, t, f).astype(np.float32)
inputs_kv = np.random.randn(b, t, f).astype(np.float32)
bias = np.random.randn(1, h, t, t).astype(np.float32)
mask = dense_attention.make_decoder_mask(
(np.arange(t) < np.reshape(ls, (-1, 1))).astype(inputs_q.dtype),
dtype=inputs_q.dtype,
).astype(np.float32)
attn = attn_class(**args)
params = attn.init(jax.random.PRNGKey(0), inputs_q, inputs_kv, mask, bias)[
'params'
]
# Calculate logits as done during training, no caching or anything.
logits = attn.apply(
{'params': params},
inputs_q,
inputs_kv,
mask=mask,
bias=bias,
enable_dropout=False,
decode=False,
prefill=False,
)
self.assertEqual(logits.shape, (b, t, f))
  @parameterized.named_parameters([
      dict(
          testcase_name='multi_head_causal',
          attn_class=dense_attention.MultiHeadDotProductAttention,
          causal=True,
      ),
      dict(
          testcase_name='multi_query_causal',
          attn_class=dense_attention.MultiQueryDotProductAttention,
          causal=True,
      ),
      dict(
          testcase_name='multi_head',
          attn_class=dense_attention.MultiHeadDotProductAttention,
          causal=False,
      ),
      dict(
          testcase_name='multi_query',
          attn_class=dense_attention.MultiQueryDotProductAttention,
          causal=False,
      ),
  ])
  def test_final_prefill_logits_match_first_decode(self, attn_class, causal):
    """Check logits of final input position matches in prefill and decode.

    The position of the final input token is a special case where the input to
    the model is the last input token but the output is the logits for the
    first output token. During decoding, we need to use these logits for the
    first outputs to select the next token to feed into the model. This means
    we cannot pre-cache this final position, it needs to be calculated as the
    first step in decode model.

    However, when using a prefix-LM with full visibility within the inputs, we
    also need to include this position in calculation of the rest of the
    tokens. This test validates that this final position is considered during
    prefilling and is calculated with the same attention mask by checking the
    value of the logits. During prefilling, this position should have full
    visibility to all previous tokens (either via bidirectional attention in
    the input or by virtue of being the last token with a causal mask). During
    decoding, it will also have full visibility via the causal mask.
    Therefore, the logits for this position that is output from the prefill
    call should match the (re-computation of this position) in decode mode.

    Args:
      attn_class: The class for the attention type we are testing.
      causal: Whether the input tokens have causal masking or bidirectional
        attention.
    """
    with jax.default_matmul_precision('float32'):
      # b: batch, k: kv_len, h: num_head, d: head_dim t: sequence length
      b, h, d, t = 2, 3, 5, 8
      lengths = np.array([6, 4]).astype(np.int32)
      f = h * d
      base_args = SelfAttentionArgs(
          num_heads=h,
          qkv_features=f,
          out_features=f,
          dropout_rate=0,
          float32_logits=True,
      )
      args = base_args.init_args()
      inputs_q = np.random.randn(b, t, f).astype(np.float32)
      inputs_kv = np.random.randn(b, t, f).astype(np.float32)
      bias = np.random.randn(1, h, t, t).astype(np.float32)
      # For this test we need the final token (at our prefill length) to be
      # considered in the attention like it will when it is the first decode
      # token. Note `<=` (not `<`) so the position at index `length` itself is
      # marked valid.
      valid_tokens = (np.arange(t) <= np.reshape(lengths, (-1, 1))).astype(
          inputs_q.dtype
      )
      # Sanity check: the token at each prefill length is marked valid.
      last_valid = np.take_along_axis(
          valid_tokens, np.expand_dims(lengths, axis=-1), axis=1
      )
      assert np.all(last_valid == np.ones((2, 1), dtype=last_valid.dtype))
      mask = dense_attention.make_decoder_mask(
          valid_tokens,
          # Use bidirectional attention in the input.
          decoder_causal_attention=None if causal else valid_tokens,
          dtype=inputs_q.dtype,
      )
      attn = attn_class(**args, precision='float32')
      params = attn.init(
          jax.random.PRNGKey(0), inputs_q, inputs_kv, mask, bias
      )['params']
      # Initialize the cache
      _, variables_with_cache = attn.apply(
          {'params': params},
          inputs_q,
          inputs_kv,
          decode=True,
          prefill=False,
          mutable=['cache'],
      )
      cache = variables_with_cache['cache']
      # Prefill the cache and select the logits from the position of the final
      # input token.
      prefilled_logits, vars_with_new_cache = attn.apply(
          {
              'params': params,
              'cache': cache,
          },
          inputs_q,
          inputs_kv,
          mask=mask,
          bias=bias,
          enable_dropout=False,
          decode=False,
          prefill=True,
          prefill_lengths=lengths,
          mutable=['cache'],
      )
      prefilled_cache = vars_with_new_cache['cache']
      # lengths_index: [batch, 1, 1], used to gather the final input position.
      lengths_index = jnp.reshape(lengths, (-1, 1, 1))
      final_prefilled_logits = jnp.take_along_axis(
          prefilled_logits, lengths_index, axis=1
      )
      # Do a single decode step, with the final input token as input.
      decode_logits, _ = attn.apply(
          {'params': params, 'cache': prefilled_cache},
          jnp.take_along_axis(inputs_q, lengths_index, axis=1),
          jnp.take_along_axis(inputs_kv, lengths_index, axis=1),
          mask=None,
          bias=bias,
          enable_dropout=False,
          decode=True,
          prefill=False,
          mutable=['cache'],
      )
      # The prefill-time logits at the final input position must equal the
      # logits recomputed for that same position in decode mode.
      np.testing.assert_allclose(
          decode_logits, final_prefilled_logits, atol=1e-6
      )
@parameterized.named_parameters([
dict(
testcase_name='multi_head',
attn_class=dense_attention.MultiHeadDotProductAttention,
),
dict(
testcase_name='multi_query',
attn_class=dense_attention.MultiQueryDotProductAttention,
),
])
def test_attention_causal_prefill_and_decode_match_decode(self, attn_class):
"""Make sure causal prefill->decode is the same as just decode."""
with jax.default_matmul_precision('float32'):
# b: batch, k: kv_len, h: num_head, d: head_dim t: sequence length
b, h, d, t = 2, 3, 5, 8
ls = np.array([6, 4]).astype(np.int32)
f = h * d
base_args = SelfAttentionArgs(
num_heads=h,
qkv_features=f,
out_features=f,
dropout_rate=0,
float32_logits=True,
)
args = base_args.init_args()
inputs_q = np.random.randn(b, t, f).astype(np.float32)
inputs_kv = np.random.randn(b, t, f).astype(np.float32)
bias = np.random.randn(1, h, t, t).astype(np.float32)
mask = dense_attention.make_decoder_mask(
(np.arange(t) < np.reshape(ls, (-1, 1))).astype(inputs_q.dtype),
dtype=inputs_q.dtype,
).astype(np.float32)
attn = attn_class(**args, precision='float32')
params = attn.init(
jax.random.PRNGKey(0), inputs_q, inputs_kv, mask, bias
)['params']
# Pure Decoding
# Initialize the cache
_, variables_with_cache = attn.apply(
{'params': params},
inputs_q,
inputs_kv,
decode=True,
prefill=False,
mutable=['cache'],
)
decoded_cache = variables_with_cache['cache']
# Run decoding for each input element.
decoded_logits = []
for i in range(t):
logits, vars_with_new_cache = attn.apply(
{'params': params, 'cache': decoded_cache},
inputs_q[:, i, np.newaxis],
inputs_kv[:, i, np.newaxis],
mask=None,
bias=bias,
enable_dropout=False,
decode=True,
prefill=False,
mutable=['cache'],
)
decoded_logits.append(logits)
decoded_cache = vars_with_new_cache['cache']
decoded_logits = jnp.concatenate(decoded_logits, axis=1)
# Prefilled Cache
# Initialize the cache
_, variables_with_cache = attn.apply(
{'params': params},
inputs_q,
inputs_kv,
mask=mask,
bias=bias,
decode=True,
prefill=False,
mutable=['cache'],
)
prefilled_cache = variables_with_cache['cache']
# Prefill the cache with values calculated via causal attention.
prefilled_logits, vars_with_new_cache = attn.apply(
{'params': params, 'cache': prefilled_cache},
inputs_q,
inputs_kv,
mask=mask,
bias=bias,
enable_dropout=False,
decode=False,
prefill=True,
prefill_lengths=ls,
mutable=['cache'],
)
prefilled_cache = vars_with_new_cache['cache']
# Run decoding, starting from where we finished prefilling.
prefilled_decode_logits = []
# The prefill step has two different lengths, so for the shorter one to
# reach the max number of steps we need the longer one to do some extra
# work which will be discarded. Here we pad out the input so that while we
# are running real decode steps on the shorter sequence, the longer one
# will have values to consume.
decode_steps = t - np.min(ls)
padding = decode_steps + np.max(ls) - t
padding = np.zeros((b, padding, f), dtype=inputs_q.dtype)
padded_inputs_q = np.concatenate([inputs_q, padding], axis=1)
padded_inputs_kv = np.concatenate([inputs_kv, padding], axis=1)
# Run decoding steps.
for i in range(decode_steps):
idx = np.reshape(ls + i, (-1, 1, 1))
logits, vars_with_new_cache = attn.apply(
{'params': params, 'cache': prefilled_cache},
# Select the next element based on our cache index + the number of
# decode steps taken.
np.take_along_axis(padded_inputs_q, idx, axis=1),
np.take_along_axis(padded_inputs_kv, idx, axis=1),
mask=None,
bias=bias,
enable_dropout=False,
decode=True,
prefill=False,
mutable=['cache'],
)
prefilled_cache = vars_with_new_cache['cache']
prefilled_decode_logits.append(logits)
prefilled_decode_logits = np.concatenate(prefilled_decode_logits, axis=1)
prefilled_logits = np.array(prefilled_logits)
# Copy the decode step logits into the original logits array, while
# making sure to discard any of the busy work steps.
for i, l in enumerate(ls):
prefilled_logits[i, l:] = prefilled_decode_logits[i, : t - l]
prefilled_logits[i, l:] = prefilled_decode_logits[i, : t - l]
# `DenseGeneral`, used in the attention class to project q, k, and v, can
# have some comparatively large difference when running on a slice with
# a sequence length of 1 vs a the full q, k, or v. As such, our
# comparisons need to have larger tolerances than normal.
# Check caches match
np.testing.assert_allclose(
prefilled_cache['cached_key'],
decoded_cache['cached_key'],
atol=1e-6,
err_msg='cached keys do not match',
)
np.testing.assert_allclose(
prefilled_cache['cached_value'],
decoded_cache['cached_value'],
atol=1e-6,
err_msg='cached values do not match',
)
# Check outputs match
np.testing.assert_allclose(
prefilled_logits,
decoded_logits,
atol=1e-6,
err_msg='logits do not match',
)
def test_multiquery_dot_product_attention_prefill_caching(self):
# b: batch, f: qkv_features, k: kv_len, h: num_head, d: head_dim
b, h, d, k = 2, 3, 4, 5
f = h * d
prefill_lengths = np.array([3, 1])
base_args = SelfAttentionArgs(
num_heads=h, qkv_features=f, out_features=f, dropout_rate=0
)
args = base_args.init_args()
cache = {
'cached_key': np.zeros((b, d, k)),
'cached_value': np.zeros((b, d, k)),
'cache_index': np.array([0, 0]),
}
inputs_q = np.random.randn(b, k, f)
inputs_kv = np.random.randn(b, k, f)
def mock_dense_general(self, x, **kwargs): # pylint: disable=unused-argument
# For q, replace the projection with simple reshaping.
if x is inputs_q:
return x.reshape(b, -1, h, d)
# For k and v, the feature dim is sliced to mimic down-projection.
elif x is inputs_kv:
return x[:, :, :d]
with mock.patch.object(
dense.DenseGeneral, '__call__', new=mock_dense_general
):
_, mutated = dense_attention.MultiQueryDotProductAttention(**args).apply(
{'cache': freeze(cache)},
inputs_q,
inputs_kv,
decode=False,
prefill=True,
prefill_lengths=prefill_lengths,
mutable=['cache'],
)
updated_cache = mutated['cache']
# Perform the same mocked projection to generate the expected cache.
# (key|value): [b, 1, h, d]
key = mock_dense_general(None, inputs_kv)
value = mock_dense_general(None, inputs_kv)
# cached_(key|value): [b, h, d, k]
# Update the our gold cache with the key and values that are part of the
# prefix that we are prefilling the cache with. Explicit loops here avoid a
# confusing transpose.
for b, prefill_length in enumerate(prefill_lengths):
for i in range(prefill_length):
cache['cached_key'][b, :, i] = key[b, i, :]
cache['cached_value'][b, :, i] = value[b, i, :]
cache['cache_index'][b] = prefill_length
for name, array in cache.items():
np.testing.assert_allclose(array, updated_cache[name])
  def test_multiquery_dot_product_attention_caching(self):
    """Tests that a single multi-query decode step writes cache slot 0."""
    # b: batch, f: qkv_features, k: kv_len, h: num_head, d: head_dim
    b, h, d, k = 2, 3, 4, 5
    f = h * d
    base_args = SelfAttentionArgs(
        num_heads=h, qkv_features=f, out_features=f, dropout_rate=0
    )
    args = base_args.init_args()
    # Multi-query caches carry no head axis: [batch, head_dim, kv_len].
    cache = {
        'cached_key': np.zeros((b, d, k)),
        'cached_value': np.zeros((b, d, k)),
        'cache_index': np.array(0),
    }
    inputs_q = np.random.randn(b, 1, f)
    inputs_kv = np.random.randn(b, 1, f)
    def mock_dense_general(self, x, **kwargs):  # pylint: disable=unused-argument
      # For q, replace the projection with simple reshaping.
      if x is inputs_q:
        return x.reshape(b, -1, h, d)
      # For k and v, the feature dim is sliced to mimic down-projection.
      elif x is inputs_kv:
        return x[:, :, :d]
    with mock.patch.object(
        dense.DenseGeneral, '__call__', new=mock_dense_general
    ):
      _, mutated = dense_attention.MultiQueryDotProductAttention(**args).apply(
          {'cache': freeze(cache)},
          inputs_q,
          inputs_kv,
          decode=True,
          mutable=['cache'],
      )
    updated_cache = mutated['cache']
    # Perform the same mocked projection to generate the expected cache.
    # (key|value): [b, 1, d]
    key = mock_dense_general(None, inputs_kv)
    value = mock_dense_general(None, inputs_kv)
    # cached_(key|value): [b, d, k] -- the single decode step lands in slot 0
    # and advances the index to 1.
    cache['cached_key'][:, :, 0] = key[:, 0, :]
    cache['cached_value'][:, :, 0] = value[:, 0, :]
    cache['cache_index'] = np.array(1)
    for name, array in cache.items():
      np.testing.assert_allclose(array, updated_cache[name])
def test_dot_product_attention_multiquery(self):
# b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
b, q, h, d, k = 2, 3, 4, 5, 6
np.random.seed(0)
query = np.random.randn(b, q, h, d)
key = np.random.randn(b, k, d)
value = np.random.randn(b, k, d)
bias = np.random.randn(b, h, q, k)
attn_out = dense_attention.dot_product_attention_multiquery(
query, key, value, bias=bias
)
logits = np.einsum('bqhd,bkd->bhqk', query, key)
weights = jax.nn.softmax(logits + bias, axis=-1)
expected_attn_out = np.einsum('bhqk,bkd->bqhd', weights, value)
np.testing.assert_allclose(attn_out, expected_attn_out, atol=1e-6)
  @parameterized.parameters({'f': 20}, {'f': 22})
  def test_multihead_dot_product_attention_split_head(self, f):
    """Split-head kernels should match the flattened-head kernel layout."""
    # b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
    b, q, h, d, k = 2, 3, 4, 5, 6
    base_args = SelfAttentionArgs(
        num_heads=h,
        qkv_features=f,
        out_features=f,
        dropout_rate=0,
        rescale_logits=False,
        use_bias=False,
    )
    args = base_args.init_args()
    # When f != h * d the head dim cannot be inferred; set it explicitly.
    if f != h * d:
      args['head_dim'] = d
    np.random.seed(0)
    inputs_q = np.random.randn(b, q, f)
    inputs_kv = np.random.randn(b, k, f)
    # Projection: [b, q, f] -> [b, q, h, d]
    # So the kernels have to be [f, h, d]
    query_kernel = np.random.randn(f, h, d)
    key_kernel = np.random.randn(f, h, d)
    value_kernel = np.random.randn(f, h, d)
    # `out` calculation: [b, q, h, d] -> [b, q, f]
    # So kernel has to be [h, d, f]
    out_kernel = np.random.randn(h, d, f)
    # Default layout: head and head_dim axes flattened into one.
    params = {
        'query': {'kernel': query_kernel.reshape(f, -1)},
        'key': {'kernel': key_kernel.reshape(f, -1)},
        'value': {'kernel': value_kernel.reshape(f, -1)},
        'out': {'kernel': out_kernel.reshape(-1, f)},
    }
    y = dense_attention.MultiHeadDotProductAttention(**args).apply(
        {'params': freeze(params)}, inputs_q, inputs_kv
    )
    # Same parameters, but with an explicit head axis.
    params = {
        'query': {'kernel': query_kernel},
        'key': {'kernel': key_kernel},
        'value': {'kernel': value_kernel},
        'out': {'kernel': out_kernel},
    }
    args_split_head_kernel = dict(args)
    args_split_head_kernel['split_head_kernel'] = True
    y_split_head_kernel = dense_attention.MultiHeadDotProductAttention(
        **args_split_head_kernel
    ).apply({'params': freeze(params)}, inputs_q, inputs_kv)
    np.testing.assert_allclose(y, y_split_head_kernel, rtol=1e-5, atol=1e-5)
  @parameterized.parameters({'f': 20}, {'f': 22})
  def test_multihead_dot_product_attention_fuse_kernels_kv(self, f):
    """Fused key/value kernel should match separate key and value kernels."""
    # b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
    b, q, h, d, k = 2, 3, 4, 5, 6
    base_args = SelfAttentionArgs(
        num_heads=h,
        qkv_features=f,
        out_features=f,
        dropout_rate=0,
        rescale_logits=False,
        use_bias=False,
    )
    args = base_args.init_args()
    args['split_head_kernel'] = True
    args['rescale_logits'] = True
    # When f != h * d the head dim cannot be inferred; set it explicitly.
    if f != h * d:
      args['head_dim'] = d
    np.random.seed(0)
    inputs_q = np.random.randn(b, q, f)
    inputs_kv = np.random.randn(b, k, f)
    # Projection: [b, q, f] -> [b, q, h, d]
    # So the kernels have to be [f, h, d]
    query_kernel = np.random.randn(f, h, d)
    key_kernel = np.random.randn(f, h, d)
    value_kernel = np.random.randn(f, h, d)
    # `out` calculation: [b, q, h, d] -> [b, q, f]
    # So kernel has to be [h, d, f]
    out_kernel = np.random.randn(h, d, f)
    params = {
        'query': {'kernel': query_kernel},
        'key': {'kernel': key_kernel},
        'value': {'kernel': value_kernel},
        'out': {'kernel': out_kernel},
    }
    y = dense_attention.MultiHeadDotProductAttention(**args).apply(
        {'params': freeze(params)}, inputs_q, inputs_kv
    )
    # Stack key and value along a new axis 1 to form the fused kernel:
    # [f, 2, h, d].
    fused_kernel = np.stack([key_kernel, value_kernel], axis=1)
    params = {
        'query': {'kernel': query_kernel},
        'kv_fused': {'kernel': fused_kernel},
        'out': {'kernel': out_kernel},
    }
    args_fused_kernels = dict(args)
    args_fused_kernels['kernels_to_fuse'] = 'kv'
    y_fused_kernels = dense_attention.MultiHeadDotProductAttention(
        **args_fused_kernels
    ).apply({'params': freeze(params)}, inputs_q, inputs_kv)
    np.testing.assert_allclose(y, y_fused_kernels, rtol=1e-5, atol=1e-5)
  @parameterized.parameters({'f': 20}, {'f': 22})
  def test_multihead_dot_product_attention_fuse_kernels_qkv(self, f):
    """Fused q/k/v kernel should match separate q, k, and v kernels."""
    # b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
    b, q, h, d = 2, 3, 4, 5
    base_args = SelfAttentionArgs(
        num_heads=h,
        qkv_features=f,
        out_features=f,
        dropout_rate=0,
        rescale_logits=False,
        use_bias=False,
    )
    args = base_args.init_args()
    args['split_head_kernel'] = True
    args['rescale_logits'] = True
    # When f != h * d the head dim cannot be inferred; set it explicitly.
    if f != h * d:
      args['head_dim'] = d
    np.random.seed(0)
    inputs_q = np.random.randn(b, q, f)
    # Projection: [b, q, f] -> [b, q, h, d]
    # So the kernels have to be [f, h, d]
    query_kernel = np.random.randn(f, h, d)
    key_kernel = np.random.randn(f, h, d)
    value_kernel = np.random.randn(f, h, d)
    # `out` calculation: [b, q, h, d] -> [b, q, f]
    # So kernel has to be [h, d, f]
    out_kernel = np.random.randn(h, d, f)
    params = {
        'query': {'kernel': query_kernel},
        'key': {'kernel': key_kernel},
        'value': {'kernel': value_kernel},
        'out': {'kernel': out_kernel},
    }
    # Self-attention: q and kv inputs are the same array, a requirement for
    # qkv fusion.
    y = dense_attention.MultiHeadDotProductAttention(**args).apply(
        {'params': freeze(params)}, inputs_q, inputs_q
    )
    # Stack q, k, and v along a new axis 1 to form the fused kernel:
    # [f, 3, h, d].
    fused_kernel = np.stack([query_kernel, key_kernel, value_kernel], axis=1)
    params = {
        'qkv_fused': {'kernel': fused_kernel},
        'out': {'kernel': out_kernel},
    }
    args_fused_kernels = dict(args)
    args_fused_kernels['kernels_to_fuse'] = 'qkv'
    y_fused_kernels = dense_attention.MultiHeadDotProductAttention(
        **args_fused_kernels
    ).apply({'params': freeze(params)}, inputs_q, inputs_q)
    np.testing.assert_allclose(y, y_fused_kernels, rtol=1e-5, atol=1e-5)
  @parameterized.named_parameters([
      ('no_fuse_kernel_none', None, False, False, False),
      ('no_fuse_kernel_qkv', None, True, False, False),
      ('no_fuse_kernel_qkv_kv', None, True, True, False),
      ('no_fuse_kernel_qkv_kv_q', None, True, True, True),
      ('qkv_fuse_kernel_none', 'qkv', False, False, False),
      ('qkv_fuse_kernel_qkv', 'qkv', True, False, False),
      ('qkv_fuse_kernel_qkv_kv', 'qkv', True, True, False),
      ('qkv_fuse_kernel_qkv_kv_q', 'qkv', True, True, True),
      ('kv_fuse_kernel_none', 'kv', False, False, False),
      ('kv_fuse_kernel_qkv', 'kv', True, False, False),
      ('kv_fuse_kernel_qkv_kv', 'kv', True, True, False),
      ('kv_fuse_kernel_qkv_kv_q', 'kv', True, True, True),
  ])
  def test_multihead_dot_product_attention_kernel_kernel_init(
      self,
      fused_kernels,
      set_qkv_kernel_init,
      set_kv_kernel_init,
      set_q_kernel_init,
  ):
    """Checks which kernel initializer override wins for each projection.

    Each initializer writes a distinct constant (1.0 default, 2.0 qkv,
    3.0 kv, 4.0 q) via `_mock_initializer`, so the resulting parameter
    values reveal which initializer was actually applied to each (possibly
    fused) kernel.
    """
    # b: batch, f: qkv_features, q: q_len, k: kv_len, h: num_head, d: head_dim
    f = 20
    b, q, h, d = 2, 3, 4, 5
    base_args = SelfAttentionArgs(
        num_heads=h,
        qkv_features=f,
        out_features=f,
        dropout_rate=0,
        rescale_logits=False,
        use_bias=False,
    )
    args = base_args.init_args()
    args['split_head_kernel'] = True
    args['rescale_logits'] = True
    args['kernel_init'] = functools.partial(self._mock_initializer, val=1.0)
    if fused_kernels:
      args['kernels_to_fuse'] = fused_kernels
    if set_qkv_kernel_init:
      args['qkv_kernel_init'] = functools.partial(
          self._mock_initializer, val=2.0
      )
    if set_kv_kernel_init:
      args['kv_kernel_init'] = functools.partial(
          self._mock_initializer, val=3.0
      )
    if set_q_kernel_init:
      args['q_kernel_init'] = functools.partial(self._mock_initializer, val=4.0)
    if f != h * d:
      args['head_dim'] = d
    np.random.seed(0)
    inputs_q = np.random.randn(b, q, f)
    params = dense_attention.MultiHeadDotProductAttention(**args).init(
        random.PRNGKey(0), inputs_q, inputs_q, enable_dropout=False
    )
    # Construct expected param
    # Projection: [b, q, f] -> [b, q, h, d]
    # So the kernels have to be [f, h, d]
    query_kernel = np.ones((f, h, d))
    key_kernel = np.ones((f, h, d))
    value_kernel = np.ones((f, h, d))
    # `out` calculation: [b, q, h, d] -> [b, q, f]
    # So kernel has to be [h, d, f]
    out_kernel = np.ones((h, d, f))
    if fused_kernels is None:
      # Unfused: only the q-specific override can apply.
      if set_q_kernel_init:
        query_kernel = np.ones((f, h, d)) * 4.0
      expected_params = {
          'query': {'kernel': query_kernel.tolist()},
          'key': {'kernel': key_kernel.tolist()},
          'value': {'kernel': value_kernel.tolist()},
          'out': {'kernel': out_kernel.tolist()},
      }
    elif fused_kernels == 'qkv':
      # Fully fused: the qkv override applies to all three projections.
      if set_qkv_kernel_init:
        query_kernel = np.ones((f, h, d)) * 2.0
        key_kernel = np.ones((f, h, d)) * 2.0
        value_kernel = np.ones((f, h, d)) * 2.0
      fused_kernel = np.stack([query_kernel, key_kernel, value_kernel], axis=1)
      expected_params = {
          'qkv_fused': {'kernel': fused_kernel.tolist()},
          'out': {'kernel': out_kernel.tolist()},
      }
    elif fused_kernels == 'kv':
      # kv fused: the kv override applies to key/value, q override to query.
      if set_kv_kernel_init:
        key_kernel = np.ones((f, h, d)) * 3.0
        value_kernel = np.ones((f, h, d)) * 3.0
      if set_q_kernel_init:
        query_kernel = np.ones((f, h, d)) * 4.0
      kv_fused_kernel = np.stack([key_kernel, value_kernel], axis=1)
      expected_params = {
          'kv_fused': {'kernel': kv_fused_kernel.tolist()},
          'query': {'kernel': query_kernel.tolist()},
          'out': {'kernel': out_kernel.tolist()},
      }
    self.assertDictEqual(
        jax.tree_map(lambda a: a.tolist(), unfreeze(params['params'])),
        expected_params,
    )
def test_decoder_logits_mask_unpacked(self):
# [batch, length]
decoder_input_tokens = jnp.array(
[[0, 3, 9, 4, 1, 0, 0], [0, 8, 5, 3, 1, 0, 0]]
)
expected = jnp.array(
[[1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 0, 0]], dtype=jnp.float32
)
# [batch, length, 1]
expected = jnp.expand_dims(expected, axis=-1)
logit_mask = dense_attention.get_decoder_logit_mask(
decoder_input_tokens, jnp.float32
)
self.assertEqual(logit_mask.dtype, jnp.float32)
np.testing.assert_array_equal(logit_mask, expected)
def test_decoder_logits_mask_packed(self):
# Two sequences packed together for each batch elements.
# [batch, length]
decoder_input_tokens = jnp.array(
[[0, 3, 9, 0, 4, 8, 0, 0], [0, 8, 5, 8, 0, 9, 0, 0]]
)
expected = jnp.array(
[[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 0, 0]], dtype=jnp.float32
)
# [batch, length, 1]
expected = jnp.expand_dims(expected, axis=-1)
logit_mask = dense_attention.get_decoder_logit_mask(
decoder_input_tokens, jnp.float32
)
self.assertEqual(logit_mask.dtype, jnp.float32)
np.testing.assert_array_equal(logit_mask, expected)
class LocalAttentionLayerTest(parameterized.TestCase):
  """Tests for the chunked local attention wrapper."""

  @parameterized.parameters(
      itertools.product(
          [True, False],
          [True, False],
          [True, False],
      )
  )
  def test_shapes(
      self,
      always_attend_to_first_position,
      first_position_attends_to_all,
      output_projection,
  ):
    """Checks the local attention layer's shapes are correct."""
    num_heads = 2
    head_dim = 5
    out_features = 11
    model = dense_attention.LocalAttentionLayer(
        dense_attention.MultiHeadDotProductAttention(
            num_heads=num_heads,
            head_dim=head_dim,
            use_bias=True,
            dropout_rate=0.0,
            output_projection=output_projection,
            out_features=out_features if output_projection else None,
        ),
        q_chunk_width=4,
        q_chunk_stride=4,
        kv_chunk_width=6,
        kv_chunk_stride=6,
        always_attend_to_first_position=always_attend_to_first_position,
        first_position_attends_to_all=first_position_attends_to_all,
    )
    batch_size = 3
    q_len = 8
    q_features = 7
    kv_len = 12
    kv_features = 9
    inputs_q = np.ones([batch_size, q_len, q_features], dtype=np.float32)
    inputs_kv = np.ones([batch_size, kv_len, kv_features], dtype=np.float32)
    mask = np.ones([batch_size, 1, q_len, kv_len], dtype=np.int32)
    bias = np.ones([batch_size, 1, q_len, kv_len], dtype=np.int32)
    key = random.PRNGKey(0)
    outputs, _ = model.init_with_output(key, inputs_q, inputs_kv, mask, bias)
    # With an output projection the head axes are merged into `out_features`;
    # without it the per-head outputs are returned unprojected.
    if output_projection:
      self.assertSequenceEqual(outputs.shape, (batch_size, q_len, out_features))
    else:
      self.assertSequenceEqual(
          outputs.shape, (batch_size, q_len, num_heads, head_dim)
      )
class QuantizedAttentionTest(parameterized.TestCase):
  """Tests for multi-query attention with AQT quantization enabled."""

  def test_quantization_no_params_specified(self):
    """use_aqt=True without any quantization params should raise at init."""
    module = dense_attention.MultiQueryDotProductAttention(
        num_heads=2,
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        dtype=jnp.float32,
        use_bias=True,
        use_aqt=True,
    )
    inputs_q = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32,
    )
    inputs_kv = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32,
    )
    with self.assertRaisesRegex(
        ValueError, 'If use_aqt is True, either of weights or acts quantization'
    ):
      module.init(random.PRNGKey(0), inputs_q, inputs_kv, enable_dropout=False)

  def test_multiquery_dot_product_attention_quantized_weights(self):
    """Checks exact params and outputs with 8-bit weight quantization."""
    module = dense_attention.MultiQueryDotProductAttention(
        num_heads=2,
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        dtype=jnp.float32,
        use_bias=True,
        use_aqt=True,
        weight_params=aqt.QuantOps.WeightParams(
            prec=8, half_shift=False, axis=None
        ),
    )
    inputs_q = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32,
    )
    inputs_kv = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32,
    )
    # Golden values captured from a fixed PRNGKey(0) initialization.
    expected_params = {
        'params': {
            'query': {
                'kernel': jnp.array(
                    [
                        [[0.89760804], [-0.7743368]],
                        [[-0.27043915], [-0.09338999]],
                    ],
                    dtype=jnp.float32,
                ),
                'bias': jnp.array(
                    [3.8685133e-07, -5.7897455e-07], dtype=jnp.float32
                ),
            },
            'key': {
                'kernel': jnp.array(
                    [[-1.2404252], [0.6276205]], dtype=jnp.float32
                ),
                'bias': jnp.array([9.180263e-07], dtype=jnp.float32),
            },
            'value': {
                'kernel': jnp.array(
                    [[-0.8634736], [-0.9621272]], dtype=jnp.float32
                ),
                'bias': jnp.array([8.859404e-07], dtype=jnp.float32),
            },
            'out': {
                'kernel': jnp.array(
                    [[0.8359484, 0.9604499], [-1.0830641, 1.0543139]],
                    dtype=jnp.float32,
                ),
                'bias': jnp.array(
                    [-9.7886084e-07, 1.3396599e-06], dtype=jnp.float32
                ),
            },
        },
        'params_axes': {
            'query': {
                'kernel_axes': AxisMetadata(names=('embed', 'heads', 'kv')),
                'bias_axes': AxisMetadata(names=('kv',)),
            },
            'key': {
                'kernel_axes': AxisMetadata(names=('embed', 'kv')),
                'bias_axes': AxisMetadata(names=('kv',)),
            },
            'value': {
                'kernel_axes': AxisMetadata(names=('embed', 'kv')),
                'bias_axes': AxisMetadata(names=('kv',)),
            },
            'out': {
                'kernel_axes': AxisMetadata(names=('joined_kv', 'embed')),
                'bias_axes': AxisMetadata(names=('embed',)),
            },
        },
    }
    result, params = module.init_with_output(
        random.PRNGKey(0), inputs_q, inputs_kv, enable_dropout=False
    )
    jax.tree_map(
        functools.partial(np.testing.assert_allclose, rtol=1e-6),
        unfreeze(params),
        expected_params,
    )
    np.testing.assert_allclose(
        result.tolist(),
        [
            [
                [0.3442336916923523, -4.3061041831970215],
                [0.3442336916923523, -4.3061041831970215],
                [0.36651411652565, -4.258667469024658],
            ],
            [
                [0.807983934879303, -7.265725612640381],
                [0.799161970615387, -7.264179706573486],
                [0.807983934879303, -7.265725612640381],
            ],
        ],
        rtol=1e-6,
    )

  def test_multiquery_dot_product_attention_materialized_weights(self):
    """Checks materialized int8 kernel/scale variables and their axes."""
    weight_params = aqt.QuantOps.WeightParams(
        prec=8, half_shift=False, axis=None
    )
    module = dense_attention.MultiQueryDotProductAttention(
        num_heads=2,
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6),
        dtype=jnp.float32,
        use_bias=True,
        use_aqt=True,
        weight_params=weight_params,
        possibly_use_quantized_vars=True,
    )
    # enable_dropout
    inputs_q = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32,
    )
    inputs_kv = np.array(
        [
            # Batch 1.
            [[1, 1], [1, 1], [1, 2]],
            # Batch 2.
            [[2, 2], [3, 1], [2, 2]],
        ],
        dtype=np.float32,
    )
    result, params = module.init_with_output(
        random.PRNGKey(0), inputs_q, inputs_kv, enable_dropout=False
    )
    # Materialized quantized kernels are created as zero-initialized int8
    # variables, with a float32 per-channel scale alongside them.
    expected_params = {
        'params': {
            'query': {
                'qkernel': jnp.array([[[0], [0]], [[0], [0]]], dtype=jnp.int8),
                'qscale': jnp.array(
                    [[[3.8685133e-07], [-5.7897455e-07]]], dtype=jnp.float32
                ),
                'bias': jnp.array(
                    [1.1104368e-06, 2.4920448e-06], dtype=jnp.float32
                ),
            },
            'key': {
                'qkernel': jnp.array([[0], [0]], dtype=jnp.int8),
                'qscale': jnp.array([[9.180263e-07]], dtype=jnp.float32),
                'bias': jnp.array([5.054643e-07], dtype=jnp.float32),
            },
            'value': {
                'qkernel': jnp.array([[0], [0]], dtype=jnp.int8),
                'qscale': jnp.array([[8.859404e-07]], dtype=jnp.float32),
                'bias': jnp.array([4.5408714e-07], dtype=jnp.float32),
            },
            'out': {
                'qkernel': jnp.array([[0, 0], [0, 0]], dtype=jnp.int8),
                'qscale': jnp.array(
                    [[-9.7886084e-07, 1.3396599e-06]], dtype=jnp.float32
                ),
                'bias': jnp.array(
                    [-3.5336794e-07, -3.4736888e-07], dtype=jnp.float32
                ),
            },
        },
        'params_axes': {
            'query': {
                'qkernel_axes': AxisMetadata(names=('embed', 'heads', 'kv')),
                'qscale_axes': AxisMetadata(
                    names=('embed_qscale', 'heads', 'kv')
                ),
                'bias_axes': AxisMetadata(names=('kv',)),
            },
            'key': {
                'qkernel_axes': AxisMetadata(names=('embed', 'kv')),
                'qscale_axes': AxisMetadata(names=('embed_qscale', 'kv')),
                'bias_axes': AxisMetadata(names=('kv',)),
            },
            'value': {
                'qkernel_axes': AxisMetadata(names=('embed', 'kv')),
                'qscale_axes': AxisMetadata(names=('embed_qscale', 'kv')),
                'bias_axes': AxisMetadata(names=('kv',)),
            },
            'out': {
                'qkernel_axes': AxisMetadata(names=('joined_kv', 'embed')),
                'qscale_axes': AxisMetadata(
                    names=('joined_kv_qscale', 'embed')
                ),
                'bias_axes': AxisMetadata(names=('embed',)),
            },
        },
    }
    jax.tree_map(
        functools.partial(np.testing.assert_allclose, rtol=1e-6),
        unfreeze(params),
        expected_params,
    )
    self.assertDictEqual(
        testing_utils.param_dtypes_shapes_axes(
            params['params'], params['params_axes']
        ),
        {
            'key': {
                'bias': ['float32', 'kv=1'],
                'qkernel': ['int8', 'embed=2', 'kv=1'],
                'qscale': ['float32', 'embed_qscale=1', 'kv=1'],
            },
            'out': {
                'bias': ['float32', 'embed=2'],
                'qkernel': ['int8', 'joined_kv=2', 'embed=2'],
                'qscale': ['float32', 'joined_kv_qscale=1', 'embed=2'],
            },
            'query': {
                'bias': ['float32', 'kv=2'],
                'qkernel': ['int8', 'embed=2', 'heads=2', 'kv=1'],
                'qscale': ['float32', 'embed_qscale=1', 'heads=2', 'kv=1'],
            },
            'value': {
                'bias': ['float32', 'kv=1'],
                'qkernel': ['int8', 'embed=2', 'kv=1'],
            },
        },
    )
    # With zero-initialized quantized kernels, the output is just the
    # broadcast `out` bias.
    np.testing.assert_allclose(
        result.tolist(),
        [
            [
                [-3.5336794e-07, -3.4736888e-07],
                [-3.5336794e-07, -3.4736888e-07],
                [-3.5336794e-07, -3.4736888e-07],
            ],
            [
                [-3.5336794e-07, -3.4736888e-07],
                [-3.5336794e-07, -3.4736888e-07],
                [-3.5336794e-07, -3.4736888e-07],
            ],
        ],
        rtol=1e-6,
    )
if __name__ == '__main__':
  # Run all the tests in this module under the absl test runner.
  absltest.main()
| 65,324 | 33.970557 | 104 | py |
flaxformer | flaxformer-main/flaxformer/components/attention/attention_benchmarks.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmarks for attention mechanisms."""
import functools
import itertools
import timeit
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.random
import jax.sharding
from jax.sharding import PartitionSpec as P
from flaxformer.components.attention import dense_attention
from flaxformer.components.attention import memory_efficient_attention
Array = jax.Array
class AttentionBenchmark(parameterized.TestCase):
  """Test harness for attention mechanism benchmarks.

  Each benchmark JIT-compiles an attention implementation, times repeated
  executions, and reports wall time together with the compiled computation's
  total HBM footprint.
  """

  def time_and_hbm(self, query, key, value, runner):
    """Returns the runtime and total HBM usage for an attention mechanism.

    Args:
      query: Query array passed to `runner`.
      key: Key array passed to `runner`.
      value: Value array passed to `runner`.
      runner: Callable `(query, key, value) -> Array` to benchmark.

    Returns:
      A `(seconds, bytes)` tuple: wall time for `num_trials` executions of the
      compiled runner, and the computation's total HBM use in bytes.
    """
    num_trials = 2
    runner = jax.jit(runner)
    # We compile first, to warm up the JIT compile cache, so the timing below
    # measures execution only.
    compiled = runner.lower(query, key, value).compile()
    duration_in_seconds = timeit.timeit(
        lambda: runner(query, key, value).block_until_ready(), number=num_trials
    )
    memory_analysis = compiled.memory_analysis()
    # Total HBM = arguments + outputs + temporaries, minus buffers aliased
    # between inputs and outputs (so they are not double-counted).
    total_hbm_use_in_bytes = (
        memory_analysis.argument_size_in_bytes
        + memory_analysis.output_size_in_bytes
        - memory_analysis.alias_size_in_bytes
        + memory_analysis.temp_size_in_bytes
    )
    return duration_in_seconds, total_hbm_use_in_bytes

  def show_deltas(
      self,
      query,
      key,
      value,
      baseline_name,
      baseline_runner,
      experiment_runner,
      experiment_name,
      config_name,
  ):
    """Prints runtime and HBM use deltas between two attention mechanisms.

    Args:
      query: Query array passed to both runners.
      key: Key array passed to both runners.
      value: Value array passed to both runners.
      baseline_name: Label printed for the baseline mechanism.
      baseline_runner: Baseline attention callable.
      experiment_runner: Experimental attention callable.
      experiment_name: Label printed for the experimental mechanism.
      config_name: Label describing the benchmark configuration.
    """
    baseline_seconds, baseline_bytes = self.time_and_hbm(
        query, key, value, baseline_runner
    )
    experiment_seconds, experiment_bytes = self.time_and_hbm(
        query, key, value, experiment_runner
    )
    print(f"{baseline_name} {config_name} wall time: {baseline_seconds:.2f}s")
    print(
        f"{experiment_name} {config_name} wall time: {experiment_seconds:.2f}s"
    )
    print(
        f"{baseline_name} {config_name} HBM:"
        f" {(baseline_bytes / (1024**3)):.2f}GB"
    )
    # Bug fix: use fixed-point `.2f` (was `.2`, the general format, which
    # renders values like `1.2e+01`) so both HBM lines are comparable.
    print(
        f"{experiment_name} {config_name} HBM use:"
        f" {(experiment_bytes / (1024**3)):.2f}GB"
    )

  def test_performance_multihead(self):
    """Benchmarks multi-head attention against the memory-efficient variant."""
    batch_size = 8
    num_queries = 2**13
    num_kvs = 2**13
    num_heads = 16
    head_dim = 96
    prng_key = jax.random.PRNGKey(0xFEDE)
    mesh = jax.sharding.Mesh(jax.devices(), ("model",))
    sharding = functools.partial(jax.sharding.NamedSharding, mesh)
    # Shard the heads dimension across the "model" mesh axis.
    query = jax.device_put(
        jax.random.normal(
            prng_key, (batch_size, num_queries, num_heads, head_dim)
        ),
        sharding(P(None, None, "model", None)),
    )
    key = jax.device_put(
        jax.random.normal(prng_key, (batch_size, num_kvs, num_heads, head_dim)),
        sharding(P(None, None, "model", None)),
    )
    value = jax.device_put(
        jax.random.normal(prng_key, (batch_size, num_kvs, num_heads, head_dim)),
        sharding(P(None, None, "model", None)),
    )

    def run_memory_efficient(query, key, value):
      return memory_efficient_attention.dot_product_attention_multihead(
          query,
          key,
          value,
          float32_logits=True,
          query_chunk_size=1024,
          key_chunk_size=1024,
      )

    def run_baseline(query, key, value):
      return dense_attention.dot_product_attention(
          query,
          key,
          value,
          float32_logits=True,
      )

    self.show_deltas(
        query,
        key,
        value,
        "Baseline",
        run_baseline,
        run_memory_efficient,
        "Memory efficient",
        "multihead",
    )

  def test_performance_multiquery(self):
    """Benchmarks multi-query attention against the memory-efficient variant."""
    batch_size = 4
    num_queries = 2**13
    num_kvs = 2**13
    num_heads = 16
    head_dim = 96
    prng_key = jax.random.PRNGKey(0xFEDE)
    mesh = jax.sharding.Mesh(jax.devices(), ("model",))
    sharding = functools.partial(jax.sharding.NamedSharding, mesh)
    query = jax.device_put(
        jax.random.normal(
            prng_key, (batch_size, num_queries, num_heads, head_dim)
        ),
        sharding(P(None, None, "model", None)),
    )
    # Multi-query layout: keys/values have no heads dimension.
    key = jax.device_put(
        jax.random.normal(prng_key, (batch_size, num_kvs, head_dim)),
        sharding(P(None, None, None)),
    )
    value = jax.device_put(
        jax.random.normal(prng_key, (batch_size, num_kvs, head_dim)),
        sharding(P(None, None, None)),
    )

    def run_memory_efficient(query, key, value):
      return memory_efficient_attention.dot_product_attention_multiquery(
          query,
          key,
          value,
          float32_logits=True,
          query_chunk_size=1024,
          key_chunk_size=1024,
      )

    def run_baseline(query, key, value):
      return dense_attention.dot_product_attention_multiquery(
          query,
          key,
          value,
          float32_logits=True,
      )

    # Bug fix: a second, byte-for-byte identical `show_deltas` call (labeled
    # "Memory-efficient") was removed; it re-ran the entire benchmark and
    # printed duplicate results.
    self.show_deltas(
        query,
        key,
        value,
        "Baseline",
        run_baseline,
        run_memory_efficient,
        "Memory efficient",
        "multiquery",
    )

  @parameterized.parameters(
      list(
          itertools.product(
              (2**13, 2**14, 2**15, 2**16), (16, 32, 64), (64, 96)
          )
      )
  )
  def test_length_scaling_multiquery(self, input_length, num_heads, head_dim):
    """Benchmarks a range of configurations for peak HBM use and wall time."""
    batch_size = 4
    num_queries = input_length
    num_kvs = input_length
    prng_key = jax.random.PRNGKey(0xFEDE)
    mesh = jax.sharding.Mesh(jax.devices(), ("model",))
    sharding = functools.partial(jax.sharding.NamedSharding, mesh)
    query = jax.device_put(
        jax.random.normal(
            prng_key, (batch_size, num_queries, num_heads, head_dim)
        ),
        sharding(P(None, None, "model", None)),
    )
    key = jax.device_put(
        jax.random.normal(prng_key, (batch_size, num_kvs, head_dim)),
        sharding(P(None, None, None)),
    )
    value = jax.device_put(
        jax.random.normal(prng_key, (batch_size, num_kvs, head_dim)),
        sharding(P(None, None, None)),
    )

    def run_memory_efficient(query, key, value):
      return memory_efficient_attention.dot_product_attention_multiquery(
          query,
          key,
          value,
          float32_logits=True,
          query_chunk_size=1024,
          key_chunk_size=1024,
      )

    run_seconds, run_bytes = self.time_and_hbm(
        query, key, value, run_memory_efficient
    )
    print(f"{input_length=}, {num_heads=}, {head_dim=}")
    print(f"Multiquery wall time: {run_seconds:.2f}s")
    print(f"Multiquery HBM: {(run_bytes / (1024**3)):.2f}GB")
# Allows running the benchmarks directly, e.g. `python attention_benchmarks.py`.
if __name__ == "__main__":
  absltest.main()
| 7,573 | 27.473684 | 80 | py |
flaxformer | flaxformer-main/flaxformer/t5x/checkpointing_util.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for enabling parameter remapping in T5X via Gin configuration."""
from typing import Any, Callable, Dict, Mapping, Sequence, Tuple
from absl import logging
from flax import linen as nn
from flax import traverse_util
from flax.core import frozen_dict
from t5x import checkpoints
from flaxformer.architectures.common import param_remapping
# frozen_dict.unfreeze is incorrectly typed, so introduce an alias.
def _unfreeze(x: Mapping[str, Any]) -> Dict[str, Any]:
  """Returns a mutable (unfrozen) dict copy of the mapping `x`."""
  unfrozen = frozen_dict.unfreeze(x)  # pytype: disable=wrong-arg-types
  return unfrozen
def _flattened_names(state_dict: Mapping[str, Any]) -> Sequence[str]:
  """Returns slash-joined key paths for every entry of a nested state dict."""
  flat_keys = traverse_util.flatten_dict(state_dict, keep_empty_nodes=True)
  return ['/'.join(key_path) for key_path in flat_keys]
def _apply_remap_fn(remap_fn: Callable[[Mapping[str, Any]], Mapping[str, Any]],
                    state_dict: Mapping[str, Any]) -> Mapping[str, Any]:
  """Applies `remap_fn` to the optimizer param states and target params."""
  remapped = _unfreeze(state_dict)
  param_states = remapped['state']['param_states']
  remapped['state']['param_states'] = remap_fn(param_states)
  remapped['target'] = remap_fn(remapped['target'])
  return remapped
def make_to_save_format_fn(
    module: nn.Module) -> checkpoints.SaveStateTransformationFn:
  """Returns a t5x on-save state transformation function.

  Args:
    module: A Flax module inheriting from param_remapping.ParamRemappable.

  Raises:
    ValueError: If `module` is not a `ParameterRemappable`.
  """
  if not isinstance(module, param_remapping.ParameterRemappable):
    raise ValueError('Expected `module` to be a `ParameterRemappable`, but was '
                     f'{type(module)}')

  def remap(params: Mapping[str, Any]) -> Mapping[str, Any]:
    # Invoke the module's own remapping method; no variables are required.
    return module.apply({}, params, method=module.to_save_format)

  def to_save_format(
      state_dict: checkpoints.PyTreeDef,
      parameter_infos: checkpoints.PyTreeDef,
  ) -> Tuple[checkpoints.PyTreeDef, checkpoints.PyTreeDef]:
    for name in _flattened_names(state_dict):
      logging.info('to_save_format input state_dict: %s', name)
    for name in _flattened_names(parameter_infos):
      logging.info('to_save_format input parameter_infos: %s', name)

    remapped_state_dict = _apply_remap_fn(remap, state_dict)
    remapped_infos = _apply_remap_fn(remap, parameter_infos)

    # Null out the parameter info at every VERSION_KEY leaf.
    flat_infos = traverse_util.flatten_dict(
        remapped_infos, keep_empty_nodes=True)
    cleaned_infos = {}
    for key_path, info in flat_infos.items():
      cleaned_infos[key_path] = (
          None if key_path[-1] == param_remapping.VERSION_KEY else info)
    remapped_infos = traverse_util.unflatten_dict(cleaned_infos)

    for name in _flattened_names(remapped_state_dict):
      logging.info('to_save_format output state_dict: %s', name)
    for name in _flattened_names(remapped_infos):
      logging.info('to_save_format output parameter_infos: %s', name)
    return remapped_state_dict, remapped_infos

  return to_save_format
def make_from_save_format_fn(
    module: nn.Module) -> checkpoints.RestoreStateTransformationFn:
  """Returns a t5x on-restore state transformation function.

  Args:
    module: A Flax module inheriting from param_remapping.ParamRemappable.

  Raises:
    ValueError: If `module` is not a `ParameterRemappable`.
  """
  if not isinstance(module, param_remapping.ParameterRemappable):
    raise ValueError('Expected `module` to be a `ParameterRemappable`, but was '
                     f'{type(module)}')

  def remap(params: Mapping[str, Any]) -> Mapping[str, Any]:
    # Invoke the module's own remapping method; no variables are required.
    return module.apply({}, params, method=module.from_save_format)

  def from_save_format(state_dict: checkpoints.PyTreeDef,
                       target_state_dict: checkpoints.PyTreeDef,
                       *,
                       is_resuming: bool = False) -> checkpoints.PyTreeDef:
    del target_state_dict, is_resuming  # Unused.
    for name in _flattened_names(state_dict):
      logging.info('from_save_format input state_dict: %s', name)
    remapped_state_dict = _apply_remap_fn(remap, state_dict)
    for name in _flattened_names(remapped_state_dict):
      logging.info('from_save_format output state_dict: %s', name)
    return remapped_state_dict

  return from_save_format
| 4,551 | 36.619835 | 80 | py |
flaxformer | flaxformer-main/flaxformer/t5x/configs/h_transformer/gin_configs_encoder_decoder_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for encoder_decoder gin configs in this directory."""
# "Unused" imports below are needed by gin configs.
# pylint: disable=unused-import
import os
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import gin
from jax import numpy as jnp
from jax import random
import numpy as np
from t5x import models as t5x_models
class GinConfigsTest(parameterized.TestCase):
  """Smoke tests for the h-transformer 1D encoder-decoder gin configs."""

  @classmethod
  def setUpClass(cls):
    super().setUpClass()
    # Register the config directory with gin so relative includes resolve.
    cls.root = os.path.join(
        absltest.get_default_test_srcdir(),
        'flaxformer/t5x/configs/h_transformer')
    gin.add_config_file_search_path(cls.root)

  def setUp(self):
    super().setUp()
    # Each test parses its own config, so start from a clean gin state.
    gin.clear_config()

  @parameterized.named_parameters(
      dict(
          testcase_name='1d_encoder_decoder_base',
          filename='h_transformer_1d_encoder_decoder_base.gin'),
      dict(
          testcase_name='1d_encoder_decoder_small',
          filename='h_transformer_1d_encoder_decoder_small.gin'),
      dict(
          testcase_name='1d_encoder_decoder_large',
          filename='h_transformer_1d_encoder_decoder_large.gin'),
  )
  def test_model_gin_config(self, filename):
    """Parses a model config and runs init/apply/score end to end."""
    gin.parse_config_file(os.path.join(self.root, 'models', filename))
    gin.finalize()  # Check for required values, etc.

    model_ref: gin.ConfigurableReference = gin.query_parameter('%MODEL')
    # Instantiate the T5X model (e.g. `t5x.models.EncoderDecoderModel`).
    model: t5x_models.BaseModel = model_ref.scoped_configurable_fn()

    # Tiny fake token batches are enough to exercise the full graph.
    encoder_input_tokens = jnp.array([[1, 2, 1, 0], [1, 3, 0, 0]])
    decoder_input_tokens = jnp.array([[1, 2, 0, 0], [4, 5, 0, 0]])
    decoder_target_tokens = jnp.array([[1, 2, 0, 0], [4, 5, 0, 0]])
    decoder_loss_weights = jnp.array([[1, 1, 0, 0], [1, 1, 0, 0]])

    variables = model.module.init(
        random.PRNGKey(0),
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False)
    _ = model.module.apply(
        {'params': variables['params']},
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False)

    batch = {
        'encoder_input_tokens': encoder_input_tokens,
        'decoder_input_tokens': decoder_input_tokens,
        'decoder_target_tokens': decoder_target_tokens,
        'decoder_loss_weights': decoder_loss_weights,
    }
    _ = model.score_batch(variables['params'], batch)

  def test_architecture_gin_config(self):
    """Parses the architecture config and exercises init and apply."""
    gin.parse_config_file(
        os.path.join(self.root, 'architectures',
                     'h_transformer_1d_encoder_decoder.gin'))
    # Bind the dimensions the architecture file leaves open.
    gin.parse_config("""
        NUM_HEADS = 2
        NUM_DECODER_LAYERS = 2
        NUM_ENCODER_LAYERS = 2
        EMBED_DIM = 8
        MLP_DIM = 8
        NUM_EMBEDDINGS = 128
    """)
    gin.finalize()  # Check for required values, etc.

    arch_ref: gin.ConfigurableReference = gin.query_parameter('%ARCHITECTURE')
    arch: nn.Module = arch_ref.scoped_configurable_fn()

    token_shape = [4, 8]
    encoder_input_tokens = np.ones(token_shape, dtype=np.int32)
    decoder_input_tokens = np.ones(token_shape, dtype=np.int32)
    decoder_target_tokens = np.ones(token_shape, dtype=np.int32)
    _, variables = arch.init_with_output(
        random.PRNGKey(0),
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False)
    # Call again with concrete sequences using the initialized variables.
    _ = arch.apply(
        variables,
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False)
# Allows running this test module directly, e.g. `python <module>.py`.
if __name__ == '__main__':
  absltest.main()
| 4,830 | 33.755396 | 79 | py |
flaxformer | flaxformer-main/flaxformer/t5x/configs/h_transformer/gin_configs_decoder_only_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gin configs in this directory."""
# "Unused" imports below are needed by gin configs.
# pylint: disable=unused-import
import os
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import gin
from jax import numpy as jnp
from jax import random
import numpy as np
from t5x import models as t5x_models
class GinConfigsTest(parameterized.TestCase):
  """Smoke tests for the h-transformer 1D decoder-only gin configs."""

  @classmethod
  def setUpClass(cls):
    super().setUpClass()
    # Register the config directory with gin so relative includes resolve.
    cls.root = os.path.join(
        absltest.get_default_test_srcdir(),
        'flaxformer/t5x/configs/h_transformer')
    gin.add_config_file_search_path(cls.root)

  def setUp(self):
    super().setUp()
    # Each test parses its own config, so start from a clean gin state.
    gin.clear_config()

  @parameterized.named_parameters(
      dict(
          testcase_name='1d_decoder_only_base',
          filename='h_transformer_1d_decoder_only_base.gin'),
      dict(
          testcase_name='1d_decoder_only_small',
          filename='h_transformer_1d_decoder_only_small.gin'),
      dict(
          testcase_name='1d_decoder_only_large',
          filename='h_transformer_1d_decoder_only_large.gin'),
  )
  def test_model_gin_config(self, filename):
    """Parses a model config and runs init/apply/score end to end."""
    gin.parse_config_file(os.path.join(self.root, 'models', filename))
    gin.finalize()  # Check for required values, etc.

    model_ref: gin.ConfigurableReference = gin.query_parameter('%MODEL')
    # Instantiate the T5X model (e.g. `t5x.models.DecoderOnlyModel`).
    model: t5x_models.BaseModel = model_ref.scoped_configurable_fn()

    input_tokens = jnp.array([[1, 2, 1, 0], [1, 3, 0, 0]])
    input_padding_mask = jnp.array([[1, 1, 1, 0], [1, 1, 0, 0]])
    variables = model.module.init(
        random.PRNGKey(0),
        inputs=input_tokens,
        inputs_mask=input_padding_mask,
        enable_dropout=False)
    _ = model.module.apply(
        {'params': variables['params']},
        inputs=input_tokens,
        inputs_mask=input_padding_mask,
        enable_dropout=False)

    batch = {
        'decoder_input_tokens': input_tokens,
        'decoder_target_tokens': input_tokens,
        'decoder_loss_weights': input_padding_mask,
    }
    _ = model.score_batch(variables['params'], batch)

  def test_architecture_gin_config(self):
    """Parses the architecture config and exercises init and apply."""
    gin.parse_config_file(
        os.path.join(self.root, 'architectures',
                     'h_transformer_1d_decoder_only.gin'))
    # Bind the dimensions the architecture file leaves open.
    gin.parse_config("""
        NUM_HEADS = 2
        NUM_DECODER_LAYERS = 2
        NUM_LAYERS = 2
        HEAD_DIM = 4
        EMBED_DIM = 8
        MLP_DIM = 8
        NUM_EMBEDDINGS = 128
    """)
    gin.finalize()  # Check for required values, etc.

    arch_ref: gin.ConfigurableReference = gin.query_parameter('%ARCHITECTURE')
    arch: nn.Module = arch_ref.scoped_configurable_fn()

    input_tokens = np.ones([4, 8], dtype=np.int32)
    _, variables = arch.init_with_output(
        random.PRNGKey(0), inputs=input_tokens, enable_dropout=False)
    # Call again with concrete sequences using the initialized variables.
    _ = arch.apply(variables, inputs=input_tokens)
# Allows running this test module directly, e.g. `python <module>.py`.
if __name__ == '__main__':
  absltest.main()
| 3,949 | 30.854839 | 79 | py |
flaxformer | flaxformer-main/flaxformer/t5x/configs/moe/gin_configs_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Mixture of Experts gin configs."""
import os
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import gin
from jax import numpy as jnp
from jax import random
import numpy as np
from t5x import models as t5x_models
# "Unused" imports below are needed by gin configs.
# pylint: disable=unused-import
from t5x import utils
from t5x.contrib.moe import adafactor_utils
from t5x.contrib.moe import models
# pylint: enable=unused-import
class GinConfigsTest(parameterized.TestCase):
  """Smoke tests for the Mixture of Experts gin configs."""

  @classmethod
  def setUpClass(cls):
    super().setUpClass()
    # Register the config directory with gin so relative includes resolve.
    cls.root = os.path.join(
        absltest.get_default_test_srcdir(),
        'flaxformer/t5x/configs/moe')
    gin.add_config_file_search_path(cls.root)

  def setUp(self):
    super().setUp()
    # Each test parses its own config, so start from a clean gin state.
    gin.clear_config()

  @parameterized.parameters(
      'experts_choose_small.gin',
      'experts_choose_tiny.gin',
      'tokens_choose_small.gin',
      'tokens_choose_tiny.gin',
  )
  def test_encoder_decoder_model_gin_config(self, filename):
    """Parses a MoE model config and runs init/apply/score end to end."""
    gin.parse_config_file(os.path.join(self.root, 'models', filename))
    # Bind the expert/partition counts the model files leave open.
    gin.parse_config("""
      NUM_EXPERTS = 2
      NUM_MODEL_PARTITIONS = 1
    """)
    gin.finalize()  # Check for required values, etc.

    model_ref: gin.ConfigurableReference = gin.query_parameter('%MODEL')
    # Instantiate the T5X model
    # (`t5x.contrib.moe.models.MoeEncoderDecoderModel`).
    model: t5x_models.BaseModel = model_ref.scoped_configurable_fn()

    encoder_input_tokens = jnp.ones((2, 4))
    # Decoder input and target tokens are fake values for this smoke test.
    decoder_input_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    decoder_target_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    decoder_loss_weights = jnp.array([[1, 1, 1, 0], [0, 1, 0, 1]])
    encoder_kwargs = {'encoder_input_tokens': encoder_input_tokens}

    variables = model.module.init(
        random.PRNGKey(0),
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        **encoder_kwargs)
    _ = model.module.apply(
        {'params': variables['params']},
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        **encoder_kwargs)

    batch = {
        'encoder_input_tokens': encoder_input_tokens,
        'decoder_input_tokens': decoder_input_tokens,
        'decoder_target_tokens': decoder_target_tokens,
        'decoder_loss_weights': decoder_loss_weights,
    }
    _ = model.score_batch(variables['params'], batch)

  def test_architecture_gin_config(self):
    """Parses the MoE architecture config and exercises init and apply."""
    gin.parse_config_file(os.path.join(self.root, 'architectures', 'moe.gin'))
    # Bind the dimensions the architecture file leaves open.
    gin.parse_config("""
      NUM_HEADS = 2
      NUM_ENCODER_LAYERS = 2
      NUM_DECODER_LAYERS = 2
      NUM_ENCODER_SPARSE_LAYERS = 1
      NUM_DECODER_SPARSE_LAYERS = 1
      HEAD_DIM = 4
      EMBED_DIM = 8
      MLP_DIM = 8
      NUM_EMBEDDINGS = 128
      NUM_EXPERTS = 2
      NUM_MODEL_PARTITIONS = 1
    """)
    gin.finalize()  # Check for required values, etc.

    arch_ref: gin.ConfigurableReference = gin.query_parameter('%ARCHITECTURE')
    arch: nn.Module = arch_ref.scoped_configurable_fn()

    token_shape = [4, 8]
    encoder_input_tokens = np.ones(token_shape, dtype=np.int32)
    decoder_input_tokens = np.ones(token_shape, dtype=np.int32)
    decoder_target_tokens = np.ones(token_shape, dtype=np.int32)
    encoder_kwargs = {'encoder_input_tokens': encoder_input_tokens}
    _, variables = arch.init_with_output(
        random.PRNGKey(0),
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        decode=False,
        max_decode_length=None,
        **encoder_kwargs)
    # Call again with concrete sequences using the initialized variables.
    _ = arch.apply(
        variables,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        **encoder_kwargs)
# Allows running this test module directly, e.g. `python <module>.py`.
if __name__ == '__main__':
  absltest.main()
| 4,893 | 31.845638 | 79 | py |
flaxformer | flaxformer-main/flaxformer/t5x/configs/calm/gin_configs_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for CALM gin configs."""
import os
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import gin
from jax import numpy as jnp
from jax import random
import numpy as np
# "Unused" imports below are needed by gin configs.
# pylint: disable=unused-import
from t5x import utils
from t5x.contrib.calm import decoding as calm_decoding
from t5x.contrib.calm import models as calm_models
from flaxformer.architectures.calm_t5 import calm_architecture
# pylint: enable=unused-import
class GinConfigsTest(parameterized.TestCase):
  """Smoke tests for the CALM gin configs."""

  @classmethod
  def setUpClass(cls):
    super().setUpClass()
    # Register the config directory with gin so relative includes resolve.
    cls.root = os.path.join(
        absltest.get_default_test_srcdir(),
        'flaxformer/t5x/configs/calm')
    gin.add_config_file_search_path(cls.root)

  def setUp(self):
    super().setUp()
    # Each test parses its own config, so start from a clean gin state.
    gin.clear_config()

  @parameterized.parameters(
      'calm_t5_1_1_base.gin',
      'calm_t5_1_1_small.gin',
  )
  def test_encoder_decoder_model_gin_config(self, filename):
    """Parses a CALM model config and runs init/apply/score end to end."""
    gin.parse_config_file(os.path.join(self.root, 'models', filename))
    # Bind the early-exit settings the model files leave open.
    gin.parse_config("""
      FIRST_EXIT = 0
      EXIT_INTERVAL = 1
      LOSS_AGGR_WEIGHT = 1
      APPLY_EARLY_INFER = False
      RETURN_ALL_LOGITS = False
    """)
    gin.finalize()  # Check for required values, etc.

    model_ref: gin.ConfigurableReference = gin.query_parameter('%MODEL')
    # Instantiate the T5X model (`t5x.contrib.calm.models.EncoderDecoderModel`).
    model: calm_models.BaseModel = model_ref.scoped_configurable_fn()

    encoder_input_tokens = jnp.ones((2, 4))
    # Decoder input and target tokens are fake values for this smoke test.
    decoder_input_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    decoder_target_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    decoder_loss_weights = jnp.array([[1, 1, 1, 0], [0, 1, 0, 1]])
    encoder_kwargs = {'encoder_input_tokens': encoder_input_tokens}

    variables = model.module.init(
        random.PRNGKey(0),
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        **encoder_kwargs)
    _ = model.module.apply(
        {'params': variables['params']},
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        **encoder_kwargs)

    batch = {
        'encoder_input_tokens': encoder_input_tokens,
        'decoder_input_tokens': decoder_input_tokens,
        'decoder_target_tokens': decoder_target_tokens,
        'decoder_loss_weights': decoder_loss_weights,
    }
    _ = model.score_batch(variables['params'], batch)

  def test_architecture_gin_config(self):
    """Parses the CALM architecture config and exercises init and apply."""
    gin.parse_config_file(
        os.path.join(self.root, 'architectures', 'calm_t5_1_1_flaxformer.gin'))
    # Bind the dimensions and early-exit settings the file leaves open.
    gin.parse_config("""
      NUM_ENCODER_LAYERS = 2
      NUM_DECODER_LAYERS = 2
      NUM_HEADS = 2
      HEAD_DIM = 4
      EMBED_DIM = 8
      MLP_DIM = 8
      NUM_EMBEDDINGS = 128
      RETURN_ALL_LOGITS = False
      FIRST_EXIT = 0
      EXIT_INTERVAL = 1
    """)
    gin.finalize()  # Check for required values, etc.

    arch_ref: gin.ConfigurableReference = gin.query_parameter('%ARCHITECTURE')
    arch: nn.Module = arch_ref.scoped_configurable_fn()

    token_shape = [4, 8]
    encoder_input_tokens = np.ones(token_shape, dtype=np.int32)
    decoder_input_tokens = np.ones(token_shape, dtype=np.int32)
    decoder_target_tokens = np.ones(token_shape, dtype=np.int32)
    encoder_kwargs = {'encoder_input_tokens': encoder_input_tokens}
    _, variables = arch.init_with_output(
        random.PRNGKey(0),
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        decode=False,
        max_decode_length=None,
        **encoder_kwargs)
    # Call again with concrete sequences using the initialized variables.
    _ = arch.apply(
        variables,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        **encoder_kwargs)
# Allows running this test module directly, e.g. `python <module>.py`.
if __name__ == '__main__':
  absltest.main()
| 4,936 | 32.134228 | 79 | py |
flaxformer | flaxformer-main/flaxformer/t5x/configs/t5/gin_configs_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gin configs in this directory."""
# "Unused" imports below are needed by gin configs.
# pylint: disable=unused-import
import os
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import gin
from jax import numpy as jnp
from jax import random
import numpy as np
from t5x import models as t5x_models
from t5x import utils
class GinConfigsTest(parameterized.TestCase):
  """Smoke tests for the T5 family gin configs."""

  @classmethod
  def setUpClass(cls):
    super().setUpClass()
    # Register the config directory with gin so relative includes resolve.
    cls.root = os.path.join(
        absltest.get_default_test_srcdir(),
        'flaxformer/t5x/configs/t5')
    gin.add_config_file_search_path(cls.root)

  def setUp(self):
    super().setUp()
    # Each test parses its own config, so start from a clean gin state.
    gin.clear_config()

  @parameterized.parameters(
      'byt5_small.gin',
      'mt5_small.gin',
      't5_1_1_small.gin',
      't5_small.gin',
  )
  def test_model_gin_config(self, filename):
    """Parses a model config and runs init/apply/score end to end."""
    gin.parse_config_file(os.path.join(self.root, 'models', filename))
    gin.finalize()  # Check for required values, etc.

    model_ref: gin.ConfigurableReference = gin.query_parameter('%MODEL')
    # Instantiate the T5X model (e.g. `t5x.models.EncoderDecoderModel`).
    model: t5x_models.BaseModel = model_ref.scoped_configurable_fn()

    encoder_input_tokens = jnp.ones((2, 3))
    # Decoder input and target tokens are fake values for this smoke test.
    decoder_input_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    decoder_target_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    decoder_loss_weights = jnp.array([[1, 1, 1, 0], [0, 1, 0, 1]])
    # 'lamda' configs (presumably decoder-only — confirm against the config
    # files) take no encoder inputs; this branch is unused for the current
    # parameter list.
    if 'lamda' in filename:
      encoder_kwargs = {}
    else:
      encoder_kwargs = {'encoder_input_tokens': encoder_input_tokens}

    variables = model.module.init(
        random.PRNGKey(0),
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        **encoder_kwargs)
    _ = model.module.apply(
        {'params': variables['params']},
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        **encoder_kwargs)

    batch = {
        'encoder_input_tokens': encoder_input_tokens,
        'decoder_input_tokens': decoder_input_tokens,
        'decoder_target_tokens': decoder_target_tokens,
        'decoder_loss_weights': decoder_loss_weights,
    }
    _ = model.score_batch(variables['params'], batch)

  @parameterized.parameters('t5_1_1_flaxformer.gin', 't5_flaxformer.gin')
  def test_architecture_gin_config(self, filename):
    """Parses an architecture config and exercises init and apply."""
    gin.parse_config_file(os.path.join(self.root, 'architectures', filename))
    # Bind the dimensions the architecture files leave open.
    gin.parse_config("""
      NUM_HEADS = 2
      NUM_ENCODER_LAYERS = 2
      NUM_DECODER_LAYERS = 2
      NUM_LAYERS = 2
      HEAD_DIM = 4
      EMBED_DIM = 8
      MLP_DIM = 8
      NUM_EMBEDDINGS = 128
    """)
    gin.finalize()  # Check for required values, etc.

    arch_ref: gin.ConfigurableReference = gin.query_parameter('%ARCHITECTURE')
    arch: nn.Module = arch_ref.scoped_configurable_fn()

    token_shape = [4, 8]
    encoder_input_tokens = np.ones(token_shape, dtype=np.int32)
    decoder_input_tokens = np.ones(token_shape, dtype=np.int32)
    decoder_target_tokens = np.ones(token_shape, dtype=np.int32)
    # 'lamda' configs take no encoder inputs; unused for the current
    # parameter list.
    if 'lamda' in filename:
      encoder_kwargs = {}
    else:
      encoder_kwargs = {'encoder_input_tokens': encoder_input_tokens}
    _, variables = arch.init_with_output(
        random.PRNGKey(0),
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        decode=False,
        max_decode_length=None,
        **encoder_kwargs)
    # Call again with concrete sequences using the initialized variables.
    _ = arch.apply(
        variables,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        **encoder_kwargs)
# Allows running this test module directly, e.g. `python <module>.py`.
if __name__ == '__main__':
  absltest.main()
| 4,841 | 31.496644 | 79 | py |
flaxformer | flaxformer-main/flaxformer/t5x/configs/longt5/gin_configs_test.py | # Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gin configs in this directory."""
# "Unused" imports below are needed by gin configs.
# pylint: disable=unused-import
import os
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import gin
import jax
from jax import numpy as jnp
from jax import random
import numpy as np
from t5x import models as t5x_models
from t5x import utils
class GinConfigsTest(parameterized.TestCase):
  """Smoke tests for the LongT5 gin configs.

  Each test parses one config file, instantiates the bound model or
  architecture, and runs it on tiny token arrays to catch wiring errors.
  """
  @classmethod
  def setUpClass(cls):
    """Registers the LongT5 config directory on gin's search path."""
    super(GinConfigsTest, cls).setUpClass()
    cls.root = os.path.join(
        absltest.get_default_test_srcdir(),
        'flaxformer/t5x/configs/longt5')
    gin.add_config_file_search_path(cls.root)
  def setUp(self):
    """Clears gin bindings so each test parses its config from scratch."""
    super().setUp()
    gin.clear_config()
  @parameterized.parameters(
      'longt5_1_1_base.gin',
      'longt5_1_1_transient_global_base.gin',
  )
  def test_model_gin_config(self, filename):
    """Builds base-sized models and exercises init, apply and score_batch."""
    path = os.path.join(self.root, 'models', filename)
    gin.parse_config_file(path)
    gin.finalize()  # Check for required values, etc.
    model_config_ref: gin.ConfigurableReference = gin.query_parameter('%MODEL')
    # Instantiate T5X model (e.g. `t5x.models.EncoderDecoderModel`).
    model: t5x_models.BaseModel = model_config_ref.scoped_configurable_fn()
    encoder_input_tokens = jnp.ones((2, 3))
    # For this test, decoder input and target tokens are fake values.
    decoder_input_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    decoder_target_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    decoder_loss_weights = jnp.array([[1, 1, 1, 0], [0, 1, 0, 1]])
    encoder_kwargs = {'encoder_input_tokens': encoder_input_tokens}
    variables = model.module.init(
        random.PRNGKey(0),
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        **encoder_kwargs)
    output = model.module.apply({'params': variables['params']},
                                decoder_input_tokens=decoder_input_tokens,
                                decoder_target_tokens=decoder_target_tokens,
                                enable_dropout=False,
                                **encoder_kwargs)
    del output  # Unused.
    # Also exercise the T5X scoring path with per-token loss weights.
    batch = {
        'encoder_input_tokens': encoder_input_tokens,
        'decoder_input_tokens': decoder_input_tokens,
        'decoder_target_tokens': decoder_target_tokens,
        'decoder_loss_weights': decoder_loss_weights
    }
    res = model.score_batch(variables['params'], batch)
    del res  # Unused.
  @parameterized.parameters(
      'longt5_1_1_large.gin',
      'longt5_1_1_xl.gin',
      'longt5_1_1_xxl.gin',
      'longt5_1_1_transient_global_large.gin',
      'longt5_1_1_transient_global_xl.gin',
      'longt5_1_1_transient_global_xxl.gin',
  )
  def test_model_gin_config_symbolically(self, filename):
    # For the large model sizes we just test shapes symbolically to avoid
    # excessive resource usage.
    path = os.path.join(self.root, 'models', filename)
    gin.parse_config_file(path)
    gin.finalize()  # Check for required values, etc.
    model_config_ref: gin.ConfigurableReference = gin.query_parameter('%MODEL')
    # Instantiate T5X model (e.g. `t5x.models.EncoderDecoderModel`).
    model: t5x_models.BaseModel = model_config_ref.scoped_configurable_fn()
    encoder_input_tokens = jnp.ones((2, 3))
    # For this test, decoder input and target tokens are fake values.
    decoder_input_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    decoder_target_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    def init_and_apply_model(encoder_input_tokens, decoder_input_tokens,
                             decoder_target_tokens):
      """Initializes then applies the model; traced by `jax.eval_shape`."""
      encoder_kwargs = {'encoder_input_tokens': encoder_input_tokens}
      variables = model.module.init(
          random.PRNGKey(0),
          decoder_input_tokens=decoder_input_tokens,
          decoder_target_tokens=decoder_target_tokens,
          enable_dropout=False,
          **encoder_kwargs)
      return model.module.apply({'params': variables['params']},
                                decoder_input_tokens=decoder_input_tokens,
                                decoder_target_tokens=decoder_target_tokens,
                                enable_dropout=False,
                                **encoder_kwargs)
    # eval_shape traces the computation abstractly: no FLOPs, no weights.
    result = jax.eval_shape(
        init_and_apply_model,
        encoder_input_tokens=encoder_input_tokens,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens)
    # Output is rank-3; only batch and target-length dims are pinned here
    # (the last dim comes from the config's vocabulary size).
    self.assertLen(result.shape, 3)
    np.testing.assert_array_equal([2, 4], result.shape[:2])
  @parameterized.parameters('longt5_1_1_flaxformer.gin',
                            'longt5_1_1_transient_global_flaxformer.gin')
  def test_architecture_gin_config(self, filename):
    """Builds the bare flaxformer architectures with tiny dimensions."""
    path = os.path.join(self.root, 'architectures', filename)
    gin.parse_config_file(path)
    # Bind small dimensions so the instantiated architecture stays tiny.
    gin.parse_config("""
        NUM_HEADS = 2
        NUM_ENCODER_LAYERS = 2
        NUM_DECODER_LAYERS = 2
        NUM_LAYERS = 2
        HEAD_DIM = 4
        EMBED_DIM = 8
        MLP_DIM = 8
        NUM_EMBEDDINGS = 128
    """)
    gin.finalize()  # Check for required values, etc.
    arch_config_ref: gin.ConfigurableReference = gin.query_parameter(
        '%ARCHITECTURE')
    # Instantiate architecture.
    arch: nn.Module = arch_config_ref.scoped_configurable_fn()
    shape = [4, 8]
    encoder_input_tokens = np.ones(shape, dtype=np.int32)
    decoder_input_tokens = np.ones(shape, dtype=np.int32)
    decoder_target_tokens = np.ones(shape, dtype=np.int32)
    encoder_kwargs = {'encoder_input_tokens': encoder_input_tokens}
    output, variables = arch.init_with_output(
        random.PRNGKey(0),
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        enable_dropout=False,
        decode=False,
        max_decode_length=None,
        **encoder_kwargs)
    del output  # Unused.
    # Call with expected arrays (e.g. Call `__call__` with concrete sequences).
    _ = arch.apply(
        variables,
        decoder_input_tokens=decoder_input_tokens,
        decoder_target_tokens=decoder_target_tokens,
        **encoder_kwargs)
if __name__ == '__main__':
absltest.main()
| 6,865 | 34.210256 | 79 | py |
pytket | pytket-main/examples/python/pytket-qujax-classification.py | from jax import numpy as jnp, random, vmap, value_and_grad, jit
from pytket import Circuit
from pytket.circuit.display import render_circuit_jupyter
from pytket.extensions.qujax import tk_to_qujax
import qujax
import matplotlib.pyplot as plt
# # Define the classification task
# We'll try and learn a _donut_ binary classification function (i.e. a bivariate coordinate is labelled 1 if it is inside the donut and 0 if it is outside)
inner_rad = 0.25
outer_rad = 0.75
def classification_function(x, y, inner=None, outer=None):
    """Binary "donut" label: 1 if (x, y) lies inside the annulus, else 0.

    :param x: x-coordinate(s); scalar or jnp array (broadcast elementwise).
    :param y: y-coordinate(s); same shape as ``x``.
    :param inner: inner radius of the annulus; defaults to the module-level
        ``inner_rad`` so existing two-argument callers are unchanged.
    :param outer: outer radius of the annulus; defaults to ``outer_rad``.
    :return: jnp int array of 0/1 labels, same shape as the inputs.
    """
    if inner is None:
        inner = inner_rad
    if outer is None:
        outer = outer_rad
    r = jnp.sqrt(x**2 + y**2)
    # Boolean product acts as logical AND of the two radius conditions.
    return jnp.where((r > inner) * (r < outer), 1, 0)
linsp = jnp.linspace(-1, 1, 1000)
Z = vmap(lambda x: vmap(lambda y: classification_function(x, y))(linsp))(linsp)
plt.contourf(linsp, linsp, Z, cmap="Purples")
# Now let's generate some data for our quantum circuit to learn from
n_data = 1000
x = random.uniform(random.PRNGKey(0), shape=(n_data, 2), minval=-1, maxval=1)
y = classification_function(x[:, 0], x[:, 1])
plt.scatter(x[:, 0], x[:, 1], alpha=jnp.where(y, 1, 0.2), s=10)
# # Quantum circuit time
# We'll use a variant of data re-uploading [Pérez-Salinas et al](https://doi.org/10.22331/q-2020-02-06-226) to encode the input data, alongside some variational parameters within a quantum circuit classifier
n_qubits = 3
depth = 5
c = Circuit(n_qubits)
for layer in range(depth):
for qi in range(n_qubits):
c.Rz(0.0, qi)
c.Ry(0.0, qi)
c.Rz(0.0, qi)
if layer < (depth - 1):
for qi in range(layer, layer + n_qubits - 1, 2):
c.CZ(qi % n_qubits, (qi + 1) % n_qubits)
c.add_barrier(range(n_qubits))
render_circuit_jupyter(c)
# We can use `pytket-qujax` to generate our angles-to-statetensor function.
angles_to_st = tk_to_qujax(c)
# We'll parameterise each angle as
# $$ \theta_k = b_k + w_k * x_k $$
# where $b_k, w_k$ are variational parameters to be learnt and $x_k = x_0$ if $k$ even, $x_k = x_1$ if $k$ odd for a single bivariate input point $(x_0, x_1)$.
n_angles = 3 * n_qubits * depth
n_params = 2 * n_angles
def param_and_x_to_angles(param, x_single):
    """Map trainable parameters and one data point to circuit angles.

    Each angle is theta_k = b_k + w_k * x_k, where x_k alternates between
    the two coordinates of ``x_single`` (even k -> x_single[0], odd k ->
    x_single[1]). ``param`` stacks the biases then the weights, so it has
    length 2 * n_angles.
    """
    bias_part = param[:n_angles]
    weight_part = param[n_angles:]
    even_mask = jnp.arange(n_angles) % 2 == 0
    scaled_data = jnp.where(
        even_mask, weight_part * x_single[0], weight_part * x_single[1]
    )
    return bias_part + scaled_data
param_and_x_to_st = lambda param, x_single: angles_to_st(
param_and_x_to_angles(param, x_single)
)
# We'll measure the first qubit only (if its 1 we label _donut_, if its 0 we label _not donut_)
def param_and_x_to_probability(param, x_single):
    """Probability of measuring |1> on the first qubit for one input point."""
    statetensor = param_and_x_to_st(param, x_single)
    probabilities = jnp.abs(statetensor) ** 2
    # Marginalise out every qubit except the first (axis 0).
    first_qubit_marginal = jnp.sum(probabilities, axis=range(1, n_qubits))
    return first_qubit_marginal[1]
# For binary classification, the likelihood for our full data set $(x_{1:N}, y_{1:N})$ is
# $$ p(y_{1:N} \mid b, w, x_{1:N}) = \prod_{i=1}^N p(y_i \mid b, w, x_i) = \prod_{i=1}^N (1 - q_{(b,w)}(x_i))^{\mathbb{I}[y_i = 0]}q_{(b,w)}(x_i)^{\mathbb{I}[y_i = 1]}, $$
# where $q_{(b, w)}(x)$ is the probability the quantum circuit classifies input $x$ as donut given variational parameter vectors $(b, w)$. This gives log-likelihood
# $$ \log p(y_{1:N} \mid b, w, x_{1:N}) = \sum_{i=1}^N \mathbb{I}[y_i = 0] \log(1 - q_{(b,w)}(x_i)) + \mathbb{I}[y_i = 1] \log q_{(b,w)}(x_i), $$
# which we would like to maximise.
#
# Unfortunately, the log-likelihood **cannot** be approximated unbiasedly using shots, that is we can approximate $q_{(b,w)}(x_i)$ unbiasedly but not $\log(q_{(b,w)}(x_i))$.
# Note that in qujax simulations we can use the statetensor to calculate this exactly, but it is still good to keep in mind loss functions that can also be used with shots from a quantum device.
# Instead we can minimise an expected distance between shots and data
# <br>
# $$ C(b, w, x, y) = \mathbb{E}_{p(y' \mid q_{(b, w)}(x))}[\ell(y', y)] = (1 - q_{(b, w)}(x)) \ell(0, y) + q_{(b, w)}(x)\ell(1, y), $$
# <br>
# where $y'$ is a shot, $y$ is a data label and $\ell$ is some distance between bitstrings - here we simply set $\ell(0, 0) = \ell(1, 1) = 0$ and $\ell(0, 1) = \ell(1, 0) = 1$ (which coincides with the Hamming distance for this binary example). The full batch cost function is $C(b, w) = \frac1N \sum_{i=1}^N C(b, w, x_i, y_i)$.
#
# Note that to calculate the cost function we need to evaluate the statetensor for every input point $x_i$. If the dataset becomes too large, we can easily minibatch.
def param_to_cost(param):
    """Expected shot/label distance averaged over the full dataset (x, y)."""
    batched_probability = vmap(param_and_x_to_probability, in_axes=(None, 0))
    donut_probs = batched_probability(param, x)
    # Per-point cost is the probability that a shot disagrees with the label.
    per_point_costs = jnp.where(y, 1 - donut_probs, donut_probs)
    return per_point_costs.mean()
# # Ready to descend some gradients?
# We'll just use vanilla gradient descent here
param_to_cost_and_grad = jit(value_and_grad(param_to_cost))
n_iter = 1000
stepsize = 1e-1
param = random.uniform(random.PRNGKey(1), shape=(n_params,), minval=0, maxval=2)
costs = jnp.zeros(n_iter)
for i in range(n_iter):
cost, grad = param_to_cost_and_grad(param)
costs = costs.at[i].set(cost)
param = param - stepsize * grad
print(i, "Cost: ", cost, end="\r")
plt.plot(costs)
plt.xlabel("Iteration")
plt.ylabel("Cost")
# # Visualise trained classifier
linsp = jnp.linspace(-1, 1, 100)
Z = vmap(
lambda a: vmap(lambda b: param_and_x_to_probability(param, jnp.array([a, b])))(
linsp
)
)(linsp)
plt.contourf(linsp, linsp, Z, cmap="Purples", alpha=0.8)
circle_linsp = jnp.linspace(0, 2 * jnp.pi, 100)
plt.plot(inner_rad * jnp.cos(circle_linsp), inner_rad * jnp.sin(circle_linsp), c="red")
plt.plot(outer_rad * jnp.cos(circle_linsp), outer_rad * jnp.sin(circle_linsp), c="red")
# Looks good, it has clearly grasped the donut shape. Sincerest apologies if you are now hungry! 🍩
| 5,758 | 37.393333 | 328 | py |
pytket | pytket-main/examples/python/pytket-qujax_qaoa.py | # # Symbolic circuits with `qujax` and `pytket-qujax`
# In this notebook we will show how to manipulate symbolic circuits with the `pytket-qujax` extension. In particular, we will consider a QAOA and an Ising Hamiltonian.
from pytket import Circuit
from pytket.circuit.display import render_circuit_jupyter
from jax import numpy as jnp, random, value_and_grad, jit
from sympy import Symbol
import matplotlib.pyplot as plt
import qujax
from pytket.extensions.qujax import tk_to_qujax
# # QAOA
# The Quantum Approximate Optimization Algorithm (QAOA), first introduced by [Farhi et al.](https://arxiv.org/pdf/1411.4028.pdf), is a quantum variational algorithm used to solve optimization problems. It consists of a unitary $U(\beta, \gamma)$ formed by alternate repetitions of $U(\beta)=e^{-i\beta H_B}$ and $U(\gamma)=e^{-i\gamma H_P}$, where $H_B$ is the mixing Hamiltonian and $H_P$ the problem Hamiltonian. The goal is to find the optimal parameters that minimize $H_P$.
# Given a depth $d$, the expression of the final unitary is $U(\beta, \gamma) = U(\beta_d)U(\gamma_d)\cdots U(\beta_1)U(\gamma_1)$. Notice that for each repetition the parameters are different.
# ## Problem Hamiltonian
# QAOA uses a problem dependent ansatz. Therefore, we first need to know the problem that we want to solve. In this case we will consider an Ising Hamiltonian with only $Z$ interactions. Given a set of pairs (or qubit indices) $E$, the problem Hamiltonian will be:
# $$H_P = \sum_{(i, j) \in E}\alpha_{ij}Z_iZ_j,$$
# where $\alpha_{ij}$ are the coefficients.
# Let's build our problem Hamiltonian with random coefficients and a set of pairs for a given number of qubits:
n_qubits = 4
hamiltonian_qubit_inds = [(0, 1), (1, 2), (0, 2), (1, 3)]
hamiltonian_gates = [["Z", "Z"]] * (len(hamiltonian_qubit_inds))
# Notice that in order to use the random package from jax we first need to define a seeded key
seed = 13
key = random.PRNGKey(seed)
coefficients = random.uniform(key, shape=(len(hamiltonian_qubit_inds),))
print("Gates:\t", hamiltonian_gates)
print("Qubits:\t", hamiltonian_qubit_inds)
print("Coefficients:\t", coefficients)
# ## Variational Circuit
# Before constructing the circuit, we still need to select the mixing Hamiltonian. In our case, we will be using $X$ gates in each qubit, so $H_B = \sum_{i=1}^{n}X_i$, where $n$ is the number of qubits. Notice that the unitary $U(\beta)$, given this mixing Hamiltonian, is an $X$ rotation in each qubit with angle $\beta$.
# As for the unitary corresponding to the problem Hamiltonian, $U(\gamma)$, it has the following form:
# $$U(\gamma)=\prod_{(i, j) \in E}e^{-i\gamma\alpha_{ij}Z_iZ_j}$$
# The operation $e^{-i\gamma\alpha_{ij}Z_iZ_j}$ can be performed using two CNOT gates with qubit $i$ as control and qubit $j$ as target and a $Z$ rotation in qubit $j$ in between them, with angle $\gamma\alpha_{ij}$.
# Finally, the initial state used, in general, with the QAOA is an equal superposition of all the basis states. This can be achieved adding a first layer of Hadamard gates in each qubit at the beginning of the circuit.
# With all the building blocks, let's construct the symbolic circuit using tket. Notice that in order to define the parameters, we use the ```Symbol``` object from the `sympy` package. More info can be found in this [documentation](https://cqcl.github.io/pytket/manual/manual_circuit.html#symbolic-circuits). In order to later convert the circuit to qujax, we need to return the list of symbolic parameters as well.
def qaoa_circuit(n_qubits, depth):
    """Build a symbolic QAOA ansatz for the module-level Ising Hamiltonian.

    Returns (circuit, symbols) where ``symbols`` lists the gamma/beta
    sympy symbols in the order they appear in the circuit.
    """
    qc = Circuit(n_qubits)
    symbols = []
    # Initial state: equal superposition via a Hadamard on every qubit.
    for qubit in range(n_qubits):
        qc.H(qubit)
    for layer in range(depth):
        # Problem-Hamiltonian unitary exp(-i * gamma_layer * H_P).
        # hamiltonian_qubit_inds and coefficients are built with equal
        # lengths at module level, so zip covers every term.
        gamma = Symbol(f"γ_{layer}")
        for (ctrl, tgt), coef in zip(hamiltonian_qubit_inds, coefficients):
            qc.CX(ctrl, tgt)
            qc.Rz(gamma * coef, tgt)
            qc.CX(ctrl, tgt)
        qc.add_barrier(range(0, n_qubits))
        symbols.append(gamma)
        # Mixing unitary exp(-i * beta_layer * H_B): Rx on every qubit.
        beta = Symbol(f"β_{layer}")
        for qubit in range(n_qubits):
            qc.Rx(beta, qubit)
        symbols.append(beta)
    return qc, symbols
depth = 3
circuit, keys = qaoa_circuit(n_qubits, depth)
keys
# Let's check the circuit:
render_circuit_jupyter(circuit)
# # Now for `qujax`
# The `pytket.extensions.qujax.tk_to_qujax` function will generate a parameters -> statetensor function for us. However, in order to convert a symbolic circuit we first need to define the `symbol_map`. This object maps each symbol key to their corresponding index. In our case, since the object `keys` contains the symbols in the correct order, we can simply construct the dictionary as follows:
symbol_map = {keys[i]: i for i in range(len(keys))}
symbol_map
# Then, we invoke the `tk_to_qujax` with both the circuit and the symbolic map.
param_to_st = tk_to_qujax(circuit, symbol_map=symbol_map)
# And we also construct the expectation map using the problem Hamiltonian via qujax:
st_to_expectation = qujax.get_statetensor_to_expectation_func(
hamiltonian_gates, hamiltonian_qubit_inds, coefficients
)
param_to_expectation = lambda param: st_to_expectation(param_to_st(param))
# # Training process
# We construct a function that, given a parameter vector, returns the value of the cost function and the gradient.
# We also `jit` to avoid recompilation, this means that the expensive `cost_and_grad` function is compiled once into a very fast XLA (C++) function which is then executed at each iteration. Alternatively, we could get the same speedup by replacing our `for` loop with `jax.lax.scan`. You can read more about JIT compilation in the [JAX documentation](https://jax.readthedocs.io/en/latest/jax-101/02-jitting.html).
cost_and_grad = jit(value_and_grad(param_to_expectation))
# For the training process we'll use vanilla gradient descent with a constant stepsize:
# Gradient-descent training loop: seed, random init, then n_steps updates.
seed = 123
key = random.PRNGKey(seed)
# One initial value per circuit symbol (gamma/beta per layer).
init_param = random.uniform(key, shape=(len(symbol_map),))
n_steps = 150
stepsize = 0.01
param = init_param
# cost_vals[0] records the cost at the initial parameters.
cost_vals = jnp.zeros(n_steps)
cost_vals = cost_vals.at[0].set(param_to_expectation(init_param))
for step in range(1, n_steps):
    # Jitted value-and-gradient of the expectation.
    cost_val, cost_grad = cost_and_grad(param)
    cost_vals = cost_vals.at[step].set(cost_val)
    # Vanilla gradient descent with a constant stepsize.
    param = param - stepsize * cost_grad
    print("Iteration:", step, "\tCost:", cost_val, end="\r")
# Let's visualise the gradient descent
plt.plot(cost_vals)
plt.xlabel("Iteration")
plt.ylabel("Cost")
| 6,609 | 47.962963 | 478 | py |
pytket | pytket-main/examples/python/pytket-qujax_heisenberg_vqe.py | from pytket import Circuit
from pytket.circuit.display import render_circuit_jupyter
from jax import numpy as jnp, random, vmap, grad, value_and_grad, jit
import matplotlib.pyplot as plt
import qujax
from pytket.extensions.qujax import tk_to_qujax
# # Let's start with a tket circuit
# We place barriers to stop tket automatically rearranging gates and we also store the number of circuit parameters as we'll need this later.
def get_circuit(n_qubits, depth):
    """Build a hardware-efficient ansatz of Rx/Ry layers and CZ ladders.

    Layout: H on every qubit, an Rx layer then an Ry layer, and ``depth``
    repetitions of (CZ ladder, barrier, Rx layer, Ry layer). All rotation
    angles are initialised to zero. Returns (circuit, n_params).
    """
    n_params = 2 * n_qubits * (depth + 1)
    init_angles = jnp.zeros((n_params,))
    circ = Circuit(n_qubits)
    next_param = 0
    for qubit in range(n_qubits):
        circ.H(qubit)
    for qubit in range(n_qubits):
        circ.Rx(init_angles[next_param], qubit)
        next_param += 1
    for qubit in range(n_qubits):
        circ.Ry(init_angles[next_param], qubit)
        next_param += 1
    for _layer in range(depth):
        # Entangle nearest neighbours, then another Rx/Ry rotation layer.
        for qubit in range(0, n_qubits - 1):
            circ.CZ(qubit, qubit + 1)
        circ.add_barrier(range(0, n_qubits))
        for qubit in range(n_qubits):
            circ.Rx(init_angles[next_param], qubit)
            next_param += 1
        for qubit in range(n_qubits):
            circ.Ry(init_angles[next_param], qubit)
            next_param += 1
    return circ, n_params
n_qubits = 4
depth = 2
circuit, n_params = get_circuit(n_qubits, depth)
render_circuit_jupyter(circuit)
# # Now let's invoke qujax
# The `pytket.extensions.qujax.tk_to_qujax` function will generate a parameters -> statetensor function for us.
param_to_st = tk_to_qujax(circuit)
# Let's try it out on some random parameters values. Be aware that's JAX's random number generator requires a `jax.random.PRNGkey` every time it's called - more info on that [here](https://jax.readthedocs.io/en/latest/jax.random.html).
# Be aware that we still have convention where parameters are specified as multiples of $\pi$ - that is in [0,2].
params = random.uniform(random.PRNGKey(0), shape=(n_params,), minval=0., maxval=2.)
statetensor = param_to_st(params)
print(statetensor)
print(statetensor.shape)
# Note that this function also has an optional second argument where an initiating `statetensor_in` can be provided. If it is not provided it will default to the all 0s state (as we use here).
# We can obtain statevector by simply calling `.flatten()`
statevector = statetensor.flatten()
statevector.shape
# And sampling probabilities by squaring the absolute value of the statevector
sample_probs = jnp.square(jnp.abs(statevector))
plt.bar(jnp.arange(statevector.size), sample_probs);
# # Cost function
# Now we have our `param_to_st` function we are free to define a cost function that acts on bitstrings (e.g. maxcut) or integers by directly wrapping a function around `param_to_st`. However, cost functions defined via quantum Hamiltonians are a bit more involved.
# Fortunately, we can encode an Hamiltonian in JAX via the `qujax.get_statetensor_to_expectation_func` function which generates a statetensor -> expected value function for us.
# It takes three arguments as input
# - `gate_seq_seq`: A list of string (or array) lists encoding the gates in each term of the Hamiltonian. I.e. `[['X','X'], ['Y','Y'], ['Z','Z']]` corresponds to $H = aX_iX_j + bY_kY_l + cZ_mZ_n$ with qubit indices $i,j,k,l,m,n$ specified in the second argument and coefficients $a,b,c$ specified in the third argument
# - `qubit_inds_seq`: A list of integer lists encoding which qubit indices to apply the aforementioned gates. I.e. `[[0, 1],[0,1],[0,1]]`. Must have the same structure as `gate_seq_seq` above.
# - `coefficients`: A list of floats encoding any coefficients in the Hamiltonian. I.e. `[2.3, 0.8, 1.2]` corresponds to $a=2.3,b=0.8,c=1.2$ above. Must have the same length as the two above arguments.
# More specifically let's consider the problem of finding the ground state of the quantum Heisenberg Hamiltonian
# $$ H = \sum_{i=1}^{n_\text{qubits}-1} X_i X_{i+1} + Y_i Y_{i+1} + Z_i Z_{i+1}. $$
# As described, we define the Hamiltonian via its gate strings, qubit indices and coefficients.
hamiltonian_gates = [['X', 'X'], ['Y', 'Y'], ['Z', 'Z']] * (n_qubits - 1)
hamiltonian_qubit_inds = [[int(i), int(i) + 1] for i in jnp.repeat(jnp.arange(n_qubits), 3)]
coefficients = [1.] * len(hamiltonian_qubit_inds)
print('Gates:\t', hamiltonian_gates)
print('Qubits:\t', hamiltonian_qubit_inds)
print('Coefficients:\t', coefficients)
# Now let's get the Hamiltonian as a pure JAX function
st_to_expectation = qujax.get_statetensor_to_expectation_func(hamiltonian_gates,
hamiltonian_qubit_inds,
coefficients)
# Let's check it works on the statetensor we've already generated.
expected_val = st_to_expectation(statetensor)
expected_val
# Now let's wrap the `param_to_st` and `st_to_expectation` together to give us an all in one `param_to_expectation` cost function.
param_to_expectation = lambda param: st_to_expectation(param_to_st(param))
param_to_expectation(params)
# Sanity check that a different, randomly generated set of parameters gives us a new expected value.
new_params = random.uniform(random.PRNGKey(1), shape=(n_params,), minval=0., maxval=2.)
param_to_expectation(new_params)
# # We can now use autodiff for fast, exact gradients within a VQE algorithm
# The `param_to_expectation` function we created is a pure JAX function and outputs a scalar. This means we can pass it to `jax.grad` (or even better `jax.value_and_grad`).
cost_and_grad = value_and_grad(param_to_expectation)
# The `cost_and_grad` function returns a tuple with the exact cost value and exact gradient evaluated at the parameters.
cost_and_grad(params)
# # Now we have all the tools we need to design our VQE!
# We'll just use vanilla gradient descent with a constant stepsize
def vqe(init_param, n_steps, stepsize):
    """Run vanilla gradient descent on the expectation value.

    Returns the full parameter history (n_steps, n_params) and the cost
    at every iteration (n_steps,).
    """
    param_history = jnp.zeros((n_steps, n_params)).at[0].set(init_param)
    cost_history = jnp.zeros(n_steps).at[0].set(param_to_expectation(init_param))
    for step in range(1, n_steps):
        cost, grad = cost_and_grad(param_history[step - 1])
        cost_history = cost_history.at[step].set(cost)
        updated = param_history[step - 1] - stepsize * grad
        param_history = param_history.at[step].set(updated)
        print('Iteration:', step, '\tCost:', cost, end='\r')
    print('\n')
    return param_history, cost_history
# Ok enough talking, let's run (and whilst we're at it we'll time it too)
%time vqe_params, vqe_cost_vals = vqe(params, n_steps=250, stepsize=0.01)
# Let's plot the results...
plt.plot(vqe_cost_vals)
plt.xlabel('Iteration')
plt.ylabel('Cost');
# Pretty good!
# # `jax.jit` speedup
# One last thing... We can significantly speed up the VQE above via the `jax.jit`. In our current implementation, the expensive `cost_and_grad` function is compiled to [XLA](https://www.tensorflow.org/xla) and then executed at each call. By invoking `jax.jit` we ensure that the function is compiled only once (on the first call) and then simply executed at each future call - this is much faster!
cost_and_grad = jit(cost_and_grad)
# We'll demonstrate this using the second set of initial parameters we randomly generated (to be sure of no caching).
%time new_vqe_params, new_vqe_cost_vals = vqe(new_params, n_steps=250, stepsize=0.01)
# That's some speedup!
# But let's also plot the training to be sure it converged correctly
plt.plot(new_vqe_cost_vals)
plt.xlabel('Iteration')
plt.ylabel('Cost');
| 7,440 | 42.261628 | 398 | py |
pytket | pytket-main/manual/conf.py | # -*- coding: utf-8 -*-
# Configuration file for the Sphinx documentation builder.
# See https://www.sphinx-doc.org/en/master/usage/configuration.html
# Project information.
copyright = "2020-2023 Quantinuum"
author = "Quantinuum"
# Sphinx extensions used to build the manual.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "jupyter_sphinx",
    "sphinx_copybutton",
    "sphinx.ext.autosectionlabel",
]
# HTML output options (a duplicate `html_theme` assignment was removed).
html_theme = "sphinx_book_theme"
html_title = "pytket user manual"
html_theme_options = {
    "repository_url": "https://github.com/CQCL/tket",
    "use_repository_button": True,
    "use_issues_button": True,
    "logo": {
        "image_light": "_static/Quantinuum_logo_black.png",
        "image_dark": "_static/Quantinuum_logo_white.png",
    },
}
html_static_path = ["_static"]
html_css_files = ["custom.css"]
# -- Extension configuration -------------------------------------------------
pytketdoc_base = "https://cqcl.github.io/tket/pytket/api/"
# Cross-reference targets for sphinx.ext.intersphinx (None = fetch the
# default objects.inv from each base URL).
intersphinx_mapping = {
    "https://docs.python.org/3/": None,
    pytketdoc_base: None,
}
| 1,109 | 22.617021 | 78 | py |
Resemblyzer | Resemblyzer-master/resemblyzer/voice_encoder.py | from resemblyzer.hparams import *
from resemblyzer import audio
from pathlib import Path
from typing import Union, List
from torch import nn
from time import perf_counter as timer
import numpy as np
import torch
class VoiceEncoder(nn.Module):
def __init__(self, device: Union[str, torch.device]=None, verbose=True, weights_fpath: Union[Path, str]=None):
"""
If None, defaults to cuda if it is available on your machine, otherwise the model will
run on cpu. Outputs are always returned on the cpu, as numpy arrays.
:param weights_fpath: path to "<CUSTOM_MODEL>.pt" file path.
If None, defaults to built-in "pretrained.pt" model
"""
super().__init__()
# Define the network
self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True)
self.linear = nn.Linear(model_hidden_size, model_embedding_size)
self.relu = nn.ReLU()
# Get the target device
if device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
elif isinstance(device, str):
device = torch.device(device)
self.device = device
# Load the pretrained model'speaker weights
if weights_fpath is None:
weights_fpath = Path(__file__).resolve().parent.joinpath("pretrained.pt")
else:
weights_fpath = Path(weights_fpath)
if not weights_fpath.exists():
raise Exception("Couldn't find the voice encoder pretrained model at %s." %
weights_fpath)
start = timer()
checkpoint = torch.load(weights_fpath, map_location="cpu")
self.load_state_dict(checkpoint["model_state"], strict=False)
self.to(device)
if verbose:
print("Loaded the voice encoder model on %s in %.2f seconds." %
(device.type, timer() - start))
def forward(self, mels: torch.FloatTensor):
"""
Computes the embeddings of a batch of utterance spectrograms.
:param mels: a batch of mel spectrograms of same duration as a float32 tensor of shape
(batch_size, n_frames, n_channels)
:return: the embeddings as a float 32 tensor of shape (batch_size, embedding_size).
Embeddings are positive and L2-normed, thus they lay in the range [0, 1].
"""
# Pass the input through the LSTM layers and retrieve the final hidden state of the last
# layer. Apply a cutoff to 0 for negative values and L2 normalize the embeddings.
_, (hidden, _) = self.lstm(mels)
embeds_raw = self.relu(self.linear(hidden[-1]))
return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
@staticmethod
def compute_partial_slices(n_samples: int, rate, min_coverage):
"""
Computes where to split an utterance waveform and its corresponding mel spectrogram to
obtain partial utterances of <partials_n_frames> each. Both the waveform and the
mel spectrogram slices are returned, so as to make each partial utterance waveform
correspond to its spectrogram.
The returned ranges may be indexing further than the length of the waveform. It is
recommended that you pad the waveform with zeros up to wav_slices[-1].stop.
:param n_samples: the number of samples in the waveform
:param rate: how many partial utterances should occur per second. Partial utterances must
cover the span of the entire utterance, thus the rate should not be lower than the inverse
of the duration of a partial utterance. By default, partial utterances are 1.6s long and
the minimum rate is thus 0.625.
:param min_coverage: when reaching the last partial utterance, it may or may not have
enough frames. If at least <min_pad_coverage> of <partials_n_frames> are present,
then the last partial utterance will be considered by zero-padding the audio. Otherwise,
it will be discarded. If there aren't enough frames for one partial utterance,
this parameter is ignored so that the function always returns at least one slice.
:return: the waveform slices and mel spectrogram slices as lists of array slices. Index
respectively the waveform and the mel spectrogram with these slices to obtain the partial
utterances.
"""
assert 0 < min_coverage <= 1
# Compute how many frames separate two partial utterances
samples_per_frame = int((sampling_rate * mel_window_step / 1000))
n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
frame_step = int(np.round((sampling_rate / rate) / samples_per_frame))
assert 0 < frame_step, "The rate is too high"
assert frame_step <= partials_n_frames, "The rate is too low, it should be %f at least" % \
(sampling_rate / (samples_per_frame * partials_n_frames))
# Compute the slices
wav_slices, mel_slices = [], []
steps = max(1, n_frames - partials_n_frames + frame_step + 1)
for i in range(0, steps, frame_step):
mel_range = np.array([i, i + partials_n_frames])
wav_range = mel_range * samples_per_frame
mel_slices.append(slice(*mel_range))
wav_slices.append(slice(*wav_range))
# Evaluate whether extra padding is warranted or not
last_wav_range = wav_slices[-1]
coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
if coverage < min_coverage and len(mel_slices) > 1:
mel_slices = mel_slices[:-1]
wav_slices = wav_slices[:-1]
return wav_slices, mel_slices
def embed_utterance(self, wav: np.ndarray, return_partials=False, rate=1.3, min_coverage=0.75):
"""
Computes an embedding for a single utterance. The utterance is divided in partial
utterances and an embedding is computed for each. The complete utterance embedding is the
L2-normed average embedding of the partial utterances.
TODO: independent batched version of this function
:param wav: a preprocessed utterance waveform as a numpy array of float32
:param return_partials: if True, the partial embeddings will also be returned along with
the wav slices corresponding to each partial utterance.
:param rate: how many partial utterances should occur per second. Partial utterances must
cover the span of the entire utterance, thus the rate should not be lower than the inverse
of the duration of a partial utterance. By default, partial utterances are 1.6s long and
the minimum rate is thus 0.625.
:param min_coverage: when reaching the last partial utterance, it may or may not have
enough frames. If at least <min_pad_coverage> of <partials_n_frames> are present,
then the last partial utterance will be considered by zero-padding the audio. Otherwise,
it will be discarded. If there aren't enough frames for one partial utterance,
this parameter is ignored so that the function always returns at least one slice.
:return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If
<return_partials> is True, the partial utterances as a numpy array of float32 of shape
(n_partials, model_embedding_size) and the wav partials as a list of slices will also be
returned.
"""
# Compute where to split the utterance into partials and pad the waveform with zeros if
# the partial utterances cover a larger range.
wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage)
max_wave_length = wav_slices[-1].stop
if max_wave_length >= len(wav):
wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
# Split the utterance into partials and forward them through the model
mel = audio.wav_to_mel_spectrogram(wav)
mels = np.array([mel[s] for s in mel_slices])
with torch.no_grad():
mels = torch.from_numpy(mels).to(self.device)
partial_embeds = self(mels).cpu().numpy()
# Compute the utterance embedding from the partial embeddings
raw_embed = np.mean(partial_embeds, axis=0)
embed = raw_embed / np.linalg.norm(raw_embed, 2)
if return_partials:
return embed, partial_embeds, wav_slices
return embed
def embed_speaker(self, wavs: List[np.ndarray], **kwargs):
    """
    Embed a collection of utterance waveforms (presumably from one speaker).
    Each wav is embedded individually, the embeddings are averaged, and the
    mean is L2-normalized back to unit length.
    :param wavs: list of wavs a numpy arrays of float32.
    :param kwargs: extra arguments to embed_utterance()
    :return: the embedding as a numpy array of float32 of shape (model_embedding_size,).
    """
    utterance_embeds = [
        self.embed_utterance(wav, return_partials=False, **kwargs) for wav in wavs
    ]
    raw_embed = np.mean(utterance_embeds, axis=0)
    return raw_embed / np.linalg.norm(raw_embed, 2)
| 9,191 | 50.640449 | 114 | py |
rebias | rebias-master/evaluator.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
import torch
import numpy as np
def n_correct(pred, labels):
    """Count predictions whose arg-max class matches the ground-truth label."""
    _, top_class = torch.max(pred.data, 1)
    hits = (top_class == labels).sum()
    return hits.item()
class EvaluatorBase(object):
    """Common plumbing for the evaluators; subclasses implement evaluate_rebias."""

    def __init__(self, device='cuda'):
        self.device = device

    @torch.no_grad()
    def evaluate_acc(self, dataloader, model):
        """Return top-1 accuracy of `model` over (x, labels, index) batches."""
        model.eval()
        n_seen, n_hit = 0, 0
        for inputs, targets, _ in dataloader:
            inputs = inputs.to(self.device)
            targets = targets.to(self.device)
            logits = model(inputs, logits_only=True)
            n_seen += targets.size(0)
            n_hit += n_correct(logits, targets)
        return n_hit / n_seen

    @torch.no_grad()
    def evaluate_rebias(self, dataloader, rebias_model,
                        outer_criterion=None,
                        inner_criterion=None,
                        **kwargs):
        raise NotImplementedError
class MNISTEvaluator(EvaluatorBase):
    """Evaluator for Biased-MNIST: accuracy plus a 10x10 (label, bias-label) matrix."""

    def _confusion_matrix(self, pred, bias_labels, labels, n_correct, n_total):
        """Accumulate per-(label, bias_label) correct/total counts into the 10x10 arrays.

        NOTE: the `n_correct` parameter shadows the module-level `n_correct`
        helper inside this method.
        """
        for bias_label in range(10):
            for label in range(10):
                b_indices = (bias_labels.squeeze() == bias_label).nonzero().squeeze()
                t_indices = (labels.squeeze() == label).nonzero().squeeze()
                # Samples carrying exactly this (label, bias_label) combination.
                indices = np.intersect1d(b_indices.detach().cpu().numpy(),
                                         t_indices.detach().cpu().numpy())
                # NOTE(review): torch.cuda.LongTensor hard-requires CUDA here.
                indices = torch.cuda.LongTensor(indices)
                if indices.nelement() == 0:
                    continue
                _n = len(indices)
                _output = pred.index_select(dim=0, index=indices)
                _, predicted = torch.max(_output.data, 1)
                _n_correct = (predicted == labels[indices]).sum().item()
                n_correct[label][bias_label] += _n_correct
                n_total[label][bias_label] += _n
        return n_correct, n_total

    def get_confusion_matrix(self, dataloader, rebias_model):
        """Return the 10x10 accuracy matrix CM[label][bias_label] for the target net f."""
        n_correct_arr = np.zeros((10, 10))
        n_total = np.zeros((10, 10))
        total = 0
        f_correct = 0
        for x, labels, bias_labels in dataloader:
            x = x.to(self.device)
            labels = labels.to(self.device)
            bias_labels = bias_labels.to(self.device)
            f_pred, g_preds, f_feat, g_feats = rebias_model(x)
            n_correct_arr, n_total = self._confusion_matrix(f_pred, bias_labels, labels, n_correct_arr, n_total)
            f_correct += n_correct(f_pred, labels)
            total += len(labels)
        print('accuracy:', f_correct / total)
        # Small epsilon avoids division by zero for empty (label, bias) cells.
        CM = n_correct_arr / (n_total + 1e-12)
        return CM

    @torch.no_grad()
    def evaluate_rebias(self, dataloader, rebias_model,
                        outer_criterion=None,
                        inner_criterion=None,
                        **kwargs):
        """Evaluate accuracy of f and every g, plus mean outer/inner criterion values."""
        rebias_model.eval()
        total = 0
        f_correct = 0
        g_corrects = [0 for _ in rebias_model.g_nets]
        if outer_criterion.__class__.__name__ in ['LearnedMixin', 'RUBi']:
            """For computing HSIC loss only.
            """
            outer_criterion = None
        outer_loss = [0 for _ in rebias_model.g_nets]
        inner_loss = [0 for _ in rebias_model.g_nets]
        for x, labels, _ in dataloader:
            x = x.to(self.device)
            labels = labels.to(self.device)
            f_pred, g_preds, f_feat, g_feats = rebias_model(x)
            batch_size = labels.size(0)
            total += batch_size
            f_correct += n_correct(f_pred, labels)
            for idx, g_pred in enumerate(g_preds):
                g_corrects[idx] += n_correct(g_pred, labels)
            # Criterion values are accumulated batch-size weighted, normalised below.
            if outer_criterion:
                for idx, g_pred in enumerate(g_preds):
                    outer_loss[idx] += batch_size * outer_criterion(f_pred, g_pred).item()
            if inner_criterion:
                for idx, g_pred in enumerate(g_preds):
                    inner_loss[idx] += batch_size * inner_criterion(f_pred, g_pred).item()
        ret = {'f_acc': f_correct / total}
        for idx, (_g_correct, _outer_loss, _inner_loss) in enumerate(zip(g_corrects, outer_loss, inner_loss)):
            ret['g_{}_acc'.format(idx)] = _g_correct / total
            ret['outer_{}_loss'.format(idx)] = _outer_loss / total
            ret['inner_{}_loss'.format(idx)] = _inner_loss / total
        return ret
class ImageNetEvaluator(EvaluatorBase):
    """Evaluator for 9-class ImageNet: biased accuracy and per-texture-cluster
    'unbiased' accuracy averaged over several clustering trials."""

    def imagenet_unbiased_accuracy(self, outputs, labels, cluster_labels,
                                   num_correct, num_instance,
                                   num_cluster_repeat=3):
        """Accumulate per-(class, texture-cluster) correct/total counts for each
        of the `num_cluster_repeat` clustering trials."""
        for j in range(num_cluster_repeat):
            for i in range(outputs.size(0)):
                output = outputs[i]
                label = labels[i]
                cluster_label = cluster_labels[j][i]
                _, pred = output.topk(1, 0, largest=True, sorted=True)
                correct = pred.eq(label).view(-1).float()
                num_correct[j][label][cluster_label] += correct.item()
                num_instance[j][label][cluster_label] += 1
        return num_correct, num_instance

    @torch.no_grad()
    def evaluate_rebias(self, dataloader, rebias_model,
                        outer_criterion=None,
                        inner_criterion=None,
                        num_classes=9,
                        num_clusters=9,
                        num_cluster_repeat=3,
                        key=None):
        """Evaluate f/g accuracy and criterion losses.

        When key == 'unbiased', f accuracy is the mean accuracy over
        (class, cluster) cells with at least 10 samples, averaged over trials.
        """
        rebias_model.eval()
        total = 0
        f_correct = 0
        num_correct = [np.zeros([num_classes, num_clusters]) for _ in range(num_cluster_repeat)]
        num_instance = [np.zeros([num_classes, num_clusters]) for _ in range(num_cluster_repeat)]
        g_corrects = [0 for _ in rebias_model.g_nets]
        if outer_criterion.__class__.__name__ in ['LearnedMixin', 'RUBi']:
            # For computing HSIC loss only: drop the comparison criterion here.
            outer_criterion = None
        outer_loss = [0 for _ in rebias_model.g_nets]
        inner_loss = [0 for _ in rebias_model.g_nets]
        for x, labels, bias_labels in dataloader:
            x = x.to(self.device)
            labels = labels.to(self.device)
            # BUGFIX: Tensor.to() is not in-place; the original iterated
            # `bias_label.to(self.device)` and discarded the result, leaving the
            # bias labels on their original device.
            bias_labels = [bias_label.to(self.device) for bias_label in bias_labels]
            f_pred, g_preds, f_feat, g_feats = rebias_model(x)
            batch_size = labels.size(0)
            total += batch_size
            if key == 'unbiased':
                num_correct, num_instance = self.imagenet_unbiased_accuracy(
                    f_pred.data, labels, bias_labels,
                    num_correct, num_instance, num_cluster_repeat)
            else:
                f_correct += n_correct(f_pred, labels)
                for idx, g_pred in enumerate(g_preds):
                    g_corrects[idx] += n_correct(g_pred, labels)
            if outer_criterion:
                for idx, g_pred in enumerate(g_preds):
                    outer_loss[idx] += batch_size * outer_criterion(f_pred, g_pred).item()
            if inner_criterion:
                for idx, g_pred in enumerate(g_preds):
                    inner_loss[idx] += batch_size * inner_criterion(f_pred, g_pred).item()
        if key == 'unbiased':
            for k in range(num_cluster_repeat):
                x, y = [], []
                _num_correct, _num_instance = num_correct[k].flatten(), num_instance[k].flatten()
                for i in range(_num_correct.shape[0]):
                    __num_correct, __num_instance = _num_correct[i], _num_instance[i]
                    if __num_instance >= 10:  # ignore under-populated cells
                        x.append(__num_instance)
                        y.append(__num_correct / __num_instance)
                # len(x) == len(y): mean per-cell accuracy for this trial.
                f_correct += sum(y) / len(x)
            ret = {'f_acc': f_correct / num_cluster_repeat}
        else:
            ret = {'f_acc': f_correct / total}
        for idx, (_g_correct, _outer_loss, _inner_loss) in enumerate(zip(g_corrects, outer_loss, inner_loss)):
            ret['g_{}_acc'.format(idx)] = _g_correct / total
            ret['outer_{}_loss'.format(idx)] = _outer_loss / total
            ret['inner_{}_loss'.format(idx)] = _inner_loss / total
        return ret
class ActionEvaluator(EvaluatorBase):
    """Evaluator for action recognition: sums clip-level logits into video-level
    predictions before measuring accuracy."""

    @torch.no_grad()
    def evaluate_rebias(self, dataloader, rebias_model,
                        outer_criterion=None,
                        inner_criterion=None,
                        num_classes=50,
                        **kwargs):
        """Evaluate video-level accuracy of f and each g plus outer/inner losses.

        Clip logits are accumulated per video (dataset index // num_clips) and
        the arg-max is taken over the summed logits.
        """
        rebias_model.eval()
        num_clips = dataloader.dataset._num_clips
        num_videos = len(dataloader.dataset) // num_clips
        video_f_preds = torch.zeros((num_videos, num_classes))
        video_g_preds = torch.zeros((len(rebias_model.g_nets), num_videos, num_classes))
        video_labels = torch.zeros((num_videos)).long()
        clip_count = torch.zeros((num_videos)).long()
        total = 0
        if outer_criterion.__class__.__name__ in ['LearnedMixin', 'RUBi']:
            """For computing HSIC loss only.
            """
            outer_criterion = None
        outer_loss = [0 for _ in rebias_model.g_nets]
        inner_loss = [0 for _ in rebias_model.g_nets]
        for x, labels, index in dataloader:
            x = x.to(self.device)
            labels = labels.to(self.device)
            f_pred, g_preds, f_feat, g_feats = rebias_model(x)
            # Scatter each clip's logits into the slot of the video that owns it.
            for ind in range(f_pred.shape[0]):
                vid_id = int(index[ind]) // num_clips
                video_labels[vid_id] = labels[ind].detach().cpu()
                video_f_preds[vid_id] += f_pred[ind].detach().cpu()
                for g_idx, g_pred in enumerate(g_preds):
                    video_g_preds[g_idx, vid_id] += g_pred[ind].detach().cpu()
                clip_count[vid_id] += 1
            batch_size = labels.size(0)
            total += batch_size
            # Criterion values are still computed on clip-level predictions.
            if outer_criterion:
                for idx, g_pred in enumerate(g_preds):
                    outer_loss[idx] += batch_size * outer_criterion(f_pred, g_pred).item()
            if inner_criterion:
                for idx, g_pred in enumerate(g_preds):
                    inner_loss[idx] += batch_size * inner_criterion(f_pred, g_pred).item()
        # Sanity check: every video should have received exactly num_clips clips.
        if not all(clip_count == num_clips):
            print(
                "clip count {} ~= num clips {}".format(
                    clip_count, num_clips
                )
            )
        f_correct = n_correct(video_f_preds, video_labels)
        g_corrects = [n_correct(video_g_pred, video_labels)
                      for video_g_pred in video_g_preds]
        ret = {'f_acc': f_correct / num_videos}
        for idx, (_g_correct, _outer_loss, _inner_loss) in enumerate(zip(g_corrects, outer_loss, inner_loss)):
            ret['g_{}_acc'.format(idx)] = _g_correct / num_videos
            ret['outer_{}_loss'.format(idx)] = _outer_loss / total
            ret['inner_{}_loss'.format(idx)] = _inner_loss / total
        return ret
| 11,149 | 37.184932 | 122 | py |
rebias | rebias-master/make_clusters.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
import argparse
import os
import time
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms
from torchvision.utils import save_image
import numpy as np
from PIL import Image
from sklearn.cluster import MiniBatchKMeans
from datasets.imagenet import get_imagenet_dataloader
# Command-line options for texture-feature extraction + k-means clustering.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='ImageNet')
parser.add_argument('--num_classes', type=int, default=9, help='number of classes')
parser.add_argument('--load_size', type=int, default=256, help='image load size')
parser.add_argument('--image_size', type=int, default=224, help='image crop size')
parser.add_argument('--k', type=int, default=9, help='number of clusters')
# NOTE: argparse applies type=int to string defaults, so '30' is parsed to 30.
parser.add_argument('--n_sample', type=int, default='30', help='number of samples per cluster')
parser.add_argument('--batch_size', type=int, default=64, help='mini-batch size')
parser.add_argument('--num_workers', type=int, default=4, help='number of data loading workers')
parser.add_argument('--cluster_dir', type=str, default='clusters')
def main(n_try=None):
    """Extract Gram-matrix texture features for the dataset, cluster them with
    mini-batch k-means, and save cluster labels plus a sample image grid.

    :param n_try: trial index used to tag the saved label/sample files.
    """
    args = parser.parse_args()
    # create directories if not exist
    if not os.path.exists(args.cluster_dir):
        os.makedirs(args.cluster_dir)
    data_loader = get_imagenet_dataloader(batch_size=args.batch_size, train=False)
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    # Texture ("style") extractor: an early slice of the VGG-16 feature stack.
    extractor = nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features)[:-16])  # conv1_2
    extractor.cuda()
    # ======================================================================= #
    #                          1. Extract features                            #
    # ======================================================================= #
    print('Start extracting features...')
    extractor.eval()
    N = len(data_loader.dataset.dataset)
    start = time.time()
    for i, (images, targets, _) in enumerate(data_loader):
        images = images.cuda()
        outputs = gram_matrix(extractor(images))
        outputs = outputs.view(images.size(0), -1).data.cpu().numpy()
        if i == 0:
            features = np.zeros((N, outputs.shape[1])).astype('float32')
        # BUGFIX: the original branched on `i < N - 1` where N is the number of
        # samples, not batches, so the last-batch branch was effectively dead.
        # Writing by the actual batch size handles the final partial batch
        # correctly without any branching.
        offset = i * args.batch_size
        features[offset: offset + outputs.shape[0]] = outputs.astype('float32')
    # L2 normalization
    features = features / np.linalg.norm(features, axis=1)[:, np.newaxis]
    print('Finished extracting features...(time: {0:.0f} s)'.format(time.time() - start))
    # ======================================================================= #
    #                             2. Clustering                               #
    # ======================================================================= #
    start = time.time()
    labels, image_lists = Kmeans(args.k, features)
    print('Finished clustering...(time: {0:.0f} s)'.format(time.time() - start))
    # save clustering results
    torch.save(torch.LongTensor(labels), os.path.join(args.cluster_dir,
                                                      'cluster_label_{}.pth'.format(n_try)))
    print('Saved cluster label...')
    len_list = [len(image_list) for image_list in image_lists]
    min_len = min(len_list)
    if min_len < args.n_sample:
        args.n_sample = min_len
    print('number of images in each cluster:', len_list)
    # sample clustering results
    start = time.time()
    # BUGFIX: `[[]] * k` aliases one list k times; build independent lists so
    # appending to one cluster cannot leak into the others.
    samples = [[] for _ in range(args.k)]
    for k in range(args.k):
        idx_list = image_lists[k]  # list of image indexes in each cluster
        for j in range(args.n_sample):  # sample the first n_sample indexes
            idx = idx_list[j]
            filename = data_loader.dataset.dataset[idx][0]
            image = transform(Image.open(filename).convert('RGB')).unsqueeze(0)
            samples[k].append(image)
    for k in range(args.k):
        # Concatenate along width: one horizontal strip per cluster.
        samples[k] = torch.cat(samples[k], dim=3)
    samples = torch.cat(samples, dim=0)
    filename = os.path.join(args.cluster_dir, 'cluster_sample_{}.jpg'.format(n_try))
    save_image(denorm(samples.data.cpu()), filename, nrow=1, padding=0)
    print('Finished sampling...(time: {0:.0f} s)'.format(time.time() - start))
def gram_matrix(input, normalize=True):
    """Batched Gram matrix of conv features: (N, C, H, W) -> (N, C, C).

    When `normalize` is True the result is divided by C * H * W.
    """
    n, c, h, w = input.size()
    flat = input.view(n, c, h * w)
    gram = torch.bmm(flat, flat.transpose(1, 2))
    if normalize:
        gram /= (c * h * w)
    return gram
def denorm(x):
    """Undo ImageNet normalization *in place* and clamp the result to [0, 1]."""
    imagenet_mean = torch.tensor([0.485, 0.456, 0.406]).view(-1, 1, 1)
    imagenet_std = torch.tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
    # In-place ops keep the original tensor's storage, mirroring the caller's
    # expectation that `x` itself is converted.
    x.mul_(imagenet_std).add_(imagenet_mean)
    return x.clamp_(0, 1)
def Kmeans(k, features):
    """Cluster rows of `features` into `k` groups with mini-batch k-means.

    :param k: number of clusters.
    :param features: (n_data, dim) array of (L2-normalized) features.
    :return: (labels, image_lists) where labels[i] is the cluster of sample i
        and image_lists[c] lists the sample indices assigned to cluster c.
    """
    n_data = features.shape[0]
    features = torch.FloatTensor(features)
    clus = MiniBatchKMeans(n_clusters=k,
                           batch_size=1024).fit(features)
    labels = clus.labels_
    # CLEANUP: the original also built `feat_lists` of per-cluster feature
    # tensors that was never used or returned -- dead work and memory.
    image_lists = [[] for _ in range(k)]
    for i in range(n_data):
        image_lists[labels[i]].append(i)
    return labels, image_lists
if __name__ == '__main__':
    # Run 5 independent clustering trials (n_try = 1..5); each saves its own
    # cluster_label_{n}.pth and cluster_sample_{n}.jpg.
    for i in range(5):
        main(i+1)
| 5,502 | 34.503226 | 105 | py |
rebias | rebias-master/trainer.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
Unified implementation of the de-biasing minimax optimisation by various methods including,
- ReBias (ours, outer_criterion='RbfHSIC', inner criterion='MinusRbfHSIC')
- Vanilla and Biased baselines (f_lambda_outer=0, g_lambda_inner=0)
- Learned Mixin (outer_criterion='LearnedMixin', g_lambda_inner=0, n_g_update=0)
- RUBi (outer_criterion='RUBi', g_lambda_inner=0)
Also, this implementation allows various configurations such as:
- adaptive radius for RBF kernels (see `_set_adaptive_sigma`)
- warm-up before jointly optimisation (n_g_pretrain_epochs, n_f_pretrain_epochs)
- feature position to compute losses (feature_pos in f_config and g_config)
- various biased network configurations (n_g_nets, n_g_update, update_g_cls)
To see the configurations for each experiment, please refer to the following files:
- README.md
- main_biased_mnist.py
- main_imagenet.py
- main_action.py
"""
import itertools
import os
import munch
import torch
import torch.nn as nn
from criterions import get_criterion
from criterions.sigma_utils import median_distance, feature_dimension
from logger import PythonLogger
from optims import get_optim, get_scheduler
def flatten(list_of_lists):
    """Lazily flatten one level of nesting: [[a, b], [c]] -> a, b, c."""
    return (item for sub in list_of_lists for item in sub)
def cur_step(cur_epoch, idx, N, fmt=None):
    """Fractional training step: epoch plus batch progress, optionally formatted.

    :param cur_epoch: current epoch index.
    :param idx: batch index within the epoch.
    :param N: number of batches per epoch.
    :param fmt: optional format string applied to the fractional step.
    """
    step = cur_epoch + idx / N
    return fmt.format(step) if fmt else step
class Trainer(object):
    """Base wrapper for the de-biasing minimax optimisation to solve.

    ..math:: min_g max_f L_f + lambda_1 ( L_debias (f, g) - L_g)

    In practice, we optimise the following two minimisation problems sequentially:

    .. math::
        min L_f + f_lambda_outer * outer_criterion (f, g)
        min L_g + g_lambda_inner * inner_criterion (f, g)

    Thus, setting f_lambda_outer or g_lambda_inner to zero means only updating classification loss for the optimisation.
    In practice, ours set f_lambda_outer = g_lambda_inner = 1, and comparison methods set f_lambda_outer = 1 and g_lambda_inner = 0.

    Furthermore, we directly implement criterion functions for comparison methods into `outer_criterion` which also optimise classification too.
    In this case, we solely optimise the outer_criterion without the cross entropy loss.

    Parameters
    ----------
    outer_criterion, inner_criterion: str
        Configurations for setting different criterions including
        - ReBias (ours): RbfHSIC, MinusRbfHSIC
        - Vanilla and Biased baselines: -, -
        - Learned Mixin: LearnedMixin, -
        - RUBi: RUBi, -
        where `-` denotes to no outer/inner optimisation.
    outer_criterion_config, inner_criterion_config: dict
        Configuration dict to define criterions, `criterion_fn(**config)`.
    outer_criterion_detail, inner_criterion_detail: dict
        Configurations dict for more details of each criterion.
        In practice, it only contains sigma configurations such as sigma_x_type, sigma_x_scale.
        To set ``adaptive radius'' for RBF kernels, use sigma_x_type='median' (see `_set_adaptive_sigma`)
    f_config, g_config: dict
        Configuration dict for declaring network objects.
    f_lambda_outer: float
        Control parameter for HSIC or other debiasing objective functions on the target network.
        In the experiments, it is always set to one, except ``baseline'' (Vanilla, Biased) cases.
    g_lambda_inner: float
        Control parameter for HSIC or other debiasing objective functions on the biased network.
        ReBias always use one, otherwise it is set to zero.
    n_g_update: int
        The number of g updates for single f update. It could be used if g update is much slower than expected.
        In the experiments, it is always one.
    update_g_cls: boolean
        Flag for updating g cross entropy loss. If False, only debiasing objective is optimised for g.
    n_g_nets: int
        The number of biased networks for the optimisation. The debiasing loss is the summation of the loss computed by each g.
    n_g_pretrain_epochs, n_f_pretrain_epochs: int
        The warm-up epochs for more stable training.
        It is not used for ReBias, but other comparison methods when there is no biased network update (LearnedMixin).
    train_loader: pytorch dataloader
        Used for adaptive kernel updates.
    sigma_update_sampling_rate: float
        Sampling rate for computing the adaptive kernel radius.
        In the experiments, we use 25% of training data points to compute adaptive kernel radius.
    """
    def __init__(self,
                 # criterion settings
                 outer_criterion='RbfHSIC',
                 inner_criterion='MinusRbfHSIC',
                 outer_criterion_config={'sigma': 1.0},
                 outer_criterion_detail={},
                 inner_criterion_config={},
                 inner_criterion_detail={},
                 # network settings
                 f_config={},
                 g_config={},
                 # optimiser settings
                 f_lambda_outer=1,
                 g_lambda_inner=1,
                 n_g_update=1,
                 update_g_cls=True,
                 n_g_nets=1,
                 optimizer='Adam',
                 f_optim_config=None,
                 g_optim_config=None,
                 scheduler='StepLR',
                 f_scheduler_config={'step_size': 20},
                 g_scheduler_config={'step_size': 20},
                 n_g_pretrain_epochs=0,
                 n_f_pretrain_epochs=0,
                 n_epochs=80,
                 log_step=100,
                 # adaptive sigma settings
                 train_loader=None,
                 sigma_update_sampling_rate=0.25,
                 # others
                 device='cuda',
                 logger=None):
        # NOTE: the dict defaults above are shared between calls, but they are
        # never mutated here -- munchify() below converts them into fresh Munch
        # objects before any in-place sigma updates happen.
        self.device = device
        self.sigma_update_sampling_rate = sigma_update_sampling_rate
        if logger is None:
            logger = PythonLogger()
        self.logger = logger
        self.log_step = log_step

        if f_config['num_classes'] != g_config['num_classes']:
            raise ValueError('num_classes for f and g should be same.')
        self.num_classes = f_config['num_classes']

        options = {
            'outer_criterion': outer_criterion,
            'inner_criterion': inner_criterion,
            'outer_criterion_config': outer_criterion_config,
            'outer_criterion_detail': outer_criterion_detail,
            'inner_criterion_config': inner_criterion_config,
            'inner_criterion_detail': inner_criterion_detail,
            'f_config': f_config,
            'g_config': g_config,
            'f_lambda_outer': f_lambda_outer,
            'g_lambda_inner': g_lambda_inner,
            'n_g_update': n_g_update,
            'update_g_cls': update_g_cls,
            'n_g_nets': n_g_nets,
            'optimizer': optimizer,
            'f_optim_config': f_optim_config,
            'g_optim_config': g_optim_config,
            'scheduler': scheduler,
            'f_scheduler_config': f_scheduler_config,
            'g_scheduler_config': g_scheduler_config,
            'n_g_pretrain_epochs': n_g_pretrain_epochs,
            'n_f_pretrain_epochs': n_f_pretrain_epochs,
            'n_epochs': n_epochs,
        }
        self.options = munch.munchify(options)

        self.evaluator = None
        self._set_models()
        self._to_device()
        self._to_parallel()
        self._set_criterion(train_loader)
        self._set_optimizer()

        self.logger.log('Outer criterion: {}'.format(self.outer_criterion.__class__.__name__))
        self.logger.log(self.options)

    def _set_models(self):
        # Subclasses must build self.model with attributes f_net and g_nets.
        raise NotImplementedError

    def _to_device(self):
        """Move the target and all biased networks to self.device."""
        self.model.f_net = self.model.f_net.to(self.device)
        for i, g_net in enumerate(self.model.g_nets):
            self.model.g_nets[i] = g_net.to(self.device)

    def _to_parallel(self):
        """Wrap every network with DataParallel."""
        self.model.f_net = torch.nn.DataParallel(self.model.f_net)
        for i, g_net in enumerate(self.model.g_nets):
            self.model.g_nets[i] = torch.nn.DataParallel(g_net)

    def _set_adaptive_sigma(self, train_loader):
        """Update the RBF kernel radii from the data (``adaptive radius'')."""
        if self.options.outer_criterion_detail.get('sigma_x_type') == 'median':
            self.logger.log('computing sigma from data median')
            sigma_x, sigma_y = median_distance(self.model, train_loader, self.sigma_update_sampling_rate, device=self.device)
        elif self.options.outer_criterion_detail.get('sigma_x_type') == 'dimension':
            sigma_x, sigma_y = feature_dimension(self.model, train_loader, device=self.device)
        else:
            return
        sigma_x_scale = self.options.outer_criterion_detail.get('sigma_x_scale', 1)
        sigma_y_scale = self.options.outer_criterion_detail.get('sigma_y_scale', 1)
        self.options.outer_criterion_config['sigma_x'] = sigma_x * sigma_x_scale
        self.options.outer_criterion_config['sigma_y'] = sigma_y * sigma_y_scale
        self.options.inner_criterion_config['sigma_x'] = sigma_x * sigma_x_scale
        self.options.inner_criterion_config['sigma_y'] = sigma_y * sigma_y_scale
        self.logger.log('current sigma: ({}) * {} ({}) * {}'.format(sigma_x,
                                                                    sigma_x_scale,
                                                                    sigma_y,
                                                                    sigma_y_scale,
                                                                    ))

    def _set_criterion(self, train_loader):
        """(Re-)instantiate outer/inner criterions, refreshing adaptive sigmas first."""
        self._set_adaptive_sigma(train_loader)
        self.outer_criterion = get_criterion(self.options.outer_criterion)(**self.options.outer_criterion_config)
        self.inner_criterion = get_criterion(self.options.inner_criterion)(**self.options.inner_criterion_config)
        self.classification_criterion = nn.CrossEntropyLoss()

    def _set_optimizer(self):
        # BUGFIX: parameters() returns a generator; materialize it so the
        # comparison-method branch below can extend it (the original
        # `generator += list` would raise TypeError).
        f_net_parameters = list(self.model.f_net.parameters())
        # BUGFIX: nn.Module keeps submodules in `_modules`, not `__dict__`, so
        # the original `'fc' in self.outer_criterion.__dict__` check was always
        # False and the LearnedMixin/RUBi fc head was never optimised.
        if hasattr(self.outer_criterion, 'fc'):
            """[NOTE] for comparison methods (LearnedMixin, RUBi)
            """
            f_net_parameters += list(self.outer_criterion.fc.parameters())
        self.f_optimizer = get_optim(f_net_parameters,
                                     self.options.optimizer,
                                     self.options.f_optim_config)
        self.g_optimizer = get_optim(flatten([g_net.parameters()
                                              for g_net in self.model.g_nets]),
                                     self.options.optimizer,
                                     self.options.g_optim_config)
        self.f_lr_scheduler = get_scheduler(self.f_optimizer,
                                            self.options.scheduler,
                                            self.options.f_scheduler_config)
        self.g_lr_scheduler = get_scheduler(self.g_optimizer,
                                            self.options.scheduler,
                                            self.options.g_scheduler_config)

    def pretrain(self, dataloader, val_loaders=None):
        """Warm-up: train g (then f) with classification loss only."""
        for cur_epoch in range(self.options.n_g_pretrain_epochs):
            if self.options.n_epochs == 0:
                # Pretrain-only runs advance the schedule here instead of train().
                self.g_lr_scheduler.step()
            for idx, (x, labels, _) in enumerate(dataloader):
                x = x.to(self.device)
                labels = labels.to(self.device)
                loss_dict = {'step': cur_step(cur_epoch, idx, len(dataloader))}
                self._update_g(x, labels, update_inner_loop=False,
                               loss_dict=loss_dict, prefix='pretrain__')
                if (idx + 1) % self.log_step == 0:
                    self.logger.report(loss_dict,
                                       prefix='[Pretrain G] Report @step: ')
            self.evaluate_acc(cur_epoch + 1,
                              f_acc=False,
                              val_loaders=val_loaders)
        for cur_epoch in range(self.options.n_f_pretrain_epochs):
            if self.options.n_epochs == 0:
                self.f_lr_scheduler.step()
            for idx, (x, labels, _) in enumerate(dataloader):
                x = x.to(self.device)
                labels = labels.to(self.device)
                loss_dict = {'step': cur_step(cur_epoch, idx, len(dataloader))}
                self._update_f(x, labels, update_outer_loop=False,
                               loss_dict=loss_dict, prefix='pretrain__')
                if (idx + 1) % self.log_step == 0:
                    self.logger.report(loss_dict,
                                       prefix='[Pretrain F] Report @step: ')
            self.evaluate_acc(cur_epoch + 1,
                              f_acc=True,
                              val_loaders=val_loaders)

    def _update_g(self, x, labels, update_inner_loop=True, loss_dict=None, prefix=''):
        """One optimisation step for all biased networks g."""
        if loss_dict is None:
            loss_dict = {}
        self.model.train()
        g_loss = 0
        for g_idx, g_net in enumerate(self.model.g_nets):
            preds, g_feats = g_net(x)
            _g_loss = 0

            if self.options.update_g_cls:
                _g_loss_cls = self.classification_criterion(preds, labels)
                _g_loss += _g_loss_cls
                loss_dict['{}g_{}_cls'.format(prefix, g_idx)] = _g_loss_cls.item()

            if update_inner_loop and self.options.g_lambda_inner:
                # Gradients flowing into f_net here are harmless: _update_f
                # calls f_optimizer.zero_grad() before its own backward pass.
                _, f_feats = self.model.f_net(x)
                _g_loss_inner = self.inner_criterion(g_feats, f_feats, labels=labels)
                _g_loss += self.options.g_lambda_inner * _g_loss_inner
                loss_dict['{}g_{}_inner'.format(prefix, g_idx)] = _g_loss_inner.item()
            g_loss += _g_loss
        self.g_optimizer.zero_grad()
        g_loss.backward()
        self.g_optimizer.step()
        loss_dict['{}g_loss'.format(prefix)] = g_loss.item()

    def _update_f(self, x, labels, update_outer_loop=True, loss_dict=None, prefix=''):
        """One optimisation step for the target network f."""
        if loss_dict is None:
            loss_dict = {}
        self.model.train()
        f_loss = 0
        preds, f_feats = self.model.f_net(x)
        if self.options.outer_criterion not in ('LearnedMixin', 'RUBi'):
            """[NOTE] Comparison methods (LearnedMixin, RUBi) do not compute f_loss_cls
            """
            f_loss_cls = self.classification_criterion(preds, labels)
            f_loss += f_loss_cls
            loss_dict['{}f_loss_cls'.format(prefix)] = f_loss_cls.item()

        if update_outer_loop and self.options.f_lambda_outer:
            f_loss_indep = 0
            for g_idx, g_net in enumerate(self.model.g_nets):
                _g_preds, _g_feats = g_net(x)
                _f_loss_indep = self.outer_criterion(f_feats, _g_feats, labels=labels, f_pred=preds, g_pred=_g_preds)
                f_loss_indep += _f_loss_indep
                loss_dict['{}f_loss_indep_g_{}'.format(prefix, g_idx)] = _f_loss_indep.item()
            f_loss += self.options.f_lambda_outer * f_loss_indep
            loss_dict['{}f_loss_indep'.format(prefix)] = f_loss_indep.item()
        self.f_optimizer.zero_grad()
        f_loss.backward()
        self.f_optimizer.step()
        loss_dict['{}f_loss'.format(prefix)] = f_loss.item()

    def _train_epoch(self, dataloader, cur_epoch):
        """One epoch of alternating g / f updates."""
        for idx, (x, labels, _) in enumerate(dataloader):
            x = x.to(self.device)
            labels = labels.to(self.device)
            loss_dict = {'step': cur_step(cur_epoch, idx, len(dataloader))}
            for _ in range(self.options.n_g_update):
                self._update_g(x, labels, loss_dict=loss_dict, prefix='train__')
            self._update_f(x, labels, loss_dict=loss_dict, prefix='train__')
            if (idx + 1) % self.log_step == 0:
                self.logger.report(loss_dict,
                                   prefix='[Train] Report @step: ')

    def train(self, tr_loader,
              val_loaders=None,
              val_epoch_step=20,
              update_sigma_per_epoch=False,
              save_dir='./checkpoints',
              experiment=None):
        """Full training loop: pretrain, then alternate g/f updates with
        periodic validation, checkpointing and optional sigma refresh."""
        if val_loaders:
            if not isinstance(val_loaders, dict):
                raise TypeError('val_loaders should be dict, not {}'
                                .format(type(val_loaders)))
            if 'unbiased' not in val_loaders:
                raise ValueError('val_loaders should contain key "unbiased", cur keys({})'
                                 .format(list(val_loaders.keys())))
        os.makedirs(save_dir, exist_ok=True)

        self.logger.log('start pretraining')
        self.pretrain(tr_loader, val_loaders=val_loaders)

        best_acc = 0
        self.logger.log('start training')
        for cur_epoch in range(self.options.n_epochs):
            self._train_epoch(tr_loader, cur_epoch)
            self.f_lr_scheduler.step()
            self.g_lr_scheduler.step()
            self.logger.log('F learning rate: {}, G learning rate: {}'.format(
                self.f_lr_scheduler.get_lr(),
                self.g_lr_scheduler.get_lr()
            ))
            metadata = {
                'cur_epoch': cur_epoch + 1,
                'best_acc': best_acc,
            }
            if val_loaders and (cur_epoch + 1) % val_epoch_step == 0:
                scores = self.evaluate(cur_epoch + 1, val_loaders)
                metadata['scores'] = scores
                if scores['unbiased']['f_acc'] > best_acc:
                    # BUGFIX: track the running best -- the original never
                    # reassigned best_acc, so every validation that beat the
                    # initial 0 overwrote best.pth.
                    best_acc = scores['unbiased']['f_acc']
                    metadata['best_acc'] = best_acc
                    self.save_models(os.path.join(save_dir, 'best.pth'),
                                     metadata=metadata)
            self.save_models(os.path.join(save_dir, 'last.pth'),
                             metadata=metadata)
            if update_sigma_per_epoch:
                self.logger.log('sigma update')
                self._set_criterion(tr_loader)
                sigma_x = self.options.inner_criterion_config['sigma_x']
                sigma_y = self.options.inner_criterion_config['sigma_y']
                self.logger.report({'step': cur_epoch + 1,
                                    'sigma__f': sigma_x,
                                    'sigma__g': sigma_y}, prefix='[Validation] Report @step: ')

    def evaluate(self, step=0, val_loaders=None):
        """Run the task evaluator on every validation loader; return per-key scores."""
        if not val_loaders:
            return {}
        scores = {}
        for key, val_loader in val_loaders.items():
            scores[key] = self.evaluator.evaluate_rebias(val_loader, self.model,
                                                         outer_criterion=self.outer_criterion,
                                                         inner_criterion=self.inner_criterion,
                                                         num_classes=self.num_classes,
                                                         key=key)
        for key, score in scores.items():
            msg_dict = {'val__{}_{}'.format(key, k): v for k, v in score.items()}
            msg_dict['step'] = step
            self.logger.report(msg_dict, prefix='[Validation] Report @step: ')
        print(scores)
        return scores

    def evaluate_acc(self, step=0, f_acc=True, val_loaders=None):
        """Accuracy-only evaluation used during warm-up (f or each g separately)."""
        if not val_loaders:
            return {}
        scores = {}
        for key, val_loader in val_loaders.items():
            if f_acc:
                scores[key] = self.evaluator.evaluate_acc(val_loader, self.model.f_net)
            else:
                scores[key] = {}
                for idx, g_net in enumerate(self.model.g_nets):
                    scores[key][idx] = self.evaluator.evaluate_acc(val_loader, g_net)
        for key, score in scores.items():
            if f_acc:
                msg_dict = {'pretrain__{}_f_acc'.format(key): score}
            else:
                msg_dict = {'pretrain__{}_g_{}_acc'.format(key, idx): _score for idx, _score in score.items()}
            msg_dict['step'] = step
            self.logger.report(msg_dict, prefix='[Pretrain Validation] Report @step: ')
        return scores

    def save_models(self, save_to, metadata=None):
        """Serialise networks, optimisers, schedulers, options and metadata."""
        state_dict = {
            'f_net': self.model.f_net.state_dict(),
            'g_nets': [g_net.state_dict() for g_net in self.model.g_nets],
            'f_optimizer': self.f_optimizer.state_dict(),
            'g_optimizer': self.g_optimizer.state_dict(),
            'f_lr_scheduler': self.f_lr_scheduler.state_dict(),
            'g_lr_scheduler': self.g_lr_scheduler.state_dict(),
            'options': dict(self.options),
            'metadata': metadata,
        }
        torch.save(state_dict, save_to)
        self.logger.log('state dict is saved to {}, metadata: {}'.format(
            save_to, metadata))
| 20,817 | 42.280665 | 144 | py |
rebias | rebias-master/optims/__init__.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
Opitmizers for the training.
"""
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR, CosineAnnealingLR
from adamp import AdamP
# Registries of optimizer / scheduler class names that the factory functions
# below resolve via globals().
__optim__ = ['Adam', 'AdamP']
__scheduler__ = ['StepLR', 'CosineAnnealingLR']
__all__ = ['Adam', 'AdamP', 'StepLR', 'CosineAnnealingLR', 'get_optim', 'get_scheduler']
def get_optim(params, optim_name, optim_config=None):
    """Build an optimizer by registry name.

    Falls back to a default lr/weight-decay config when none is given; raises
    KeyError for names not listed in `__optim__`.
    """
    if optim_name not in __optim__:
        raise KeyError(optim_name)
    config = optim_config or {'lr': 1e-2, 'weight_decay': 1e-4}
    optim_cls = globals()[optim_name]
    return optim_cls(params, **config)
def get_scheduler(optimizer, scheduler_name, scheduler_config=None):
    """Build an LR scheduler by registry name.

    An empty config means the scheduler's own defaults; raises KeyError for
    names not listed in `__scheduler__`.
    """
    if scheduler_name not in __scheduler__:
        raise KeyError(scheduler_name)
    config = scheduler_config or {}
    scheduler_cls = globals()[scheduler_name]
    return scheduler_cls(optimizer, **config)
| 977 | 24.736842 | 88 | py |
rebias | rebias-master/criterions/comparison_methods.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
De-biasing comparison methods.
Cadene, Remi, et al. "RUBi: Reducing Unimodal Biases for Visual Question Answering.",
Clark, Christopher, Mark Yatskar, and Luke Zettlemoyer. "Don't Take the Easy Way Out: Ensemble Based Methods for Avoiding Known Dataset Biases.", EMNLP 2019.
Reference codes:
- https://github.com/cdancette/rubi.bootstrap.pytorch/blob/master/rubi/models/criterions/rubi_criterion.py
- https://github.com/chrisc36/debias/blob/master/debias/modules/clf_debias_loss_functions.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class GradMulConst(torch.autograd.Function):
    """Identity in the forward pass; scales the gradient by `const` in backward.

    With const=0 this blocks gradients entirely, which is how it is used to
    create an adversarial / stop-gradient loss.
    """

    @staticmethod
    def forward(ctx, x, const):
        # Stash the scaling factor for the backward pass.
        ctx.const = const
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # No gradient with respect to `const` itself.
        return ctx.const * grad_output, None
def grad_mul_const(x, const):
    # Functional wrapper: identity forward, backward gradient scaled by `const`
    # (const=0 acts as a stop-gradient).
    return GradMulConst.apply(x, const)
class RUBi(nn.Module):
    """RUBi
    Cadene, Remi, et al. "RUBi: Reducing Unimodal Biases for Visual Question Answering.",
    Advances in Neural Information Processing Systems. 2019.
    """
    def __init__(self, question_loss_weight=1.0, **kwargs):
        super(RUBi, self).__init__()
        # Weight of the bias-branch ("question") cross-entropy term.
        self.question_loss_weight = question_loss_weight
        # Classifier head applied to the gradient-blocked biased features.
        # NOTE(review): hard-coded .cuda() -- this module requires a GPU.
        self.fc = nn.Linear(kwargs.get('feat_dim', 128), kwargs.get('num_classes', 10)).cuda()

    def forward(self, f_feat, g_feat, labels, f_pred, **kwargs):
        """Compute RUBi loss.

        Parameters
        ----------
        f_feat: NOT USED (for compatibility with other losses).
        g_feat: features from biased network (will be passed to `self.fc` for computing `g_pred`)
        labels: class labels
        f_pred: logit values from the target network
        """
        g_feat = g_feat.view(g_feat.shape[0], -1)
        g_feat = grad_mul_const(g_feat, 0.0)  # don't backpropagate through bias encoder
        g_pred = self.fc(g_feat)

        # Mask the target logits with the sigmoid of the bias logits.
        logits_rubi = f_pred * torch.sigmoid(g_pred)
        fusion_loss = F.cross_entropy(logits_rubi, labels)
        question_loss = F.cross_entropy(g_pred, labels)
        loss = fusion_loss + self.question_loss_weight * question_loss
        return loss
class LearnedMixin(nn.Module):
    """LearnedMixin + H.

    Clark, Christopher, Mark Yatskar, and Luke Zettlemoyer.
    "Don't Take the Easy Way Out: Ensemble Based Methods for Avoiding Known
    Dataset Biases.", EMNLP 2019.
    """
    def __init__(self, w=0.36, **kwargs):
        """
        :param w: weight of the entropy penalty on the scaled bias logits.
        """
        super(LearnedMixin, self).__init__()
        self.w = w
        self.fc = nn.Linear(kwargs.get('feat_dim', 128), 1).cuda()

    def forward(self, f_feat, g_feat, labels, f_pred, g_pred):
        # Note: g_feat is unused; it is kept for interface compatibility.
        feats = f_feat.view(f_feat.shape[0], -1)
        target_logits = f_pred.view(f_pred.shape[0], -1)
        bias_logits = g_pred.view(g_pred.shape[0], -1)

        # Instance-dependent, non-negative scale for the bias logits.
        scale = self.fc.forward(feats)
        scale = F.softplus(scale)
        bias_logits *= scale

        ensemble_loss = F.cross_entropy(target_logits + bias_logits, labels)

        # Entropy of the scaled bias distribution (the "+H" regularizer).
        log_p = F.log_softmax(bias_logits, 1)
        entropy = -(torch.exp(log_p) * log_p).sum(1).mean()
        return ensemble_loss + self.w * entropy
| 3,496 | 34.683673 | 157 | py |
rebias | rebias-master/criterions/sigma_utils.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
import numpy as np
import torch
def _l2_dist(X):
X = X.view(len(X), -1)
XX = X @ X.t()
X_sqnorms = torch.diag(XX)
X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0)
return X_L2.clone().detach().cpu().numpy().reshape(-1).tolist()
def median_distance(model, train_loader, sigma_update_sampling_rate, func='median', device='cuda'):
    """Estimate RBF kernel bandwidths for the f- and g-branches.

    Collects pairwise squared L2 distances between features over a subsample
    of ``train_loader`` (a fraction ``sigma_update_sampling_rate`` of the
    batches) and returns ``(sqrt(stat(f_dists)), sqrt(stat(g_dists)))``
    where ``stat`` is the median or the mean.
    """
    if func not in {'mean', 'median'}:
        raise ValueError(func)
    model.train()
    max_batches = len(train_loader) * sigma_update_sampling_rate
    f_dist = []
    g_dist = []
    for batch_idx, (x, _, _) in enumerate(train_loader):
        if batch_idx > max_batches:
            break
        x = x.to(device)
        batch = len(x)
        _, feats = model.f_net(x)
        f_dist.extend(_l2_dist(feats.view(batch, -1)))
        for g_net in model.g_nets:
            _, feats = g_net(x)
            g_dist.extend(_l2_dist(feats.view(batch, -1)))
    reduce_fn = np.median if func == 'median' else np.mean
    f_sigma = reduce_fn(np.array(f_dist))
    g_sigma = reduce_fn(np.array(g_dist))
    return np.sqrt(f_sigma), np.sqrt(g_sigma)
def feature_dimension(model, train_loader, device='cuda'):
    """Return the square roots of the flattened feature dimensionalities of
    ``model.f_net`` and of the (last) ``g_net``, probed by forwarding
    batches from ``train_loader``.

    Note: the whole loader is iterated; values from the final batch win,
    matching the original behaviour.
    """
    model.train()
    for x, _ in train_loader:
        x = x.to(device)
        batch = len(x)
        _, feats = model.f_net(x)
        f_dim = feats.view(batch, -1).size()[1]
        for g_net in model.g_nets:
            _, feats = g_net(x)
            g_dim = feats.view(batch, -1).size()[1]
    return np.sqrt(f_dim), np.sqrt(g_dim)
| 1,810 | 26.029851 | 99 | py |
rebias | rebias-master/criterions/hsic.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
Python Implementation of the finite sample estimator of Hilbert-Schmidt Independence Criterion (HSIC)
We provide both biased estimator and unbiased estimators (unbiased estimator is used in the paper)
"""
import torch
import torch.nn as nn
def to_numpy(x):
    """Detach a torch tensor from the autograd graph, move it to host
    memory, and return it as a numpy array."""
    return x.clone().detach().cpu().numpy()
class HSIC(nn.Module):
    r"""Base class for finite-sample estimators of the Hilbert-Schmidt
    Independence Criterion (HSIC).

    ..math:: HSIC (X, Y) := || C_{x, y} ||^2_{HS}, where HSIC (X, Y) = 0 iff X and Y are independent.

    Two finite-sample estimators over m observations are provided:

    (1) biased estimator (HSIC_0)
        Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.

        :math: (m - 1)^{-2} tr KHLH,
        where K_{ij} = kernel_x (x_i, x_j), L_{ij} = kernel_y (y_i, y_j),
        and H = I - m^{-1} 1 1^\top (so K, L, H are m by m matrices).

    (2) unbiased estimator (HSIC_1) -- the one used in the paper.
        Song, Le, et al. "Feature selection via dependence maximization." 2012.

        :math: \frac{1}{m (m - 3)} \bigg[ tr (\tilde K \tilde L) + \frac{1^\top \tilde K 1 1^\top \tilde L 1}{(m-1)(m-2)} - \frac{2}{m-2} 1^\top \tilde K \tilde L 1 \bigg].
        where \tilde K and \tilde L equal K and L with their diagonal entries set to zero.

    Parameters
    ----------
    sigma_x : float
        the kernel size of the kernel function for X.
    sigma_y : float
        the kernel size of the kernel function for Y (defaults to sigma_x).
    algorithm: str ('unbiased' / 'biased')
        the algorithm for the finite sample estimator. 'unbiased' is used for our paper.
    reduction: not used (for compatibility with other losses).
    """
    def __init__(self, sigma_x, sigma_y=None, algorithm='unbiased',
                 reduction=None):
        super(HSIC, self).__init__()

        if sigma_y is None:
            sigma_y = sigma_x

        self.sigma_x = sigma_x
        self.sigma_y = sigma_y

        if algorithm == 'biased':
            self.estimator = self.biased_estimator
        elif algorithm == 'unbiased':
            self.estimator = self.unbiased_estimator
        else:
            raise ValueError('invalid estimator: {}'.format(algorithm))

    def _kernel_x(self, X):
        raise NotImplementedError

    def _kernel_y(self, Y):
        raise NotImplementedError

    def biased_estimator(self, input1, input2):
        """Biased estimator of Hilbert-Schmidt Independence Criterion.

        Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
        """
        K = self._kernel_x(input1)
        L = self._kernel_y(input2)

        # Subtracting the column mean is equivalent to applying the centering
        # matrix H = I - 11^T/m on one side.
        KH = K - K.mean(0, keepdim=True)
        LH = L - L.mean(0, keepdim=True)

        N = len(input1)

        return torch.trace(KH @ LH / (N - 1) ** 2)

    def unbiased_estimator(self, input1, input2):
        """Unbiased estimator of Hilbert-Schmidt Independence Criterion.

        Song, Le, et al. "Feature selection via dependence maximization." 2012.
        """
        kernel_XX = self._kernel_x(input1)
        kernel_YY = self._kernel_y(input2)

        # \tilde K, \tilde L: the kernels with their diagonals zeroed.
        # BUG FIX: the previous code used `kernel - torch.diag(kernel)`, but
        # torch.diag of a *matrix* returns its 1-D diagonal, so the
        # subtraction broadcast K_jj over the whole column j. That happens to
        # yield the same estimate when the kernel diagonal is constant (e.g.
        # RBF, where K_ii == 1, because the estimator is invariant to adding
        # a constant off-diagonal shift), but it is wrong for general kernels.
        tK = kernel_XX - torch.diag(torch.diag(kernel_XX))
        tL = kernel_YY - torch.diag(torch.diag(kernel_YY))

        N = len(input1)

        hsic = (
            torch.trace(tK @ tL)
            + (torch.sum(tK) * torch.sum(tL) / (N - 1) / (N - 2))
            - (2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2))
        )

        return hsic / (N * (N - 3))

    def forward(self, input1, input2, **kwargs):
        return self.estimator(input1, input2)
class RbfHSIC(HSIC):
    """HSIC with a Gaussian (RBF) kernel on both inputs."""

    def _kernel(self, X, sigma):
        # Squared pairwise distances via the Gram-matrix expansion, then the
        # Gaussian kernel exp(-||a - b||^2 / (2 sigma^2)).
        flat = X.view(len(X), -1)
        gram = flat @ flat.t()
        sq_norms = torch.diag(gram)
        sq_dists = sq_norms.unsqueeze(1) - 2 * gram + sq_norms.unsqueeze(0)
        gamma = 1 / (2 * sigma ** 2)
        return torch.exp(-gamma * sq_dists)

    def _kernel_x(self, X):
        return self._kernel(X, self.sigma_x)

    def _kernel_y(self, Y):
        return self._kernel(Y, self.sigma_y)
class MinusRbfHSIC(RbfHSIC):
    """Negated RbfHSIC, used when the optimization *maximizes* HSIC."""

    def forward(self, input1, input2, **kwargs):
        return self.estimator(input1, input2).neg()
| 4,434 | 33.648438 | 172 | py |
rebias | rebias-master/criterions/dist.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
Distance-based objective functions.
Re-implemented for the compatibility with other losses
"""
import torch.nn as nn
import torch.nn.functional as F
class MSELoss(nn.Module):
    """Mean squared error matching the shared criterion interface
    (extra keyword arguments are accepted and ignored)."""

    def __init__(self, reduction='mean', **kwargs):
        super().__init__()
        self.reduction = reduction

    def forward(self, input, target, **kwargs):
        loss = F.mse_loss(input, target, reduction=self.reduction)
        return loss
class L1Loss(nn.Module):
    """Mean absolute error matching the shared criterion interface
    (extra keyword arguments are accepted and ignored)."""

    def __init__(self, reduction='mean', **kwargs):
        super().__init__()
        self.reduction = reduction

    def forward(self, input, target, **kwargs):
        loss = F.l1_loss(input, target, reduction=self.reduction)
        return loss
| 858 | 25.84375 | 66 | py |
rebias | rebias-master/models/rebias_models.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
ReBias model wrapper.
"""
import torch.nn as nn
class ReBiasModels(object):
    """Bundles the target network ``f_net`` with one or more intentionally
    biased networks ``g_nets`` and mirrors the usual train/eval/forward API
    over all of them.
    """
    def __init__(self, f_net, g_nets):
        self.f_net = f_net
        self.g_nets = g_nets

    def to(self, device):
        self.f_net.to(device)
        for net in self.g_nets:
            net.to(device)

    def to_parallel(self, device):
        # Wrap every network in DataParallel, replacing entries in place so
        # existing references to the list stay valid.
        self.f_net = nn.DataParallel(self.f_net.to(device))
        for idx in range(len(self.g_nets)):
            self.g_nets[idx] = nn.DataParallel(self.g_nets[idx].to(device))

    def load_models(self, state_dict):
        self.f_net.load_state_dict(state_dict['f_net'])
        for net, params in zip(self.g_nets, state_dict['g_nets']):
            net.load_state_dict(params)

    def train_f(self):
        self.f_net.train()

    def eval_f(self):
        self.f_net.eval()

    def train_g(self):
        for net in self.g_nets:
            net.train()

    def eval_g(self):
        for net in self.g_nets:
            net.eval()

    def train(self):
        self.train_f()
        self.train_g()

    def eval(self):
        self.eval_f()
        self.eval_g()

    def forward(self, x):
        """Run every network on ``x`` and return
        ``(f_pred, g_preds, f_feat, g_feats)``."""
        f_pred, f_feat = self.f_net(x)
        g_outputs = [g_net(x) for g_net in self.g_nets]
        g_preds = [pred for pred, _ in g_outputs]
        g_feats = [feat for _, feat in g_outputs]
        return f_pred, g_preds, f_feat, g_feats

    def __call__(self, x):
        return self.forward(x)
| 1,620 | 23.560606 | 79 | py |
rebias | rebias-master/models/imagenet_models.py | """ResNet and BagNet implementations.
original codes
- https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
- https://github.com/wielandbrendel/bag-of-local-features-models/blob/master/bagnets/pytorchnet.py
"""
import torch
import torch.nn as nn
import math
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Download URLs for ImageNet-pretrained checkpoints, keyed by architecture
# name; consumed by `_resnet` via `load_state_dict_from_url`.
MODEL_URLS = {
    'bagnet9': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet8-34f4ccd2.pth.tar',
    'bagnet17': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet16-105524de.pth.tar',
    'bagnet33': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet32-2ddd53ed.pth.tar',
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
class BasicBlock_(nn.Module):
    """BagNet basic block: one (possibly unpadded) kxk conv followed by a
    1x1 conv; the residual is cropped when the main path shrinks spatially.
    """
    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, kernel_size=1):
        super(BasicBlock_, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        # Unpadded convolutions shrink spatial dims; crop the shortcut to match.
        if shortcut.size(-1) != out.size(-1):
            excess = shortcut.size(-1) - out.size(-1)
            shortcut = shortcut[:, :, :-excess, :-excess]

        out += shortcut
        return self.relu(out)
class Bottleneck_(nn.Module):
    """BagNet bottleneck block (1x1 -> kxk -> 1x1, output width planes*4);
    the residual is cropped when the unpadded middle conv shrinks the map.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, kernel_size=1):
        super(Bottleneck_, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size, stride=stride,
                               padding=0, bias=False)  # changed padding from (kernel_size - 1) // 2
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        # Unpadded convolutions shrink spatial dims; crop the shortcut to match.
        if shortcut.size(-1) != out.size(-1):
            excess = shortcut.size(-1) - out.size(-1)
            shortcut = shortcut[:, :, :-excess, :-excess]

        out += shortcut
        return self.relu(out)
class BagNetDeep(nn.Module):
    """BagNet backbone: stacks of residual blocks where only the first
    ``kernel3[i]`` block(s) of stage ``i`` use a 3x3 kernel and every other
    convolution is 1x1, keeping the receptive field of each logit small.

    Parameters
    ----------
    block : nn.Module class
        Residual block type (e.g. ``BasicBlock_``/``Bottleneck_``); must
        expose ``expansion`` and accept
        ``(inplanes, planes, stride, downsample, kernel_size)``.
    layers : sequence of 4 ints
        Number of blocks per stage.
    strides, kernel3 : sequences of 4 ints
        Per-stage stride and the number of leading 3x3 blocks.
    num_classes : int
        Output dimension of the linear head.
    feature_pos, avg_pool :
        Stored but not consulted in ``forward`` (kept for API compatibility).
    """

    def __init__(self, block, layers, strides=(2, 2, 2, 1), kernel3=(0, 0, 0, 0), num_classes=1000,
                 feature_pos='post', avg_pool=True):
        # NOTE: the list defaults became tuples to avoid the
        # mutable-default-argument pitfall; indexing behaviour is identical.
        super(BagNetDeep, self).__init__()
        self.inplanes = 64
        self.feature_pos = feature_pos
        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=0.001)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], kernel3=kernel3[0], prefix='layer1')
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], kernel3=kernel3[1], prefix='layer2')
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], kernel3=kernel3[2], prefix='layer3')
        self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], kernel3=kernel3[3], prefix='layer4')
        # NOTE(review): this avgpool module is unused; `forward` builds its
        # own pool sized to the incoming feature map.
        self.avgpool = nn.AvgPool2d(1, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.avg_pool = avg_pool
        self.block = block

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He-style initialization, matching the original BagNet release.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, kernel3=0, prefix=''):
        """Build one stage; only the first `kernel3` blocks use a 3x3 kernel."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match shape when stride/width change.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        kernel = 1 if kernel3 == 0 else 3
        layers.append(block(self.inplanes, planes, stride, downsample, kernel_size=kernel))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            kernel = 1 if kernel3 <= i else 3
            layers.append(block(self.inplanes, planes, kernel_size=kernel))

        return nn.Sequential(*layers)

    def forward(self, x):
        """Return ``(logits, pooled_features)``; the pooled features keep
        their singleton spatial dims, i.e. shape (N, C, 1, 1)."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x_ = nn.AvgPool2d(x.size()[2], stride=1)(x)
        x = x_.view(x_.size(0), -1)
        x = self.fc(x)
        return x, x_
def bagnet18(feature_pos='post', num_classes=1000, rf=43):
    # BagNet-18: ResNet-18-style BagNet where only layer1's first block uses
    # a 3x3 kernel (kernel3=[1, 0, 0, 0]), limiting the receptive field.
    # NOTE(review): the `rf` argument is currently accepted but unused.
    model = BagNetDeep(BasicBlock_, [2, 2, 2, 2], strides=[2, 2, 2, 1], kernel3=[1, 0, 0, 0],
                       num_classes=num_classes, feature_pos=feature_pos)
    return model
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution, bias-free; padding equals the dilation so the
    spatial size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution, bias-free (used for channel projection shortcuts)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard two-conv residual block (3x3 -> 3x3) with identity shortcut."""
    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and the optional downsample) carry the spatial stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck; the 3x3 conv carries stride,
    groups and dilation, and the output width is ``planes * 4``."""
    expansion = 4
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and the optional downsample) carry the spatial stride.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    # torchvision-style ResNet; `forward` returns the logits *and* the
    # globally pooled feature map (N, C, 1, 1).
    # NOTE(review): `rf` and `feature_pos` are accepted/stored but never used
    # here — presumably kept for interface parity with BagNetDeep; confirm.

    def __init__(self, block, layers, num_classes=1000, feature_pos='post', zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None, rf=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.feature_pos = feature_pos

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (overall stride 4).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[2]) if False else self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        # Builds one residual stage; when `dilate` is set the stage keeps
        # stride 1 and grows the dilation instead.
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match shape when stride/width change.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def forward(self, x):
        # Returns (logits, pooled_features); the pooled features keep their
        # singleton spatial dims, i.e. shape (N, 512 * expansion, 1, 1).
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x_ = self.avgpool(x)
        x = torch.flatten(x_, 1)
        x = self.fc(x)

        return x, x_
def _resnet(arch, block, layers, pretrained, progress, rf, num_classes, feature_pos, **kwargs):
    """Build a ResNet and optionally load the ImageNet checkpoint for `arch`.

    Weights are loaded with strict=False so a head of a different size
    (num_classes != 1000) is tolerated.
    """
    model = ResNet(block, layers, rf=rf, num_classes=num_classes, feature_pos=feature_pos, **kwargs)
    if pretrained:
        pretrained_weights = load_state_dict_from_url(MODEL_URLS[arch], progress=progress)
        model.load_state_dict(pretrained_weights, strict=False)
    return model
def resnet18(feature_pos='post', num_classes=1000, rf=43, pretrained=False, progress=True, **kwargs):
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   rf, num_classes, feature_pos, **kwargs)
| 15,233 | 38.466321 | 159 | py |
rebias | rebias-master/models/mnist_models.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
Implementation for simple statcked convolutional networks.
"""
import torch
import torch.nn as nn
class SimpleConvNet(nn.Module):
    """Small 4-stage CNN used as the MNIST f/g network.

    Architecture: four conv-BN-ReLU stages (16 -> 32 -> 64 -> 128 channels,
    padding keeps spatial dims) followed by global average pooling and a
    linear classifier.

    Parameters
    ----------
    num_classes : int, optional
        Size of the classifier output; defaults to 10 when not given.
        (BUG FIX: this argument used to be accepted but silently ignored —
        the head was hard-coded to 10 classes.)
    kernel_size : int
        Kernel size shared by all conv layers.
    feature_pos : str, one of 'pre' | 'post' | 'logits'
        Which tensor `forward` returns as the feature: the pre-GAP feature
        map, the post-GAP vector, or the logits themselves.
    """
    def __init__(self, num_classes=None, kernel_size=7, feature_pos='post'):
        super(SimpleConvNet, self).__init__()
        if num_classes is None:
            num_classes = 10  # backward-compatible default
        padding = kernel_size // 2
        layers = [
            nn.Conv2d(3, 16, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.Conv2d(16, 32, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 128, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
        ]
        self.extracter = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Honor `num_classes` instead of the previous hard-coded 10.
        self.fc = nn.Linear(128, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        if feature_pos not in ['pre', 'post', 'logits']:
            raise ValueError(feature_pos)
        self.feature_pos = feature_pos

    def forward(self, x, logits_only=False):
        """Return logits, plus (unless `logits_only`) the feature selected
        by `feature_pos`."""
        pre_gap_feats = self.extracter(x)
        post_gap_feats = self.avgpool(pre_gap_feats)
        post_gap_feats = torch.flatten(post_gap_feats, 1)
        logits = self.fc(post_gap_feats)

        if logits_only:
            return logits
        elif self.feature_pos == 'pre':
            feats = pre_gap_feats
        elif self.feature_pos == 'post':
            feats = post_gap_feats
        else:
            feats = logits
        return logits, feats
| 2,049 | 32.606557 | 86 | py |
rebias | rebias-master/models/action_models/ResNet3D.py | import torch.nn as nn
from .weight_init_helper import init_weights
from .stem_helper import VideoModelStem
from .resnet_helper import ResStage
from .head_helper import ResNetBasicHead
# Number of blocks for different stages given the model depth.
# Keys ending in ".1" are thin variants sharing the stage depths of the base net.
_MODEL_STAGE_DEPTH = {18.1: (2, 2, 2, 2),
                      18: (2, 2, 2, 2),
                      34.1: (3, 4, 6, 3),
                      50: (3, 4, 6, 3),
                      101: (3, 4, 23, 3)}
# Residual transform per depth: basic (two 3x3) for shallow variants,
# bottleneck (1x1-3x3-1x1) otherwise.
_MODEL_TRANS_FUNC = {18.1: 'basic_transform',
                     18: 'basic_transform',
                     34.1: 'basic_transform',
                     50: 'bottleneck_transform',
                     101: 'bottleneck_transform'}
# width_multiplier = {18: [1, 1, 2, 4, 8],
#                     50: [1, 4, 8, 16, 32]}
# Channel width multipliers for [stem, res2, res3, res4, res5], keyed by depth.
width_multiplier = {18.1: [1, 1, 2, 4, 8],
                    34.1: [1, 1, 2, 4, 8],
                    18: [1, 4, 8, 16, 32],
                    50: [1, 4, 8, 16, 32]}
# Kernel/stride of the max-pool applied after stage s2 (1x1x1 = no-op size).
_POOL1 = [[1, 1, 1]]
# Temporal kernel sizes per stage (conv1, res2..res5), keyed by a 5-char
# architecture code with one digit per stage.
_TEMPORAL_KERNEL_BASIS = {
    "11111": [
        [[1]],  # conv1 temporal kernel.
        [[1]],  # res2 temporal kernel.
        [[1]],  # res3 temporal kernel.
        [[1]],  # res4 temporal kernel.
        [[1]],  # res5 temporal kernel.
    ],
    "33333": [
        [[3]],  # conv1 temporal kernel.
        [[3]],  # res2 temporal kernel.
        [[3]],  # res3 temporal kernel.
        [[3]],  # res4 temporal kernel.
        [[3]],  # res5 temporal kernel.
    ],
    "11133": [
        [[1]],  # conv1 temporal kernel.
        [[1]],  # res2 temporal kernel.
        [[1]],  # res3 temporal kernel.
        [[3]],  # res4 temporal kernel.
        [[3]],  # res5 temporal kernel.
    ],
}
# Initialization / structural defaults consumed by ResNet3DModel below.
FC_INIT_STD = 0.01
ZERO_INIT_FINAL_BN = False
NUM_BLOCK_TEMP_KERNEL = [[2], [2], [2], [2]]
# Expected clip shape used to size the head's pooling window.
DATA_NUM_FRAMES = 8
DATA_CROP_SIZE = 224
# Non-local block placement/config (empty lists = no non-local blocks).
NONLOCAL_LOCATION = [[[]], [[]], [[]], [[]]]
NONLOCAL_GROUP = [[1], [1], [1], [1]]
NONLOCAL_INSTANTIATION = "dot_product"
RESNET_STRIDE_1X1 = False
RESNET_INPLACE_RELU = True
class ResNet3DModel(nn.Module):
    """
    ResNet model builder. It builds a ResNet like network backbone without
    lateral connection (C2D, I3D, SlowOnly).

    Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
    "Slowfast networks for video recognition."
    https://arxiv.org/pdf/1812.03982.pdf

    Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He.
    "Non-local neural networks."
    https://arxiv.org/pdf/1711.07971.pdf
    """

    def __init__(self,
                 model_arch='33333',
                 resnet_depth=18,
                 feature_position='post',
                 width_per_group=32,
                 dropout_rate=0.0,
                 num_classes=400,
                 final_bottleneck_dim=0
                 ):
        """
        Args:
            model_arch (str): 5-digit code selecting the temporal kernel per
                stage; must be a key of `_TEMPORAL_KERNEL_BASIS`.
            resnet_depth: key of `_MODEL_STAGE_DEPTH` (18, 18.1, 34.1, 50, 101).
            feature_position (str): forwarded to the head; controls which
                tensor is returned as the feature.
            width_per_group (int): base channel width of the stem.
            dropout_rate (float): dropout before the head's projection.
            num_classes (int): output dimension of the classifier head.
            final_bottleneck_dim (int): optional bottleneck before the head
                (0 disables it).
        """
        super(ResNet3DModel, self).__init__()
        # Single-pathway network (no SlowFast lateral connections).
        self.num_pathways = 1
        self._construct_network(
            model_arch=model_arch,
            resnet_depth=resnet_depth,
            dropout_rate=dropout_rate,
            width_per_group=width_per_group,
            num_classes=num_classes,
            feature_position=feature_position,
            final_bottleneck_dim=final_bottleneck_dim
        )
        init_weights(
            self, FC_INIT_STD, ZERO_INIT_FINAL_BN
        )

    def _construct_network(self, model_arch='33333',
                           resnet_depth=18,
                           feature_position='post',
                           num_groups=1,
                           width_per_group=32,
                           input_channel_num=None,
                           dropout_rate=0.0,
                           num_classes=400,
                           final_bottleneck_dim=0):
        """
        Builds a single pathway ResNet model: stem (s1), four residual
        stages (s2..s5) with an optional pool after s2, and a pooling head.
        """
        if input_channel_num is None:
            input_channel_num = [3]  # RGB input by default
        pool_size = _POOL1
        assert len({len(pool_size), self.num_pathways}) == 1
        assert resnet_depth in _MODEL_STAGE_DEPTH.keys()

        (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[resnet_depth]
        trans_func = _MODEL_TRANS_FUNC[resnet_depth]

        num_groups = num_groups
        width_per_group = width_per_group
        dim_inner = num_groups * width_per_group

        # Per-stage temporal kernel sizes selected by the architecture code.
        temp_kernel = _TEMPORAL_KERNEL_BASIS[str(model_arch)]

        # Stem: temporal kernel from the arch code, 7x7 spatial, stride 2.
        self.s1 = VideoModelStem(
            dim_in=input_channel_num,
            dim_out=[width_per_group * width_multiplier[resnet_depth][0]],
            kernel=[temp_kernel[0][0] + [7, 7]],
            stride=[[1, 2, 2]],
            padding=[[temp_kernel[0][0][0] // 2, 3, 3]],
        )

        self.s2 = ResStage(
            dim_in=[width_per_group * width_multiplier[resnet_depth][0]],
            dim_out=[width_per_group * width_multiplier[resnet_depth][1]],
            dim_inner=[dim_inner],
            temp_kernel_sizes=temp_kernel[1],
            stride=[1],
            num_blocks=[d2],
            num_groups=[num_groups],
            num_block_temp_kernel=NUM_BLOCK_TEMP_KERNEL[0],
            nonlocal_inds=NONLOCAL_LOCATION[0],
            nonlocal_group=NONLOCAL_GROUP[0],
            instantiation=NONLOCAL_INSTANTIATION,
            trans_func_name=trans_func,
            stride_1x1=RESNET_STRIDE_1X1,
            inplace_relu=RESNET_INPLACE_RELU,
        )

        # Optional max-pool between s2 and s3 (identity-sized with _POOL1).
        for pathway in range(self.num_pathways):
            pool = nn.MaxPool3d(
                kernel_size=pool_size[pathway],
                stride=pool_size[pathway],
                padding=[0, 0, 0],
            )
            self.add_module("pathway{}_pool".format(pathway), pool)

        self.s3 = ResStage(
            dim_in=[width_per_group * width_multiplier[resnet_depth][1]],
            dim_out=[width_per_group * width_multiplier[resnet_depth][2]],
            dim_inner=[dim_inner * 2],
            temp_kernel_sizes=temp_kernel[2],
            stride=[2],
            num_blocks=[d3],
            num_groups=[num_groups],
            num_block_temp_kernel=NUM_BLOCK_TEMP_KERNEL[1],
            nonlocal_inds=NONLOCAL_LOCATION[1],
            nonlocal_group=NONLOCAL_GROUP[1],
            instantiation=NONLOCAL_INSTANTIATION,
            trans_func_name=trans_func,
            stride_1x1=RESNET_STRIDE_1X1,
            inplace_relu=RESNET_INPLACE_RELU,
        )

        self.s4 = ResStage(
            dim_in=[width_per_group * width_multiplier[resnet_depth][2]],
            dim_out=[width_per_group * width_multiplier[resnet_depth][3]],
            dim_inner=[dim_inner * 4],
            temp_kernel_sizes=temp_kernel[3],
            stride=[2],
            num_blocks=[d4],
            num_groups=[num_groups],
            num_block_temp_kernel=NUM_BLOCK_TEMP_KERNEL[2],
            nonlocal_inds=NONLOCAL_LOCATION[2],
            nonlocal_group=NONLOCAL_GROUP[2],
            instantiation=NONLOCAL_INSTANTIATION,
            trans_func_name=trans_func,
            stride_1x1=RESNET_STRIDE_1X1,
            inplace_relu=RESNET_INPLACE_RELU,
        )

        self.s5 = ResStage(
            dim_in=[width_per_group * width_multiplier[resnet_depth][3]],
            dim_out=[width_per_group * width_multiplier[resnet_depth][4]],
            dim_inner=[dim_inner * 8],
            temp_kernel_sizes=temp_kernel[4],
            stride=[2],
            num_blocks=[d5],
            num_groups=[num_groups],
            num_block_temp_kernel=NUM_BLOCK_TEMP_KERNEL[3],
            nonlocal_inds=NONLOCAL_LOCATION[3],
            nonlocal_group=NONLOCAL_GROUP[3],
            instantiation=NONLOCAL_INSTANTIATION,
            trans_func_name=trans_func,
            stride_1x1=RESNET_STRIDE_1X1,
            inplace_relu=RESNET_INPLACE_RELU,
        )

        # Head pools over the remaining T x H/32 x W/32 volume; the window
        # is derived from the expected clip shape (DATA_NUM_FRAMES/CROP_SIZE).
        self.head = ResNetBasicHead(
            dim_in=[width_per_group * width_multiplier[resnet_depth][4]],
            num_classes=num_classes,
            pool_size=[
                [
                    DATA_NUM_FRAMES // pool_size[0][0],
                    DATA_CROP_SIZE // 32 // pool_size[0][1],
                    DATA_CROP_SIZE // 32 // pool_size[0][2],
                ]
            ],
            dropout_rate=dropout_rate,
            feature_position=feature_position,
            final_bottleneck_dim=final_bottleneck_dim
        )

    def forward(self, x, logits_only=False):
        # The stem/stages expect a list of pathway tensors (single pathway here).
        x = [x]
        x = self.s1(x)
        x = self.s2(x)
        for pathway in range(self.num_pathways):
            pool = getattr(self, "pathway{}_pool".format(pathway))
            x[pathway] = pool(x[pathway])
        x = self.s3(x)
        x = self.s4(x)
        x = self.s5(x)
        x, h = self.head(x)
        if logits_only:
            return x
        else:
            return x, h
| 8,939 | 33.921875 | 74 | py |
rebias | rebias-master/models/action_models/nonlocal_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Non-local helper"""
import torch
import torch.nn as nn
class Nonlocal(nn.Module):
"""
Builds Non-local Neural Networks as a generic family of building
blocks for capturing long-range dependencies. Non-local Network
computes the response at a position as a weighted sum of the
features at all positions. This building block can be plugged into
many computer vision architectures.
More details in the paper: https://arxiv.org/pdf/1711.07971.pdf
"""
    def __init__(
        self,
        dim,
        dim_inner,
        pool_size=None,
        instantiation="softmax",
        norm_type="batchnorm",
        zero_init_final_conv=False,
        zero_init_final_norm=True,
        norm_eps=1e-5,
        norm_momentum=0.1,
    ):
        """
        Args:
            dim (int): number of dimension for the input.
            dim_inner (int): number of dimension inside of the Non-local block.
            pool_size (list): the kernel size of spatial temporal pooling,
                temporal pool kernel size, spatial pool kernel size, spatial
                pool kernel size in order. By default pool_size is None,
                then there would be no pooling used.
            instantiation (string): supports two different instantiation method:
                "dot_product": normalizing correlation matrix with L2.
                "softmax": normalizing correlation matrix with Softmax.
            norm_type (string): support BatchNorm and LayerNorm for
                normalization.
                "batchnorm": using BatchNorm for normalization.
                "layernorm": using LayerNorm for normalization.
                "none": not using any normalization.
            zero_init_final_conv (bool): If true, zero initializing the final
                convolution of the Non-local block.
            zero_init_final_norm (bool):
                If true, zero initializing the final batch norm of the Non-local
                block.
        """
        super(Nonlocal, self).__init__()
        self.dim = dim
        self.dim_inner = dim_inner
        self.pool_size = pool_size
        self.instantiation = instantiation
        self.norm_type = norm_type
        # Pool only when a pool size was given and at least one of its
        # dimensions is larger than 1 (otherwise pooling would be a no-op).
        self.use_pool = (
            False
            if pool_size is None
            else any((size > 1 for size in pool_size))
        )
        self.norm_eps = norm_eps
        self.norm_momentum = norm_momentum
        self._construct_nonlocal(zero_init_final_conv, zero_init_final_norm)
def _construct_nonlocal(self, zero_init_final_conv, zero_init_final_norm):
# Three convolution heads: theta, phi, and g.
self.conv_theta = nn.Conv3d(
self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
)
self.conv_phi = nn.Conv3d(
self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
)
self.conv_g = nn.Conv3d(
self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
)
# Final convolution output.
self.conv_out = nn.Conv3d(
self.dim_inner, self.dim, kernel_size=1, stride=1, padding=0
)
# Zero initializing the final convolution output.
self.conv_out.zero_init = zero_init_final_conv
if self.norm_type == "batchnorm":
self.bn = nn.BatchNorm3d(
self.dim, eps=self.norm_eps, momentum=self.norm_momentum
)
# Zero initializing the final bn.
self.bn.transform_final_bn = zero_init_final_norm
elif self.norm_type == "layernorm":
# In Caffe2 the LayerNorm op does not contain the scale an bias
# terms described in the paper:
# https://caffe2.ai/docs/operators-catalogue.html#layernorm
# Builds LayerNorm as GroupNorm with one single group.
# Setting Affine to false to align with Caffe2.
self.ln = nn.GroupNorm(1, self.dim, eps=self.norm_eps, affine=False)
elif self.norm_type == "none":
# Does not use any norm.
pass
else:
raise NotImplementedError(
"Norm type {} is not supported".format(self.norm_type)
)
# Optional to add the spatial-temporal pooling.
if self.use_pool:
self.pool = nn.MaxPool3d(
kernel_size=self.pool_size,
stride=self.pool_size,
padding=[0, 0, 0],
)
def forward(self, x):
x_identity = x
N, C, T, H, W = x.size()
theta = self.conv_theta(x)
# Perform temporal-spatial pooling to reduce the computation.
if self.use_pool:
x = self.pool(x)
phi = self.conv_phi(x)
g = self.conv_g(x)
theta = theta.view(N, self.dim_inner, -1)
phi = phi.view(N, self.dim_inner, -1)
g = g.view(N, self.dim_inner, -1)
# (N, C, TxHxW) * (N, C, TxHxW) => (N, TxHxW, TxHxW).
theta_phi = torch.einsum("nct,ncp->ntp", (theta, phi))
# For original Non-local paper, there are two main ways to normalize
# the affinity tensor:
# 1) Softmax normalization (norm on exp).
# 2) dot_product normalization.
if self.instantiation == "softmax":
# Normalizing the affinity tensor theta_phi before softmax.
theta_phi = theta_phi * (self.dim_inner ** -0.5)
theta_phi = nn.functional.softmax(theta_phi, dim=2)
elif self.instantiation == "dot_product":
spatial_temporal_dim = theta_phi.shape[2]
theta_phi = theta_phi / spatial_temporal_dim
else:
raise NotImplementedError(
"Unknown norm type {}".format(self.instantiation)
)
# (N, TxHxW, TxHxW) * (N, C, TxHxW) => (N, C, TxHxW).
theta_phi_g = torch.einsum("ntg,ncg->nct", (theta_phi, g))
# (N, C, TxHxW) => (N, C, T, H, W).
theta_phi_g = theta_phi_g.view(N, self.dim_inner, T, H, W)
p = self.conv_out(theta_phi_g)
if self.norm_type == "batchnorm":
p = self.bn(p)
elif self.norm_type == "layernorm":
p = self.ln(p)
return x_identity + p
| 6,320 | 37.542683 | 80 | py |
rebias | rebias-master/models/action_models/head_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""ResNe(X)t Head helper."""
import torch
import torch.nn as nn
class ResNetBasicHead(nn.Module):
    """
    ResNe(X)t 3D head.
    This layer performs a fully-connected projection during training, when the
    input size is 1x1x1. It performs a convolutional projection during testing
    when the input size is larger than 1x1x1. If the inputs are from multiple
    different pathways, the inputs will be concatenated after pooling.
    """
    def __init__(
        self,
        dim_in,
        num_classes,
        pool_size,
        dropout_rate=0.0,
        feature_position='post',
        act_func="softmax",
        final_bottleneck_dim=None
    ):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        ResNetBasicHead takes p pathways as input where p in [1, infty].
        Args:
            dim_in (list): the list of channel dimensions of the p inputs to the
                ResNetHead.
            num_classes (int): the channel dimensions of the p outputs to the
                ResNetHead.
            pool_size (list): the list of kernel sizes of p spatial temporal
                poolings, temporal pool kernel size, spatial pool kernel size,
                spatial pool kernel size in order.
            dropout_rate (float): dropout rate. If equal to 0.0, perform no
                dropout.
            feature_position (string): which intermediate tensor `forward`
                returns as the feature `h`: 'final_bottleneck' (globally
                averaged pooled features), 'logit' (projection output), or any
                other value, e.g. the default 'post' (raw pooled feature map).
            act_func (string): activation function to use. 'softmax': applies
                softmax on the output. 'sigmoid': applies sigmoid on the output.
            final_bottleneck_dim (int): if given, adds a 1x1x1 conv + BN + ReLU
                bottleneck (sum(dim_in) -> final_bottleneck_dim) applied to
                each pathway in `forward` before pooling and projection.
        """
        super(ResNetBasicHead, self).__init__()
        assert (
            len({len(pool_size), len(dim_in)}) == 1
        ), "pathway dimensions are not consistent."
        self.num_pathways = len(pool_size)
        for pathway in range(self.num_pathways):
            avg_pool = nn.AvgPool3d(pool_size[pathway], stride=1)
            self.add_module("pathway{}_avgpool".format(pathway), avg_pool)
        if dropout_rate > 0.0:
            self.dropout = nn.Dropout(dropout_rate)
        # Optional final bottleneck (e.g., 2048 -> final_bottleneck_dim ->
        # num_classes). NOTE(review): the conv expects sum(dim_in) input
        # channels but forward() applies it per pathway, so this path only
        # works for single-pathway inputs — confirm against callers.
        if final_bottleneck_dim:
            self.final_bottleneck_dim = final_bottleneck_dim
            self.final_bottleneck = nn.Conv3d(sum(dim_in), final_bottleneck_dim,
                                              kernel_size=1,
                                              stride=1,
                                              padding=0,
                                              bias=False)
            self.final_bottleneck_bn = nn.BatchNorm3d(final_bottleneck_dim,
                                                      eps=1e-5,
                                                      momentum=0.1)
            self.final_bottleneck_act = nn.ReLU(inplace=True)
            dim_in = final_bottleneck_dim
        else:
            self.final_bottleneck_dim = None
            dim_in = sum(dim_in)
        # Perform FC in a fully convolutional manner. The FC layer will be
        # initialized with a different std comparing to convolutional layers.
        self.projection = nn.Linear(dim_in, num_classes, bias=True)
        self.feature_position = feature_position
        # Softmax for evaluation and testing.
        if act_func == "softmax":
            self.act = nn.Softmax(dim=4)
        elif act_func == "sigmoid":
            self.act = nn.Sigmoid()
        else:
            raise NotImplementedError(
                "{} is not supported as an activation"
                "function.".format(act_func)
            )
    def forward(self, inputs):
        """
        Args:
            inputs (list): p pathway tensors of shape (N, C, T, H, W).
                NOTE: the list is mutated in place when the final bottleneck
                is enabled.
        Returns:
            (x, h) where x is the class prediction (activation applied and
            averaged over T/H/W in eval mode) and h is the feature selected
            by `self.feature_position`.
        """
        assert (
            len(inputs) == self.num_pathways
        ), "Input tensor does not contain {} pathway".format(self.num_pathways)
        pool_out = []
        # Perform final bottleneck
        if self.final_bottleneck_dim:
            for pathway in range(self.num_pathways):
                inputs[pathway] = self.final_bottleneck(inputs[pathway])
                inputs[pathway] = self.final_bottleneck_bn(inputs[pathway])
                inputs[pathway] = self.final_bottleneck_act(inputs[pathway])
        for pathway in range(self.num_pathways):
            m = getattr(self, "pathway{}_avgpool".format(pathway))
            pool_out.append(m(inputs[pathway]))
        # Default feature ('post' position): pooled, pathway-concatenated map.
        h = torch.cat(pool_out, 1)
        # (N, C, T, H, W) -> (N, T, H, W, C).
        x = h.permute((0, 2, 3, 4, 1))
        # Perform dropout.
        if hasattr(self, "dropout"):
            x = self.dropout(x)
        if self.feature_position == 'final_bottleneck':
            # Global average over the remaining T/H/W dims, flattened.
            h = x.mean([1, 2, 3])
            h = h.view(h.shape[0], -1)
        x = self.projection(x)
        if self.feature_position == 'logit':
            h = x
        # Performs fully convlutional inference.
        if not self.training:
            x = self.act(x)
            x = x.mean([1, 2, 3])
            x = x.view(x.shape[0], -1)
        return x, h
| 5,073 | 36.308824 | 94 | py |
rebias | rebias-master/models/action_models/stem_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""ResNe(X)t 3D stem helper."""
import torch.nn as nn
class VideoModelStem(nn.Module):
    """
    Video 3D stem module. Provides stem operations of Conv, BN, ReLU, MaxPool
    on input data tensor for one or multiple pathways.
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        kernel,
        stride,
        padding,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
    ):
        """
        The `__init__` method of any subclass should also contain these
        arguments. List size of 1 for single pathway models (C2D, I3D, SlowOnly
        and etc), list size of 2 for two pathway models (SlowFast).
        Args:
            dim_in (list): the list of channel dimensions of the inputs.
            dim_out (list): the output dimension of the convolution in the stem
                layer.
            kernel (list): the kernels' size of the convolutions in the stem
                layers. Temporal kernel size, height kernel size, width kernel
                size in order.
            stride (list): the stride sizes of the convolutions in the stem
                layer. Temporal kernel stride, height kernel size, width kernel
                size in order.
            padding (list): the paddings' sizes of the convolutions in the stem
                layer. Temporal padding size, height padding size, width padding
                size in order.
            inplace_relu (bool): calculate the relu on the original input
                without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Noted that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
        """
        super(VideoModelStem, self).__init__()
        assert (
            len(
                {
                    len(dim_in),
                    len(dim_out),
                    len(kernel),
                    len(stride),
                    len(padding),
                }
            )
            == 1
        ), "Input pathway dimensions are not consistent."
        self.num_pathways = len(dim_in)
        self.kernel = kernel
        self.stride = stride
        self.padding = padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        # Construct the stem layer.
        self._construct_stem(dim_in, dim_out)
    def _construct_stem(self, dim_in, dim_out):
        # One ResNetBasicStem per pathway, registered as "pathway{i}_stem".
        for pathway in range(len(dim_in)):
            stem = ResNetBasicStem(
                dim_in[pathway],
                dim_out[pathway],
                self.kernel[pathway],
                self.stride[pathway],
                self.padding[pathway],
                self.inplace_relu,
                self.eps,
                self.bn_mmt,
            )
            self.add_module("pathway{}_stem".format(pathway), stem)
    def forward(self, x):
        """Apply each pathway's stem to its tensor.
        Args:
            x (list): one (N, C, T, H, W) tensor per pathway.
        Returns:
            list: the same list, with each entry replaced in place by the
            corresponding stem output.
        """
        assert (
            len(x) == self.num_pathways
        ), "Input tensor does not contain {} pathway".format(self.num_pathways)
        for pathway in range(len(x)):
            m = getattr(self, "pathway{}_stem".format(pathway))
            x[pathway] = m(x[pathway])
        return x
class ResNetBasicStem(nn.Module):
    """
    ResNe(X)t 3D stem module.
    Applies a spatiotemporal convolution, batch normalization, and ReLU,
    followed by a fixed 1x3x3 spatial max pooling with stride 1x2x2.
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        kernel,
        stride,
        padding,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
    ):
        """
        Args:
            dim_in (int): number of input channels (e.g. 3 for RGB frames,
                2 or 3 for optical flow).
            dim_out (int): number of output channels of the stem convolution.
            kernel (list): conv kernel size as [temporal, height, width].
            stride (list): conv stride as [temporal, height, width].
            padding (list): conv padding as [temporal, height, width].
            inplace_relu (bool): perform the ReLU in place to save memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm (PyTorch convention, i.e.
                1 - Caffe2 momentum).
        """
        super(ResNetBasicStem, self).__init__()
        self.kernel = kernel
        self.stride = stride
        self.padding = padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        self._construct_stem(dim_in, dim_out)
    def _construct_stem(self, dim_in, dim_out):
        # Conv -> BN -> ReLU -> MaxPool. Module creation order is preserved so
        # parameter/state_dict ordering matches the reference implementation.
        self.conv = nn.Conv3d(
            dim_in,
            dim_out,
            self.kernel,
            stride=self.stride,
            padding=self.padding,
            bias=False,
        )
        self.bn = nn.BatchNorm3d(dim_out, eps=self.eps, momentum=self.bn_mmt)
        self.relu = nn.ReLU(self.inplace_relu)
        self.pool_layer = nn.MaxPool3d(
            kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]
        )
    def forward(self, x):
        """Run the full stem pipeline on a (N, C, T, H, W) tensor."""
        return self.pool_layer(self.relu(self.bn(self.conv(x))))
| 5,867 | 33.116279 | 82 | py |
rebias | rebias-master/models/action_models/weight_init_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Utility function for weight initialization"""
import torch.nn as nn
from fvcore.nn.weight_init import c2_msra_fill
def init_weights(model, fc_init_std=0.01, zero_init_final_bn=True):
    """
    Performs ResNet style weight initialization.
    Args:
        model (nn.Module): model whose submodules are initialized in place.
        fc_init_std (float): the expected standard deviation for fc layer.
        zero_init_final_bn (bool): if True, zero initialize the final bn for
            every bottleneck (modules flagged with `transform_final_bn`).
    """
    for m in model.modules():
        if isinstance(m, nn.Conv3d):
            """
            Follow the initialization method proposed in:
            {He, Kaiming, et al.
            "Delving deep into rectifiers: Surpassing human-level
            performance on imagenet classification."
            arXiv preprint arXiv:1502.01852 (2015)}
            """
            c2_msra_fill(m)
        elif isinstance(m, nn.BatchNorm3d):
            if (
                hasattr(m, "transform_final_bn")
                and m.transform_final_bn
                and zero_init_final_bn
            ):
                batchnorm_weight = 0.0
            else:
                batchnorm_weight = 1.0
            # Guard against affine=False norms, whose weight/bias are None.
            if m.weight is not None:
                m.weight.data.fill_(batchnorm_weight)
            if m.bias is not None:
                m.bias.data.zero_()
        if isinstance(m, nn.Linear):
            m.weight.data.normal_(mean=0.0, std=fc_init_std)
            # Guard against bias=False linear layers.
            if m.bias is not None:
                m.bias.data.zero_()
| 1,444 | 33.404762 | 76 | py |
rebias | rebias-master/models/action_models/resnet_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Video models."""
import torch.nn as nn
from .nonlocal_helper import Nonlocal
def get_trans_func(name):
    """
    Retrieves the transformation module by name.
    Args:
        name (string): either "bottleneck_transform" or "basic_transform".
    Returns:
        The corresponding transformation class.
    """
    trans_funcs = {
        "basic_transform": BasicTransform,
        "bottleneck_transform": BottleneckTransform,
    }
    assert name in trans_funcs, (
        "Transformation function '{}' not supported".format(name)
    )
    return trans_funcs[name]
class BasicTransform(nn.Module):
    """
    Basic residual transformation: a Tx3x3 convolution followed by a 1x3x3
    convolution, where T is the temporal kernel size. Each conv is followed
    by batch norm; the second BN is flagged via `transform_final_bn` so that
    weight initialization may zero it.
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        dim_inner=None,
        num_groups=1,
        stride_1x1=None,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
    ):
        """
        Args:
            dim_in (int): input channel count.
            dim_out (int): output channel count.
            temp_kernel_size (int): temporal kernel size of the first conv.
            stride (int): spatial stride of the first conv.
            dim_inner (None): unused in BasicTransform.
            num_groups (int): unused; the group count is always 1 here.
            stride_1x1 (None): unused in BasicTransform.
            inplace_relu (bool): perform the ReLU in place.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm (PyTorch convention, i.e.
                1 - Caffe2 momentum).
        """
        super(BasicTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._construct(dim_in, dim_out, stride)
    def _construct(self, dim_in, dim_out, stride):
        temp_pad = int(self.temp_kernel_size // 2)
        # Tx3x3 conv + BN + ReLU.
        self.a = nn.Conv3d(
            dim_in,
            dim_out,
            kernel_size=[self.temp_kernel_size, 3, 3],
            stride=[1, stride, stride],
            padding=[temp_pad, 1, 1],
            bias=False,
        )
        self.a_bn = nn.BatchNorm3d(
            dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x3x3 conv + BN. No ReLU here: the residual sum is activated by
        # the enclosing block.
        self.b = nn.Conv3d(
            dim_out,
            dim_out,
            kernel_size=[1, 3, 3],
            stride=[1, 1, 1],
            padding=[0, 1, 1],
            bias=False,
        )
        self.b_bn = nn.BatchNorm3d(
            dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        # Mark the last BN so weight init can zero-initialize it.
        self.b_bn.transform_final_bn = True
    def forward(self, x):
        """Apply Tx3x3 -> BN -> ReLU, then 1x3x3 -> BN."""
        out = self.a_relu(self.a_bn(self.a(x)))
        return self.b_bn(self.b(out))
class BottleneckTransform(nn.Module):
    """
    Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of
    temporal kernel.
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        dim_inner,
        num_groups,
        stride_1x1=False,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
    ):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            dim_out (int): the channel dimension of the output.
            temp_kernel_size (int): the temporal kernel sizes of the middle
                convolution in the bottleneck.
            stride (int): the stride of the bottleneck.
            dim_inner (int): the inner dimension of the block.
            num_groups (int): number of groups for the convolution. num_groups=1
                is for standard ResNet like networks, and num_groups>1 is for
                ResNeXt like networks.
            stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
                apply stride to the 3x3 conv.
            inplace_relu (bool): if True, calculate the relu on the original
                input without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Noted that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
        """
        super(BottleneckTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._stride_1x1 = stride_1x1
        self._construct(dim_in, dim_out, stride, dim_inner, num_groups)
    def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups):
        # Place the spatial stride on either the first 1x1 conv or the 3x3
        # conv, depending on stride_1x1.
        (str1x1, str3x3) = (stride, 1) if self._stride_1x1 else (1, stride)
        # Tx1x1, BN, ReLU.
        self.a = nn.Conv3d(
            dim_in,
            dim_inner,
            kernel_size=[self.temp_kernel_size, 1, 1],
            stride=[1, str1x1, str1x1],
            padding=[int(self.temp_kernel_size // 2), 0, 0],
            bias=False,
        )
        self.a_bn = nn.BatchNorm3d(
            dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x3x3, BN, ReLU.
        self.b = nn.Conv3d(
            dim_inner,
            dim_inner,
            [1, 3, 3],
            stride=[1, str3x3, str3x3],
            padding=[0, 1, 1],
            groups=num_groups,
            bias=False,
        )
        self.b_bn = nn.BatchNorm3d(
            dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        self.b_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x1x1, BN.
        self.c = nn.Conv3d(
            dim_inner,
            dim_out,
            kernel_size=[1, 1, 1],
            stride=[1, 1, 1],
            padding=[0, 0, 0],
            bias=False,
        )
        self.c_bn = nn.BatchNorm3d(
            dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        # Flag the final BN so weight init may zero-initialize it.
        self.c_bn.transform_final_bn = True
    def forward(self, x):
        """Apply Tx1x1 -> 1x3x3 -> 1x1x1 (no ReLU after the last BN)."""
        # Explicitly forward every layer.
        # Branch2a.
        x = self.a(x)
        x = self.a_bn(x)
        x = self.a_relu(x)
        # Branch2b.
        x = self.b(x)
        x = self.b_bn(x)
        x = self.b_relu(x)
        # Branch2c
        x = self.c(x)
        x = self.c_bn(x)
        return x
class ResBlock(nn.Module):
    """
    Residual block.
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        trans_func,
        dim_inner,
        num_groups=1,
        stride_1x1=False,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
    ):
        """
        ResBlock class constructs redisual blocks. More details can be found in:
            Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.
            "Deep residual learning for image recognition."
            https://arxiv.org/abs/1512.03385
        Args:
            dim_in (int): the channel dimensions of the input.
            dim_out (int): the channel dimension of the output.
            temp_kernel_size (int): the temporal kernel sizes of the middle
                convolution in the bottleneck.
            stride (int): the stride of the bottleneck.
            trans_func (string): transform function to be used to construct the
                bottleneck.
            dim_inner (int): the inner dimension of the block.
            num_groups (int): number of groups for the convolution. num_groups=1
                is for standard ResNet like networks, and num_groups>1 is for
                ResNeXt like networks.
            stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
                apply stride to the 3x3 conv.
            inplace_relu (bool): calculate the relu on the original input
                without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Noted that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
        """
        super(ResBlock, self).__init__()
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._construct(
            dim_in,
            dim_out,
            temp_kernel_size,
            stride,
            trans_func,
            dim_inner,
            num_groups,
            stride_1x1,
            inplace_relu,
        )
    def _construct(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        trans_func,
        dim_inner,
        num_groups,
        stride_1x1,
        inplace_relu,
    ):
        # Use skip connection with projection if dim or res change.
        # branch1 is only created in that case; forward() detects it via
        # hasattr.
        if (dim_in != dim_out) or (stride != 1):
            self.branch1 = nn.Conv3d(
                dim_in,
                dim_out,
                kernel_size=1,
                stride=[1, stride, stride],
                padding=0,
                bias=False,
            )
            self.branch1_bn = nn.BatchNorm3d(
                dim_out, eps=self._eps, momentum=self._bn_mmt
            )
        # branch2 is the residual transformation (basic or bottleneck).
        self.branch2 = trans_func(
            dim_in,
            dim_out,
            temp_kernel_size,
            stride,
            dim_inner,
            num_groups,
            stride_1x1=stride_1x1,
            inplace_relu=inplace_relu,
        )
        self.relu = nn.ReLU(self._inplace_relu)
    def forward(self, x):
        """Return relu(shortcut(x) + branch2(x))."""
        if hasattr(self, "branch1"):
            # Projection shortcut when channels or resolution change.
            x = self.branch1_bn(self.branch1(x)) + self.branch2(x)
        else:
            # Identity shortcut.
            x = x + self.branch2(x)
        x = self.relu(x)
        return x
class ResStage(nn.Module):
    """
    Stage of 3D ResNet. It expects to have one or more tensors as input for
    single pathway (C2D, I3D, SlowOnly), and multi-pathway (SlowFast) cases.
    More details can be found here:
    Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
    "Slowfast networks for video recognition."
    https://arxiv.org/pdf/1812.03982.pdf
    """
    def __init__(
        self,
        dim_in,
        dim_out,
        stride,
        temp_kernel_sizes,
        num_blocks,
        dim_inner,
        num_groups,
        num_block_temp_kernel,
        nonlocal_inds,
        nonlocal_group,
        instantiation="softmax",
        trans_func_name="bottleneck_transform",
        stride_1x1=False,
        inplace_relu=True,
    ):
        """
        The `__init__` method of any subclass should also contain these arguments.
        ResStage builds p streams, where p can be greater or equal to one.
        Args:
            dim_in (list): list of p the channel dimensions of the input.
                Different channel dimensions control the input dimension of
                different pathways.
            dim_out (list): list of p the channel dimensions of the output.
                Different channel dimensions control the input dimension of
                different pathways.
            temp_kernel_sizes (list): list of the p temporal kernel sizes of the
                convolution in the bottleneck. Different temp_kernel_sizes
                control different pathway.
            stride (list): list of the p strides of the bottleneck. Different
                stride control different pathway.
            num_blocks (list): list of p numbers of blocks for each of the
                pathway.
            dim_inner (list): list of the p inner channel dimensions of the
                input. Different channel dimensions control the input dimension
                of different pathways.
            num_groups (list): list of number of p groups for the convolution.
                num_groups=1 is for standard ResNet like networks, and
                num_groups>1 is for ResNeXt like networks.
            num_block_temp_kernel (list): extent the temp_kernel_sizes to
                num_block_temp_kernel blocks, then fill temporal kernel size
                of 1 for the rest of the layers.
            nonlocal_inds (list): If the tuple is empty, no nonlocal layer will
                be added. If the tuple is not empty, add nonlocal layers after
                the index-th block.
            nonlocal_group (list): list of number of p nonlocal groups. Each
                number controls how to fold temporal dimension to batch
                dimension before applying nonlocal transformation.
                https://github.com/facebookresearch/video-nonlocal-net.
            instantiation (string): different instantiation for nonlocal layer.
                Supports two different instantiation method:
                    "dot_product": normalizing correlation matrix with L2.
                    "softmax": normalizing correlation matrix with Softmax.
            trans_func_name (string): name of the the transformation function apply
                on the network.
        """
        super(ResStage, self).__init__()
        assert all(
            (
                num_block_temp_kernel[i] <= num_blocks[i]
                for i in range(len(temp_kernel_sizes))
            )
        )
        self.num_blocks = num_blocks
        self.nonlocal_group = nonlocal_group
        # Tile each pathway's temporal kernel sizes across its blocks, keep
        # the first num_block_temp_kernel entries, and pad with kernel size 1
        # for the remaining blocks.
        self.temp_kernel_sizes = [
            (temp_kernel_sizes[i] * num_blocks[i])[: num_block_temp_kernel[i]]
            + [1] * (num_blocks[i] - num_block_temp_kernel[i])
            for i in range(len(temp_kernel_sizes))
        ]
        # All per-pathway argument lists must have the same length p.
        assert (
            len(
                {
                    len(dim_in),
                    len(dim_out),
                    len(temp_kernel_sizes),
                    len(stride),
                    len(num_blocks),
                    len(dim_inner),
                    len(num_groups),
                    len(num_block_temp_kernel),
                    len(nonlocal_inds),
                    len(nonlocal_group),
                }
            )
            == 1
        )
        self.num_pathways = len(self.num_blocks)
        self._construct(
            dim_in,
            dim_out,
            stride,
            dim_inner,
            num_groups,
            trans_func_name,
            stride_1x1,
            inplace_relu,
            nonlocal_inds,
            instantiation,
        )
    def _construct(
        self,
        dim_in,
        dim_out,
        stride,
        dim_inner,
        num_groups,
        trans_func_name,
        stride_1x1,
        inplace_relu,
        nonlocal_inds,
        instantiation,
    ):
        for pathway in range(self.num_pathways):
            for i in range(self.num_blocks[pathway]):
                # Retrieve the transformation function.
                trans_func = get_trans_func(trans_func_name)
                # Construct the block. Channel change and spatial stride are
                # applied only in the first block of the stage.
                res_block = ResBlock(
                    dim_in[pathway] if i == 0 else dim_out[pathway],
                    dim_out[pathway],
                    self.temp_kernel_sizes[pathway][i],
                    stride[pathway] if i == 0 else 1,
                    trans_func,
                    dim_inner[pathway],
                    num_groups[pathway],
                    stride_1x1=stride_1x1,
                    inplace_relu=inplace_relu,
                )
                self.add_module("pathway{}_res{}".format(pathway, i), res_block)
                # Optionally insert a Non-local block after the i-th ResBlock.
                if i in nonlocal_inds[pathway]:
                    nln = Nonlocal(
                        dim_out[pathway],
                        dim_out[pathway] // 2,
                        [1, 2, 2],
                        instantiation=instantiation,
                    )
                    self.add_module(
                        "pathway{}_nonlocal{}".format(pathway, i), nln
                    )
    def forward(self, inputs):
        """Run each pathway through its ResBlocks (and Non-local blocks).
        Args:
            inputs (list): one (N, C, T, H, W) tensor per pathway.
        Returns:
            list: one output tensor per pathway.
        """
        output = []
        for pathway in range(self.num_pathways):
            x = inputs[pathway]
            for i in range(self.num_blocks[pathway]):
                m = getattr(self, "pathway{}_res{}".format(pathway, i))
                x = m(x)
                if hasattr(self, "pathway{}_nonlocal{}".format(pathway, i)):
                    nln = getattr(
                        self, "pathway{}_nonlocal{}".format(pathway, i)
                    )
                    b, c, t, h, w = x.shape
                    if self.nonlocal_group[pathway] > 1:
                        # Fold temporal dimension into batch dimension.
                        x = x.permute(0, 2, 1, 3, 4)
                        x = x.reshape(
                            b * self.nonlocal_group[pathway],
                            t // self.nonlocal_group[pathway],
                            c,
                            h,
                            w,
                        )
                        x = x.permute(0, 2, 1, 3, 4)
                    x = nln(x)
                    if self.nonlocal_group[pathway] > 1:
                        # Fold back to temporal dimension.
                        x = x.permute(0, 2, 1, 3, 4)
                        x = x.reshape(b, t, c, h, w)
                        x = x.permute(0, 2, 1, 3, 4)
            output.append(x)
        return output
| 17,499 | 33.448819 | 83 | py |
rebias | rebias-master/datasets/colour_mnist.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
Python implementation of Biased-MNIST.
"""
import os
import numpy as np
from PIL import Image
import torch
from torch.utils import data
from torchvision import transforms
from torchvision.datasets import MNIST
class BiasedMNIST(MNIST):
    """A base class for Biased-MNIST.
    We manually select ten colours to synthesize the colour bias. (See `COLOUR_MAP` for the colour configuration)
    Usage is exactly same as torchvision MNIST dataset class.
    You have two parameters to control the level of bias.
    Parameters
    ----------
    root : str
        path to MNIST dataset.
    data_label_correlation : float, default=1.0
        Here, each class has the pre-defined colour (bias).
        data_label_correlation, or `rho` controls the level of the dataset bias.
        A sample is coloured with
        - the pre-defined colour with probability `rho`,
        - coloured with one of the other colours with probability `1 - rho`.
        The number of ``other colours'' is controlled by `n_confusing_labels` (default: 9).
        Note that the colour is injected into the background of the image (see `_binary_to_colour`).
        Hence, we have
        - Perfectly biased dataset with rho=1.0
        - Perfectly unbiased with rho=0.1 (1/10) ==> our ``unbiased'' setting in the test time.
        In the paper, we explore the high correlations but with small hints, e.g., rho=0.999.
    n_confusing_labels : int, default=9
        In the real-world cases, biases are not equally distributed, but highly unbalanced.
        We mimic the unbalanced biases by changing the number of confusing colours for each class.
        In the paper, we use n_confusing_labels=9, i.e., during training, the model can observe
        all colours for each class. However, you can make the problem harder by setting smaller n_confusing_labels, e.g., 2.
        We suggest to researchers considering this benchmark for future researches.
    """
    # One RGB colour per digit class (index = digit label).
    COLOUR_MAP = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [225, 225, 0], [225, 0, 225],
                  [0, 255, 255], [255, 128, 0], [255, 0, 128], [128, 0, 255], [128, 128, 128]]
    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download=False, data_label_correlation=1.0, n_confusing_labels=9):
        super().__init__(root, train=train, transform=transform,
                         target_transform=target_transform,
                         download=download)
        # When True, index arrays are shuffled in place by _shuffle.
        self.random = True
        self.data_label_correlation = data_label_correlation
        self.n_confusing_labels = n_confusing_labels
        # Re-build the dataset with injected colour bias; biased_targets
        # records the colour (bias) label of each sample.
        self.data, self.targets, self.biased_targets = self.build_biased_mnist()
        # Shuffle all three arrays with the same permutation.
        indices = np.arange(len(self.data))
        self._shuffle(indices)
        self.data = self.data[indices].numpy()
        self.targets = self.targets[indices]
        self.biased_targets = self.biased_targets[indices]
    @property
    def raw_folder(self):
        # Override torchvision's default layout (root/MNIST/raw).
        return os.path.join(self.root, 'raw')
    @property
    def processed_folder(self):
        # Override torchvision's default layout (root/MNIST/processed).
        return os.path.join(self.root, 'processed')
    def _shuffle(self, iteratable):
        # In-place shuffle, skipped when self.random is False.
        if self.random:
            np.random.shuffle(iteratable)
    def _make_biased_mnist(self, indices, label):
        # Subclasses implement the actual bias injection (e.g. colouring).
        raise NotImplementedError
    def _update_bias_indices(self, bias_indices, label):
        """Assign sample indices of `label` to bias (colour) buckets in place."""
        if self.n_confusing_labels > 9 or self.n_confusing_labels < 1:
            raise ValueError(self.n_confusing_labels)
        indices = np.where((self.targets == label).numpy())[0]
        self._shuffle(indices)
        indices = torch.LongTensor(indices)
        n_samples = len(indices)
        # A `rho` fraction keeps the class's own colour; the remainder is
        # split (nearly) evenly over the n_confusing_labels other colours.
        n_correlated_samples = int(n_samples * self.data_label_correlation)
        n_decorrelated_per_class = int(np.ceil((n_samples - n_correlated_samples) / (self.n_confusing_labels)))
        correlated_indices = indices[:n_correlated_samples]
        bias_indices[label] = torch.cat([bias_indices[label], correlated_indices])
        decorrelated_indices = torch.split(indices[n_correlated_samples:], n_decorrelated_per_class)
        other_labels = [_label % 10 for _label in range(label + 1, label + 1 + self.n_confusing_labels)]
        self._shuffle(other_labels)
        for idx, _indices in enumerate(decorrelated_indices):
            _label = other_labels[idx]
            bias_indices[_label] = torch.cat([bias_indices[_label], _indices])
    def build_biased_mnist(self):
        """Build biased MNIST.
        Returns:
            (data, targets, biased_targets): images, digit labels, and the
            bias (colour) label applied to each image.
        """
        n_labels = self.targets.max().item() + 1
        bias_indices = {label: torch.LongTensor() for label in range(n_labels)}
        for label in range(n_labels):
            self._update_bias_indices(bias_indices, label)
        data = torch.ByteTensor()
        targets = torch.LongTensor()
        biased_targets = []
        for bias_label, indices in bias_indices.items():
            _data, _targets = self._make_biased_mnist(indices, bias_label)
            data = torch.cat([data, _data])
            targets = torch.cat([targets, _targets])
            biased_targets.extend([bias_label] * len(indices))
        biased_targets = torch.LongTensor(biased_targets)
        return data, targets, biased_targets
    def __getitem__(self, index):
        """Return (image, digit label, bias/colour label)."""
        img, target = self.data[index], int(self.targets[index])
        img = Image.fromarray(img.astype(np.uint8), mode='RGB')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target, int(self.biased_targets[index])
class ColourBiasedMNIST(BiasedMNIST):
    """Biased-MNIST variant that injects the bias as a background colour."""
    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download=False, data_label_correlation=1.0, n_confusing_labels=9):
        super(ColourBiasedMNIST, self).__init__(root, train=train, transform=transform,
                                                target_transform=target_transform,
                                                download=download,
                                                data_label_correlation=data_label_correlation,
                                                n_confusing_labels=n_confusing_labels)
    def _binary_to_colour(self, data, colour):
        """Binarize grayscale digits and paint the background with `colour`.
        Args:
            data: (N, H, W) byte tensor of grayscale digit images.
            colour: length-3 RGB list for the background.
        Returns:
            (N, H, W, 3) byte tensor: white digit on `colour` background.
        """
        # Foreground: white (255) wherever the digit has nonzero intensity.
        fg_data = torch.zeros_like(data)
        fg_data[data != 0] = 255
        fg_data[data == 0] = 0
        # Stacked on dim=1 -> (N, 3, H, W).
        fg_data = torch.stack([fg_data, fg_data, fg_data], dim=1)
        # Background mask: 1 wherever the digit is zero.
        bg_data = torch.zeros_like(data)
        bg_data[data == 0] = 1
        bg_data[data != 0] = 0
        # Stacked on dim=3 -> (N, H, W, 3) so the colour can broadcast over
        # the last axis, then permuted to (N, 3, H, W) to match fg_data.
        bg_data = torch.stack([bg_data, bg_data, bg_data], dim=3)
        bg_data = bg_data * torch.ByteTensor(colour)
        bg_data = bg_data.permute(0, 3, 1, 2)
        data = fg_data + bg_data
        # Return channels-last (N, H, W, 3), as expected by __getitem__.
        return data.permute(0, 2, 3, 1)
    def _make_biased_mnist(self, indices, label):
        # Colour the selected samples with the bias colour of `label`.
        return self._binary_to_colour(self.data[indices], self.COLOUR_MAP[label]), self.targets[indices]
def get_biased_mnist_dataloader(root, batch_size, data_label_correlation,
                                n_confusing_labels=9, train=True, num_workers=8):
    """Build a shuffled DataLoader over ColourBiasedMNIST.
    Images are converted to tensors and normalized to roughly [-1, 1]
    (mean 0.5, std 0.5 per channel).
    """
    normalize = transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                     std=(0.5, 0.5, 0.5))
    transform = transforms.Compose([transforms.ToTensor(), normalize])
    dataset = ColourBiasedMNIST(
        root,
        train=train,
        transform=transform,
        download=True,
        data_label_correlation=data_label_correlation,
        n_confusing_labels=n_confusing_labels,
    )
    return data.DataLoader(dataset=dataset,
                           batch_size=batch_size,
                           shuffle=True,
                           num_workers=num_workers,
                           pin_memory=True)
| 7,875 | 40.235602 | 124 | py |
rebias | rebias-master/datasets/kinetics.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
Dataset for the action recognition benchmarks.
We use the official implemenation of SlowFast by Facebook research.
https://github.com/facebookresearch/SlowFast
"""
import torch
from datasets.kinetics_tools.loader import construct_loader
def get_kinetics_dataloader(root,
                            split='train',
                            logger=None,
                            anno_file=None,
                            dataset_name='kinetics50',
                            batch_size=16):
    """Build a Kinetics dataloader by delegating to the SlowFast loader."""
    num_gpus = torch.cuda.device_count()
    return construct_loader(
        root,
        split,
        logger,
        anno_file=anno_file,
        dataset_name=dataset_name,
        num_gpus=num_gpus,
        batch_size=batch_size,
    )
| 831 | 32.28 | 67 | py |
rebias | rebias-master/datasets/imagenet.py | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
9-Class ImageNet wrapper. Many codes are borrowed from the official torchvision dataset.
https://github.com/pytorch/vision/blob/master/torchvision/datasets/imagenet.py
The following nine classes are selected to build the subset:
dog, cat, frog, turtle, bird, monkey, fish, crab, insect
"""
import os
from PIL import Image
from torchvision import transforms
import torch
import torch.utils.data
# Recognised image file suffixes (lower- and upper-case variants).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
# WordNet synset id -> 9-class label (dog, cat, frog, turtle, bird, monkey,
# fish, crab, insect); used when building the ImageNet-A evaluation split.
CLASS_TO_INDEX = {'n01641577': 2, 'n01644373': 2, 'n01644900': 2, 'n01664065': 3, 'n01665541': 3,
                  'n01667114': 3, 'n01667778': 3, 'n01669191': 3, 'n01819313': 4, 'n01820546': 4,
                  'n01833805': 4, 'n01843383': 4, 'n01847000': 4, 'n01978287': 7, 'n01978455': 7,
                  'n01980166': 7, 'n01981276': 7, 'n02085620': 0, 'n02099601': 0, 'n02106550': 0,
                  'n02106662': 0, 'n02110958': 0, 'n02123045': 1, 'n02123159': 1, 'n02123394': 1,
                  'n02123597': 1, 'n02124075': 1, 'n02174001': 8, 'n02177972': 8, 'n02190166': 8,
                  'n02206856': 8, 'n02219486': 8, 'n02486410': 5, 'n02487347': 5, 'n02488291': 5,
                  'n02488702': 5, 'n02492035': 5, 'n02607072': 6, 'n02640242': 6, 'n02641379': 6,
                  'n02643566': 6, 'n02655020': 6}
def is_image_file(filename):
    """Return True if ``filename`` ends with one of the known image extensions."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def make_dataset(dir, class_to_idx, data='ImageNet'):
    """Collect (image path, 9-class label) samples under ``dir``.

    Args:
        dir: dataset root containing one sub-directory per WordNet synset.
        class_to_idx: mapping from synset directory name to the original
            1000-class ImageNet index.
        data: evaluation variant; 'ImageNet' (default), 'ImageNet-A' or
            'ImageNet-C' (the latter is not implemented here).

    Returns:
        (images, class_to_idx_): list of ``(path, label)`` tuples and the
        synset -> 9-class label mapping that was actually used.
    """
    # dog, cat, frog, turtle, bird, monkey, fish, crab, insect
    RESTRICTED_RANGES = [(151, 254), (281, 285), (30, 32), (33, 37), (89, 97),
                         (372, 378), (393, 397), (118, 121), (306, 310)]
    range_sets = [set(range(s, e + 1)) for s, e in RESTRICTED_RANGES]
    class_to_idx_ = {}
    if data == 'ImageNet-A':
        # For ImageNet-A, use the precomputed synset -> 9-class mapping;
        # synsets absent from CLASS_TO_INDEX are silently skipped.
        for class_name, idx in class_to_idx.items():
            try:
                class_to_idx_[class_name] = CLASS_TO_INDEX[class_name]
            except Exception:
                pass
    elif data == 'ImageNet-C':
        # TODO
        pass
    else: # ImageNet
        # Map each original class index to the 9-class super-class it falls
        # into. For super-classes 0, 4 and 5 only the synsets that overlap
        # with ImageNet-A are kept so both evaluation sets match.
        for class_name, idx in class_to_idx.items():
            for new_idx, range_set in enumerate(range_sets):
                if idx in range_set:
                    if new_idx == 0: # classes that overlap with ImageNet-A
                        if idx in [151, 207, 234, 235, 254]:
                            class_to_idx_[class_name] = new_idx
                    elif new_idx == 4:
                        if idx in [89, 90, 94, 96, 97]:
                            class_to_idx_[class_name] = new_idx
                    elif new_idx == 5:
                        if idx in [372, 373, 374, 375, 378]:
                            class_to_idx_[class_name] = new_idx
                    else:
                        class_to_idx_[class_name] = new_idx
    images = []
    dir = os.path.expanduser(dir)
    # Iterate classes (and directory walks) in sorted order so the resulting
    # sample list is deterministic across runs.
    a = sorted(class_to_idx_.keys())
    for target in a:
        d = os.path.join(dir, target)
        if not os.path.isdir(d):
            continue
        for root, _, fnames in sorted(os.walk(d)):
            for fname in fnames:
                if is_image_file(fname):
                    path = os.path.join(root, fname)
                    item = (path, class_to_idx_[target])
                    images.append(item)
    return images, class_to_idx_
def find_classes(dir):
    """List class sub-directories of ``dir`` and index them alphabetically.

    Returns:
        (classes, class_to_idx): sorted directory names and a dict mapping
        each name to its position in that ordering.
    """
    entries = os.listdir(dir)
    classes = sorted(
        entry for entry in entries if os.path.isdir(os.path.join(dir, entry))
    )
    class_to_idx = {name: index for index, name in enumerate(classes)}
    return classes, class_to_idx
def pil_loader(path):
    """Open the image at ``path`` with PIL and return it converted to RGB."""
    with open(path, 'rb') as stream:
        with Image.open(stream) as image:
            return image.convert('RGB')
class ImageFolder(torch.utils.data.Dataset):
    """9-class ImageNet dataset yielding ``(image, target, bias_target)``.

    For the unbiased ImageNet validation split the bias targets are three
    pre-computed cluster assignments loaded from
    ``clusters/cluster_label_{1,2,3}.pth`` (presumably texture/bias cluster
    pseudo-labels -- TODO confirm against the clustering script); otherwise
    the class label itself is returned as the bias target.
    """
    def __init__(self, root, transform=None, target_transform=None, loader=pil_loader,
                 train=True, val_data='ImageNet'):
        # ``val_data`` selects the evaluation variant handled by make_dataset
        # ('ImageNet', 'ImageNet-A', ...).
        classes, class_to_idx = find_classes(root)
        imgs, class_to_idx_ = make_dataset(root, class_to_idx, val_data)
        if len(imgs) == 0:
            raise (RuntimeError("Found 0 images in subfolders of: " + root + "\n"
                                "Supported image extensions are: " + ",".join(
                IMG_EXTENSIONS)))
        self.root = root
        self.dataset = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx_
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
        self.train = train
        self.val_data = val_data
        # One cluster assignment per image, for three different clusterings;
        # loaded eagerly from the working directory (relative path).
        self.clusters = []
        for i in range(3):
            self.clusters.append(torch.load('clusters/cluster_label_{}.pth'.format(i+1)))
    def __getitem__(self, index):
        """Return ``(img, target, bias_target)`` for the given sample index."""
        path, target = self.dataset[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        if not self.train and self.val_data == 'ImageNet':
            # Unbiased validation: report the three cluster assignments as
            # bias labels.
            bias_target = [self.clusters[0][index],
                           self.clusters[1][index],
                           self.clusters[2][index]]
            return img, target, bias_target
        else:
            # Training (or ImageNet-A/C evaluation): no separate bias label.
            return img, target, target
    def __len__(self):
        # Number of (path, label) samples collected by make_dataset.
        return len(self.dataset)
def get_imagenet_dataloader(root, batch_size, train=True, num_workers=8,
                            load_size=256, image_size=224, val_data='ImageNet'):
    """Build a DataLoader over the 9-class ImageNet subset.

    Training uses random resized crops and horizontal flips; evaluation uses
    a resize to ``load_size`` followed by a centre crop of ``image_size``.
    Both apply the standard ImageNet channel normalisation.
    """
    normalise = transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                     std=(0.229, 0.224, 0.225))
    if train:
        steps = [
            transforms.RandomResizedCrop(image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalise,
        ]
    else:
        steps = [
            transforms.Resize(load_size),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            normalise,
        ]
    dataset = ImageFolder(root, transform=transforms.Compose(steps),
                          train=train, val_data=val_data)
    return torch.utils.data.DataLoader(dataset=dataset,
                                       batch_size=batch_size,
                                       shuffle=train,
                                       num_workers=num_workers,
                                       pin_memory=True)
return dataloader
| 6,565 | 38.317365 | 97 | py |
rebias | rebias-master/datasets/kinetics_tools/transform.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import numpy as np
import torch
def random_short_side_scale_jitter(images, min_size, max_size):
    """
    Rescale the clip so its short side equals a size drawn uniformly from
    [min_size, max_size], preserving the aspect ratio.
    Args:
        images (tensor): clip to rescale, dimension is
            `num frames` x `channel` x `height` x `width`.
        min_size (int): minimal short-side size.
        max_size (int): maximal short-side size.
    Returns:
        (tensor): rescaled clip with dimension
            `num frames` x `channel` x `new height` x `new width`.
    """
    size = int(round(np.random.uniform(min_size, max_size)))
    height = images.shape[2]
    width = images.shape[3]
    # If the short side already matches the sampled size, skip interpolation.
    already_scaled = (width <= height and width == size) or (
        height <= width and height == size
    )
    if already_scaled:
        return images
    if width < height:
        new_width = size
        new_height = int(math.floor((float(height) / width) * size))
    else:
        new_height = size
        new_width = int(math.floor((float(width) / height) * size))
    return torch.nn.functional.interpolate(
        images,
        size=(new_height, new_width),
        mode="bilinear",
        align_corners=False,
    )
def random_crop(images, size):
    """
    Take a random `size` x `size` spatial crop from the clip.
    Args:
        images (tensor): clip to crop, dimension is
            `num frames` x `channel` x `height` x `width`.
        size (int): height and width of the crop.
    Returns:
        (tensor): cropped clip with dimension
            `num frames` x `channel` x `size` x `size`.
    """
    if images.shape[2] == size and images.shape[3] == size:
        # Already the requested size: nothing to do.
        return images
    height = images.shape[2]
    width = images.shape[3]
    # Draw the vertical offset first, then the horizontal one (an offset of
    # zero is used along any axis that already matches the crop size).
    y_offset = int(np.random.randint(0, height - size)) if height > size else 0
    x_offset = int(np.random.randint(0, width - size)) if width > size else 0
    return images[
        :, :, y_offset : y_offset + size, x_offset : x_offset + size
    ]
def horizontal_flip(prob, images):
    """
    Flip the clip along its width axis with probability `prob`.
    Args:
        prob (float): probability of flipping the clip.
        images (tensor): clip to flip, dimension is
            `num frames` x `channel` x `height` x `width`.
    Returns:
        (tensor): clip with the same dimensions, flipped or unchanged.
    """
    should_flip = np.random.uniform() < prob
    return images.flip((-1)) if should_flip else images
def uniform_crop(images, size, spatial_idx):
    """
    Take a deterministic `size` x `size` crop at one of three positions.
    Args:
        images (tensor): clip to crop, dimension is
            `num frames` x `channel` x `height` x `width`.
        size (int): height and width of the crop.
        spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
            is larger than height; or 0, 1, or 2 for top, center, and bottom
            crop if height is larger than width.
    Returns:
        (tensor): cropped clip with dimension
            `num frames` x `channel` x `size` x `size`.
    """
    assert spatial_idx in [0, 1, 2]
    height = images.shape[2]
    width = images.shape[3]
    # Default to the centre crop; the long axis offset is then overridden
    # for the first/last positions.
    y_offset = int(math.ceil((height - size) / 2))
    x_offset = int(math.ceil((width - size) / 2))
    if height > width:
        # Taller than wide: the index selects top / centre / bottom.
        if spatial_idx == 0:
            y_offset = 0
        elif spatial_idx == 2:
            y_offset = height - size
    else:
        # Wider than tall (or square): the index selects left / centre / right.
        if spatial_idx == 0:
            x_offset = 0
        elif spatial_idx == 2:
            x_offset = width - size
    return images[
        :, :, y_offset : y_offset + size, x_offset : x_offset + size
    ]
| 3,922 | 31.155738 | 79 | py |
rebias | rebias-master/datasets/kinetics_tools/decoder.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import numpy as np
import random
import torch
def temporal_sampling(frames, start_idx, end_idx, num_samples):
    """
    Evenly sample `num_samples` frames between `start_idx` and `end_idx`
    (inclusive), clamping positions into the valid frame range.
    Args:
        frames (tensor): video frames, dimension is
            `num video frames` x `channel` x `height` x `width`.
        start_idx (int): index of the first frame of the clip.
        end_idx (int): index of the last frame of the clip.
        num_samples (int): number of frames to sample.
    Returns:
        (tensor): sampled frames, dimension is
            `num clip frames` x `channel` x `height` x `width`.
    """
    positions = torch.linspace(start_idx, end_idx, num_samples)
    # Clamp before truncating to integer indices so out-of-range clip bounds
    # (e.g. a clip longer than the video) stay valid.
    positions = torch.clamp(positions, 0, frames.shape[0] - 1).long()
    return torch.index_select(frames, 0, positions)
def get_start_end_idx(video_size, clip_size, clip_idx, num_clips):
    """
    Choose the first/last frame index of a clip of `clip_size` frames inside a
    video of `video_size` frames. If `clip_idx` is -1 the start is sampled
    uniformly at random; otherwise the video is uniformly split into
    `num_clips` clips and the `clip_idx`-th one is selected.
    Args:
        video_size (int): number of overall frames.
        clip_size (int): size of the clip to sample from the frames.
        clip_idx (int): -1 for random jitter sampling, otherwise the index of
            the uniformly-spaced clip to select.
        num_clips (int): overall number of clips to uniformly sample from the
            given video for testing.
    Returns:
        start_idx (int): the start frame index.
        end_idx (int): the end frame index.
    """
    # Largest possible start position (0 when the clip is longer than the video).
    delta = max(video_size - clip_size, 0)
    if clip_idx == -1:
        # Random temporal jittering.
        start_idx = random.uniform(0, delta)
    else:
        # Deterministic: evenly spread num_clips starts across [0, delta].
        start_idx = delta * clip_idx / num_clips
    end_idx = start_idx + clip_size - 1
    return start_idx, end_idx
def pyav_decode_stream(
    container, start_pts, end_pts, stream, stream_name, buffer_size=0
):
    """
    Decode the video with PyAV decoder.
    Args:
        container (container): PyAV container.
        start_pts (int): the starting Presentation TimeStamp to fetch the
            video frames.
        end_pts (int): the ending Presentation TimeStamp of the decoded frames.
        stream (stream): PyAV stream.
        stream_name (dict): a dictionary of streams. For example, {"video": 0}
            means video stream at stream index 0.
        buffer_size (int): number of additional frames to decode beyond end_pts.
    Returns:
        result (list): list of frames decoded.
        max_pts (int): max Presentation TimeStamp of the video sequence.
    """
    # Seeking in the stream is imprecise. Thus, seek to an earlier PTS by a
    # margin pts.
    margin = 1024
    seek_offset = max(start_pts - margin, 0)
    container.seek(seek_offset, any_frame=False, backward=True, stream=stream)
    # Frames are keyed by pts, so a later frame with the same pts replaces an
    # earlier one; the final result is returned in pts order.
    frames = {}
    buffer_count = 0
    max_pts = 0
    for frame in container.decode(**stream_name):
        max_pts = max(max_pts, frame.pts)
        if frame.pts < start_pts:
            # Still before the requested window (expected right after the seek).
            continue
        if frame.pts <= end_pts:
            frames[frame.pts] = frame
        else:
            # Past the window: keep up to buffer_size extra frames, then stop.
            buffer_count += 1
            frames[frame.pts] = frame
            if buffer_count >= buffer_size:
                break
    result = [frames[pts] for pts in sorted(frames)]
    return result, max_pts
def pyav_decode(
    container,
    sampling_rate,
    num_frames,
    clip_idx,
    num_clips=10,
    target_fps=30,
):
    """
    Convert the video from its original fps to the target_fps. If the video
    supports selective decoding (contains decoding information in the video
    head), perform temporal selective decoding and sample a clip from the
    video with the PyAV decoder. If the video does not support selective
    decoding, decode the entire video.
    Args:
        container (container): pyav container.
        sampling_rate (int): frame sampling rate (interval between two sampled
            frames.
        num_frames (int): number of frames to sample.
        clip_idx (int): if clip_idx is -1, perform random temporal sampling. If
            clip_idx is larger than -1, uniformly split the video to num_clips
            clips, and select the clip_idx-th video clip.
        num_clips (int): overall number of clips to uniformly sample from the
            given video.
        target_fps (int): the input video may has different fps, convert it to
            the target video fps before frame sampling.
    Returns:
        frames (tensor): decoded frames from the video. Return None if the no
            video stream was found.
        fps (float): the number of frames per second of the video.
        decode_all_video (bool): If True, the entire video was decoded.
    """
    # Try to fetch the decoding information from the video head. Some of the
    # videos does not support fetching the decoding information, for that case
    # it will get None duration.
    fps = float(container.streams.video[0].average_rate)
    frames_length = container.streams.video[0].frames
    duration = container.streams.video[0].duration
    if duration is None:
        # If failed to fetch the decoding information, decode the entire video.
        decode_all_video = True
        video_start_pts, video_end_pts = 0, math.inf
    else:
        # Perform selective decoding.
        decode_all_video = False
        # Clip size is expressed in source frames: sampled span rescaled from
        # target_fps to the source fps.
        start_idx, end_idx = get_start_end_idx(
            frames_length,
            sampling_rate * num_frames / target_fps * fps,
            clip_idx,
            num_clips,
        )
        # `timebase` converts a frame index into pts units (pts per frame).
        timebase = duration / frames_length
        video_start_pts = int(start_idx * timebase)
        video_end_pts = int(end_idx * timebase)
    frames = None
    # If video stream was found, fetch video frames from the video.
    if container.streams.video:
        video_frames, max_pts = pyav_decode_stream(
            container,
            video_start_pts,
            video_end_pts,
            container.streams.video[0],
            {"video": 0},
        )
        # The container is no longer needed once the frames are in memory.
        container.close()
        frames = [frame.to_rgb().to_ndarray() for frame in video_frames]
        frames = torch.as_tensor(np.stack(frames))
    return frames, fps, decode_all_video
def decode(
    container,
    sampling_rate,
    num_frames,
    clip_idx=-1,
    num_clips=10,
    video_meta=None,
    target_fps=30,
):
    """
    Decode the video and perform temporal sampling.
    Args:
        container (container): pyav container.
        sampling_rate (int): frame sampling rate (interval between two sampled
            frames).
        num_frames (int): number of frames to sample.
        clip_idx (int): if clip_idx is -1, perform random temporal
            sampling. If clip_idx is larger than -1, uniformly split the
            video to num_clips clips, and select the
            clip_idx-th video clip.
        num_clips (int): overall number of clips to uniformly
            sample from the given video.
        video_meta (dict): a dict contains "fps", "timebase", and
            "max_pts":
            `fps` is the frames per second of the given video.
            `timebase` is the video timebase.
            `max_pts` is the largest pts from the video.
        target_fps (int): the input video may have different fps, convert it to
            the target video fps before frame sampling.
    Returns:
        frames (tensor): decoded frames from the video, or None if decoding
            failed.
    """
    # Only the PyAV decoder is used here.
    # Fix: error message previously read "Not valied clip_idx".
    assert clip_idx >= -1, "Not valid clip_idx {}".format(clip_idx)
    try:
        frames, fps, decode_all_video = pyav_decode(
            container,
            sampling_rate,
            num_frames,
            clip_idx,
            num_clips,
            target_fps,
        )
    except Exception as e:
        # Best-effort decoding: callers treat None as "pick another video".
        print("Failed to decode with pyav with exception: {}".format(e))
        return None
    # Return None if the frames were not decoded successfully.
    if frames is None:
        return frames
    # If the entire video was decoded, select the requested clip now;
    # otherwise pyav_decode already restricted decoding to the clip, so
    # sample over the whole decoded range (clip 0 of 1).
    start_idx, end_idx = get_start_end_idx(
        frames.shape[0],
        num_frames * sampling_rate * fps / target_fps,
        clip_idx if decode_all_video else 0,
        num_clips if decode_all_video else 1,
    )
    # Perform temporal sampling from the decoded video.
    frames = temporal_sampling(frames, start_idx, end_idx, num_frames)
    return frames
| 8,893 | 36.527426 | 80 | py |
rebias | rebias-master/datasets/kinetics_tools/kinetics.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import json
import random
import torch
import torch.utils.data
import datasets.kinetics_tools.decoder as decoder
import datasets.kinetics_tools.video_container as container
import datasets.kinetics_tools.transform as transform
import tqdm
# Per-channel mean/std used to normalise decoded RGB frames (values in [0, 1]).
DATA_MEAN = [0.45, 0.45, 0.45]
DATA_STD = [0.225, 0.225, 0.225]
# Short-side scale-jitter range (train) and crop sizes (train/test).
TRAIN_JITTER_SCALES = [256, 320]
TRAIN_CROP_SIZE = 224
TEST_CROP_SIZE = 256
# Multi-view testing: temporal clips per video x spatial crops per clip.
TEST_NUM_ENSEMBLE_VIEWS = 10
TEST_NUM_SPATIAL_CROPS = 1
# Frame sampling: interval between sampled frames and frames per clip.
DATA_SAMPLING_RATE = 8
DATA_NUM_FRAMES = 8
class Kinetics(torch.utils.data.Dataset):
    """
    Kinetics video loader. Construct the Kinetics video loader, then sample
    clips from the videos. For training and validation, a single clip is
    randomly sampled from every video with random cropping, scaling, and
    flipping. For testing, multiple clips are uniformly sampled from every
    video with uniform cropping. For uniform cropping, we take the left, center,
    and right crop if the width is larger than height, or take top, center, and
    bottom crop if the height is larger than the width.
    """
    def __init__(self, root, mode, logger, num_retries=10,
                 dataset_name="kinetics50",
                 anno_file="kinetics-400.json"):
        """
        Construct the Kinetics video loader.

        Args:
            root (str): dataset root (holds the annotation file and the
                train/val video folders).
            mode (str): one of "train", "val" or "test".
            logger: object exposing a ``log(msg)`` method.
            num_retries (int): how many random replacement videos to try when
                a video cannot be loaded or decoded in __getitem__.
            dataset_name (str): which benchmark subset to build.
            anno_file (str): annotation file name; a ``.json`` file selects the
                Kinetics-style parser, anything else the Mimetics csv parser.
        """
        # Only support train, val, and test mode.
        assert mode in [
            "train",
            "val",
            "test",
        ], "Split '{}' not supported for Kinetics".format(mode)
        self.mode = mode
        self.root = root
        self.anno_file = anno_file
        self.dataset_name = dataset_name
        assert self.dataset_name in ['kinetics400', 'kinetics50', 'mimetics50',
                                     'kinetics10', 'mimetics10',]
        self.logger = logger
        self._video_meta = {}
        self._num_retries = num_retries
        # For training or validation mode, one single clip is sampled from every
        # video. For testing, NUM_ENSEMBLE_VIEWS clips are sampled from every
        # video. For every clip, NUM_SPATIAL_CROPS is cropped spatially from
        # the frames.
        if self.mode in ["train", "val"]:
            self._num_clips = 1
        elif self.mode in ["test"]:
            self._num_clips = (
                TEST_NUM_ENSEMBLE_VIEWS * TEST_NUM_SPATIAL_CROPS
            )
        self.logger.log("Constructing Kinetics {}...".format(mode))
        self._construct_loader()
    def _parse_json(self, json_path, valid=False):
        """Load the annotation json into ``self.json_data``; when ``valid`` is
        True also log the number of samples per subset."""
        self.logger.log(json_path)
        with open(json_path, 'r') as data_file:
            self.json_data = json.load(data_file)
        if valid:
            c_tr, c_te, c_v = 0, 0, 0
            for jd in self.json_data['database']:
                if self.json_data['database'][jd]['subset'] == 'training':
                    c_tr += 1
                elif self.json_data['database'][jd]['subset'] == 'testing':
                    c_te += 1
                elif self.json_data['database'][jd]['subset'] == 'validation':
                    c_v += 1
            self.logger.log('Number of Training samples: %d' % c_tr)
            self.logger.log('Number of Validation samples: %d' % c_v)
            self.logger.log('Number of Testing samples: %d' % c_te)
    def _get_class_idx_map(self, classes):
        """Build ``self.class_labels_map`` mapping class name -> integer label."""
        self.class_labels_map = {}
        for index, class_label in enumerate(classes):
            self.class_labels_map[class_label] = index
    def _get_action_label(self, data):
        """Return the action label of an annotation entry when its subset is
        compatible with ``self.mode``; otherwise None (the entry is skipped).
        Note: test mode accepts the 'validation' subset as labelled data."""
        if self.mode == 'train' and data['subset'] == 'training':
            action_label = data['annotations']['label']
        elif self.mode == 'val' and data['subset'] == 'validation':
            action_label = data['annotations']['label']
        elif self.mode == 'test' and data['subset'] == 'validation':
            action_label = data['annotations']['label']
        elif self.mode == 'test' and data['subset'] == 'testing':
            action_label = None
        else:
            action_label = None
        return action_label
    def _set_path_prefix(self):
        """Choose the video sub-folder for the current mode (test mode reads
        the validation videos)."""
        if self.mode == 'train':
            self.PATH_PREFIX = 'train'
        elif self.mode == 'val':
            self.PATH_PREFIX = 'val'
        elif self.mode == 'test':
            self.PATH_PREFIX = 'val'
        else:
            raise NotImplementedError
    def _construct_loader(self):
        """
        Construct the video loader.

        Populates ``_path_to_videos``, ``_labels``, ``_spatial_temporal_idx``
        and ``_video_meta`` from either a Kinetics-style json annotation file
        or a Mimetics-style csv, replicating each video ``_num_clips`` times.
        """
        path_to_file = os.path.join(self.root, self.anno_file)
        self._set_path_prefix()
        if path_to_file.endswith('.json'):
            # Kinetics-style json annotations.
            self._parse_json(json_path=path_to_file)
            self._path_to_videos = []
            self._labels = []
            self._spatial_temporal_idx = []
            clip_idx = 0
            num_missing_videos = 0
            subclasses = []
            # The 50-class subset reuses the Mimetics class list; the 10-class
            # subset keeps every 5th class of that list.
            if self.dataset_name == 'kinetics50':
                with open('datasets/mimetics/mimetics_v1.0_clsannot.txt') as f_subclasses:
                    f_subclasses.readline()
                    for line in f_subclasses.readlines():
                        subclasses.append(line.split()[0])
                self._get_class_idx_map(subclasses)
            elif self.dataset_name == 'kinetics10':
                with open('datasets/mimetics/mimetics_v1.0_clsannot.txt') as f_subclasses:
                    f_subclasses.readline()
                    line_idx = 0
                    for line in f_subclasses.readlines():
                        line_idx += 1
                        if line_idx % 5 == 0:
                            subclasses.append(line.split()[0])
                print (subclasses)
                self._get_class_idx_map(subclasses)
            else:
                self._get_class_idx_map(self.json_data['labels'])
            for key in tqdm.tqdm(self.json_data['database']):
                data = self.json_data['database'][key]
                action_label = self._get_action_label(data)
                if (action_label not in subclasses) and len(subclasses):
                    continue
                if action_label is None:
                    # when the json_data['subset'] is not matched with 'self.mode', skip this data.
                    # (for example, self.mode=='train' but data['subset']=='testing')
                    continue
                # path = os.path.join(root_path, self.PATH_PREFIX, action_label, key + '.mp4')
                # Strip the '_<start>_<end>' suffix (14 chars) to recover the
                # raw video id.
                vid_name = key[:-14]
                # possible path lists (.mp4, .mkv, etc.)
                paths = []
                paths.append(os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), key + '.mp4'))
                paths.append(
                    os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), vid_name + '.mp4'))
                paths.append(os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), key + '.mkv'))
                paths.append(
                    os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), vid_name + '.mkv'))
                paths.append(
                    os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), key + '.mp4.mkv'))
                paths.append(
                    os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), vid_name + '.mp4.mkv'))
                paths.append(
                    os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), key + '.mp4.webm'))
                paths.append(
                    os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), vid_name + '.mp4.webm'))
                # Use the first candidate file that actually exists on disk.
                exist_path = [p for p in paths if os.path.exists(p)]
                label = self.class_labels_map[action_label]
                if len(exist_path) > 0:
                    path = exist_path[0]
                else:
                    # print(path)
                    num_missing_videos += 1
                    continue
                for idx in range(self._num_clips):
                    self._path_to_videos.append(path)
                    self._labels.append(int(label))
                    self._spatial_temporal_idx.append(idx)
                    self._video_meta[clip_idx * self._num_clips + idx] = {}
                clip_idx += 1
            self.logger.log('num_missing_videos: %d' % num_missing_videos)
            # assert (
            #     len(self._path_to_videos) > 0
            # ), "Failed to load Kinetics split {} from {}".format(
            #     self._split_idx, path_to_file
            # )
            self.logger.log(
                "Constructing kinetics_tools dataloader (size: {}) from {}".format(
                    len(self._path_to_videos), path_to_file
                )
            )
        else:
            # Mimetics-style csv annotations:
            # label,youtube_id,time_start,time_end,split,is_cc
            # path_to_file = os.path.join(
            #     self.root, "{}.csv".format(self.mode)
            # )
            self._path_to_videos = []
            self._labels = []
            self._spatial_temporal_idx = []
            label_strings = []
            # First pass: collect the sorted set of label names.
            with open(path_to_file, "r") as f:
                f.readline()
                for clip_idx, path_label in enumerate(f.read().splitlines()):
                    label_strings.append(path_label.split(',')[0])
            label_strings = sorted(set(label_strings))
            if self.dataset_name == 'mimetics10':
                label_strings = label_strings[4::5]
                print (label_strings)
            # Second pass: build the sample lists.
            with open(path_to_file, "r") as f:
                f.readline()
                for clip_idx, path_label in enumerate(f.read().splitlines()):
                    # assert len(path_label.split()) == 2
                    label_str, path, start_time, end_time, _, _ = path_label.split(',')
                    if self.dataset_name == 'mimetics10' and label_str not in label_strings:
                        continue
                    label = label_strings.index(label_str)
                    path = os.path.join(self.root,
                                        'data',
                                        label_str,
                                        '{0}_{1:06d}_{2:06d}.mp4'.format(path, int(start_time), int(end_time)))
                    if not os.path.exists(path):
                        self.logger.log('{} is not exists!'.format(path))
                        continue
                    for idx in range(self._num_clips):
                        self._path_to_videos.append(path)
                        self._labels.append(int(label))
                        self._spatial_temporal_idx.append(idx)
                        self._video_meta[clip_idx * self._num_clips + idx] = {}
            # assert (
            #     len(self._path_to_videos) > 0
            # ), "Failed to load Kinetics split {} from {}".format(
            #     self._split_idx, path_to_file
            # )
            self.logger.log(
                "Constructing kinetics_tools dataloader (size: {}) from {}".format(
                    len(self._path_to_videos), path_to_file
                )
            )
    def __getitem__(self, index):
        """
        Given the video index, return the list of frames, label, and video
        index if the video can be fetched and decoded successfully, otherwise
        repeatly find a random video that can be decoded as a replacement.
        Args:
            index (int): the video index provided by the pytorch sampler.
        Returns:
            frames (tensor): the frames of sampled from the video. The dimension
                is `channel` x `num frames` x `height` x `width`.
            label (int): the label of the current video.
            index (int): if the video provided by pytorch sampler can be
                decoded, then return the index of the video. If not, return the
                index of the video replacement that can be decoded.
        """
        if self.mode in ["train", "val"]:
            # -1 indicates random sampling.
            temporal_sample_index = -1
            spatial_sample_index = -1
            min_scale = TRAIN_JITTER_SCALES[0]
            max_scale = TRAIN_JITTER_SCALES[1]
            crop_size = TRAIN_CROP_SIZE
        elif self.mode in ["test"]:
            temporal_sample_index = (
                self._spatial_temporal_idx[index]
                // TEST_NUM_SPATIAL_CROPS
            )
            # spatial_sample_index is in [0, 1, 2]. Corresponding to left,
            # center, or right if width is larger than height, and top, middle,
            # or bottom if height is larger than width.
            spatial_sample_index = (
                self._spatial_temporal_idx[index]
                % TEST_NUM_SPATIAL_CROPS
            )
            min_scale, max_scale, crop_size = [TEST_CROP_SIZE] * 3
            # The testing is deterministic and no jitter should be performed.
            # min_scale, max_scale, and crop_size are expect to be the same.
            assert len({min_scale, max_scale, crop_size}) == 1
        else:
            raise NotImplementedError(
                "Does not support {} mode".format(self.mode)
            )
        # Try to decode and sample a clip from a video. If the video can not be
        # decoded, repeatly find a random video replacement that can be decoded.
        for _ in range(self._num_retries):
            video_container = None
            try:
                video_container = container.get_video_container(
                    self._path_to_videos[index]
                )
            except Exception as e:
                self.logger.log(
                    "Failed to load video from {} with error {}".format(
                        self._path_to_videos[index], e
                    )
                )
            # Select a random video if the current video was not able to access.
            if video_container is None:
                index = random.randint(0, len(self._path_to_videos) - 1)
                continue
            # Decode video. Meta info is used to perform selective decoding.
            frames = decoder.decode(
                video_container,
                DATA_SAMPLING_RATE,
                DATA_NUM_FRAMES,
                temporal_sample_index,
                TEST_NUM_ENSEMBLE_VIEWS,
                # video_meta=self._video_meta[index],
                target_fps=30,
            )
            # If decoding failed (wrong format, video is too short, and etc),
            # select another video.
            if frames is None:
                self.logger.log(self._path_to_videos[index])
                index = random.randint(0, len(self._path_to_videos) - 1)
                continue
            # Perform color normalization.
            frames = frames.float()
            frames = frames / 255.0
            frames = frames - torch.tensor(DATA_MEAN)
            frames = frames / torch.tensor(DATA_STD)
            # T H W C -> C T H W.
            frames = frames.permute(3, 0, 1, 2)
            # Perform data augmentation.
            frames = self.spatial_sampling(
                frames,
                spatial_idx=spatial_sample_index,
                min_scale=min_scale,
                max_scale=max_scale,
                crop_size=crop_size,
            )
            # frames = [frames]
            label = self._labels[index]
            return frames, label, index
        # NOTE: this `else` belongs to the `for` loop above -- it is reached
        # only when every retry failed to produce a decodable video.
        else:
            raise RuntimeError(
                "Failed to fetch video after {} retries.".format(
                    self._num_retries
                )
            )
    def __len__(self):
        """
        Returns:
            (int): the number of videos in the dataset.
        """
        return len(self._path_to_videos)
    def spatial_sampling(
        self,
        frames,
        spatial_idx=-1,
        min_scale=256,
        max_scale=320,
        crop_size=224,
    ):
        """
        Perform spatial sampling on the given video frames. If spatial_idx is
        -1, perform random scale, random crop, and random flip on the given
        frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
        with the given spatial_idx.
        Args:
            frames (tensor): frames of images sampled from the video. The
                dimension is `num frames` x `height` x `width` x `channel`.
            spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
                or 2, perform left, center, right crop if width is larger than
                height, and perform top, center, buttom crop if height is larger
                than width.
            min_scale (int): the minimal size of scaling.
            max_scale (int): the maximal size of scaling.
            crop_size (int): the size of height and width used to crop the
                frames.
        Returns:
            frames (tensor): spatially sampled frames.
        """
        assert spatial_idx in [-1, 0, 1, 2]
        if spatial_idx == -1:
            frames = transform.random_short_side_scale_jitter(
                frames, min_scale, max_scale
            )
            frames = transform.random_crop(frames, crop_size)
            frames = transform.horizontal_flip(0.5, frames)
        else:
            # The testing is deterministic and no jitter should be performed.
            # min_scale, max_scale, and crop_size are expect to be the same.
            assert len({min_scale, max_scale, crop_size}) == 1
            frames = transform.random_short_side_scale_jitter(
                frames, min_scale, max_scale
            )
            frames = transform.uniform_crop(frames, crop_size, spatial_idx)
        return frames
| 17,839 | 40.488372 | 118 | py |
rebias | rebias-master/datasets/kinetics_tools/loader.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Data loader."""
import torch
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from datasets.kinetics_tools.kinetics import Kinetics
# Supported datasets: every benchmark subset is served by the Kinetics loader.
_DATASET_CATALOG = {"kinetics50": Kinetics, "mimetics50": Kinetics,
                    "kinetics10": Kinetics, "mimetics10": Kinetics,}
def construct_loader(root, split, logger, anno_file,
                     dataset_name='kinetics', batch_size=64,
                     num_gpus=1, num_workers=24, pin_memory=True):
    """
    Build a torch DataLoader for the requested dataset split.
    :param root: root path
    :param split: dataset split ('train','val','test')
    :param logger:
    :param dataset_name:
    :param batch_size:
    :param num_gpus:
    :param num_workers:
    :param pin_memory:
    :return:
    """
    assert split in ["train", "val", "test"]
    # Only the training split is shuffled and drops the incomplete last batch.
    is_train = split in ["train"]
    shuffle = is_train
    drop_last = is_train
    assert (
        dataset_name in _DATASET_CATALOG.keys()
    ), "Dataset '{}' is not supported".format(dataset_name)
    # Construct the dataset
    dataset_cls = _DATASET_CATALOG[dataset_name]
    dataset = dataset_cls(root, split, logger,
                          dataset_name=dataset_name,
                          anno_file=anno_file)
    # Multi-process (distributed) sampling is disabled; the default sampler
    # is used regardless of num_gpus.
    sampler = None
    # Create a loader
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
        drop_last=drop_last,
    )
def shuffle_dataset(loader, cur_epoch):
    """
    Shuffle the data for the given epoch.
    Args:
        loader (loader): data loader to perform shuffle.
        cur_epoch (int): number of the current epoch.
    """
    current_sampler = loader.sampler
    assert isinstance(
        current_sampler, (RandomSampler, DistributedSampler)
    ), "Sampler type '{}' not supported".format(type(loader.sampler))
    # RandomSampler reshuffles by itself on every pass; only the
    # DistributedSampler needs the epoch number to reseed its shuffle.
    if isinstance(current_sampler, DistributedSampler):
        current_sampler.set_epoch(cur_epoch)
| 2,473 | 29.925 | 71 | py |
rebias | rebias-master/datasets/kinetics_tools/meters.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Meters."""
import datetime
import numpy as np
from collections import deque
import torch
import time
import slowfast.utils.logging as logging
import slowfast.utils.metrics as metrics
from fvcore.common.timer import Timer
logger = logging.get_logger(__name__)
class TestMeter(object):
    """
    Perform the multi-view ensemble for testing: each video with an unique index
    will be sampled with multiple clips, and the predictions of the clips will
    be aggregated to produce the final prediction for the video.
    The accuracy is calculated with the given ground truth labels.
    """
    def __init__(self, num_videos, num_clips, num_cls, overall_iters):
        """
        Construct tensors to store the predictions and labels. Expect to get
        num_clips predictions from each video, and calculate the metrics on
        num_videos videos.
        Args:
            num_videos (int): number of videos to test.
            num_clips (int): number of clips sampled from each video for
                aggregating the final prediction for the video.
            num_cls (int): number of classes for each prediction.
            overall_iters (int): overall iterations for testing.
        """
        self.iter_timer = Timer()
        self.num_clips = num_clips
        self.overall_iters = overall_iters
        # Initialize tensors.
        self.video_preds = torch.zeros((num_videos, num_cls))
        self.video_labels = torch.zeros((num_videos)).long()
        # Per-video count of clips aggregated so far; used to verify every
        # video received all of its clips before finalizing metrics.
        self.clip_count = torch.zeros((num_videos)).long()
        # Reset metric.
        self.reset()
    def reset(self):
        """
        Reset the metric.
        """
        self.clip_count.zero_()
        self.video_preds.zero_()
        self.video_labels.zero_()
    def update_stats(self, preds, labels, clip_ids):
        """
        Collect the predictions from the current batch and perform on-the-flight
        summation as ensemble.
        Args:
            preds (tensor): predictions from the current batch. Dimension is
                N x C where N is the batch size and C is the channel size
                (num_cls).
            labels (tensor): the corresponding labels of the current batch.
                Dimension is N.
            clip_ids (tensor): clip indexes of the current batch, dimension is
                N.
        """
        for ind in range(preds.shape[0]):
            # Integer division maps a clip id back to its parent video id.
            vid_id = int(clip_ids[ind]) // self.num_clips
            self.video_labels[vid_id] = labels[ind]
            self.video_preds[vid_id] += preds[ind]
            self.clip_count[vid_id] += 1
    def log_iter_stats(self, cur_iter):
        """
        Log the stats.
        Args:
            cur_iter (int): the current iteration of testing.
        """
        eta_sec = self.iter_timer.seconds() * (self.overall_iters - cur_iter)
        eta = str(datetime.timedelta(seconds=int(eta_sec)))
        stats = {
            "split": "test_iter",
            "cur_iter": "{}".format(cur_iter + 1),
            "eta": eta,
            "time_diff": self.iter_timer.seconds(),
        }
        logging.log_json_stats(stats)
    def iter_tic(self):
        # Start timing a new iteration.
        self.iter_timer.reset()
    def iter_toc(self):
        # Stop timing the current iteration.
        self.iter_timer.pause()
    def finalize_metrics(self, ks=(1, 5)):
        """
        Calculate and log the final ensembled metrics.
        ks (tuple): list of top-k values for topk_accuracies. For example,
            ks = (1, 5) corresponds to top-1 and top-5 accuracy.
        """
        # Warn (but do not fail) when some videos did not receive every clip.
        if not all(self.clip_count == self.num_clips):
            logger.warning(
                "clip count {} ~= num clips {}".format(
                    self.clip_count, self.num_clips
                )
            )
            logger.warning(self.clip_count)
        num_topks_correct = metrics.topks_correct(
            self.video_preds, self.video_labels, ks
        )
        topks = [
            (x / self.video_preds.size(0)) * 100.0 for x in num_topks_correct
        ]
        assert len({len(ks), len(topks)}) == 1
        stats = {"split": "test_final"}
        for k, topk in zip(ks, topks):
            stats["top{}_acc".format(k)] = "{:.{prec}f}".format(topk, prec=2)
        logging.log_json_stats(stats)
class ScalarMeter(object):
    """
    Tracks a stream of scalar values through a fixed-size sliding window.
    Exposes the median and mean of the window, plus the global average over
    every value added since construction (or the last reset).
    """
    def __init__(self, window_size):
        """
        Args:
            window_size (int): maximum number of values kept in the window.
        """
        self.deque = deque(maxlen=window_size)
        self.count = 0
        self.total = 0.0
    def reset(self):
        """
        Drop all windowed values and restart the running totals.
        """
        self.count = 0
        self.total = 0.0
        self.deque.clear()
    def add_value(self, value):
        """
        Record a new scalar value.
        """
        self.count += 1
        self.total += value
        self.deque.append(value)
    def get_win_median(self):
        """
        Median of the values currently inside the window.
        """
        return np.median(self.deque)
    def get_win_avg(self):
        """
        Mean of the values currently inside the window.
        """
        return np.mean(self.deque)
    def get_global_avg(self):
        """
        Average over every value ever added (not just the window).
        """
        return self.total / self.count
class TrainMeter(object):
    """
    Measures training stats.
    """
    def __init__(self, epoch_iters, cfg):
        """
        Args:
            epoch_iters (int): the overall number of iterations of one epoch.
            cfg (CfgNode): configs.
        """
        self._cfg = cfg
        self.epoch_iters = epoch_iters
        # Total number of iterations over the full training run.
        self.MAX_EPOCH = cfg.SOLVER.MAX_EPOCH * epoch_iters
        self.iter_timer = Timer()
        self.loss = ScalarMeter(cfg.LOG_PERIOD)
        self.loss_total = 0.0
        self.lr = None
        # Current minibatch errors (smoothed over a window).
        self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
        self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
        # Number of misclassified examples.
        self.num_top1_mis = 0
        self.num_top5_mis = 0
        self.num_samples = 0
    def reset(self):
        """
        Reset the Meter.
        """
        self.loss.reset()
        self.loss_total = 0.0
        self.lr = None
        self.mb_top1_err.reset()
        self.mb_top5_err.reset()
        self.num_top1_mis = 0
        self.num_top5_mis = 0
        self.num_samples = 0
    def iter_tic(self):
        """
        Start to record time.
        """
        self.iter_timer.reset()
    def iter_toc(self):
        """
        Stop to record time.
        """
        self.iter_timer.pause()
    def update_stats(self, top1_err, top5_err, loss, lr, mb_size):
        """
        Update the current stats.
        Args:
            top1_err (float): top1 error rate.
            top5_err (float): top5 error rate.
            loss (float): loss value.
            lr (float): learning rate.
            mb_size (int): mini batch size.
        """
        # Current minibatch stats
        self.mb_top1_err.add_value(top1_err)
        self.mb_top5_err.add_value(top5_err)
        self.loss.add_value(loss)
        self.lr = lr
        # Aggregate stats
        # Errors/loss are weighted by minibatch size so epoch-level averages
        # remain exact even when the last batch is smaller.
        self.num_top1_mis += top1_err * mb_size
        self.num_top5_mis += top5_err * mb_size
        self.loss_total += loss * mb_size
        self.num_samples += mb_size
    def log_iter_stats(self, cur_epoch, cur_iter):
        """
        Log the stats of the current iteration.
        Args:
            cur_epoch (int): the number of current epoch.
            cur_iter (int): the number of current iteration.
        """
        # Only log every LOG_PERIOD iterations.
        if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0:
            return
        # ETA = last iteration duration * number of remaining iterations.
        eta_sec = self.iter_timer.seconds() * (
            self.MAX_EPOCH - (cur_epoch * self.epoch_iters + cur_iter + 1)
        )
        eta = str(datetime.timedelta(seconds=int(eta_sec)))
        mem_usage = misc.gpu_mem_usage()
        stats = {
            "_type": "train_iter",
            "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
            "iter": "{}/{}".format(cur_iter + 1, self.epoch_iters),
            "time_diff": self.iter_timer.seconds(),
            "eta": eta,
            "top1_err": self.mb_top1_err.get_win_median(),
            "top5_err": self.mb_top5_err.get_win_median(),
            "loss": self.loss.get_win_median(),
            "lr": self.lr,
            "mem": int(np.ceil(mem_usage)),
        }
        logging.log_json_stats(stats)
    def log_epoch_stats(self, cur_epoch):
        """
        Log the stats of the current epoch.
        Args:
            cur_epoch (int): the number of current epoch.
        """
        eta_sec = self.iter_timer.seconds() * (
            self.MAX_EPOCH - (cur_epoch + 1) * self.epoch_iters
        )
        eta = str(datetime.timedelta(seconds=int(eta_sec)))
        mem_usage = misc.gpu_mem_usage()
        # Sample-weighted epoch averages (see update_stats).
        top1_err = self.num_top1_mis / self.num_samples
        top5_err = self.num_top5_mis / self.num_samples
        avg_loss = self.loss_total / self.num_samples
        stats = {
            "_type": "train_epoch",
            "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
            "time_diff": self.iter_timer.seconds(),
            "eta": eta,
            "top1_err": top1_err,
            "top5_err": top5_err,
            "loss": avg_loss,
            "lr": self.lr,
            "mem": int(np.ceil(mem_usage)),
        }
        logging.log_json_stats(stats)
class ValMeter(object):
    """
    Measures validation stats.
    """
    def __init__(self, max_iter, cfg):
        """
        Args:
            max_iter (int): the max number of iteration of the current epoch.
            cfg (CfgNode): configs.
        """
        self._cfg = cfg
        self.max_iter = max_iter
        self.iter_timer = Timer()
        # Current minibatch errors (smoothed over a window).
        self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
        self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
        # Min errors (over the full val set).
        self.min_top1_err = 100.0
        self.min_top5_err = 100.0
        # Number of misclassified examples.
        self.num_top1_mis = 0
        self.num_top5_mis = 0
        self.num_samples = 0
    def reset(self):
        """
        Reset the Meter.
        """
        # Note: min_top1_err / min_top5_err deliberately survive reset so the
        # best-so-far validation errors persist across epochs.
        self.iter_timer.reset()
        self.mb_top1_err.reset()
        self.mb_top5_err.reset()
        self.num_top1_mis = 0
        self.num_top5_mis = 0
        self.num_samples = 0
    def iter_tic(self):
        """
        Start to record time.
        """
        self.iter_timer.reset()
    def iter_toc(self):
        """
        Stop to record time.
        """
        self.iter_timer.pause()
    def update_stats(self, top1_err, top5_err, mb_size):
        """
        Update the current stats.
        Args:
            top1_err (float): top1 error rate.
            top5_err (float): top5 error rate.
            mb_size (int): mini batch size.
        """
        self.mb_top1_err.add_value(top1_err)
        self.mb_top5_err.add_value(top5_err)
        # Errors are weighted by minibatch size for exact epoch averages.
        self.num_top1_mis += top1_err * mb_size
        self.num_top5_mis += top5_err * mb_size
        self.num_samples += mb_size
    def log_iter_stats(self, cur_epoch, cur_iter):
        """
        Log the stats of the current iteration.
        Args:
            cur_epoch (int): the number of current epoch.
            cur_iter (int): the number of current iteration.
        """
        # Only log every LOG_PERIOD iterations.
        if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0:
            return
        # ETA = last iteration duration * remaining iterations of this epoch.
        eta_sec = self.iter_timer.seconds() * (self.max_iter - cur_iter - 1)
        eta = str(datetime.timedelta(seconds=int(eta_sec)))
        mem_usage = misc.gpu_mem_usage()
        stats = {
            "_type": "val_iter",
            "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
            "iter": "{}/{}".format(cur_iter + 1, self.max_iter),
            "time_diff": self.iter_timer.seconds(),
            "eta": eta,
            "top1_err": self.mb_top1_err.get_win_median(),
            "top5_err": self.mb_top5_err.get_win_median(),
            "mem": int(np.ceil(mem_usage)),
        }
        logging.log_json_stats(stats)
    def log_epoch_stats(self, cur_epoch):
        """
        Log the stats of the current epoch.
        Args:
            cur_epoch (int): the number of current epoch.
        """
        # Sample-weighted epoch averages (see update_stats), and best-so-far
        # errors across all epochs seen by this meter.
        top1_err = self.num_top1_mis / self.num_samples
        top5_err = self.num_top5_mis / self.num_samples
        self.min_top1_err = min(self.min_top1_err, top1_err)
        self.min_top5_err = min(self.min_top5_err, top5_err)
        mem_usage = misc.gpu_mem_usage()
        stats = {
            "_type": "val_epoch",
            "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
            "time_diff": self.iter_timer.seconds(),
            "top1_err": top1_err,
            "top5_err": top5_err,
            "min_top1_err": self.min_top1_err,
            "min_top5_err": self.min_top5_err,
            "mem": int(np.ceil(mem_usage)),
        }
        logging.log_json_stats(stats)
| 13,430 | 31.442029 | 80 | py |
TraBS | TraBS-main/scripts/main_pretrain.py |
from pathlib import Path
from datetime import datetime
import torch
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
import numpy as np
import torchio as tio
from breaststudies.data import BreastDataModule, BreastDataModuleLR, BreastDataModule2D, BreastUKADataset
from breaststudies.models import UNet, nnUNet, SwinUNETR
from breaststudies.augmentation import ZNormalization, RandomCropOrPad, RandomCutOut, RandomDisableChannel
if __name__ == "__main__":
    # Self-supervised pretraining script: trains a reconstruction model
    # (out_ch == in_ch, MSE loss, target_type='image') over a 5-way
    # cross-validation split.
    # Create a unique, timestamped run directory for checkpoints and logs.
    current_time = datetime.now().strftime("%Y_%m_%d_%H%M%S")
    path_run_dir = Path.cwd() / 'runs' / str(current_time)
    path_run_dir.mkdir(parents=True, exist_ok=True)
    gpus = [0] if torch.cuda.is_available() else None
    torch.set_float32_matmul_precision('high')
    # ------------- Settings ---------------------
    target = 'tissue' # 'breast' or 'tissue'
    batch_size = 1
    # Region-of-interest crop sizes per target.
    # NOTE(review): assumed order is (slices, height, width); it is reversed
    # via roi_size[::-1] for target_shape below — confirm against the
    # BreastDataModuleLR convention.
    roi_sizes = {
        'tissue': (32, 256, 256),
        'breast': (32, 512, 256),
    }
    roi_size = roi_sizes[target]
    # NOTE: Source files include target as source files are loaded as ScalarImage and target files as LabelImage
    source_files={
        'breast': {'source':['Dyn_0.nii', 'T2_resampled.nii', ], 'target':['Dyn_0.nii', 'T2_resampled.nii',]},
        'tissue': {'source':['Dyn_0.nii','T2_resampled.nii', 'Sub.nii' ], 'target':['Dyn_0.nii', 'T2_resampled.nii', 'Sub.nii' ]}
    }
    # ---------------------------------- Preprocessing ----------------------------------
    # Currently a no-op; previously-used transforms kept for reference.
    series_trans = tio.Compose([
        # tio.ToCanonical(),
        # Resample2((0.64, 0.64, 3)), # exact (0.64453125, 0.64453125, 3)
        # RandomResample(),
        # SelectRandomChannel()
    ])
    # --------------------------------- Augmentation ---------------------------------------
    p=0.5
    item_trans = tio.Compose([
        ZNormalization(percentiles=(0.5, 99.5), per_channel=True, masking_method=lambda x:x>0),
        tio.RandomFlip(axes=(0,1,2), flip_probability=0.5, p=1),
        # Cut-out is applied to the source only, so the model must inpaint
        # the masked regions to reconstruct the target.
        RandomCutOut((64, 64, 4), (32, 32, 2), (4, 4, 1), patch_per='image', include=['source']),
        tio.RandomNoise(mean=0, std=(0, 0.75), p=p, include=['source']),
        RandomCropOrPad((256, 256, 16)),
        RandomDisableChannel((0,1), disable_per='subject', p=p, include=['source', 'target'])
    ])
    # ----------------------- Load Data ----------------------------------
    dm = BreastDataModuleLR(
        # path_root = Path('/home/gustav/Documents/datasets/BreastDataset/Gustav'),
        path_root = Path('/mnt/hdd/datasets/breast/UKA/UKA_2021_05_25'),
        batch_size=batch_size,
        target=target,
        series_trans=series_trans,
        item_trans=item_trans,
        source_files=source_files[target],
        target_files={'mask':'mask_breast_nn.nii.gz'},
        target_shape=roi_size[::-1], # The bounding box of the breast mask is enlarged (if smaller) to target_shape to prevent padding with zeros. Only used for target=='tissue'.
    )
    # Load fixed, balanced split (disabled; GroupKFold is run instead below).
    # dm._item_pointers_split = dm.load_split(Path.cwd() / 'runs/splits/BreastDatasetLR'/dm.ds_kwargs['target'],
    # split_file=f'data_split_{dm.Dataset.__name__}.yaml' )
    dm.setup('fit') # Run GroupKFold if item_pointers aren't initialized yet
    dm.save(path_run_dir) # Save setup configs
    #---------------------- Cross-Fold --------------------
    for split in range(0, 5):# dm.n_splits
        path_split_dir = path_run_dir/('split_'+str(split))
        path_split_dir.mkdir(parents=True, exist_ok=True)
        dm.setup_split('fit', split=split) # Create train/val datasets for specific split
        # --------------------------- Initialize Model ----------------------
        # Reconstruction pretraining: output channels mirror the input channels.
        in_ch = len(dm.ds_train.kwargs['source_files']['source'])
        out_ch = in_ch
        # -------- Choose model --------
        # model = BasicUNet(in_ch=in_ch, out_ch=out_ch, roi_size=roi_size, target_type='image')
        # model = nnUNet(in_ch=in_ch, out_ch=out_ch, roi_size=roi_size, target_type='image')
        model = SwinUNETR(in_ch=in_ch, out_ch=out_ch, roi_size=roi_size, target_type='image',
                    use_spacing = False, # Use spacing as an additional input information
                )
        model.loss_fct= torch.nn.MSELoss()
        # -------------- Training Initialization ---------------
        # NOTE(review): early stopping / checkpointing monitor the *training*
        # loss, not a validation metric — presumably intentional for
        # pretraining; verify.
        to_monitor = "train/loss" # WARNING: If log() is not called this parameter is ignored!
        min_max = "min"
        early_stopping = EarlyStopping(
            monitor=to_monitor,
            min_delta=0.0, # minimum change in the monitored quantity to qualify as an improvement
            patience=30, # number of checks with no improvement
            mode=min_max
        )
        checkpointing = ModelCheckpoint(
            dirpath=str(path_split_dir), # dirpath
            monitor=to_monitor,
            every_n_train_steps=50,
            save_last=True,
            save_top_k=1,
            mode=min_max,
        )
        trainer = Trainer(
            gpus=gpus,
            precision=16,
            gradient_clip_val=0.5,
            default_root_dir=str(path_split_dir),
            callbacks=[checkpointing, early_stopping],
            enable_checkpointing=True,
            check_val_every_n_epoch=1,
            log_every_n_steps=50, # 50
            auto_lr_find=False,
            # limit_train_batches=1.0,
            # limit_val_batches=1.0, # 0 = disable validation
            min_epochs=100,
            max_epochs=1001,
            num_sanity_val_steps=2,
        )
        # ---------------- Execute Training ----------------
        trainer.fit(model, datamodule=dm)
        # ------------- Save path to best model -------------
        model.save_best_checkpoint(trainer.logger.log_dir, checkpointing.best_model_path)
        # Free the trainer before starting the next fold.
        del trainer
TraBS | TraBS-main/scripts/main_compute_segmentation_quality.py | import logging
from pathlib import Path
import numpy as np
import pandas as pd
import monai.metrics as mm
import torchio as tio
from breaststudies.utils import one_hot
from breaststudies.metrics import compute_surface_distances, compute_average_surface_distance
from breaststudies.data import BreastDatasetCreator, BreastUKADataset
# Flat evaluation script: compares neural-network segmentation masks against
# ground truth and writes per-case Dice / ASSD metrics to a CSV file.
# ----------------- Evaluation configuration -----------------
dataset_name = 'duke' # 'uka', 'duke', 'breast-diagnosis'
model = '2023_02_06_191619_nnUNet_breast'
cohort = 'subset' # 'subset'
lateral='unilateral' # 'bilateral' or 'unilateral'
target_name = 'tissue' # 'tissue' , 'breast'
use_2d = False
# Predictions live under .../predictions; metrics and logs go next to them.
path_root = Path().cwd()/'results'/dataset_name/cohort/'predictions'/target_name/model
path_predictions = path_root/'predictions'
path_out = path_root/'eval'
path_out.mkdir(parents=True, exist_ok=True)
path_log_dir = path_root/'log'
path_log_dir.mkdir(parents=True, exist_ok=True)
ds = BreastDatasetCreator(
    dataset_name,
    cohort,
    lateral=lateral,
    use_2d=use_2d,
    out_format='torch',
    source_files={},
    target=target_name,
    item_trans=None,
    series_trans=None,
    manipulate_label_func=BreastUKADataset.manipulate_label_func
)
# ds.labels.pop("DCIS")
eval_labels = ds.labels #{'FGT':1}
# ------------ Logging --------------------
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
logger.addHandler(logging.FileHandler(path_log_dir/'log_seg_quality.log', 'w'))
# Desired Metrics / Labels present.
# One column per label for Dice, ASSD, and presence flags (NN prediction / GT).
results = {'UID':[]}
results.update({f'{label}_Dice':[] for label in eval_labels.keys() })
results.update({f'{label}_ASSD':[] for label in eval_labels.keys() })
results.update({f'{label}_{rater}':[] for label in eval_labels.keys() for rater in ['NN', 'GT']})
for case_n, path_case_file in enumerate(path_predictions.rglob(f'mask_{target_name}_nn.nii.gz')):
    path_case_dir = path_case_file.relative_to(path_predictions).parent
    item_pointer = ((Path(path_case_dir)),)
    # Load Ground Truth and add Prediction to series
    series = ds.load_series(item_pointer, ds.path_root, **ds.kwargs)
    series.add_image(tio.LabelMap(path_case_file), 'pred')
    # Split GT and Pred into desired format (eg. left/right or slices)
    series_items = ds.series2items(item_pointer[:max(1, len(item_pointer)-1)], series, **ds.kwargs)
    for item in series_items.values():
        # -------- Get data -------
        item = ds.item2out(item, **ds.kwargs)
        uid = item['uid']
        target = item['target']
        pred = item['pred']
        spacing = item['spacing'] # [x,y,z]
        logger.info(f"Case {case_n} UID: {uid}" )
        # -------- Make one-hot --------
        target_onehot = one_hot(target, len(eval_labels)) # [B, C, D, H, W]
        pred_onehot = one_hot(pred, len(eval_labels)) # [B, C, D, H, W]
        # ------ Compute Metrics -------
        results['UID'].append(uid)
        dice_score = mm.compute_meandice(pred_onehot, target_onehot, include_background=True, ignore_empty=False)[0]
        # assd_val = mm.compute_average_surface_distance(pred_onehot, target_onehot, include_background=True, symmetric=True) # Doesn't consider spacing
        # Iterate over each label
        for lab_name, lab_val in eval_labels.items():
            results[f'{lab_name}_GT'].append(lab_val in target)
            results[f'{lab_name}_NN'].append(lab_val in pred)
            results[f'{lab_name}_Dice'].append(dice_score[lab_val].item())
            try:
                # Surface distances take numpy bool masks; spacing is reversed
                # to match the [z, y, x] array axis order.
                surface_distances = compute_surface_distances(target_onehot[0,lab_val].numpy().astype(bool), pred_onehot[0,lab_val].numpy().astype(bool), spacing[::-1])
                results[f'{lab_name}_ASSD'].append(np.mean(compute_average_surface_distance(surface_distances)))
            except:
                # NOTE(review): bare except maps *any* failure (including real
                # bugs) to NaN; intended for absent labels — consider
                # narrowing to the specific exception type raised there.
                results[f'{lab_name}_ASSD'].append(float('NaN')) # eg. if label is not present
        logger.info("")
df = pd.DataFrame(results)
# Summarize per-label Dice across all cases in the log.
for label in eval_labels.keys():
    logger.info(f"{label} Dice (mean±std): {df[f'{label}_Dice'].mean():.3f} ± {df[f'{label}_Dice'].std():.3f}")
    logger.info(f"{label} Dice (min, max): {df[f'{label}_Dice'].min():.3f} to {df[f'{label}_Dice'].max():.3f} ")
df = df.set_index('UID', drop=True)
df.to_csv(path_out/f'segmentation_{lateral}_2D{use_2d}.csv')
| 4,308 | 36.469565 | 168 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.