INSTRUCTION | RESPONSE |
|---|---|
Add n-dimensional embedding as the layer (vertical) timing signal.
Adds embeddings to represent the position of the layer in the tower.
Args:
x: a tensor with shape [batch, length, depth]
layer: layer num
num_layers: total number of layers
Returns:
a Tensor the same shape as x. | def add_layer_timing_signal_learned_1d(x, layer, num_layers):
"""Add n-dimensional embedding as the layer (vertical) timing signal.
Adds embeddings to represent the position of the layer in the tower.
Args:
x: a tensor with shape [batch, length, depth]
layer: layer num
num_layers: total number of la... |
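The response cell above is truncated. As a minimal sketch of the idea, assuming TF 1.x and an invented variable name `layer_embedding` (neither confirmed by the snippet): look up one learned embedding per layer and broadcast-add it.

```python
import tensorflow.compat.v1 as tf

def add_layer_timing_signal_learned_1d_sketch(x, layer, num_layers):
  """Sketch: add a learned per-layer embedding to x ([batch, length, depth])."""
  depth = x.get_shape().as_list()[-1]
  # One embedding per layer of the tower (hypothetical variable name).
  layer_embedding = tf.get_variable(
      "layer_embedding", [num_layers, 1, 1, depth],
      initializer=tf.random_normal_initializer(stddev=depth**-0.5))
  # layer_embedding[layer] has shape [1, 1, depth]; broadcasts over batch/length.
  return x + layer_embedding[layer]
```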
Add sinusoids of different frequencies as layer (vertical) timing signal.
Args:
channels: dimension of the timing signal
layer: layer num
num_layers: total number of layers
Returns:
a Tensor of timing signals [1, 1, channels]. | def get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers):
"""Add sinusoids of different frequencies as layer (vertical) timing signal.
Args:
channels: dimension of the timing signal
layer: layer num
num_layers: total number of layers
Returns:
a Tensor of timing signals [1, 1, channel... |
Add sinusoids of different frequencies as layer (vertical) timing signal.
Args:
x: a Tensor with shape [batch, length, channels]
layer: layer num
num_layers: total number of layers
Returns:
a Tensor the same shape as x. | def add_layer_timing_signal_sinusoid_1d(x, layer, num_layers):
"""Add sinusoids of different frequencies as layer (vertical) timing signal.
Args:
x: a Tensor with shape [batch, length, channels]
layer: layer num
num_layers: total number of layers
Returns:
a Tensor the same shape as x.
"""
c... |
Adds sinusoids of diff frequencies to a Tensor, with timing position given.
Args:
x: a Tensor with shape [batch, length, channels]
position: a Tensor with shape [batch, length]
min_timescale: a float
max_timescale: a float
Returns:
a Tensor the same shape as x. | def add_timing_signal_1d_given_position(x,
position,
min_timescale=1.0,
max_timescale=1.0e4):
"""Adds sinusoids of diff frequencies to a Tensor, with timing position given.
Args:
x: a Tensor ... |
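This is the standard Transformer sinusoid evaluated at the supplied positions rather than at 0..length-1. A self-contained sketch under TF 1.x (the max(..., 1) guard for very small channel counts is my addition):

```python
import math
import tensorflow.compat.v1 as tf

def add_timing_signal_1d_given_position_sketch(x, position,
                                               min_timescale=1.0,
                                               max_timescale=1.0e4):
  """Sketch: x is [batch, length, channels], position is [batch, length] ints."""
  channels = x.get_shape().as_list()[-1]
  num_timescales = channels // 2
  # Geometric progression of timescales from min_timescale to max_timescale.
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      max(num_timescales - 1, 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  # [batch, length, num_timescales]
  scaled_time = (tf.expand_dims(tf.to_float(position), 2) *
                 tf.reshape(inv_timescales, [1, 1, num_timescales]))
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
  signal = tf.pad(signal, [[0, 0], [0, 0], [0, channels % 2]])  # odd channels
  return x + signal
```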
Adds a bunch of sinusoids of different frequencies to a Tensor.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase in one of the positional dimensions.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some pr... | def add_timing_signal_nd(x, min_timescale=1.0, max_timescale=1.0e4):
"""Adds a bunch of sinusoids of different frequencies to a Tensor.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase in one of the positional dimensions.
This allows attention to learn to use ab... |
Adds positional embedding.
Args:
x: Tensor with shape [batch, length, depth].
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
positions: Tensor with shape [batch, length].
Returns:
Tensor of same shape as x. | def add_positional_embedding(x, max_length, name=None, positions=None):
"""Adds positional embedding.
Args:
x: Tensor with shape [batch, length, depth].
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
positions: Tensor wit... |
Adds n-dimensional positional embedding.
The embeddings add to all positional dimensions of the tensor.
Args:
x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional
dimensions, i.e., 1 for text, 2 for images, 3 for video, etc.
max_length: int representing static maximum size of any dime... | def add_positional_embedding_nd(x, max_length, name=None):
"""Adds n-dimensional positional embedding.
The embeddings add to all positional dimensions of the tensor.
Args:
x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional
dimensions, i.e., 1 for text, 2 for images, 3 for video, etc.
... |
Gets edge vectors for the edge types in the adjacency matrix.
Args:
adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints.
num_edge_types: Number of different edge types
depth: Number of channels
name: a string
Returns:
A [batch, num_nodes, num_nodes, depth] tensor of edge vectors | def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None):
"""Gets edge vectors for the edge types in the adjacency matrix.
Args:
adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints.
num_edge_types: Number of different edge types
depth: Number of channels
name: a string... |
Calculate the length of the mask based on padding.
Args:
padding: a Tensor with shape [..., length].
Returns:
a Tensor with shape [...]. | def padding_to_length(padding):
"""Calculate the length of mask based on padding.
Args:
padding: a Tensor with shape [..., length].
Returns:
a Tensor with shape [...].
"""
non_padding = 1.0 - padding
return tf.to_int32(tf.reduce_sum(non_padding, axis=-1)) |
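This row survives untruncated; a quick usage check of the contract described in the docstring:

```python
padding = tf.constant([[0., 0., 1., 1.]])  # two real positions, two padded
length = padding_to_length(padding)        # -> [2], dtype tf.int32
```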
Create a bias tensor to be added to attention logits.
A position may attend to positions at most max_distance from it,
forward and backward.
This does not actually save any computation.
Args:
length: int
max_backward: int, maximum distance backward to attend. Negative values
indicate unlimite... | def attention_bias_local(length, max_backward, max_forward):
"""Create an bias tensor to be added to attention logits.
A position may attend to positions at most max_distance from it,
forward and backwards.
This does not actually save any computation.
Args:
length: int
max_backward: int, maximum di... |
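The body is truncated, but the contract is fully described above. A hedged sketch using a band matrix; tf.matrix_band_part treats -1 as "unlimited", matching the negative-values convention in the docstring. A causal mask, for instance, would be the special case (max_backward=-1, max_forward=0).

```python
import tensorflow.compat.v1 as tf

def attention_bias_local_sketch(length, max_backward, max_forward):
  """Sketch: 0 inside the visible band, a large negative bias outside it."""
  band = tf.matrix_band_part(
      tf.ones([length, length]), max_backward, max_forward)
  return tf.reshape(-1e9 * (1.0 - band), [1, 1, length, length])
```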
Create a bias tensor to be added to attention logits.
Positions with the same segment_ids can see each other.
Args:
query_segment_id: a float `Tensor` with shape [batch, query_length].
memory_segment_id: a float `Tensor` with shape [batch, memory_length].
Returns:
a `Tensor` with shape [batch, 1, ... | def attention_bias_same_segment(query_segment_id, memory_segment_id):
"""Create an bias tensor to be added to attention logits.
Positions with the same segment_ids can see each other.
Args:
query_segment_id: a float `Tensor` with shape [batch, query_length].
memory_segment_id: a float `Tensor` with shap... |
Create a bias tensor to be added to attention logits.
Args:
memory_padding: a float `Tensor` with shape [batch, memory_length].
Returns:
a `Tensor` with shape [batch, 1, 1, memory_length]. | def attention_bias_ignore_padding(memory_padding):
"""Create an bias tensor to be added to attention logits.
Args:
memory_padding: a float `Tensor` with shape [batch, memory_length].
Returns:
a `Tensor` with shape [batch, 1, 1, memory_length].
"""
ret = memory_padding * large_compatible_negative(mem... |
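Per the docstring and the visible first line, padding positions (1.0 in memory_padding) receive a large negative bias. The exact constant behind large_compatible_negative is not shown; -1e9 is typical for float32:

```python
memory_padding = tf.constant([[0., 0., 1.]])     # last memory slot is padding
bias = attention_bias_ignore_padding(memory_padding)
# bias is roughly [[[[0., 0., -1e9]]]], shape [batch=1, 1, 1, memory_length=3]
```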
Inverse of attention_bias_ignore_padding().
Args:
attention_bias: a `Tensor` with shape [batch, 1, 1, memory_length], as
returned by attention_bias_ignore_padding().
cast_fn: function used to cast to output type.
Returns:
a Tensor with shape [batch, memory_length] with 1.0 in padding positions
... | def attention_bias_to_padding(attention_bias, cast_fn=tf.to_float):
"""Inverse of attention_bias_ignore_padding().
Args:
attention_bias: a `Tensor` with shape [batch, 1, 1, memory_length], as
returned by attention_bias_ignore_padding().
cast_fn: function used to cast to output type.
Returns:
a... |
Create a bias tensor for prepend_mode="prepend_inputs_full_attention".
See prepend_inputs in common_hparams.py.
Produces a bias tensor to be used in self-attention.
This bias tensor allows for full connectivity in the "inputs" part of
the sequence and masked connectivity in the targets part.
Args:
pad... | def attention_bias_prepend_inputs_full_attention(padding):
"""Create a bias tensor for prepend_mode="prepend_inputs_full_attention".
See prepend_inputs in common_hparams.py.
Produces a bias tensor to be used in self-attention.
This bias tensor allows for full connectivity in the "inputs" part of
the sequen... |
Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length] | def attention_bias_proximal(length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
"""
r = tf.to_float(tf.range(length))
diff = tf.expand_dims(r, 0) - tf.expand_dims(r, 1)
return tf.expan... |
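The return expression is cut off. The code above builds diff[i, j] = j - i, and a bias that decays with distance, such as -log(1 + |i - j|), would complete it; a hedged sketch:

```python
import tensorflow.compat.v1 as tf

def attention_bias_proximal_sketch(length):
  """Sketch: bias grows more negative as |i - j| grows."""
  r = tf.to_float(tf.range(length))
  diff = tf.expand_dims(r, 0) - tf.expand_dims(r, 1)  # diff[i, j] = j - i
  # Reshape to [1, 1, length, length] so it broadcasts over batch and heads.
  return tf.expand_dims(tf.expand_dims(-tf.log1p(tf.abs(diff)), 0), 0)
```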
Generate a mask to prevent batches from attending to each other.
Args:
batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the
coordinates of the batches
batch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the
coordinates of the batches. If None, do self-attent... | def attention_bias_batch(batch_coordinates_q,
batch_coordinates_k=None,
condition_fn=None):
"""Generate a mask to prevent the batch to attend to each others.
Args:
batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the
coordinates of t... |
Reshape x so that the last dimension becomes two dimensions.
The first of these two dimensions is n.
Args:
x: a Tensor with shape [..., m]
n: an integer.
Returns:
a Tensor with shape [..., n, m/n] | def split_last_dimension(x, n):
"""Reshape x so that the last dimension becomes two dimensions.
The first of these two dimensions is n.
Args:
x: a Tensor with shape [..., m]
n: an integer.
Returns:
a Tensor with shape [..., n, m/n]
"""
x_shape = common_layers.shape_list(x)
m = x_shape[-1]
... |
Reshape x so that the last two dimension become one.
Args:
x: a Tensor with shape [..., a, b]
Returns:
a Tensor with shape [..., ab] | def combine_last_two_dimensions(x):
"""Reshape x so that the last two dimension become one.
Args:
x: a Tensor with shape [..., a, b]
Returns:
a Tensor with shape [..., ab]
"""
x_shape = common_layers.shape_list(x)
a, b = x_shape[-2:]
return tf.reshape(x, x_shape[:-2] + [a * b]) |
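split_last_dimension and combine_last_two_dimensions are inverses; together they move tensors between [batch, length, depth] and the per-head layout. A quick round trip, assuming the definitions above:

```python
x = tf.zeros([4, 10, 64])           # [batch, length, depth]
h = split_last_dimension(x, 8)      # [4, 10, 8, 8]: 8 heads of depth 8
y = combine_last_two_dimensions(h)  # back to [4, 10, 64]
```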
Reshape x so that the first two dimension become one.
Args:
x: a Tensor with shape [a, b, ...]
Returns:
a Tensor with shape [ab, ...] | def combine_first_two_dimensions(x):
"""Reshape x so that the first two dimension become one.
Args:
x: a Tensor with shape [a, b, ...]
Returns:
a Tensor with shape [ab, ...]
"""
ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0))
old_shape = x.get_shape().dims
a, b = old_s... |
Compute color image summary.
Args:
attn: a Tensor with shape [batch, num_heads, query_length, memory_length]
image_shapes: optional tuple of integer scalars.
If the query positions and memory positions represent the
pixels of flattened images, then pass in their dimensions:
(query_rows, q... | def attention_image_summary(attn, image_shapes=None):
"""Compute color image summary.
Args:
attn: a Tensor with shape [batch, num_heads, query_length, memory_length]
image_shapes: optional tuple of integer scalars.
If the query positions and memory positions represent the
pixels of flattened im... |
Multi-head dot-product attention with sparsity.
For each attention head, the queries are partitioned into groups.
For each group, only a subset of the key-value pairs are considered.
The choices of groups are selected based on trained predictors of
the total attention given the group inclusion.
memory_targ... | def grouped_attention_multihead(query_antecedent,
memory_antecedent,
total_key_depth,
total_value_depth,
output_depth,
num_heads,
... |
Make attention weights non-0 only on the top-hard_attention_k ones. | def harden_attention_weights(weights, hard_attention_k):
"""Make attention weights non-0 only on the top-hard_attention_k ones."""
# Subtract the top-kth weight and zero-out all lower ones.
# Note that currently in case of numerical ties it will retain more
# than k elements. In the future, we may want to avoid... |
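The visible comment spells out the plan: subtract the k-th largest weight and zero out everything below it, with ties surviving. A hedged sketch; the renormalization at the end is my assumption, since the truncation hides whether the original rescales:

```python
import tensorflow.compat.v1 as tf

def harden_attention_weights_sketch(weights, hard_attention_k):
  """Sketch: keep only the top hard_attention_k weights per query position."""
  # Value of the k-th largest weight along the memory axis.
  kth = tf.reduce_min(
      tf.nn.top_k(weights, k=hard_attention_k).values, axis=-1, keepdims=True)
  hard = weights * tf.to_float(weights >= kth)  # ties at the k-th value survive
  # Renormalize the kept weights to sum to 1 (an assumption, see lead-in).
  return hard / (tf.reduce_sum(hard, axis=-1, keepdims=True) + 1e-9)
```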
Dot-product attention.
Args:
q: Tensor with shape [..., length_q, depth_k].
k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
match with q.
v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
match with q.
bias: bias Tensor (see attention_bias())
... | def dot_product_attention(q,
k,
v,
bias,
dropout_rate=0.0,
image_shapes=None,
name=None,
make_image_summary=True,
... |
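The core of dot-product attention is small; a minimal sketch of the contract described above. Note that the Notes on the sparse variants later in this table say the 1/sqrt(depth) scaling happens in multihead_attention, not here, so this sketch leaves it out.

```python
import tensorflow.compat.v1 as tf

def dot_product_attention_sketch(q, k, v, bias, dropout_rate=0.0):
  """Sketch: q [..., lq, dk], k [..., lkv, dk], v [..., lkv, dv]."""
  logits = tf.matmul(q, k, transpose_b=True)   # [..., lq, lkv]
  if bias is not None:
    logits += bias                             # e.g. -1e9 at masked positions
  weights = tf.nn.softmax(logits, name="attention_weights")
  if dropout_rate > 0.0:
    weights = tf.nn.dropout(weights, keep_prob=1.0 - dropout_rate)
  return tf.matmul(weights, v)                 # [..., lq, dv]
```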
Generates matrix of relative positions between inputs. | def _generate_relative_positions_matrix(length_q, length_k,
max_relative_position,
cache=False):
"""Generates matrix of relative positions between inputs."""
if not cache:
if length_q == length_k:
range_vec_q = range_vec_k = t... |
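A sketch of the matrix this produces for the non-cache path: pairwise offsets j - i, clipped to the window and shifted so they can index an embedding table (the cache branch, which emits a single query row, is omitted):

```python
import tensorflow.compat.v1 as tf

def relative_positions_matrix_sketch(length_q, length_k, max_relative_position):
  """Sketch of the length_q == length_k, cache=False case."""
  range_q = tf.range(length_q)
  range_k = tf.range(length_k)
  distance = range_k[None, :] - range_q[:, None]  # distance[i, j] = j - i
  clipped = tf.clip_by_value(
      distance, -max_relative_position, max_relative_position)
  # Shift into [0, 2 * max_relative_position] for embedding lookup.
  return clipped + max_relative_position
```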
Generates tensor of size [1 if cache else length_q, length_k, depth]. | def _generate_relative_positions_embeddings(length_q, length_k, depth,
max_relative_position, name,
cache=False):
"""Generates tensor of size [1 if cache else length_q, length_k, depth]."""
with tf.variable_scope(name):
rela... |
Relative position-aware dot-product attention inner calculation.
This batches matrix multiply calculations to avoid unnecessary broadcasting.
Args:
x: Tensor with shape [batch_size, heads, length or 1, length or depth].
y: Tensor with shape [batch_size, heads, length or 1, depth].
z: Tensor with shape... | def _relative_attention_inner(x, y, z, transpose):
"""Relative position-aware dot-product attention inner calculation.
This batches matrix multiply calculations to avoid unnecessary broadcasting.
Args:
x: Tensor with shape [batch_size, heads, length or 1, length or depth].
y: Tensor with shape [batch_si... |
Calculate relative position-aware dot-product self-attention.
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch, heads, length, depth].
k: a Tensor with shape [batch, he... | def dot_product_attention_relative(q,
k,
v,
bias,
max_relative_position,
dropout_rate=0.0,
image_shapes=None,
... |
Helper to dot_product_self_attention_relative_v2.
Rearrange an attention logits or weights Tensor.
The dimensions of the input represent:
[batch, heads, query_position, memory_position - query_position + length - 1]
The dimensions of the output represent:
[batch, heads, query_position, memory_position]
... | def _relative_position_to_absolute_position_masked(x):
"""Helper to dot_product_self_attention_relative_v2.
Rearrange an attention logits or weights Tensor.
The dimensions of the input represent:
[batch, heads, query_position, memory_position - query_position + length - 1]
The dimensions of the output repr... |
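This is the familiar "skew" trick: pad one column, fold the matrix, and slice off the spurious row. A hedged sketch for the masked case, where x is [batch, heads, length, length] with the last axis indexed by relative position:

```python
import tensorflow.compat.v1 as tf

def rel_to_abs_masked_sketch(x):
  """Sketch: relative-indexed logits -> absolute-indexed logits (masked)."""
  shape = tf.shape(x)
  batch, heads, length = shape[0], shape[1], shape[2]
  x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])        # zero column on the left
  x = tf.reshape(x, [batch, heads, 1 + length, length])  # fold the last two axes
  return tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1])     # drop the junk row
```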
Helper function for dot_product_unmasked_self_attention_relative_v2.
Rearrange an attention logits or weights Tensor.
The dimensions of the input represent:
[batch, heads, query_position, memory_position]
The dimensions of the output represent:
[batch, heads, query_position, memory_position - query_positio... | def _absolute_position_to_relative_position_unmasked(x):
"""Helper function for dot_product_unmasked_self_attention_relative_v2.
Rearrange an attention logits or weights Tensor.
The dimensions of the input represent:
[batch, heads, query_position, memory_position]
The dimensions of the output represent:
... |
Instantiate or retrieve relative embeddings, sliced according to length.
Use for unmasked case where the relative attention looks both left and right.
Args:
max_relative_position: an Integer for the number of entries in the relative
embedding, which corresponds to the max relative distance that is
... | def get_relative_embeddings_left_right(max_relative_position, length, depth,
num_heads,
heads_share_relative_embedding,
name):
"""Instantiate or retrieve relative embeddings, sliced according to length... |
Calculate relative position-aware dot-product self-attention.
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch, heads, length, depth].
k: a Tensor with shape [batch, he... | def dot_product_unmasked_self_attention_relative_v2(
q, k, v, bias, max_relative_position=None, dropout_rate=0.0,
image_shapes=None, name=None, make_image_summary=True,
dropout_broadcast_dims=None, heads_share_relative_embedding=False,
add_relative_to_values=False):
"""Calculate relative position-awar... |
Helper function for dot_product_unmasked_self_attention_relative_2d. | def _matmul_with_relative_keys_2d(x, y, heads_share_relative_embedding):
"""Helper function for dot_product_unmasked_self_attention_relative_2d."""
if heads_share_relative_embedding:
ret = tf.einsum("bhxyd,md->bhxym", x, y)
else:
ret = tf.einsum("bhxyd,hmd->bhxym", x, y)
return ret |
Calculate relative position unmasked dot-product self-attention 2d.
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v in
height and width dimensions. for query index (i,j) and key index (l, m),
the logit is q_i k... | def dot_product_unmasked_self_attention_relative_2d(
q, k, v, bias, max_relative_position=None, dropout_rate=0.0,
image_shapes=None, name=None, make_image_summary=True,
dropout_broadcast_dims=None, heads_share_relative_embedding=False,
add_relative_to_values=False):
"""Calculate relative position unma... |
Helper function for local 2d attention.
Takes a tensor of [batch, heads, num_h_blocks, num_w_blocks,
height, width, depth] and returns two tensors which contain every alternate
position along the width
Args:
x_left_right_blocks: A [batch, num_h_blocks, num_w_blocks,
height, wi... | def _split_along_width(x_left_right_blocks):
"""Helper function for local 2d attention.
Takes a tensor of [batch, heads, num_h_blocks, num_w_blocks,
height, width, depth] and returns two tensors which contain every alternate
position along the width
Args:
x_left_right_blocks: A [batch, num_h_blocks, nu... |
Helper function. Assumes that memory_flange is half of query sizes.
This function splits the tensor of width 'n' into two halves, where the
first half gets the width indices 0, 2, 4, ... and the second half gets the
width indices 3, 5, ... We also fuse two blocks along the h dimension.
Args:
x: a 6-d tensor.... | def _get_left_right_blocks(x):
"""Helper function. Assumes that memory_flange is half of query sizes.
This function splits the tensor of width 'n' into two halves, where the
first half gets the width indices 0, 2, 4, ... and the second half gets the
width indices 3, 5, ... We also fuse two blocks along the h dime... |
Stitches together the local 2d memory blocks.
Args:
x: a [batch, height, width, depth] tensor
query_shape: 2-d integer list of query shape
memory_flange: 2-d integer list of memory flanges
Returns:
x: A [batch, num_h_blocks, num_w_blocks,
query_shape[0]+2*memory_flange[0],query_shape[1]+... | def get_2d_local_memory(x, query_shape, memory_flange):
"""Stitches together the local 2d memory blocks.
Args:
x: a [batch, height, width, depth] tensor
query_shape: 2-d integer list of query shape
memory_flange: 2-d integer list of memory flanges
Returns:
x: A [batch, num_h_blocks, num_w_blocks... |
Gathers memory blocks around query blocks. The flange is half of the query size.
Only works if memory flanges are half of query sizes.
Args:
x: a [batch, height, width, depth] tensor
query_shape: 2-d integer list of query shape
memory_flange: 2-d integer list of memory flanges
Returns:
x: A [batch, num... | def get_2d_local_memory_v2(x, query_shape, memory_flange):
"""Gathering memory blocks around query blocks. flange is half of query .
Only works if memory flanges are half of query sizes.
Args:
x: a [batch, height, width, depth] tensor
query_shape: 2-d integer list of query shape
memory_flange: 2-d... |
Calculate unmasked dot-product local self-attention 2d on tpu.
Args:
q: a Tensor with shape [batch, heads, height, width, depth].
k: a Tensor with shape [batch, heads, height, width, depth].
v: a Tensor with shape [batch, heads, height, width, depth].
bias: bias Tensor.
max_relative_position: an ... | def dot_product_unmasked_attention_local_2d_tpu(
q, k, v, bias, max_relative_position=None, query_shape=(8, 8),
dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=False,
dropout_broadcast_dims=None):
"""Calculate unmasked dot-product local self-attention 2d on tpu.
Args:
q: a Tensor... |
Calculate simple unmasked dot-product local self-attention 2d on tpu.
The query, key, and value blocks are the same. We do not do a second linear
transformation after computing the values
Args:
x: a Tensor with shape [batch, height, width, depth].
bias: bias Tensor.
total_key_depth: the dimensions o... | def dot_product_unmasked_attention_local_2d_tpu_simple(
x, bias, total_key_depth, total_value_depth, num_heads,
query_shape=(8, 8),
dropout_rate=0.0, image_shapes=None, make_image_summary=False,
dropout_broadcast_dims=None):
"""Calculate simple unmasked dot-product local self-attention 2d on tpu.
... |
Converts tensor from relative to absolute indexing for local attention.
Args:
x: a Tensor of shape [batch (or batch*num_blocks), heads,
length, 2 * length - 1]
Returns:
A Tensor of shape [batch (or batch*num_blocks), heads, length, length] | def _relative_position_to_absolute_position_unmasked(x):
"""Converts tensor from relative to aboslute indexing for local attention.
Args:
x: a Tensor of shape [batch (or batch*num_blocks), heads,
length, 2 * length - 1]
Returns:
A Tensor of shape [batch (or batch*num_blocks), h... |
Attention to the source position and a neighborhood to the left of it.
The sequence is divided into blocks of length block_length. Attention for a
given query position can only see memory positions less than or equal to the
query position, in the corresponding block and the previous block.
Args:
q: a Tens... | def masked_local_attention_1d(q,
k,
v,
block_length=128,
make_image_summary=False,
dropout_rate=0.,
name=None):
"""Attention to the source... |
Helper function to create a local version of the keys or values for 1d. | def _make_local_block(x, depth, batch, heads, num_blocks, block_length):
"""Helper function to create a local version of the keys or values for 1d."""
prev_block = tf.slice(x, [0, 0, 0, 0, 0],
[-1, -1, num_blocks - 1, -1, -1])
cur_block = tf.slice(x, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1])
... |
Masked local 1d attention with relative positions.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
If mask_right is True, then a target po... | def masked_relative_local_attention_1d(q,
k,
v,
block_length=128,
make_image_summary=False,
dropout_rate=0.,
... |
Strided block local self-attention.
The sequence is divided into blocks of length block_length. Attention for a
given query position can see all memory positions in the corresponding block
and filter_width many positions to the left and right of the block.
Args:
q: a Tensor with shape [batch, heads, lengt... | def local_attention_1d(q, k, v, block_length=128, filter_width=100, name=None):
"""Strided block local self-attention.
The sequence is divided into blocks of length block_length. Attention for a
given query position can see all memory positions in the corresponding block
and filter_width many positions to the ... |
Reshapes input by splitting its length over blocks of memory_block_size.
Args:
x: a Tensor with shape [batch, heads, length, depth]
x_shape: tf.TensorShape of x.
memory_block_size: Integer which divides length.
Returns:
Tensor with shape
[batch, heads, length // memory_block_size, memory_block... | def reshape_by_blocks(x, x_shape, memory_block_size):
"""Reshapes input by splitting its length over blocks of memory_block_size.
Args:
x: a Tensor with shape [batch, heads, length, depth]
x_shape: tf.TensorShape of x.
memory_block_size: Integer which divides length.
Returns:
Tensor with shape
... |
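The reshape itself is one line; a sketch matching the documented shapes:

```python
import tensorflow.compat.v1 as tf

def reshape_by_blocks_sketch(x, x_shape, memory_block_size):
  """Sketch: [batch, heads, length, depth] -> [..., num_blocks, block, depth]."""
  return tf.reshape(x, [x_shape[0], x_shape[1],
                        x_shape[2] // memory_block_size,
                        memory_block_size, x_shape[3]])
```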
Dilated self-attention.
Args:
q: a Tensor with shape [batch, heads, length, depth]
k: a Tensor with shape [batch, heads, length, depth]
v: a Tensor with shape [batch, heads, length, depth]
query_block_size: an integer indicating size of query block
memory_block_size: an integer indicating the siz... | def dilated_self_attention_1d(q,
k,
v,
query_block_size=128,
memory_block_size=128,
gap_size=2,
num_memory_blocks=2,
... |
Gathers blocks with gaps in between.
Args:
x: Tensor of shape [length, batch, heads, depth]
num_memory_blocks: how many memory blocks to look in "direction". Each will
be separated by gap_size.
gap_size: an integer indicating the gap size
query_block_size: an integer indicating size of query bl... | def gather_dilated_memory_blocks(x,
num_memory_blocks,
gap_size,
query_block_size,
memory_block_size,
gather_indices,
dire... |
Dilated self-attention. TODO(avaswani): Try it and write a paper on it.
Args:
q: a Tensor with shape [batch, heads, length, depth]
k: a Tensor with shape [batch, heads, length, depth]
v: a Tensor with shape [batch, heads, length, depth]
query_block_size: an integer
memory_block_size: an integer i... | def masked_dilated_self_attention_1d(q,
k,
v,
query_block_size=64,
memory_block_size=64,
gap_size=2,
... |
Strided block local self-attention.
The 2-D sequence is divided into 2-D blocks of shape query_shape. Attention
for a given query position can only see memory positions less than or equal to
the query position. The memory positions are the corresponding block with
memory_flange many positions to add to the hei... | def local_attention_2d(q,
k,
v,
query_shape=(8, 16),
memory_flange=(8, 16),
name=None):
"""Strided block local self-attention.
The 2-D sequence is divided into 2-D blocks of shape query_shape. Attenti... |
Pads x so that its height and width are multiples of block_shape.
Args:
x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor
block_shape: a 2-d list of integer shapes
Returns:
padded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor | def pad_to_multiple_2d(x, block_shape):
"""Making sure x is a multiple of shape.
Args:
x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor
block_shape: a 2-d list of integer shapes
Returns:
padded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor
"""
old_shape = x.get_s... |
Gathers flattened blocks from x. | def gather_blocks_2d(x, indices):
"""Gathers flattened blocks from x."""
x_shape = common_layers.shape_list(x)
x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])])
# [length, batch, heads, dim]
x_t = tf.transpose(x, [2, 0, 1, 3])
x_new = tf.gather(x_t, indices)
# returns [batch, heads, num_blocks, b... |
Reshapes a tensor between dimensions i and j. | def reshape_range(tensor, i, j, shape):
"""Reshapes a tensor between dimensions i and j."""
t_shape = common_layers.shape_list(tensor)
target_shape = t_shape[:i] + shape + t_shape[j:]
return tf.reshape(tensor, target_shape) |
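reshape_range is complete; a quick round trip showing how it merges and then re-splits a range of dimensions:

```python
x = tf.zeros([2, 3, 4, 5])
y = reshape_range(x, 1, 3, [12])    # merge dims 1..2:  [2, 12, 5]
z = reshape_range(y, 1, 2, [3, 4])  # split them back:  [2, 3, 4, 5]
```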
Scatters blocks from x into shape with indices. | def scatter_blocks_2d(x, indices, shape):
"""scatters blocks from x into shape with indices."""
x_shape = common_layers.shape_list(x)
# [length, batch, heads, dim]
x_t = tf.transpose(
tf.reshape(x, [x_shape[0], x_shape[1], -1, x_shape[-1]]), [2, 0, 1, 3])
x_t_shape = common_layers.shape_list(x_t)
indi... |
Getting gather indices. | def gather_indices_2d(x, block_shape, block_stride):
"""Getting gather indices."""
# making an identity matrix kernel
kernel = tf.eye(block_shape[0] * block_shape[1])
kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
# making indices [1, h, w, 1] to apply convs
x_shape = common_layers... |
Creates a mask for 2d block raster scan.
The query mask can look to the left, top left, top, and top right, but
not to the right. Inside the query, we have the standard raster scan
masking.
Args:
query_shape: A tuple of ints (query_height, query_width)
memory_flange: A tuple of ints
(memory_flang... | def make_2d_block_raster_mask(query_shape, memory_flange):
"""Creates a mask for 2d block raster scan.
The query mask can look to the left, top left, top, and top right, but
not to the right. Inside the query, we have the standard raster scan
masking.
Args:
query_shape: A tuple of ints (query_height, que... |
Get the memory regions that surround a 2d query.
The memory regions will be the left and top right.
Args:
x: A tensor with shape [batch, heads, height, width, depth]
query_block_shape: a 2-d tuple of integers
memory_flange: a 2-d tuple of integers
q_indices: a tensor of indices for each of the c... | def get_memory_region(x, query_block_shape, memory_flange, q_indices):
"""Get the memory regions that surround a 2d query.
The memory regions will be the left and top right.
Args:
x: A tensor with shape [batch, heads, height, width, depth]
query_block_shape: a 2-d tuple of integers
memory_flange: ... |
Get right shifted blocks for masked local attention 2d.
Args:
x: A tensor with shape [batch, heads, height, width, depth]
indices: The indices to gather blocks
Returns:
x_shifted: a tensor of extracted blocks, each block right shifted along
length. | def get_shifted_center_blocks(x, indices):
"""Get right shifted blocks for masked local attention 2d.
Args:
x: A tensor with shape [batch, heads, height, width, depth]
indices: The indices to gather blocks
Returns:
x_shifted: a tensor of extracted blocks, each block right shifted along
length.... |
Right shifts once in every block.
Args:
x: a tensor of shape [batch, height, width, depth]
query_shape: A 2d tuple of ints
name: a string
Returns:
output: a tensor of the same shape as x | def right_shift_blockwise(x, query_shape, name=None):
"""Right shifts once in every block.
Args:
x: a tensor of shape [batch, height, width, depth]
query_shape: A 2d tuple of ints
name: a string
Returns:
output: a tensor of the same shape as x
"""
with tf.variable_scope(
name, default_... |
Strided block local self-attention.
Each position in a query block can attend to all the generated queries in
the query block, which are generated in raster scan, and positions that are
generated to the left and top. The shapes are specified by query shape and
memory flange. Note that if you're using this func... | def masked_local_attention_2d(q,
k,
v,
query_shape=(8, 16),
memory_flange=(8, 16),
name=None):
"""Strided block local self-attention.
Each position in a query block ... |
Computes attention component (query, key or value).
Args:
antecedent: a Tensor with shape [batch, length, channels]
total_depth: an integer
filter_width: An integer specifying how wide you want the attention
component to be.
padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No paddi... | def compute_attention_component(antecedent,
total_depth,
filter_width=1,
padding="VALID",
name="c",
vars_3d_num_heads=0,
layer_c... |
Computes query, key and value.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels]
total_key_depth: an integer
total_value_depth: an integer
q_filter_width: An integer specifying how wide you want the query to ... | def compute_qkv(query_antecedent,
memory_antecedent,
total_key_depth,
total_value_depth,
q_filter_width=1,
kv_filter_width=1,
q_padding="VALID",
kv_padding="VALID",
vars_3d_num_heads=0,
... |
Multihead scaled-dot-product attention with input/output transformations.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels] or None
bias: bias Tensor (see attention_bias())
total_key_depth: an integer
total_v... | def multihead_attention(query_antecedent,
memory_antecedent,
bias,
total_key_depth,
total_value_depth,
output_depth,
num_heads,
dropout_rate,
... |
2d Multihead scaled-dot-product attention with input/output transformations.
Args:
query_antecedent: a Tensor with shape [batch, h, w, depth_k]
memory_antecedent: a Tensor with shape [batch, h, w, depth_k]
total_key_depth: an integer
total_value_depth: an integer
output_depth: an integer
num_he... | def multihead_attention_2d(query_antecedent,
memory_antecedent,
total_key_depth,
total_value_depth,
output_depth,
num_heads,
attention_type="local_attention_2... |
Self-attention feedforward layer.
We use self-attention to do feedforward computations. We apply this function
positionwise where for each position, we linearly transform the output to have
depth filter_depth, and break up the result depth-wise into num_parts
contiguous parts. The parts self-attend, we concate... | def ffn_self_attention_layer(x,
filter_depth,
output_depth,
num_parts,
dropout_rate,
share_kv=False,
name=None):
"""Self-attention feedforward l... |
Attention over parameters.
We use the same multi-headed attention as in the other layers, but the memory
keys and values are model parameters. There is no linear transformation on
the keys or values.
We are also a bit more careful about memory usage, since the number of
memory positions may be very large.
... | def parameter_attention(x,
total_key_depth,
total_value_depth,
output_depth,
memory_rows,
num_heads,
dropout_rate,
name=None):
"""Attention over param... |
Return a tensor with given shape containing coordinate along given axis.
Args:
shape: a Tensor representing the shape of the output Tensor
axis: an integer
Returns:
A tensor with the given shape and type tf.int32, where each element is its
coordinate along the given axis. | def coordinate_tensor(shape, axis):
"""Return a tensor with given shape containing coordinate along given axis.
Args:
shape: a Tensor representing the shape of the output Tensor
axis: an integer
Returns:
A tensor with the given shape and type tf.int32, where each element is its
coordinate along the gi... |
Implements attention that runs inside each expert.
Args:
x: A tensor of shape [batch, depth]. Contains representations from
different positions, which are lexicographically ordered.
batch_coordinate: A tensor of shape [batch, 1] containing the batch
coordinate of each element in x. This is neede... | def self_attention_expert(x,
batch_coordinate,
mask_right=True,
split_batch=False,
attention_num_head=1,
attention_kq_size=None,
attention_v_size=None):
"""Implem... |
Attention using a mixture of experts.
Positions sent to the same expert can attend to each other.
The mixture of experts is "local" in that it is replicated on each
datashard.
local_moe flattens all batches to avoid problems with padding (e.g. all
padding going to the same expert, self attention ... | def local_expert_attention(x,
k,
loss_coef,
attention_num_experts,
train=True,
batch_coordinate=None,
**kwargs):
"""Attention using a mixture of experts.
... |
Perform dot product on a subset of the sequence.
Can add a mask to the attention to prevent sequences from attending to each other
and to prevent attention to the future.
Args:
q (tf.Tensor): Queries of shape [length_expert_q, depth_k]
k (tf.Tensor): Keys of shape [length_expert_k, depth_k]
v (tf.Tensor)... | def expert_dot_product(q, k, v, info_q, info_k):
"""Perform dot product on a subset of the sequence.
Can add a mask to the attention to prevent sequences from attending to each other
and to prevent attention to the future.
Args:
q (tf.Tensor): Queries of shape [length_expert_q, depth_k]
k (tf.Tensor): Keys... |
Perform a dot product attention on a single sequence on a single head.
This function dispatches the q, k, v and loops over the buckets to compute the
attention dot product on each subsequence.
Args:
q (tf.Tensor): [length_q, depth_q]
k (tf.Tensor): [length_k, depth_q]
v (tf.Tensor): [length_k, depth_v... | def dot_product_single_head(q, k, v, gates_q, gates_k, bi):
"""Perform a dot product attention on a single sequence on a single head.
This function dispatches the q, k, v and loops over the buckets to compute the
attention dot product on each subsequence.
Args:
q (tf.Tensor): [length_q, depth_q]
k (tf.T... |
Construct the graph with either tf.map_fn or a python for loop.
This function is mainly for benchmarking purposes.
tf.map_fn is dynamic but is much slower than creating a static graph with
a for loop. However, having a for loop makes the graph much longer to build
and can consume too much RAM on distributed s... | def map_fn_switch(fn, elems, use_map_fn=True, **kwargs):
"""Construct the graph with either tf.map_fn or a python for loop.
This function is mainly for benchmarking purposes.
tf.map_fn is dynamic but is much slower than creating a static graph with
a for loop. However, having a for loop makes the graph much l... |
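A hedged sketch of the switch described above, assuming elems is a tuple of tensors whose leading dimension is statically known (the unrolled branch requires it):

```python
import tensorflow.compat.v1 as tf

def map_fn_switch_sketch(fn, elems, use_map_fn=True, **kwargs):
  """Sketch: dynamic tf.map_fn vs. a statically unrolled python loop."""
  if use_map_fn:
    return tf.map_fn(fn, elems, **kwargs)
  # Unroll: apply fn to each slice along axis 0, then restack the results.
  elems_unpacked = [tf.unstack(e) for e in elems]
  out_unpacked = [fn(e) for e in zip(*elems_unpacked)]
  return tf.stack(out_unpacked)
```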
Sparse multihead self attention.
Perform an approximation of the full multihead attention by dispatching
the tokens using their keys/values. Thus the attention matrices are only
computed each time on a subset of the tokens.
Notes:
* The function doesn't perform scaling here (multihead_attention does
the /sq... | def sparse_dot_product_attention(q, k, v, bi, use_map_fn, experts_params):
"""Sparse multihead self attention.
Perform an approximation of the full multihead attention by dispatching
the tokens using their keys/values. Thus the attention matrices are only
computed each time on a subset of the tokens.
Notes:
... |
Perform a dot product attention on a single sequence on a single head.
This function dispatches the q, k, v and loops over the buckets to compute the
attention dot product on each subsequence.
Args:
q (tf.Tensor): [batch*heads, length_q, depth_q]
k (tf.Tensor): [batch*heads, length_k, depth_q]
v (tf.T... | def dot_product_batched_head(q, k, v, gates_q, gates_k, mask_right=False):
"""Perform a dot product attention on a single sequence on a single head.
This function dispatches the q, k, v and loops over the buckets to compute the
attention dot product on each subsequence.
Args:
q (tf.Tensor): [batch*heads, le... |
Sparse multihead self attention.
Perform an approximation of the full multihead attention by dispatching
the tokens using their keys/values. Thus the attention matrices are only
computed each time on a subset of the tokens.
Notes:
* The function doesn't perform scaling here (multihead_attention does
the /sq... | def sparse_dot_product_attention_truncated(
q,
k,
v,
bi, # Unused
experts_params,
use_map_fn=False, # Unused
mask_right=False,
): # pylint: disable=unused-argument
"""Sparse multihead self attention.
Perform an approximation of the full multihead attention by dispatching
the tokens... |
Increase the length and change the dimensionality.
Expand/project each position of dim depth of the input into
factor*tokens of dim out_depth
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Multiplicative factor of each token.
out_depth (int): Output depth (if None, keep depth... | def deconv_elems_1d(x, factor, out_depth=None):
"""Increase the length and change the dimensionality.
Expand/project each position of dim depth of the input into
factor*tokens of dim out_depth
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Multiplicative factor of each token.
... |
Decrease the length and change the dimensionality.
Merge/restore/compress factor positions of dim depth of the input into
a single position of dim out_depth.
This is basically just a strided convolution without overlap
between strides. The original length has to be divisible by factor.
Args:
x (tf.T... | def conv_elems_1d(x, factor, out_depth=None):
"""Decrease the length and change the dimensionality.
Merge/restore/compress factor positions of dim depth of the input into
a single position of dim out_depth.
This is basically just a strided convolution without overlap
between strides. The original lengt... |
Reduce the length dimension using self attention.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
block_length (int): Block length for local attention (Compression factor)
multihead_params (dict): parameters for multihead attention
Returns:
tf.Tensor: Compressed tensor of shape [batch... | def local_reduction_attention(x, block_length, multihead_params):
"""Reduce the length dimension using self attention.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
block_length (int): Block length for local attention (Compression factor)
multihead_params (dict): parameters for multihead... |
Reduce the length dimension by compressing with conv.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
memory_antecedent (tf.Tensor): Unsupported for now
bias (tf.Tensor): Ignored
factor (int): compression factor for the memory sequence
multihead_params (dict): parameters for multihea... | def multihead_self_attention_reduced(
x,
memory_antecedent=None,
bias=None,
factor=None,
multihead_params=None,
nonlinearity="none",
reduction_type="conv",
add_mask=True,
):
"""Reduce the length dimension by compressing with conv.
Args:
x (tf.Tensor): float32 of shape [batch, le... |
Scaled dot-product attention. One head. One spatial dimension.
Args:
q: a Tensor with shape [batch, length_q, depth_k]
k: a Tensor with shape [batch, length_kv, depth_k]
v: a Tensor with shape [batch, length_kv, depth_v]
bias: optional Tensor broadcastable to [batch, length_q, length_kv]
name: an... | def scaled_dot_product_attention_simple(q, k, v, bias, name=None):
"""Scaled dot-product attention. One head. One spatial dimension.
Args:
q: a Tensor with shape [batch, length_q, depth_k]
k: a Tensor with shape [batch, length_kv, depth_k]
v: a Tensor with shape [batch, length_kv, depth_v]
bias: op... |
Multihead scaled-dot-product self-attention.
Includes layer norm.
Returns multihead-self-attention(layer_norm(x))
Computes one attention head at a time to avoid exhausting memory.
If forget=True, then forget all forwards activations and recompute on
the backwards pass.
Args:
x: a Tensor with shape ... | def multihead_self_attention_memory_efficient(x,
bias,
num_heads,
head_size=None,
epsilon=1e-6,
... |
Convert a group index to its bit representation. | def _idx_to_bits(self, i):
"""Convert an group index to its bit representation."""
bits = bin(i)[2:].zfill(self.nb_hyperplanes) # Pad the bits str with 0
return [-1.0 if b == "0" else 1.0 for b in bits] |
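_idx_to_bits is complete; a standalone check of what it computes, with self.nb_hyperplanes replaced by an explicit argument for the demo:

```python
def idx_to_bits(i, nb_hyperplanes):
  bits = bin(i)[2:].zfill(nb_hyperplanes)  # e.g. 5 -> "0101" for 4 planes
  return [-1.0 if b == "0" else 1.0 for b in bits]

assert idx_to_bits(5, 4) == [-1.0, 1.0, -1.0, 1.0]
```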
Return the bucket id of the given tensor.
Args:
x (tf.Tensor): float32 of shape [length, depth]
Returns:
tf.Tensor: One-hot vector int64 of shape [heads, length, nb_buckets]
containing the id of the bucket | def get_gates(self, x):
"""Return the bucket id of the given tensor.
Args:
x (tf.Tensor): float32 of shape [length, depth]
Returns:
tf.Tensor: One-hot vector int64 of shape [heads, length, nb_buckets]
containing the id of the bucket
"""
# The balance loss doesn't propagate to th... |
The image encoder for the VAN.
Similar architecture to Ruben's paper
(http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf).
Args:
x: The image to encode.
first_depth: The depth of the first layer. Depth is increased in subsequent
layers.
reuse: To reuse in variable scope or not.
h... | def van_image_enc_2d(x, first_depth, reuse=False, hparams=None):
"""The image encoder for the VAN.
Similar architecture to Ruben's paper
(http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf).
Args:
x: The image to encode.
first_depth: The depth of the first layer. Depth is increased in subseq... |
The higher level structure encoder for the VAN.
The high level structure is a vector instead of an image.
Args:
x: The higher level structure to encode.
first_depth: The depth of the first layer. Depth is increased in subsequent
layers.
reuse: To reuse in variable scope or not.
Returns:
T... | def van_enc_2d(x, first_depth, reuse=False):
"""The higher level structure encoder for the VAN.
The high level structure is a vector instead of an image.
Args:
x: The higher level structure to encode.
first_depth: The depth of the first layer. Depth is increased in subsequent
layers.
reuse: To... |
The VAN decoder.
Args:
x: The analogy information to decode.
skip_connections: The encoder layers which can be used as skip connections.
output_shape: The shape of the desired output image.
first_depth: The depth of the first layer of the van image encoder.
hparams: The python hparams.
Returns... | def van_dec_2d(x, skip_connections, output_shape, first_depth, hparams=None):
"""The VAN decoder.
Args:
x: The analogy information to decode.
skip_connections: The encoder layers which can be used as skip connections.
output_shape: The shape of the desired output image.
first_depth: The depth of th... |
Implements the deep analogy computation. | def analogy_computation_2d(f_first_enc,
f_first_frame,
f_current_enc,
first_depth):
"""Implements the deep analogy computation."""
with tf.variable_scope('analogy_computation'):
frame_enc_diff = f_first_frame - f_first_enc
fr... |
Implements a VAN.
Args:
first_enc: The first encoding.
first_frame: The first ground truth frame.
current_enc: The encoding of the frame to generate.
gt_image: The ground truth image, only used for regularization.
reuse: To reuse in variable scope or not.
scope_prefix: The prefix before the s... | def van(first_enc,
first_frame,
current_enc,
gt_image,
reuse=False,
scope_prefix='',
hparams=None):
"""Implements a VAN.
Args:
first_enc: The first encoding.
first_frame: The first ground truth frame.
current_enc: The encoding of the frame to generate.
... |
VGG network to use as encoder without the top few layers.
Can be pretrained.
Args:
x: The image to encode. In the range 0 to 1.
enc_final_size: The desired size of the encoding.
reuse: To reuse in variable scope or not.
scope_prefix: The prefix before the scope name.
hparams: The python hparam... | def encoder_vgg(x, enc_final_size, reuse=False, scope_prefix='', hparams=None,
is_training=True):
"""VGG network to use as encoder without the top few layers.
Can be pretrained.
Args:
x: The image to encode. In the range 0 to 1.
enc_final_size: The desired size of the encoding.
reuse... |
LSTM predictor network. | def predictor(enc_flat,
action,
lstm_states,
pred_depth,
reuse=False,
scope_prefix='',
hparams=None):
"""LSTM predictor network."""
with tf.variable_scope(scope_prefix + 'predict', reuse=reuse):
enc_final_size = enc_flat.get_sh... |
Constructs the tensorflow graph of the hierarchical model. | def construct_model(images,
actions=None,
context_frames=2,
hparams=None,
is_training=True):
"""Constructs the tensorflow graph of the hierarchical model."""
pred_depth = 20
enc_out_all, pred_out_all, van_out_all, van_on_enc_all = [... |
Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR) | def peak_signal_to_noise_ratio(true, pred):
"""Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
"""
return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.l... |
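The truncated divisor is consistent with tf.log(10.0), i.e. converting the natural log into a base-10 log. A self-contained sketch of the metric, assuming images in [0, 1] so the peak signal is 1:

```python
import tensorflow.compat.v1 as tf

def psnr_sketch(true, pred):
  """Sketch: PSNR = 10 * log10(MAX^2 / MSE), with MAX = 1 for [0, 1] images."""
  mse = tf.reduce_mean(tf.squared_difference(true, pred))
  return 10.0 * tf.log(1.0 / mse) / tf.log(10.0)
```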
L1 distance between tensors true and pred. | def l1_error(true, pred):
"""L1 distance between tensors true and pred."""
return tf.reduce_sum(tf.abs(true - pred)) / tf.to_float(tf.size(pred)) |
L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image. | def mean_squared_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
result = tf.reduce_sum(
tf.squared_difference(true, pred)) / tf.to_float... |
Calculates loss and psnr for predictions over multiple timesteps. | def calc_loss_psnr(gen_images, images, name, hparams=None, use_l1_loss=False):
"""Calculates loss and psnr for predictions over multiple timesteps."""
del hparams
with tf.name_scope(name):
loss, error, psnr_all = 0.0, 0.0, 0.0
for _, x, gx in zip(range(len(gen_images)), images, gen_images):
recon_co... |
SV2P model hparams. | def next_frame_sv2p():
"""SV2P model hparams."""
hparams = basic_stochastic.next_frame_basic_stochastic()
hparams.optimizer = "true_adam"
hparams.learning_rate_schedule = "constant"
hparams.learning_rate_constant = 1e-3
hparams.video_num_input_frames = 1
hparams.video_num_target_frames = 3
hparams.batch... |
SV2P discrete model hparams. | def next_frame_sv2p_discrete():
"""SV2P discrete model hparams."""
hparams = next_frame_sv2p()
hparams.action_injection = "multiplicative"
hparams.small_mode = True
hparams.add_hparam("bottleneck_bits", 128)
hparams.add_hparam("bottleneck_noise", 0.02)
hparams.add_hparam("discrete_warmup_steps", 40000)
... |
SV2P model for atari. | def next_frame_sv2p_atari():
"""SV2P model for atari."""
hparams = next_frame_sv2p()
hparams.video_num_input_frames = 4
hparams.video_num_target_frames = 4
hparams.action_injection = "multiplicative"
hparams.num_iterations_1st_stage = 12000
hparams.num_iterations_2nd_stage = 12000
hparams.anneal_end = 4... |
SV2P model for atari with softmax. | def next_frame_sv2p_atari_softmax():
"""SV2P model for atari with softmax."""
hparams = next_frame_sv2p_atari()
hparams.bottom = {}
hparams.loss = {}
hparams.top = {}
hparams.internal_loss = True
return hparams |
Tiny SV2P model. | def next_frame_sv2p_tiny():
"""Tiny SV2P model."""
hparams = next_frame_sv2p_atari_softmax()
hparams.batch_size = 2
hparams.tiny_mode = True
hparams.num_masks = 1
hparams.video_modality_loss_cutoff = 0.4
hparams.video_num_input_frames = 4
hparams.video_num_target_frames = 4
return hparams |
SV2P model with additional cutoff in L2 loss for environments like pong. | def next_frame_sv2p_cutoff():
"""SV2P model with additional cutoff in L2 loss for environments like pong."""
hparams = next_frame_sv2p()
hparams.video_modality_loss_cutoff = 0.4
hparams.video_num_input_frames = 4
hparams.video_num_target_frames = 1
return hparams |
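These hparams builders compose: each starts from the previous set and overrides a few fields. Usage is just calling the outermost builder and tweaking further; a hedged sketch with field names taken from the rows above:

```python
hparams = next_frame_sv2p_tiny()     # builds on atari_softmax -> atari -> sv2p
assert hparams.tiny_mode             # set in next_frame_sv2p_tiny
assert hparams.internal_loss         # inherited from next_frame_sv2p_atari_softmax
hparams.video_num_target_frames = 8  # per-experiment override
```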