| column | type |
|---|---|
| repo | string (lengths 7 to 55) |
| path | string (lengths 4 to 223) |
| func_name | string (lengths 1 to 134) |
| original_string | string (lengths 75 to 104k) |
| language | string (1 distinct value) |
| code | string (lengths 75 to 104k) |
| code_tokens | list (lengths 19 to 28.4k) |
| docstring | string (lengths 1 to 46.9k) |
| docstring_tokens | list (lengths 1 to 1.97k) |
| sha | string (length 40) |
| url | string (lengths 87 to 315) |
| partition | string (1 distinct value) |
| repo | path | func_name | language | partition |
|---|---|---|---|---|
| tensorflow/tensor2tensor | tensor2tensor/layers/common_attention.py | `self_attention_expert` | python | train |

sha: `272500b6efe353aeb638d2745ed56e519462ca31`
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4526-L4632

```python
def self_attention_expert(x,
                          batch_coordinate,
                          mask_right=True,
                          split_batch=False,
                          attention_num_head=1,
                          attention_kq_size=None,
                          attention_v_size=None):
  """Implements attention that runs inside each expert.

  Args:
    x: A tensor of shape [batch, depth]. Contains representations from
      different positions, which are lexicographically ordered.
    batch_coordinate: A tensor of shape [batch, 1] containing the batch
      coordinate of each element in x. This is needed to make sure that
      positions from different sequences don't attend to each other.
    mask_right: A bool. If true, we will not attend to positions on the
      right, as in decoder self-attention.
    split_batch (bool): If True, each sequence of the batch is processed
      individually in a loop. If False, the sequences are processed all at
      once and a mask is applied to isolate the sequences from each other.
    attention_num_head (int): number of attention heads
    attention_kq_size (int): dimension used for the attention key and query
    attention_v_size (int): dimension used for the attention value

  Returns:
    out: A tensor of shape [batch, depth].

  example use:
    expert_utils.local_moe(
        ...
        expert_fn=functools.partial(self_attention_expert, mask_right=)
    )
  """
  depth = x.get_shape().as_list()[-1]
  length = common_layers.shape_list(batch_coordinate)[0]

  # Print a warning message if one of the experts isn't used (useful at
  # inference, where summaries aren't used and the gating function doesn't
  # add noise)
  global _expert_count  # Hack to make each expert have a unique id
  _expert_count += 1
  length = tf.cond(
      tf.equal(length, 0),
      lambda: tf.Print(  # pylint: disable=g-long-lambda
          length, [length], "Expert {} empty: ".format(_expert_count)),
      lambda: length,
  )

  tf.summary.scalar("batch_size", length, family="experts_stats_batch_size")

  attention_kq_size = attention_kq_size or depth
  attention_v_size = attention_v_size or depth

  def length_not_null(x, batch_coordinate):
    """Branch of the graph only evaluated when length isn't null."""
    # Mask between the sequences (not used if map_ids is used)
    bias_batch = attention_bias_coordinates(batch_coordinate)

    def add_or_set_if(prev_bias, new_bias, condition):
      """Add the biases together while considering the None case."""
      if not condition:
        return prev_bias
      if prev_bias is None:
        return new_bias
      return prev_bias + new_bias

    def mask_and_call_attention(x):
      """Function applied once for each sequence of the batch."""
      # Mask to prevent sequences from attending to the future
      length = common_layers.shape_list(x)[1]  # x has shape [1, length,...]
      bias_past = tf.reshape(
          attention_bias_lower_triangle(length), [length, length])
      # bias has shape [length, length]
      bias = None
      bias = add_or_set_if(bias, bias_past, mask_right)
      bias = add_or_set_if(bias, bias_batch, not split_batch)
      bias = tf.reshape(bias, [1, 1, length, length])

      return multihead_attention(
          x,
          None,
          bias,
          total_key_depth=attention_kq_size,
          total_value_depth=attention_v_size,
          output_depth=depth,
          num_heads=attention_num_head,
          dropout_rate=0.0)

    if split_batch:
      out = expert_utils.map_ids(x, batch_coordinate, mask_and_call_attention)
    else:
      x = tf.reshape(x, [1, length, depth])
      out = mask_and_call_attention(x)
      out = tf.squeeze(out, 0)
    return out

  # If the length is empty, just forward an empty tensor (avoid having to
  # evaluate multihead_attention with a tensor having a dim equal to zero)
  out = tf.cond(
      tf.equal(length, 0),
      lambda: tf.zeros(shape=[0, depth], dtype=tf.float32, name="empty_out"),
      lambda: length_not_null(x, batch_coordinate),
  )
  return out
```
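A minimal shape sketch may help here: it assumes a TF 1.x graph, and the lengths, depth, and coordinate values are illustrative only (the module-level `_expert_count` and helpers resolve inside `common_attention`, so the call goes through the module).

```python
import tensorflow as tf
from tensor2tensor.layers import common_attention

# Two flattened sequences of lengths 3 and 2, lexicographically ordered:
# x is [batch, depth], where "batch" is really the flattened positions.
x = tf.random_normal([5, 16])
# batch_coordinate tags each row with the sequence it came from: [batch, 1].
batch_coordinate = tf.constant([[0], [0], [0], [1], [1]])

out = common_attention.self_attention_expert(x, batch_coordinate,
                                             mask_right=True)
# out: [5, 16]; rows with different coordinates never attend to each other,
# and with mask_right=True no row attends to a later position.
```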
| repo | path | func_name | language | partition |
|---|---|---|---|---|
| tensorflow/tensor2tensor | tensor2tensor/layers/common_attention.py | `local_expert_attention` | python | train |

sha: `272500b6efe353aeb638d2745ed56e519462ca31`
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4635-L4681

```python
def local_expert_attention(x,
                           k,
                           loss_coef,
                           attention_num_experts,
                           train=True,
                           batch_coordinate=None,
                           **kwargs):
  """Attention using a mixture of experts.

  Positions sent to the same expert can attend to each other.
  The mixture of experts is "local" in that it is replicated on each
  datashard.

  local_moe flattens all batches, so to avoid problems with padding (e.g. all
  padding going to the same expert, self-attention attending to non-null
  padding tokens, ...), the padding should be removed beforehand.

  Args:
    x: a Tensor with shape [batch, length, depth] or [1, batch*length, depth]
    k: The number of experts to dispatch each example to
    loss_coef: a scalar. A multiplier for the expert loss
    attention_num_experts: The number of experts to use
    train: a boolean for the current mode
    batch_coordinate (tf.Tensor): int32 tensor of shape [1, batch*length, 1]
      containing the batch ids. If None, deduced from first dim of x.
    **kwargs: Arguments to forward to self_attention_expert

  Returns:
    y: a Tensor with shape [batch, length, depth]
    loss: a Scalar
  """
  if batch_coordinate is None:
    batch_coordinate = tf.expand_dims(
        coordinate_tensor(common_layers.shape_list(x)[:-1], axis=0), axis=-1)
  with tf.variable_scope("local_expert_attention"):
    additional_dispatch_params = {"batch_coordinate": batch_coordinate}
    return expert_utils.local_moe(
        x,
        train,
        functools.partial(self_attention_expert, **kwargs),
        attention_num_experts,
        k=k,
        loss_coef=loss_coef,
        pass_x=True,
        pass_gates=False,
        additional_dispatch_params=additional_dispatch_params,
    )
```
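A sketch of the public entry point, mirroring the signature above; TF 1.x assumed, and the expert count, k, and loss_coef values are arbitrary:

```python
import tensorflow as tf
from tensor2tensor.layers import common_attention

x = tf.random_normal([1, 64, 32])  # [1, batch*length, depth], padding removed

y, loss = common_attention.local_expert_attention(
    x,
    k=2,                       # dispatch each position to 2 experts
    loss_coef=1e-2,            # multiplier for the expert loss
    attention_num_experts=4,
    train=True,
    mask_right=True,           # forwarded to self_attention_expert via kwargs
)
# y: [1, 64, 32], loss: scalar
```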
| repo | path | func_name | language | partition |
|---|---|---|---|---|
| tensorflow/tensor2tensor | tensor2tensor/layers/common_attention.py | `expert_dot_product` | python | train |

sha: `272500b6efe353aeb638d2745ed56e519462ca31`
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4685-L4746

```python
def expert_dot_product(q, k, v, info_q, info_k):
  """Perform dot product attention on a subset of the sequence.

  Can add a mask to the attention to prevent sequences from attending to each
  other and to prevent attention to the future.

  Args:
    q (tf.Tensor): Queries of shape [length_expert_q, depth_k]
    k (tf.Tensor): Keys of shape [length_expert_k, depth_k]
    v (tf.Tensor): Values of shape [length_expert_k, depth_v]
    info_q (BatchInfo): Batch info for queries. If None, no mask is added
    info_k (BatchInfo): Batch info for keys

  Returns:
    tf.Tensor: dot product attention output ([length_expert_q, depth_v])
  """
  length_q = common_layers.shape_list(q)[0]
  length_k = common_layers.shape_list(k)[0]
  depth_v = v.get_shape().as_list()[-1]

  # Create the mask
  bias = attention_bias_coordinates(info_q.coordinates, info_k.coordinates)
  if info_k.order is not None:
    bias += attention_bias_future(info_q.order, info_k.order)

  # Restore batch and head dimension
  q, k, v = [tf.expand_dims(tf.expand_dims(t, 0), 0) for t in (q, k, v)]

  def is_zero():
    zeros = tf.zeros(shape=[1, 1, length_q, depth_v], dtype=tf.float32)
    zeros = tf.Print(zeros, [length_k, length_q], "length_k/length_q: ")
    return zeros

  def is_not_zero():
    return dot_product_attention(
        q,
        k,
        v,
        bias=bias,
        # No image summary to avoid "Retval[0] does not have value" (because
        # inside a condition)
        make_image_summary=False,
    )

  # TODO(epot): Should make sure a query gets at least one key. Because the
  # different sequences of a batch are merged, it's possible that a query
  # from one sequence only receives memory from another sequence, so with
  # the mask, the query will perform a softmax on -infinity values. A hack
  # could be to add at least one sequence of each batch to each group so
  # the query can attend to at least one element.

  # Softmax(Q.K)*V
  v_out = tf.cond(
      tf.logical_or(tf.equal(length_q, 0), tf.equal(length_k, 0)),
      is_zero,
      is_not_zero,
  )

  # Remove batch and head dimension
  v_out = tf.squeeze(v_out, axis=0)
  v_out = tf.squeeze(v_out, axis=0)
  return v_out
```
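A shape sketch of a direct call, assuming `BatchInfo` is the (coordinates, order) record used throughout this file; the 1-D coordinate and order shapes below are an assumption, and all values are illustrative:

```python
import tensorflow as tf
from tensor2tensor.layers import common_attention

q = tf.random_normal([4, 8])    # [length_expert_q, depth_k]
k = tf.random_normal([6, 8])    # [length_expert_k, depth_k]
v = tf.random_normal([6, 16])   # [length_expert_k, depth_v]

# Which sequence each position came from, and its position in that sequence.
info_q = common_attention.BatchInfo(
    coordinates=tf.constant([0, 0, 1, 1]),
    order=tf.constant([0, 1, 0, 1]))
info_k = common_attention.BatchInfo(
    coordinates=tf.constant([0, 0, 0, 1, 1, 1]),
    order=tf.constant([0, 1, 2, 0, 1, 2]))

out = common_attention.expert_dot_product(q, k, v, info_q, info_k)
# out: [4, 16]; queries only see keys with the same coordinate, and the
# order fields add the bias that prevents attention to the future.
```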
| repo | path | func_name | language | partition |
|---|---|---|---|---|
| tensorflow/tensor2tensor | tensor2tensor/layers/common_attention.py | `dot_product_single_head` | python | train |

sha: `272500b6efe353aeb638d2745ed56e519462ca31`
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4750-L4808

```python
def dot_product_single_head(q, k, v, gates_q, gates_k, bi):
  """Perform dot product attention on a single sequence and a single head.

  This function dispatches q, k, v and loops over the buckets to compute the
  attention dot product on each subsequence.

  Args:
    q (tf.Tensor): [length_q, depth_q]
    k (tf.Tensor): [length_k, depth_q]
    v (tf.Tensor): [length_k, depth_v]
    gates_q (tf.Tensor): One-hot vector of shape [length_q, nb_buckets]
    gates_k (tf.Tensor): One-hot vector of shape [length_k, nb_buckets]
    bi (BatchInfo): Contains the batch coordinates and sequence order

  Returns:
    tf.Tensor: [length_q, depth_v]
  """
  nb_buckets = gates_q.get_shape().as_list()[-1]

  q_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_q)
  k_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_k)

  def eventually_dispatch(dispatcher, value):
    if value is not None:
      return dispatcher.dispatch(value)
    return [None] * nb_buckets

  # Iterate over every dispatched group
  list_v_out = []
  for (
      q_i,
      k_i,
      v_i,
      qbc,
      qbo,
      kbc,
      kbo,
  ) in zip(
      # Dispatch queries, keys and values
      q_dispatcher.dispatch(q),
      k_dispatcher.dispatch(k),
      k_dispatcher.dispatch(v),
      # Also dispatch the sequence positions and batch coordinates
      eventually_dispatch(q_dispatcher, bi.coordinates),
      eventually_dispatch(q_dispatcher, bi.order),
      eventually_dispatch(k_dispatcher, bi.coordinates),
      eventually_dispatch(k_dispatcher, bi.order),
  ):
    list_v_out.append(
        expert_dot_product(
            q_i,
            k_i,
            v_i,
            info_q=BatchInfo(coordinates=qbc, order=qbo),
            info_k=BatchInfo(coordinates=kbc, order=kbo)))

  # Combine all buckets together to restore the original length
  return q_dispatcher.combine(list_v_out)
```
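A sketch of a single-head call with a fixed bucket split; the coordinate shape passed through `BatchInfo` is a guess here, and `order=None` is used so no future mask is built:

```python
import tensorflow as tf
from tensor2tensor.layers import common_attention

length, depth, nb_buckets = 6, 8, 2
q = tf.random_normal([length, depth])
k = tf.random_normal([length, depth])
v = tf.random_normal([length, depth])

# One-hot bucket assignment per position; a fixed half/half split here.
gates = tf.one_hot([0, 0, 0, 1, 1, 1], nb_buckets)

bi = common_attention.BatchInfo(
    coordinates=tf.zeros([length], dtype=tf.int32),  # single sequence
    order=None)  # no future-masking bias

out = common_attention.dot_product_single_head(q, k, v, gates, gates, bi)
# out: [length, depth]; each position attends only within its bucket.
```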
| repo | path | func_name | language | partition |
|---|---|---|---|---|
| tensorflow/tensor2tensor | tensor2tensor/layers/common_attention.py | `map_fn_switch` | python | train |

sha: `272500b6efe353aeb638d2745ed56e519462ca31`
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4811-L4836

```python
def map_fn_switch(fn, elems, use_map_fn=True, **kwargs):
  """Construct the graph with either tf.map_fn or a python for loop.

  This function is mainly for benchmarking purposes.

  tf.map_fn is dynamic but is much slower than creating a static graph with
  a for loop. However, a for loop makes the graph much longer to build and
  can consume too much RAM in a distributed setting.

  Args:
    fn (fct): same as for tf.map_fn, but for now can only return a single
      tensor value (instead of a tuple of tensors for the general case)
    elems (tuple): same as for tf.map_fn
    use_map_fn (bool): If True, tf.map_fn is used; if False, an unrolled
      python for loop is used instead
    **kwargs: Additional tf.map_fn arguments (ignored if use_map_fn is False)

  Returns:
    tf.Tensor: the output of tf.map_fn
  """
  if use_map_fn:
    return tf.map_fn(fn, elems, **kwargs)
  elems_unpacked = (tf.unstack(e) for e in elems)
  out_unpacked = [fn(e) for e in zip(*elems_unpacked)]
  out = tf.stack(out_unpacked)
  return out
```
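This helper is easy to exercise in isolation; a minimal TF 1.x check with arbitrary values:

```python
import tensorflow as tf
from tensor2tensor.layers.common_attention import map_fn_switch

elems = (tf.constant([1., 2., 3.]), tf.constant([10., 20., 30.]))
add = lambda pair: pair[0] + pair[1]

# Dynamic version: a single tf.map_fn node. dtype is needed because fn
# returns a single tensor while elems is a tuple.
dynamic = map_fn_switch(add, elems, use_map_fn=True, dtype=tf.float32)

# Static version: elems are unstacked and fn is applied in a python loop,
# producing an unrolled graph.
static = map_fn_switch(add, elems, use_map_fn=False)

with tf.Session() as sess:
  print(sess.run([dynamic, static]))  # both evaluate to [11., 22., 33.]
```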
| repo | path | func_name | language | partition |
|---|---|---|---|---|
| tensorflow/tensor2tensor | tensor2tensor/layers/common_attention.py | `sparse_dot_product_attention` | python | train |

sha: `272500b6efe353aeb638d2745ed56e519462ca31`
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4840-L4933

```python
def sparse_dot_product_attention(q, k, v, bi, use_map_fn, experts_params):
  """Sparse multihead self attention.

  Perform an approximation of the full multihead attention by dispatching
  the tokens using their keys/values. Thus the attention matrices are only
  computed each time on a subset of the tokens.

  Notes:
    * The function doesn't perform scaling here (multihead_attention does
      the /sqrt(depth)).
    * The padding should have been removed (so batch size should be 1, but
      length contains the elements from all the different batches)
    * Right now, only self attention is supported, so length_q and length_kv
      should be identical and the function will add a triangular mask.
    * If bi.order is not None, the bias is added inside this function to
      prevent attention to the future.

  Args:
    q (tf.Tensor): Queries of shape [batch, heads, length_q, depth_k]
    k (tf.Tensor): Keys of shape [batch, heads, length_q, depth_k]
    v (tf.Tensor): Values of shape [batch, heads, length_kv, depth_v]
    bi (BatchInfo): Contains the batch coordinates and sequence order
    use_map_fn (bool): Use either tf.map_fn or a python for loop to compute
      the heads separately
    experts_params (dict): Additional params for the local expert

  Returns:
    tf.Tensor: Approximation of Softmax(Q.K) * V, of shape
      [batch, heads, length_q, depth_v]
  """
  batch_size, nb_heads, _, depth = common_layers.shape_list(q)

  @expert_utils.add_name_scope()
  def flatten_first_dims(x):
    """Reshape such that x is [num_heads, -1, depth]."""
    # Case 1: Either constant batch size of size 1 or batch already flattened
    if x.get_shape().as_list()[0] == 1:
      return tf.squeeze(x, axis=0)
    # Case 2: Flatten batch dimension
    x = tf.transpose(x, perm=[1, 0, 2, 3])
    x = tf.reshape(x, [nb_heads, -1, depth])
    return x

  def flatten_batch(x):
    if x is None:
      return x
    return expert_utils.flatten_all_but_last(x)

  q = flatten_first_dims(q)
  k = flatten_first_dims(k)
  v = flatten_first_dims(v)
  bi = BatchInfo(
      coordinates=flatten_batch(bi.coordinates),
      order=flatten_batch(bi.order),
  )

  # Unstack heads
  list_q = tf.unstack(q)  # list[tf.Tensor(shape=[batch * length, depth])]
  list_k = tf.unstack(k)
  list_v = tf.unstack(v)

  list_gates_q = []
  list_gates_k = []

  total_loss = 0.0
  # There might be a more optimized way to compute all heads at once
  for single_q, single_k, _ in zip(list_q, list_k, list_v):
    # Each head gets its own dispatcher
    lsh_gating = LshGating(
        depth=single_q.get_shape().as_list()[-1], **experts_params)

    list_gates_q.append(lsh_gating.get_gates(single_q))
    list_gates_k.append(lsh_gating.get_gates(single_k))

  gates_q = tf.stack(list_gates_q)
  gates_k = tf.stack(list_gates_k)

  # Process each head separately.
  v_out = map_fn_switch(
      lambda args: dot_product_single_head(bi=bi, *args),
      elems=(q, k, v, gates_q, gates_k),
      dtype=tf.float32,
      parallel_iterations=2,
      use_map_fn=use_map_fn,
  )

  # Restore original shape as expected by multihead_attention
  if isinstance(batch_size, int) and batch_size == 1:
    v_out = tf.expand_dims(v_out, axis=0)  # Restore batch_size = 1
  else:
    v_out = tf.reshape(v_out, [nb_heads, batch_size, -1, depth])
    v_out = tf.transpose(v_out, [1, 0, 2, 3])
  return v_out, total_loss / nb_heads
```
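A wiring sketch of the full sparse attention, TF 1.x assumed. The `experts_params` keys are a guess at `LshGating`'s constructor arguments, so treat them as a placeholder and check the class for the real names:

```python
import tensorflow as tf
from tensor2tensor.layers import common_attention

batch, heads, length, depth = 1, 2, 16, 8
q = tf.random_normal([batch, heads, length, depth])
k = tf.random_normal([batch, heads, length, depth])
v = tf.random_normal([batch, heads, length, depth])

# Single flattened sequence; order=None skips the future-masking bias.
bi = common_attention.BatchInfo(
    coordinates=tf.zeros([batch, length, 1], dtype=tf.int32),
    order=None)

experts_params = {"nb_hyperplanes": 4}  # hypothetical LshGating kwargs

v_out, loss = common_attention.sparse_dot_product_attention(
    q, k, v, bi, use_map_fn=False, experts_params=experts_params)
# v_out: [batch, heads, length, depth]
```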
| repo | path | func_name | language | partition |
|---|---|---|---|---|
| tensorflow/tensor2tensor | tensor2tensor/layers/common_attention.py | `dot_product_batched_head` | python | train |

sha: `272500b6efe353aeb638d2745ed56e519462ca31`
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4937-L5005

```python
def dot_product_batched_head(q, k, v, gates_q, gates_k, mask_right=False):
  """Perform dot product attention on a batch of sequences and heads.

  This function dispatches q, k, v and loops over the buckets to compute the
  attention dot product on each subsequence.

  Args:
    q (tf.Tensor): [batch*heads, length_q, depth_q]
    k (tf.Tensor): [batch*heads, length_k, depth_q]
    v (tf.Tensor): [batch*heads, length_k, depth_v]
    gates_q (tf.Tensor): One-hot of shape [batch*heads, length_q, nb_buckets]
    gates_k (tf.Tensor): One-hot of shape [batch*heads, length_k, nb_buckets]
    mask_right (bool): Add a bias to prevent attention to the future

  Returns:
    tf.Tensor: [batch*heads, length_q, depth_v]
  """
  nb_buckets = common_layers.shape_list(gates_q)[-1]

  @expert_utils.add_name_scope()
  def get_dispatcher(gates):
    """Construct the dispatcher for the gates."""
    length = common_layers.shape_list(gates)[1]
    # Count the number of ones per batch (and keep the max value)
    nb_elems_to_dispatch = tf.reduce_sum(gates, axis=[1, 2])
    nb_elems_to_dispatch = tf.reduce_max(nb_elems_to_dispatch)
    nb_elems_to_dispatch = tf.to_int32(nb_elems_to_dispatch)
    capacity = nb_elems_to_dispatch // nb_buckets * 2  # Capacity is hardcoded
    capacity = tf.minimum(length, capacity)
    tf.summary.scalar("dispatch_capacity", capacity, family="lsh")
    return expert_utils.TruncatingDispatcher(gates, capacity)

  def add_summary_capacity(x, prefix):
    # Monitor if the capacity overflows
    x = x[0, ...]  # Take the first batch/head
    x = tf.reduce_sum(x, axis=0)
    tf.summary.scalar(prefix + "_min", tf.reduce_min(x), family="lsh")
    tf.summary.scalar(prefix + "_max", tf.reduce_max(x), family="lsh")
    tf.summary.histogram(prefix + "capacity_distribution", x, family="lsh")
    for i in range(3):  # Show the first 3 buckets
      tf.summary.scalar("{}_{}".format(prefix, i), x[i], family="lsh")

  add_summary_capacity(gates_q, "q")
  add_summary_capacity(gates_k, "k")

  q_dispatcher = get_dispatcher(gates_q)
  k_dispatcher = get_dispatcher(gates_k)

  q = q_dispatcher.dispatch(q)
  k = k_dispatcher.dispatch(k)
  v = k_dispatcher.dispatch(v)

  # Bias of shape [batch*heads, nb_buckets, 1, capacity], broadcast to every
  # query
  bias = tf.expand_dims((k_dispatcher.nonpadding() - 1.0) * 1e9, 2)
  if mask_right:
    q_coordinate = tf.to_float(
        tf.expand_dims(q_dispatcher.length_coordinate(), 3))
    k_coordinate = tf.to_float(
        tf.expand_dims(k_dispatcher.length_coordinate(), 2))
    bias += tf.to_float(tf.greater(k_coordinate, q_coordinate)) * -1e9
  # The sequence padding is not masked but is ignored on the next layers

  # q, k, v now have shape [batch*heads, nb_buckets, capacity, depth]
  # The buckets can be seen as different heads
  v_out = dot_product_attention(q, k, v, bias=bias)

  # Combine all buckets together to restore the original length
  return q_dispatcher.combine(v_out)
```
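The batched variant operates on fused batch*heads tensors. A shape sketch, with bucket membership drawn at random just to exercise the dispatchers (all sizes illustrative):

```python
import tensorflow as tf
from tensor2tensor.layers import common_attention

bh, length, depth, nb_buckets = 2, 8, 4, 2   # bh = batch * heads
q = tf.random_normal([bh, length, depth])
k = tf.random_normal([bh, length, depth])
v = tf.random_normal([bh, length, depth])

# One-hot bucket membership per position, here drawn at random.
buckets = tf.random_uniform([bh, length], 0, nb_buckets, dtype=tf.int32)
gates = tf.one_hot(buckets, nb_buckets)

v_out = common_attention.dot_product_batched_head(
    q, k, v, gates_q=gates, gates_k=gates, mask_right=True)
# v_out: [bh, length, depth], assuming combine() restores the full length.
```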
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_attention.py
|
sparse_dot_product_attention_truncated
|
def sparse_dot_product_attention_truncated(
q,
k,
v,
bi, # Unused
experts_params,
use_map_fn=False, # Unused
mask_right=False,
): # pylint: disable=unused-argument
"""Sparse multihead self attention.
Perform an approximation of the full multihead attention by dispatching
the tokens using their keys/values. Thus the attention matrices are only
computed each time on a subset of the tokens.
Notes:
* The function doesn't perform scaling here (multihead_attention does
the /sqrt(depth)).
* The padding should have been removed (so the batch size should be 1, but
the length contains the elements from all the different batches).
* Right now, only self attention is supported, so length_q and length_kv
should be identical and the function will add a triangular mask.
* If bi.order is not None, the bias is added inside this function to
prevent attention to the future.
Args:
q (tf.Tensor): Queries of shape [batch, heads, length_q, depth_k]
k (tf.Tensor): Keys of shape [batch, heads, length_q, depth_k]
v (tf.Tensor): Values of shape [batch, heads, length_kv, depth_v]
bi (BatchInfo): Contains the batch coordinates and sequence order
experts_params (dict): Additional params for the local expert
use_map_fn (bool): Use either tf.map_fn or a python for loop to compute the
heads separately
mask_right (bool): Add a bias to prevent attention to the future
Returns:
tf.Tensor: Approximation of Softmax(Q.K) * V, of shape
[batch, heads, length_q, depth_v]
"""
# Currently depth is the same for q and v
batch_size, nb_heads, _, depth = common_layers.shape_list(q)
total_loss = 0.0
# Each head gets its own dispatcher
list_lsh = [LshGating(depth=depth, **experts_params) for _ in range(nb_heads)]
@expert_utils.add_name_scope()
def get_gates_head(x, add_first=False):
"""Return the gates for each head of the current x.
Args:
x (tf.Tensor): of shape [batch, heads, length, depth]
add_first (bool): if True, add the first element on each bucket
Returns:
tf.Tensor: gates of shape [batch, heads, length, num_buckets]
"""
length = common_layers.shape_list(x)[2]
# Invert heads/batch
x = tf.transpose(x, perm=[1, 0, 2, 3])
x = tf.reshape(x, [nb_heads, batch_size * length, depth])
list_x = tf.unstack(x) # list[tf.Tensor(shape=[batch * length, depth])]
# Unstack heads
list_gates = []
# There might be a more optimized way to compute all heads at once
for lsh, single_x in zip(list_lsh, list_x):
# Each head gets its own dispatcher
gates = lsh.get_gates(single_x)
nb_buckets = gates.get_shape().as_list()[-1]
# Reshape to [batch, length, nb_buckets]; this should also take sequence
# padding into account (the padding positions are dispatched too)
gates = tf.reshape(gates, [batch_size, length, nb_buckets])
list_gates.append(gates)
gates = tf.stack(list_gates)
# Restore original shape
gates = tf.reshape(gates, [nb_heads, batch_size, length, nb_buckets])
gates = tf.transpose(gates, [1, 0, 2, 3])
# Dispatch the first element to every gate to avoid empty buckets
if add_first:
gates = tf.maximum(gates,
tf.reshape(tf.one_hot([0], length), [1, 1, length, 1]))
return gates
gates_q = get_gates_head(q)
gates_k = get_gates_head(k, add_first=True)
# [batch, heads, length, depth] => [batch*heads, length, depth]
q, k, v, gates_q, gates_k = [
combine_first_two_dimensions(t) for t in (q, k, v, gates_q, gates_k)
]
v_out = dot_product_batched_head(q, k, v, gates_q, gates_k, mask_right)
# Restore original dimension
v_out = tf.reshape(v_out, [batch_size, nb_heads, -1, depth])
return v_out, total_loss / nb_heads
|
python
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L5009-L5112
|
train
|
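A hedged usage sketch for sparse_dot_product_attention_truncated, in TF1 graph mode. The tensor shapes and the experts_params contents (nb_hyperplanes here) are assumptions chosen for illustration, not values taken from the repo.

import tensorflow as tf

batch, heads, length, depth = 1, 2, 64, 32
q = tf.random_normal([batch, heads, length, depth])
k = tf.random_normal([batch, heads, length, depth])
v = tf.random_normal([batch, heads, length, depth])
# 4 hyperplanes would give 2**4 LSH bucket candidates per head (assumed).
out, extra_loss = sparse_dot_product_attention_truncated(
    q, k, v, bi=None, experts_params=dict(nb_hyperplanes=4), mask_right=True)
# out: [batch, heads, length, depth]; extra_loss is 0.0 in this version.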
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_attention.py
|
deconv_elems_1d
|
def deconv_elems_1d(x, factor, out_depth=None):
"""Increase the length and change the dimensionality.
Expand/project each position of dim depth of the input into
factor*tokens of dim out_depth
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Multiplicative factor for each token.
out_depth (int): Output depth (if None, keep depth constant)
Returns:
tf.Tensor: shape [batch_size, length*factor, out_depth]
"""
out_depth = out_depth or x.get_shape().as_list()[-1]
x = tf.expand_dims(x, 1) # [batch_size, 1, length, depth]
x = layers().Conv2DTranspose(
filters=out_depth,
kernel_size=(1, factor),
strides=(1, factor),
padding="valid",
data_format="channels_last",
)(x) # [batch_size, 1, length*factor, out_depth]
x = tf.squeeze(x, 1)  # [batch_size, length*factor, out_depth]
return x
|
python
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L5116-L5140
|
train
|
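A short shape-check sketch for deconv_elems_1d (TF1, toy sizes assumed):

import tensorflow as tf

x = tf.random_normal([8, 10, 16])              # [batch, length, depth]
y = deconv_elems_1d(x, factor=4, out_depth=32)
# y: [8, 40, 32] -- each position is expanded into factor=4 new positions.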
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_attention.py
|
conv_elems_1d
|
def conv_elems_1d(x, factor, out_depth=None):
"""Decrease the length and change the dimensionality.
Merge/restore/compress factor positions of dim depth of the input into
a single position of dim out_depth.
This is basically just a strided convolution without overlap
between strides. The original length has to be divisible by factor.
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Length compression factor.
out_depth (int): Output depth
Returns:
tf.Tensor: shape [batch_size, length//factor, out_depth]
"""
out_depth = out_depth or x.get_shape().as_list()[-1]
# with tf.control_dependencies( # Dynamic assertion
# [tf.assert_equal(tf.shape(x)[1] % factor, 0)]):
x = tf.expand_dims(x, 1) # [batch_size, 1, length, depth]
x = layers().Conv2D(
filters=out_depth,
kernel_size=(1, factor),
strides=(1, factor),
padding="valid",
data_format="channels_last",
)(x) # [batch_size, 1, length//factor, out_depth]
x = tf.squeeze(x, 1)  # [batch_size, length//factor, out_depth]
return x
|
python
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L5144-L5172
|
train
|
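conv_elems_1d is the opposite-direction operation; a sketch where the length is divisible by factor, as the docstring requires:

import tensorflow as tf

x = tf.random_normal([8, 40, 32])              # length 40 divisible by 4
y = conv_elems_1d(x, factor=4, out_depth=16)
# y: [8, 10, 16] -- each group of factor=4 positions is merged into one.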
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_attention.py
|
local_reduction_attention
|
def local_reduction_attention(x, block_length, multihead_params):
"""Reduce the length dimension using self attention.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
block_length (int): Block length for local attention (Compression factor)
multihead_params (dict): parameters for multihead attention
Returns:
tf.Tensor: Compressed tensor of shape [batch, length // factor, depth]
"""
@expert_utils.add_name_scope()
def dot_product_self_local_attention_flattened(q, k, v):
"""Strided block local self-attention.
No overlap between the blocks.
Args:
q (tf.Tensor): shape [batch, heads, length, depth_k]
k (tf.Tensor): shape [batch, heads, length, depth_k]
v (tf.Tensor): shape [batch, heads, length, depth_v]
Returns:
tf.Tensor: shape [batch, heads, length, depth_v]
"""
_, num_head, _, depth = q.get_shape().as_list()
# Extract the blocks
def pad_and_reshape(x):
"""Split the length dim into [num_block, block_length]."""
length_x = common_layers.shape_list(x)[2]
# Add some padding; it won't matter, as the last block will never be
# attended by the query (after compression)
x = tf.pad(x, [[0, 0], [0, 0], [0, -length_x % block_length], [0, 0]])
x = tf.reshape(
x,
[
common_layers.shape_list(x)[0], # Batch
num_head, # Head
common_layers.shape_list(x)[2] // block_length, # Num blocks
block_length, # Block length
depth, # Depth
])
return x
q, k, v = [pad_and_reshape(t) for t in (q, k, v)]
# Perform attention on the flattened dot product
logits = tf.matmul(q, k, transpose_b=True)
logits = tf.reshape(
logits,
[
common_layers.shape_list(logits)[0], # Batch
num_head, # Head
common_layers.shape_list(logits)[2], # Num blocks
block_length**2, # Flatten last dimension
])
weights = tf.nn.softmax(logits)
weights = tf.reshape(
weights,
[
common_layers.shape_list(weights)[0], # Batch
num_head, # Head
common_layers.shape_list(weights)[2], # Num blocks
block_length,
block_length, # Restore the block length dimension
])
weights = tf.reduce_sum(weights, axis=3, keep_dims=True) # Compress block
v_out = tf.matmul(weights, v) # [1, block_length] @ [block_length, depth]
v_out = tf.squeeze(v_out, axis=3)
return v_out
return multihead_attention(
x,
None,
bias=None,
output_depth=x.get_shape().as_list()[-1],
attention_type=dot_product_self_local_attention_flattened,
**multihead_params)
|
python
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L5176-L5255
|
train
|
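A hedged usage sketch for local_reduction_attention; the multihead_params values are illustrative assumptions:

import tensorflow as tf

x = tf.random_normal([4, 128, 64])             # [batch, length, depth]
compressed = local_reduction_attention(
    x, block_length=4,
    multihead_params=dict(total_key_depth=64, total_value_depth=64,
                          num_heads=4, dropout_rate=0.0))
# compressed: [4, 32, 64] -- the length is divided by block_length.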
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_attention.py
|
multihead_self_attention_reduced
|
def multihead_self_attention_reduced(
x,
memory_antecedent=None,
bias=None,
factor=None,
multihead_params=None,
nonlinearity="none",
reduction_type="conv",
add_mask=True,
):
"""Reduce the length dimension by compressing with conv.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
memory_antecedent (tf.Tensor): Unsupported for now
bias (tf.Tensor): Ignored
factor (int): compression factor for the memory sequence
multihead_params (dict): parameters for multihead attention
nonlinearity (str): Add some non-linearity after the memory block
reduction_type (str): type of compression
add_mask (bool): If True, add the bias to prevent attention to the future
Returns:
(tf.Tensor): float32 of shape [batch, length, depth]
Raises:
ValueError: If reduction_type or nonlinearity is invalid
"""
if not factor or not multihead_params:
raise ValueError("factor and multihead_params should be set")
if memory_antecedent is not None:
raise NotImplementedError(
"multihead_self_attention_reduced only works with self-attention")
depth = x.get_shape().as_list()[-1]
# We could try to have some overlap between the blocks, but that would
# create conv artifacts, make it difficult to avoid attending to the future
# within one group, and require special handling of the padding.
# Reduce the memory dimension
if reduction_type == "attention":
memory_x = local_reduction_attention(x, factor, multihead_params)
elif reduction_type == "conv":
# With valid padding, the last block won't be computed (not attended anyway)
memory_x = conv_elems_1d(x, factor)
else:
raise ValueError("Unknown reduction type {}".format(reduction_type))
if nonlinearity == "silu":
memory_x *= tf.nn.sigmoid(memory_x)
elif nonlinearity != "none":
raise ValueError("Unknown non linearity {}".format(nonlinearity))
memory_x = tf.concat(
# Add the first elem to make it attendable by everyone (otherwise the
# first block cannot attend to anything)
[x[:, :1, :], memory_x],
axis=1,
)
# Construct the bias
@expert_utils.add_name_scope()
def construct_bias_vectors(t, axis):
length = tf.to_float(common_layers.shape_list(t)[1])
length_coordinates = tf.range(length, dtype=tf.float32)
length_coordinates = tf.expand_dims(length_coordinates, axis=axis)
# [1, length_k] or [length_q, 1]
return length_coordinates
if add_mask: # Create mask to prevent attention to the future
bias = tf.to_float(
tf.greater(
# Because we add the first elem to the memory block and it can be
# attended by anyone, we don't need to add +1 anymore to prevent self
# attention. Use * factor to make sure the last tokens of a block
# cannot attend the block.
construct_bias_vectors(memory_x, 0) * factor,
# +epsilon to avoid float equality
construct_bias_vectors(x, 1) + 1e-3,
)) * -1e9
bias = tf.expand_dims(bias, axis=0)
bias = tf.expand_dims(bias, axis=0) # [1, 1, length_k, length_q]
else:
bias = None
return multihead_attention(
query_antecedent=x,
memory_antecedent=memory_x,
bias=bias,
output_depth=depth,
**multihead_params)
|
python
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L5259-L5350
|
train
|
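A hedged usage sketch for multihead_self_attention_reduced; again the multihead_params contents are assumptions:

import tensorflow as tf

x = tf.random_normal([2, 256, 128])            # [batch, length, depth]
y = multihead_self_attention_reduced(
    x, factor=4, add_mask=True,
    multihead_params=dict(total_key_depth=128, total_value_depth=128,
                          num_heads=8, dropout_rate=0.0))
# y: [2, 256, 128] -- queries keep the full length; only the memory
# sequence being attended to is compressed by `factor`.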
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_attention.py
|
scaled_dot_product_attention_simple
|
def scaled_dot_product_attention_simple(q, k, v, bias, name=None):
"""Scaled dot-product attention. One head. One spatial dimension.
Args:
q: a Tensor with shape [batch, length_q, depth_k]
k: a Tensor with shape [batch, length_kv, depth_k]
v: a Tensor with shape [batch, length_kv, depth_v]
bias: optional Tensor broadcastable to [batch, length_q, length_kv]
name: an optional string
Returns:
A Tensor.
"""
with tf.variable_scope(
name, default_name="scaled_dot_product_attention_simple"):
scalar = tf.rsqrt(tf.to_float(common_layers.shape_list(q)[2]))
logits = tf.matmul(q * scalar, k, transpose_b=True)
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
if common_layers.should_generate_summaries():
tf.summary.image(
"attention", tf.expand_dims(tf.pow(weights, 0.2), 3), max_outputs=1)
return tf.matmul(weights, v)
|
python
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L5353-L5376
|
train
|
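Because scaled_dot_product_attention_simple is self-contained, a usage sketch needs only plain tensors (toy shapes assumed):

import tensorflow as tf

q = tf.random_normal([1, 5, 8])                # [batch, length_q, depth_k]
k = tf.random_normal([1, 7, 8])                # [batch, length_kv, depth_k]
v = tf.random_normal([1, 7, 8])                # [batch, length_kv, depth_v]
out = scaled_dot_product_attention_simple(q, k, v, bias=None)
# out: [1, 5, 8]; logits are scaled by 1/sqrt(depth_k) = 1/sqrt(8)
# before the softmax.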
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_attention.py
|
multihead_self_attention_memory_efficient
|
def multihead_self_attention_memory_efficient(x,
bias,
num_heads,
head_size=None,
epsilon=1e-6,
forget=True,
test_vars=None,
name=None):
"""Multihead scaled-dot-product self-attention.
Includes layer norm.
Returns multihead-self-attention(layer_norm(x))
Computes one attention head at a time to avoid exhausting memory.
If forget=True, then forget all forward activations and recompute them
on the backward pass.
Args:
x: a Tensor with shape [batch, length, input_size]
bias: an attention bias tensor broadcastable to [batch, 1, length, length]
num_heads: an integer
head_size: an optional integer - defaults to input_size/num_heads
epsilon: a float, for layer norm
forget: a boolean - forget forward activations and recompute on backprop
test_vars: optional tuple of variables for testing purposes
name: an optional string
Returns:
A Tensor.
"""
io_size = x.get_shape().as_list()[-1]
if head_size is None:
  assert io_size % num_heads == 0
  head_size = io_size // num_heads  # integer division keeps the shape an int
def forward_internal(x, wqkv, wo, attention_bias, norm_scale, norm_bias):
"""Forward function."""
n = common_layers.layer_norm_compute(x, epsilon, norm_scale, norm_bias)
wqkv_split = tf.unstack(wqkv, num=num_heads)
wo_split = tf.unstack(wo, num=num_heads)
y = 0
for h in range(num_heads):
with tf.control_dependencies([y] if h > 0 else []):
combined = tf.nn.conv1d(n, wqkv_split[h], 1, "SAME")
q, k, v = tf.split(combined, 3, axis=2)
o = scaled_dot_product_attention_simple(q, k, v, attention_bias)
y += tf.nn.conv1d(o, wo_split[h], 1, "SAME")
return y
key = (
"multihead_self_attention_memory_efficient %s %s" % (num_heads, epsilon))
if not forget:
forward_fn = forward_internal
elif key in _function_cache:
forward_fn = _function_cache[key]
else:
@function.Defun(compiled=True)
def grad_fn(x, wqkv, wo, attention_bias, norm_scale, norm_bias, dy):
"""Custom gradient function."""
with tf.control_dependencies([dy]):
n = common_layers.layer_norm_compute(x, epsilon, norm_scale, norm_bias)
wqkv_split = tf.unstack(wqkv, num=num_heads)
wo_split = tf.unstack(wo, num=num_heads)
deps = []
dwqkvs = []
dwos = []
dn = 0
for h in range(num_heads):
with tf.control_dependencies(deps):
combined = tf.nn.conv1d(n, wqkv_split[h], 1, "SAME")
q, k, v = tf.split(combined, 3, axis=2)
o = scaled_dot_product_attention_simple(q, k, v, attention_bias)
partial_y = tf.nn.conv1d(o, wo_split[h], 1, "SAME")
pdn, dwqkvh, dwoh = tf.gradients(
ys=[partial_y],
xs=[n, wqkv_split[h], wo_split[h]],
grad_ys=[dy])
dn += pdn
dwqkvs.append(dwqkvh)
dwos.append(dwoh)
deps = [dn, dwqkvh, dwoh]
dwqkv = tf.stack(dwqkvs)
dwo = tf.stack(dwos)
with tf.control_dependencies(deps):
dx, dnorm_scale, dnorm_bias = tf.gradients(
ys=[n], xs=[x, norm_scale, norm_bias], grad_ys=[dn])
return (dx, dwqkv, dwo, tf.zeros_like(attention_bias), dnorm_scale,
dnorm_bias)
@function.Defun(
grad_func=grad_fn, compiled=True, separate_compiled_gradients=True)
def forward_fn(x, wqkv, wo, attention_bias, norm_scale, norm_bias):
return forward_internal(x, wqkv, wo, attention_bias, norm_scale,
norm_bias)
_function_cache[key] = forward_fn
if bias is not None:
bias = tf.squeeze(bias, 1)
with tf.variable_scope(name, default_name="multihead_attention", values=[x]):
# TODO(noam): it would be nice to save memory by casting x to float16
# here, but this causes problems with the gradients. Figure out if there
# is a way to leave the gradients as float32.
if test_vars is not None:
wqkv, wo, norm_scale, norm_bias = list(test_vars)
else:
wqkv = tf.get_variable(
"wqkv", [num_heads, 1, io_size, 3 * head_size],
initializer=tf.random_normal_initializer(stddev=io_size**-0.5))
wo = tf.get_variable(
"wo", [num_heads, 1, head_size, io_size],
initializer=tf.random_normal_initializer(
stddev=(head_size * num_heads)**-0.5))
norm_scale, norm_bias = common_layers.layer_norm_vars(io_size)
y = forward_fn(x, wqkv, wo, bias, norm_scale, norm_bias)
y.set_shape(x.get_shape())
return y
|
python
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L5382-L5501
|
train
|
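A hedged sketch of calling the memory-efficient attention with a causal bias; attention_bias_lower_triangle is the mask helper used elsewhere in this file, and the shapes are assumed:

import tensorflow as tf

x = tf.random_normal([2, 64, 256])             # [batch, length, io_size]
bias = attention_bias_lower_triangle(64)       # broadcastable causal mask
y = multihead_self_attention_memory_efficient(x, bias, num_heads=8)
# y: [2, 64, 256]; heads are computed one at a time to bound peak memory,
# and with forget=True activations are recomputed on the backward pass.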
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_attention.py
|
LshGating._idx_to_bits
|
def _idx_to_bits(self, i):
"""Convert a group index to its bit representation."""
bits = bin(i)[2:].zfill(self.nb_hyperplanes) # Pad the bits str with 0
return [-1.0 if b == "0" else 1.0 for b in bits]
|
python
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L803-L806
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_attention.py
|
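A quick standalone check of LshGating._idx_to_bits above; nb_hyperplanes is assumed to be 4 purely for illustration:

def idx_to_bits(i, nb_hyperplanes=4):
  # Same logic as the method, without the class.
  bits = bin(i)[2:].zfill(nb_hyperplanes)  # pad the bit string with zeros
  return [-1.0 if b == "0" else 1.0 for b in bits]

assert idx_to_bits(0) == [-1.0, -1.0, -1.0, -1.0]
assert idx_to_bits(5) == [-1.0, 1.0, -1.0, 1.0]  # 5 == 0b0101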
LshGating.get_gates
|
def get_gates(self, x):
"""Return the bucket id of the given tensor.
Args:
x (tf.Tensor): float32 of shape [length, depth]
Returns:
tf.Tensor: One-hot vector int64 of shape [heads, length, nb_buckets]
containing the id of the bucket
"""
# The balance loss doesn't propagate to the rest of the network
x = tf.stop_gradient(x)
# [length, depth] * [depth, nb_vectors * replicat]
x = tf.matmul(x, self.t_vectors)
# [length, nb_vector * replicat]
x = tf.sign(x) # Get on which side of the hyperplane the keys are.
# x = tf.reshape(x, [-1, nb_replicat, nb_vector])
# [length, replicat, nb_vector] * [nb_vector, 2^nb_vector - 1]
x = tf.matmul(x, self.t_group, transpose_b=True) / self.nb_hyperplanes
# We get a similarity score for each group, in [-1, 1]
# [length, (replicat,) 2^nb_vector - 1]
# Do an argmax to get the most likely group for each replicat
x = tf.argmax(x, axis=-1)
# [length(, replicat)]
# One-hot for compatibility with the sparse dispatcher
x = tf.one_hot(x, self.nb_buckets)
# TODO(epot): Use a loss to force an even distribution
return x
|
python
|
Return the bucket id of the given tensor.
Args:
x (tf.Tensor): float32 of shape [length, depth]
Returns:
tf.Tensor: One-hot vector int64 of shape [heads, length, nb_buckets]
containing the id of the bucket
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L809-L839
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
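LshGating.get_gates above is locality-sensitive hashing: keys that fall on the same side of every random hyperplane share a bucket. A small numpy sketch of the underlying sign-hash (the class instead matches the sign pattern against precomputed group codes with a matmul and an argmax):

import numpy as np

rng = np.random.default_rng(0)
length, depth, nb_hyperplanes = 5, 8, 3
keys = rng.standard_normal((length, depth))
hyperplanes = rng.standard_normal((depth, nb_hyperplanes))
sides = np.sign(keys @ hyperplanes)                # [length, nb_hyperplanes]
buckets = ((sides > 0) * 2 ** np.arange(nb_hyperplanes)).sum(axis=-1)
print(buckets)                                     # one id per key, in [0, 8)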
van_image_enc_2d
|
def van_image_enc_2d(x, first_depth, reuse=False, hparams=None):
"""The image encoder for the VAN.
Similar architecture to Villegas et al.
(http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf).
Args:
x: The image to encode.
first_depth: The depth of the first layer. Depth is increased in subsequent
layers.
reuse: To reuse in variable scope or not.
hparams: The python hparams.
Returns:
The encoded image and the list of intermediate encodings (for skip connections).
"""
with tf.variable_scope('van_image_enc', reuse=reuse):
enc_history = [x]
enc = tf.layers.conv2d(
x, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
enc = tf.contrib.layers.layer_norm(enc)
enc = tf.layers.conv2d(
enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
enc = tf.nn.dropout(enc, hparams.van_keep_prob)
enc = tf.contrib.layers.layer_norm(enc)
enc_history.append(enc)
enc = tf.layers.conv2d(
enc,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.layers.conv2d(
enc,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
enc = tf.nn.dropout(enc, hparams.van_keep_prob)
enc = tf.contrib.layers.layer_norm(enc)
enc_history.append(enc)
enc = tf.layers.conv2d(
enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.layers.conv2d(
enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.layers.conv2d(
enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
return enc, enc_history
|
python
|
The image encoder for the VAN.
Similar architecture to Villegas et al.
(http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf).
Args:
x: The image to encode.
first_depth: The depth of the first layer. Depth is increased in subsequent
layers.
reuse: To reuse in variable scope or not.
hparams: The python hparams.
Returns:
The encoded image and the list of intermediate encodings (for skip connections).
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L53-L124
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
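Shape arithmetic for van_image_enc_2d above: three 2x2 stride-2 max-pools shrink each spatial dimension by 8x while the channels grow to first_depth * 4. A quick check (the 64x64 input size is an assumption for illustration):

def van_image_enc_output_shape(height, width, first_depth):
  # Each of the three max-pools halves the spatial dims.
  return height // 8, width // 8, first_depth * 4

assert van_image_enc_output_shape(64, 64, 64) == (8, 8, 256)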
van_enc_2d
|
def van_enc_2d(x, first_depth, reuse=False):
"""The higher level structure encoder for the VAN.
The high level structure is a vector instead of an image.
Args:
x: The higher level structure to encode.
first_depth: The depth of the first layer. Depth is increased in subsequent
layers.
reuse: To reuse in variable scope or not.
Returns:
The encoded structure and the concatenated higher-level features.
"""
with tf.variable_scope('van_enc', reuse=reuse):
a = 4  # depends on the input size
b = 4
enc = tf.nn.relu(x)
enc = tf.layers.dense(enc, first_depth * a * b, tf.nn.relu)
enc = tf.contrib.layers.layer_norm(enc)
enc = tf.reshape(enc, [-1, a, b, first_depth])
enc = tf.layers.conv2d_transpose(
enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
enc = tf.contrib.layers.layer_norm(enc)
enc = tf.layers.conv2d_transpose(
enc,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=2)
van_higher_level_2 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 2])
enc = tf.layers.conv2d_transpose(
enc,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.contrib.layers.layer_norm(enc)
enc = tf.layers.conv2d_transpose(
enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
van_higher_level_4 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 4])
van_higher_level = tf.concat([x, van_higher_level_2, van_higher_level_4], 1)
return enc, van_higher_level
|
python
|
The higher level structure encoder for the VAN.
The high level structure is a vector instead of an image.
Args:
x: The higher level structure to encode.
first_depth: The depth of the first layer. Depth is increased in subsequent
layers.
reuse: To reuse in variable scope or not.
Returns:
The encoded structure and the concatenated higher-level features.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L127-L182
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
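The flattened sizes in van_enc_2d above follow from the single stride-2 transpose conv, which turns the a x b grid into (a*2) x (b*2) before each reshape. With the hard-coded a = b = 4 and, say, first_depth = 64:

a = b = 4
first_depth = 64
assert (a * 2) * (b * 2) * (first_depth * 2) == 8192   # van_higher_level_2
assert (a * 2) * (b * 2) * (first_depth * 4) == 16384  # van_higher_level_4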
van_dec_2d
|
def van_dec_2d(x, skip_connections, output_shape, first_depth, hparams=None):
"""The VAN decoder.
Args:
x: The analogy information to decode.
skip_connections: The encoder layers which can be used as skip connections.
output_shape: The shape of the desired output image.
first_depth: The depth of the first layer of the van image encoder.
hparams: The python hparams.
Returns:
The decoded image prediction.
"""
with tf.variable_scope('van_dec'):
dec = tf.layers.conv2d_transpose(
x, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(
dec,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.layers.conv2d_transpose(
dec,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(
dec,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.layers.conv2d_transpose(
dec, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(
dec,
output_shape[3] + 1,
3,
padding='same',
activation=tf.nn.relu,
strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
out_mask = tf.layers.conv2d_transpose(
dec, output_shape[3] + 1, 3, strides=1, padding='same', activation=None)
mask = tf.nn.sigmoid(out_mask[:, :, :, 3:4])
out = out_mask[:, :, :, :3]
return out * mask + skip_connections[0] * (1 - mask)
|
python
|
The VAN decoder.
Args:
x: The analogy information to decode.
skip_connections: The encoder layers which can be used as skip connections.
output_shape: The shape of the desired output image.
first_depth: The depth of the first layer of the van image encoder.
hparams: The python hparams.
Returns:
The decoded image prediction.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L185-L249
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
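The last line of van_dec_2d above composites the raw prediction with the first frame through a learned per-pixel mask. A numpy sketch of the blend; all shapes and values here are illustrative:

import numpy as np

out = np.full((1, 2, 2, 3), 0.8)         # raw RGB prediction
first_frame = np.zeros((1, 2, 2, 3))     # skip_connections[0]
mask = np.full((1, 2, 2, 1), 0.25)       # sigmoid of the extra channel
blended = out * mask + first_frame * (1 - mask)
assert np.allclose(blended, 0.2)         # 0.8 * 0.25 + 0.0 * 0.75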
analogy_computation_2d
|
def analogy_computation_2d(f_first_enc,
f_first_frame,
f_current_enc,
first_depth):
"""Implements the deep analogy computation."""
with tf.variable_scope('analogy_computation'):
frame_enc_diff = f_first_frame - f_first_enc
frame_enc_diff_enc = tf.layers.conv2d(
frame_enc_diff,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
f_current_enc_enc = tf.layers.conv2d(
f_current_enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
analogy = tf.concat([frame_enc_diff_enc, f_current_enc_enc], 3)
analogy = tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
analogy = tf.contrib.layers.layer_norm(analogy)
analogy = tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
return tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
|
python
|
Implements the deep analogy computation.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L252-L298
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
van
|
def van(first_enc,
first_frame,
current_enc,
gt_image,
reuse=False,
scope_prefix='',
hparams=None):
"""Implements a VAN.
Args:
first_enc: The first encoding.
first_frame: The first ground truth frame.
current_enc: The encoding of the frame to generate.
gt_image: The ground truth image, only used for regularization.
reuse: To reuse in variable scope or not.
scope_prefix: The prefix before the scope name.
hparams: The python hparams.
Returns:
The generated image, the VAN regularization loss, and the higher-level encoding.
"""
with tf.variable_scope(scope_prefix + 'van', reuse=reuse):
output_shape = first_frame.get_shape().as_list()
output_shape[0] = -1
first_depth = 64
f_first_enc, _ = van_enc_2d(first_enc, first_depth)
f_first_frame, image_enc_history = van_image_enc_2d(
first_frame, first_depth, hparams=hparams)
f_current_enc, van_higher_level = van_enc_2d(
current_enc, first_depth, reuse=True)
f_gt_image, _ = van_image_enc_2d(gt_image, first_depth, True,
hparams=hparams)
analogy_t = analogy_computation_2d(
f_first_enc, f_first_frame, f_current_enc, first_depth)
enc_img = f_current_enc + analogy_t
img = van_dec_2d(
enc_img, image_enc_history, output_shape, first_depth, hparams=hparams)
batch_size = tf.to_float(tf.shape(first_enc)[0])
r_loss = tf.nn.l2_loss(f_gt_image - f_current_enc - analogy_t) / batch_size
return img, r_loss, van_higher_level
|
python
|
Implements a VAN.
Args:
first_enc: The first encoding.
first_frame: The first ground truth frame.
current_enc: The encoding of the frame to generate.
gt_image: The ground truth image, only used for regularization.
reuse: To reuse in variable scope or not.
scope_prefix: The prefix before the scope name.
hparams: The python hparams.
Returns:
The generated image, the VAN regularization loss, and the higher-level encoding.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L301-L346
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
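In van above, r_loss pushes the analogy-corrected encoding toward the ground-truth frame's encoding. Since tf.nn.l2_loss(t) is sum(t**2) / 2, the loss works out to ||f_gt_image - (f_current_enc + analogy_t)||^2 / (2 * batch_size). The convention is easy to confirm:

import tensorflow as tf

t = tf.constant([3.0, 4.0])
assert float(tf.nn.l2_loss(t)) == 12.5   # (9 + 16) / 2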
encoder_vgg
|
def encoder_vgg(x, enc_final_size, reuse=False, scope_prefix='', hparams=None,
is_training=True):
"""VGG network to use as encoder without the top few layers.
Can be pretrained.
Args:
x: The image to encode. In the range 0 to 1.
enc_final_size: The desired size of the encoding.
reuse: To reuse in variable scope or not.
scope_prefix: The prefix before the scope name.
hparams: The python hparams.
is_training: boolean value indicating if training is happening.
Returns:
The flattened encoding of the image.
"""
with tf.variable_scope(scope_prefix + 'encoder', reuse=reuse):
# Preprocess input
x *= 256
x = x - COLOR_NORMALIZATION_VECTOR
with arg_scope(vgg.vgg_arg_scope()):
# Padding because vgg_16 accepts images of size at least VGG_IMAGE_SIZE.
x = tf.pad(x, [[0, 0], [0, VGG_IMAGE_SIZE - IMG_WIDTH],
[0, VGG_IMAGE_SIZE - IMG_HEIGHT], [0, 0]])
_, end_points = vgg.vgg_16(
x,
num_classes=enc_final_size,
is_training=is_training)
pool5_key = [key for key in end_points.keys() if 'pool5' in key]
assert len(pool5_key) == 1
enc = end_points[pool5_key[0]]
# Undoing padding.
enc = tf.slice(enc, [0, 0, 0, 0], [-1, 2, 2, -1])
enc_shape = enc.get_shape().as_list()
enc_shape[0] = -1
enc_size = enc_shape[1] * enc_shape[2] * enc_shape[3]
enc_flat = tf.reshape(enc, (-1, enc_size))
enc_flat = tf.nn.dropout(enc_flat, hparams.enc_keep_prob)
enc_flat = tf.layers.dense(
enc_flat,
enc_final_size,
kernel_initializer=tf.truncated_normal_initializer(stddev=1e-4,))
if hparams.enc_pred_use_l2norm:
enc_flat = tf.nn.l2_normalize(enc_flat, 1)
return enc_flat
|
python
|
VGG network to use as encoder without the top few layers.
Can be pretrained.
Args:
x: The image to encode. In the range 0 to 1.
enc_final_size: The desired size of the encoding.
reuse: To reuse in variable scope or not.
scope_prefix: The prefix before the scope name.
hparams: The python hparams.
is_training: boolean value indicating if training is happening.
Returns:
The flattened encoding of the image.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L349-L401
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
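encoder_vgg above pads inputs up to the minimum size vgg_16 accepts. VGG_IMAGE_SIZE, IMG_WIDTH and IMG_HEIGHT are module-level constants not shown in this record; the values below are illustrative only (224 is the usual VGG input size):

VGG_IMAGE_SIZE, IMG_WIDTH, IMG_HEIGHT = 224, 64, 64
paddings = [[0, 0],                            # batch: untouched
            [0, VGG_IMAGE_SIZE - IMG_WIDTH],   # rows: pad below
            [0, VGG_IMAGE_SIZE - IMG_HEIGHT],  # cols: pad to the right
            [0, 0]]                            # channels: untouched
assert paddings[1] == [0, 160] and paddings[2] == [0, 160]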
predictor
|
def predictor(enc_flat,
action,
lstm_states,
pred_depth,
reuse=False,
scope_prefix='',
hparams=None):
"""LSTM predictor network."""
with tf.variable_scope(scope_prefix + 'predict', reuse=reuse):
enc_final_size = enc_flat.get_shape().as_list()[1]
action_size = action.get_shape().as_list()[1]
initial_size = (enc_final_size + action_size)
batch_size = tf.shape(enc_flat)[0]
init_stddev = 1e-2
pre_pred = tf.concat([enc_flat, action], 1)
pre_pred = tf.layers.dense(
pre_pred,
initial_size,
kernel_initializer=tf.truncated_normal_initializer(stddev=init_stddev))
# This is only needed for the GAN version.
if hparams.pred_noise_std > 0:
# Add the noise like this so a pretrained model can be used.
pred_noise = tf.random_normal(
shape=[batch_size, 100], stddev=hparams.pred_noise_std)
pre_pred += tf.layers.dense(
pred_noise,
initial_size,
kernel_initializer=tf.truncated_normal_initializer(
stddev=init_stddev),
name='noise_dense')
pre_pred = tf.nn.relu(pre_pred)
if lstm_states[pred_depth - 2] is None:
back_connect = tf.tile(
tf.get_variable(
'back_connect_init',
shape=[1, initial_size * 2],
initializer=tf.truncated_normal_initializer(stddev=init_stddev))
, (batch_size, 1))
else:
back_connect = lstm_states[pred_depth - 2]
lstm_init_stddev = 1e-4
part_pred, lstm_states[0] = common_video.lstm_cell(
tf.concat([pre_pred, back_connect], 1),
lstm_states[0],
initial_size,
use_peepholes=True,
initializer=tf.truncated_normal_initializer(stddev=lstm_init_stddev),
num_proj=initial_size)
part_pred = tf.contrib.layers.layer_norm(part_pred)
pred = part_pred
for pred_layer_num in range(1, pred_depth, 2):
part_pred, lstm_states[pred_layer_num] = common_video.lstm_cell(
pred,
lstm_states[pred_layer_num],
initial_size,
use_peepholes=True,
initializer=tf.truncated_normal_initializer(stddev=lstm_init_stddev),
num_proj=initial_size)
pred += part_pred
part_pred, lstm_states[pred_layer_num + 1] = common_video.lstm_cell(
tf.concat([pred, pre_pred], 1),
lstm_states[pred_layer_num + 1],
initial_size,
use_peepholes=True,
initializer=tf.truncated_normal_initializer(stddev=lstm_init_stddev),
num_proj=initial_size)
part_pred = tf.contrib.layers.layer_norm(part_pred)
pred += part_pred
pred = tf.layers.dense(
pred,
enc_final_size,
kernel_initializer=tf.truncated_normal_initializer(stddev=init_stddev))
if hparams.enc_pred_use_l2norm:
pred = tf.nn.l2_normalize(pred, 1)
return pred
|
python
|
LSTM predictor network.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L404-L492
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
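State bookkeeping for predictor above: one lstm_cell call uses slot 0 and each loop iteration uses slots n and n + 1 for odd n, so with pred_depth = 20 slots 0 through 20 are written; slot pred_depth - 2 doubles as the warm-start back_connect input. A quick check:

pred_depth = 20
used = [0] + [i for n in range(1, pred_depth, 2) for i in (n, n + 1)]
assert used == list(range(pred_depth + 1))   # 21 LSTM states in play
assert pred_depth + 2 >= len(used)           # lstm_states is large enough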
construct_model
|
def construct_model(images,
actions=None,
context_frames=2,
hparams=None,
is_training=True):
"""Constructs the tensorflow graph of the hierarchical model."""
pred_depth = 20
enc_out_all, pred_out_all, van_out_all, van_on_enc_all = [], [], [], []
lstm_states = [None] * (pred_depth + 2)
enc_out = encoder_vgg(
images[0], hparams.enc_size, False, scope_prefix='timestep/',
hparams=hparams, is_training=is_training)
enc_out = tf.identity(enc_out, 'enc_out')
enc_out_all.append(enc_out)
num_timesteps = len(actions) - 1
sum_freq = int(num_timesteps / 4 + 1)
reuse = False
for timestep, action in zip(range(len(actions) - 1), actions[:-1]):
done_warm_start = timestep > context_frames - 1
with tf.variable_scope('timestep', reuse=reuse):
if done_warm_start:
pred_input = pred_out_all[-1]
else:
pred_input = enc_out_all[-1]
pred_out = predictor(
pred_input, action, lstm_states, pred_depth, False, hparams=hparams)
pred_out = tf.identity(pred_out, 'pred_out')
if timestep % sum_freq == 0: # and not hparams.use_tpu:
tf.summary.histogram('pred_out', pred_out)
pred_out_all.append(pred_out)
if timestep % sum_freq == 0: # and not hparams.use_tpu:
tf.summary.histogram('lstm_state', lstm_states[0])
van_out, _, _ = van(
enc_out_all[0],
images[0],
pred_out,
images[timestep + 1],
tf.AUTO_REUSE,
hparams=hparams)
van_out = tf.identity(van_out, 'van_out')
van_out_all.append(van_out)
enc_out = encoder_vgg(
images[timestep + 1], hparams.enc_size, True, hparams=hparams,
is_training=is_training)
enc_out = tf.identity(enc_out, 'enc_out')
if timestep % sum_freq == 0: # and not hparams.use_tpu:
tf.summary.histogram('enc_out', enc_out)
enc_out_all.append(enc_out)
van_input = images[0]
enc_noise = tf.zeros_like(enc_out)
if timestep % sum_freq == 0: # and not hparams.use_tpu:
tf.summary.histogram('enc_noise', enc_noise)
van_on_enc, _, _ = van(
enc_out_all[0],
van_input,
enc_out + enc_noise,
images[timestep + 1],
tf.AUTO_REUSE,
hparams=hparams)
van_on_enc = tf.identity(van_on_enc, 'van_on_enc')
van_on_enc_all.append(van_on_enc)
reuse = True
return enc_out_all, pred_out_all, van_out_all, van_on_enc_all
|
python
|
Constructs the tensorflow graph of the hierarchical model.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L495-L569
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
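The sum_freq logic in construct_model above emits histograms roughly four times per rollout regardless of its length. For example, with 11 actions (10 predicted timesteps):

num_timesteps = 10
sum_freq = int(num_timesteps / 4 + 1)        # == 3
logged = [t for t in range(num_timesteps) if t % sum_freq == 0]
assert logged == [0, 3, 6, 9]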
peak_signal_to_noise_ratio
|
def peak_signal_to_noise_ratio(true, pred):
"""Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
"""
return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
|
python
|
Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L572-L581
|
train
|
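For intuition: with pixel values in [0, 1], the formula above is PSNR = 10 * log10(1 / MSE), so an MSE of 0.01 corresponds to 20 dB and 0.001 to 30 dB. A pure-Python sanity check of the same identity (not part of the repository):

import math

def psnr_from_mse(mse, max_val=1.0):
  # Same formula as peak_signal_to_noise_ratio, evaluated eagerly.
  return 10.0 * math.log10(max_val ** 2 / mse)

assert abs(psnr_from_mse(0.01) - 20.0) < 1e-9
assert abs(psnr_from_mse(0.001) - 30.0) < 1e-9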
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
mean_squared_error
|
def mean_squared_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
result = tf.reduce_sum(
tf.squared_difference(true, pred)) / tf.to_float(tf.size(pred))
return result
|
python
|
L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L584-L595
|
train
|
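The reduce_sum / tf.size spelling above is just an explicit mean; a quick equivalence check against tf.reduce_mean, assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x

true = tf.constant(np.random.rand(4, 32, 32, 3), dtype=tf.float32)
pred = tf.constant(np.random.rand(4, 32, 32, 3), dtype=tf.float32)
explicit = tf.reduce_sum(
    tf.squared_difference(true, pred)) / tf.to_float(tf.size(pred))
builtin = tf.reduce_mean(tf.squared_difference(true, pred))
with tf.Session() as sess:
  a, b = sess.run([explicit, builtin])
  assert abs(a - b) < 1e-4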
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
l1_error
|
def l1_error(true, pred):
"""L1 distance between tensors true and pred."""
return tf.reduce_sum(tf.abs(true - pred)) / tf.to_float(tf.size(pred))
|
python
|
L1 distance between tensors true and pred.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L598-L600
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/epva.py
|
calc_loss_psnr
|
def calc_loss_psnr(gen_images, images, name, hparams=None, use_l1_loss=False):
"""Calculates loss and psnr for predictions over multiple timesteps."""
del hparams
with tf.name_scope(name):
loss, error, psnr_all = 0.0, 0.0, 0.0
for _, x, gx in zip(range(len(gen_images)), images, gen_images):
recon_cost = mean_squared_error(x, gx)
if use_l1_loss:
recon_cost = l1_error(x, gx)
error_i = l1_error(x, gx)
psnr_i = peak_signal_to_noise_ratio(x, gx)
psnr_all += psnr_i
error += error_i
loss += recon_cost
psnr_all /= tf.to_float(len(gen_images))
loss /= tf.to_float(len(gen_images))
error /= tf.to_float(len(gen_images))
# if not hparams.use_tpu:
tf.summary.scalar('psnr_all', psnr_all)
tf.summary.scalar('loss', loss)
return loss, psnr_all
|
python
|
Calculates loss and psnr for predictions over multiple timesteps.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/epva.py#L603-L627
|
train
|
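A minimal sketch of driving calc_loss_psnr with a short rollout of stand-in frames, assuming TensorFlow 1.x (shapes are illustrative):

import tensorflow as tf  # assumes TensorFlow 1.x

# Two lists of per-timestep frames, e.g. from a video prediction rollout.
gen_images = [tf.random_uniform([4, 64, 64, 3]) for _ in range(3)]
images = [tf.random_uniform([4, 64, 64, 3]) for _ in range(3)]
loss, psnr = calc_loss_psnr(gen_images, images, name='eval_metrics')
with tf.Session() as sess:
  print(sess.run([loss, psnr]))  # both averaged over the 3 timesteps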
tensorflow/tensor2tensor
|
tensor2tensor/models/video/sv2p_params.py
|
next_frame_sv2p
|
def next_frame_sv2p():
"""SV2P model hparams."""
hparams = basic_stochastic.next_frame_basic_stochastic()
hparams.optimizer = "true_adam"
hparams.learning_rate_schedule = "constant"
hparams.learning_rate_constant = 1e-3
hparams.video_num_input_frames = 1
hparams.video_num_target_frames = 3
hparams.batch_size = 16
hparams.bottom = {
"inputs": modalities.video_raw_bottom,
"targets": modalities.video_raw_targets_bottom,
}
hparams.loss = {
"targets": modalities.video_l2_raw_loss,
}
hparams.top = {
"targets": modalities.video_raw_top,
}
hparams.video_modality_loss_cutoff = 0.0
hparams.scheduled_sampling_mode = "count"
hparams.scheduled_sampling_k = 900.0
hparams.add_hparam("reward_prediction", True)
hparams.add_hparam("reward_prediction_stop_gradient", False)
hparams.add_hparam("reward_prediction_buffer_size", 0)
hparams.add_hparam("model_options", "CDNA")
hparams.add_hparam("num_masks", 10)
hparams.add_hparam("multi_latent", False)
hparams.add_hparam("relu_shift", 1e-12)
hparams.add_hparam("dna_kernel_size", 5)
hparams.add_hparam("upsample_method", "conv2d_transpose")
hparams.add_hparam("reward_model", "basic")
hparams.add_hparam("visualize_logits_histogram", True)
return hparams
|
python
|
SV2P model hparams.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/sv2p_params.py#L27-L60
|
train
|
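These hparams sets are normally selected by name via the registry (e.g. passing hparams_set=next_frame_sv2p to t2t_trainer). A sketch of fetching and overriding one programmatically; the registry lookup is standard T2T, but treat the exact call signature as an assumption:

from tensor2tensor.utils import registry

hparams = registry.hparams('next_frame_sv2p')
# Individual fields can be overridden after construction.
hparams.batch_size = 8
hparams.video_num_target_frames = 5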
tensorflow/tensor2tensor
|
tensor2tensor/models/video/sv2p_params.py
|
next_frame_sv2p_discrete
|
def next_frame_sv2p_discrete():
"""SV2P discrete model hparams."""
hparams = next_frame_sv2p()
hparams.action_injection = "multiplicative"
hparams.small_mode = True
hparams.add_hparam("bottleneck_bits", 128)
hparams.add_hparam("bottleneck_noise", 0.02)
hparams.add_hparam("discrete_warmup_steps", 40000)
hparams.add_hparam("full_latent_tower", False)
hparams.add_hparam("latent_predictor_state_size", 128)
hparams.add_hparam("latent_predictor_temperature", 0.5)
hparams.add_hparam("discretize_warmup_steps", 40000)
return hparams
|
python
|
SV2P discrete model hparams.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/sv2p_params.py#L64-L76
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/sv2p_params.py
|
next_frame_sv2p_atari
|
def next_frame_sv2p_atari():
"""SV2P model for atari."""
hparams = next_frame_sv2p()
hparams.video_num_input_frames = 4
hparams.video_num_target_frames = 4
hparams.action_injection = "multiplicative"
hparams.num_iterations_1st_stage = 12000
hparams.num_iterations_2nd_stage = 12000
hparams.anneal_end = 40000
hparams.latent_loss_multiplier_schedule = "noisy_linear_cosine_decay"
hparams.latent_loss_multiplier = 1e-3
hparams.information_capacity = 0.0
hparams.small_mode = True
return hparams
|
python
|
SV2P model for atari.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/sv2p_params.py#L80-L93
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/sv2p_params.py
|
next_frame_sv2p_atari_softmax
|
def next_frame_sv2p_atari_softmax():
"""SV2P model for atari with softmax."""
hparams = next_frame_sv2p_atari()
hparams.bottom = {}
hparams.loss = {}
hparams.top = {}
hparams.internal_loss = True
return hparams
|
python
|
SV2P model for atari with softmax.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/sv2p_params.py#L97-L104
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/video/sv2p_params.py
|
next_frame_sv2p_tiny
|
def next_frame_sv2p_tiny():
"""Tiny SV2P model."""
hparams = next_frame_sv2p_atari_softmax()
hparams.batch_size = 2
hparams.tiny_mode = True
hparams.num_masks = 1
hparams.video_modality_loss_cutoff = 0.4
hparams.video_num_input_frames = 4
hparams.video_num_target_frames = 4
return hparams
|
python
|
Tiny SV2P model.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/sv2p_params.py#L124-L133
|
train
|
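Because of the small batch and single mask, this configuration is handy for smoke tests; a quick check of the values set above:

from tensor2tensor.models.video import sv2p_params

hparams = sv2p_params.next_frame_sv2p_tiny()
assert hparams.batch_size == 2 and hparams.num_masks == 1
assert hparams.tiny_mode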
tensorflow/tensor2tensor
|
tensor2tensor/models/video/sv2p_params.py
|
next_frame_sv2p_cutoff
|
def next_frame_sv2p_cutoff():
"""SV2P model with additional cutoff in L2 loss for environments like pong."""
hparams = next_frame_sv2p()
hparams.video_modality_loss_cutoff = 0.4
hparams.video_num_input_frames = 4
hparams.video_num_target_frames = 1
return hparams
|
python
|
SV2P model with additional cutoff in L2 loss for environments like pong.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/sv2p_params.py#L145-L151
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/mscoco.py
|
_get_mscoco
|
def _get_mscoco(directory):
"""Download and extract MSCOCO datasets to directory unless it is there."""
for url in _MSCOCO_URLS:
filename = os.path.basename(url)
download_url = os.path.join(_MSCOCO_ROOT_URL, url)
path = generator_utils.maybe_download(directory, filename, download_url)
unzip_dir = os.path.join(directory, filename.strip(".zip"))
if not tf.gfile.Exists(unzip_dir):
zipfile.ZipFile(path, "r").extractall(directory)
|
python
|
Download and extract MSCOCO datasets to directory unless it is there.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/mscoco.py#L49-L57
|
train
|
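One caveat in _get_mscoco: str.strip('.zip') removes any leading or trailing '.', 'z', 'i', 'p' characters, not the suffix; it happens to work for the MSCOCO archive names but is fragile in general. A suffix-safe variant, offered as a sketch rather than the repository's fix:

def _strip_suffix(filename, suffix='.zip'):
  # Removes the suffix only if present; str.strip would also eat
  # leading characters, e.g. 'zip_images.zip'.strip('.zip') == '_images'.
  return filename[:-len(suffix)] if filename.endswith(suffix) else filename

assert _strip_suffix('train2014.zip') == 'train2014'
assert 'zip_images.zip'.strip('.zip') == '_images'  # the pitfall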
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/mscoco.py
|
mscoco_generator
|
def mscoco_generator(data_dir,
tmp_dir,
training,
how_many,
start_from=0,
eos_list=None,
vocab_filename=None):
"""Image generator for MSCOCO captioning problem with token-wise captions.
Args:
data_dir: path to the data directory.
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
eos_list: optional list of end of sentence tokens, otherwise use default
value `1`.
vocab_filename: file within `tmp_dir` to read vocabulary from.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as JPEG,
* image/format: the string "jpeg" representing image format,
* image/class/label: a list of integers representing the caption,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a list of the corresponding type.
"""
eos_list = [1] if eos_list is None else eos_list
def get_vocab():
"""Get vocab for caption text encoder."""
if data_dir is not None and vocab_filename is not None:
vocab_filepath = os.path.join(data_dir, vocab_filename)
if tf.gfile.Exists(vocab_filepath):
tf.logging.info("Found vocab file: %s", vocab_filepath)
vocab_symbolizer = text_encoder.SubwordTextEncoder(vocab_filepath)
return vocab_symbolizer
else:
raise ValueError("Vocab file does not exist: %s" % vocab_filepath)
return None
vocab_symbolizer = get_vocab()
_get_mscoco(tmp_dir)
caption_filepath = (
_MSCOCO_TRAIN_CAPTION_FILE if training else _MSCOCO_EVAL_CAPTION_FILE)
caption_filepath = os.path.join(tmp_dir, caption_filepath)
prefix = _MSCOCO_TRAIN_PREFIX if training else _MSCOCO_EVAL_PREFIX
caption_file = io.open(caption_filepath)
caption_json = json.load(caption_file)
# Dictionary from image_id to ((filename, height, width), captions).
image_dict = {}
for image in caption_json["images"]:
image_dict[image["id"]] = [(image["file_name"], image["height"],
image["width"]), []]
annotations = caption_json["annotations"]
annotation_count = len(annotations)
image_count = len(image_dict)
tf.logging.info("Processing %d images and %d labels\n" % (image_count,
annotation_count))
for annotation in annotations:
image_id = annotation["image_id"]
image_dict[image_id][1].append(annotation["caption"])
data = list(image_dict.values())[start_from:start_from + how_many]
random.shuffle(data)
for image_info, labels in data:
image_filename = image_info[0]
image_filepath = os.path.join(tmp_dir, prefix, image_filename)
with tf.gfile.Open(image_filepath, "rb") as f:
encoded_image_data = f.read()
height, width = image_info[1], image_info[2]
for label in labels:
if vocab_filename is None or vocab_symbolizer is None:
label = [ord(c) for c in label] + eos_list
else:
label = vocab_symbolizer.encode(label) + eos_list
yield {
"image/encoded": [encoded_image_data],
"image/format": ["jpeg"],
"image/class/label": label,
"image/height": [height],
"image/width": [width]
}
|
python
|
Image generator for MSCOCO captioning problem with token-wise captions.
Args:
data_dir: path to the data directory.
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
eos_list: optional list of end of sentence tokens, otherwise use default
value `1`.
vocab_filename: file within `tmp_dir` to read vocabulary from.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as JPEG,
* image/format: the string "jpeg" representing image format,
* image/class/label: a list of integers representing the caption,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a list of the corresponding type.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/mscoco.py#L60-L142
|
train
|
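A sketch of pulling a couple of examples from the generator, assuming the MSCOCO archives can be downloaded into the (hypothetical) tmp_dir and using character-level labels (no vocab file):

import itertools

gen = mscoco_generator(
    data_dir=None,
    tmp_dir='/tmp/t2t_mscoco',  # hypothetical scratch directory
    training=True,
    how_many=2)
for example in itertools.islice(gen, 2):
  print(example['image/height'], example['image/width'],
        len(example['image/class/label']))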
tensorflow/tensor2tensor
|
tensor2tensor/utils/cloud_mlengine.py
|
flags_as_args
|
def flags_as_args():
"""Convert FLAGS to list of args suitable for passing on cmd line."""
if hasattr(FLAGS, "flag_values_dict"):
args_dict = FLAGS.flag_values_dict()
else:
args_dict = dict(FLAGS.__dict__["__flags"])
del args_dict["cloud_mlengine"]
# Configured later
del args_dict["t2t_usr_dir"]
args_dict.pop("h", None)
args_dict.pop("helpfull", None)
args_dict.pop("helpshort", None)
args_dict.pop("help", None)
args = []
for name, val in args_dict.items():
if val is None:
continue
if name.startswith("autotune"):
continue
args.extend(["--%s=%s" % (name, str(val))])
return args
|
python
|
Convert FLAGS to list of args suitable for passing on cmd line.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L93-L113
|
train
|
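So with, say, model set to "transformer", an unset worker_gpu, and an autotune flag, only the model flag survives. A standalone restatement of the filtering loop, without absl FLAGS:

def dict_as_args(args_dict):
  # Mirrors the loop above: skip unset values and autotune flags.
  return ['--%s=%s' % (name, val)
          for name, val in args_dict.items()
          if val is not None and not name.startswith('autotune')]

assert dict_as_args({'model': 'transformer',
                     'worker_gpu': None,
                     'autotune_objective': 'loss'}) == ['--model=transformer']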
tensorflow/tensor2tensor
|
tensor2tensor/utils/cloud_mlengine.py
|
get_default_master_type
|
def get_default_master_type(num_gpus=1):
"""Returns master_type for trainingInput."""
gpus_to_master_map = {
0: "standard",
1: "standard_p100",
4: "complex_model_m_p100",
8: "complex_model_l_gpu",
}
if num_gpus not in gpus_to_master_map:
raise ValueError("Num gpus must be in %s" %
str(sorted(list(gpus_to_master_map.keys()))))
return gpus_to_master_map[num_gpus]
|
python
|
Returns master_type for trainingInput.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L116-L127
|
train
|
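Usage follows directly from the mapping above:

assert get_default_master_type(num_gpus=0) == 'standard'
assert get_default_master_type(num_gpus=8) == 'complex_model_l_gpu'
try:
  get_default_master_type(num_gpus=2)  # unsupported GPU count
except ValueError as e:
  print(e)  # Num gpus must be in [0, 1, 4, 8]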
tensorflow/tensor2tensor
|
tensor2tensor/utils/cloud_mlengine.py
|
configure_job
|
def configure_job():
"""Construct jobSpec for ML Engine job."""
# See documentation:
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#traininginput
training_input = {
"pythonModule": "tensor2tensor.bin.t2t_trainer",
"args": flags_as_args(),
"region": text_encoder.native_to_unicode(default_region()),
"runtimeVersion": RUNTIME_VERSION,
"pythonVersion": "3.5" if sys.version_info.major == 3 else "2.7",
"jobDir": FLAGS.output_dir,
"scaleTier": "CUSTOM",
"masterType": FLAGS.cloud_mlengine_master_type or get_default_master_type(
num_gpus=FLAGS.worker_gpu)
}
if FLAGS.use_tpu:
training_input["masterType"] = (FLAGS.cloud_mlengine_master_type or
"standard")
training_input["workerType"] = "cloud_tpu"
training_input["workerCount"] = 1
if FLAGS.hparams_range:
tf.logging.info("Configuring hyperparameter tuning.")
training_input["hyperparameters"] = configure_autotune(
FLAGS.hparams_range,
FLAGS.autotune_objective,
FLAGS.autotune_maximize,
FLAGS.autotune_max_trials,
FLAGS.autotune_parallel_trials,
)
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
job_spec = {
"jobId": "%s_%s_t2t_%s" % (FLAGS.model, FLAGS.problem, timestamp),
"labels": {
"model": FLAGS.model,
"problem": FLAGS.problem,
"hparams": FLAGS.hparams_set
},
"trainingInput": training_input,
}
return job_spec
|
python
|
Construct jobSpec for ML Engine job.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L130-L170
|
train
|
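The returned spec is plain JSON-serializable data. For a single-P100 run it would look roughly like this (all values illustrative, including the runtime version):

job_spec = {
    'jobId': 'transformer_translate_ende_t2t_20190101_120000',
    'labels': {'model': 'transformer',
               'problem': 'translate_ende',
               'hparams': 'transformer_base'},
    'trainingInput': {
        'pythonModule': 'tensor2tensor.bin.t2t_trainer',
        'args': ['--model=transformer'],  # from flags_as_args()
        'region': 'us-central1',
        'runtimeVersion': '1.13',
        'pythonVersion': '2.7',
        'jobDir': 'gs://bucket/output',
        'scaleTier': 'CUSTOM',
        'masterType': 'standard_p100',
    },
}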
tensorflow/tensor2tensor
|
tensor2tensor/utils/cloud_mlengine.py
|
launch_job
|
def launch_job(job_spec):
"""Launch job on ML Engine."""
project_id = "projects/{}".format(
text_encoder.native_to_unicode(default_project()))
credentials = GoogleCredentials.get_application_default()
cloudml = discovery.build("ml", "v1", credentials=credentials,
cache_discovery=False)
request = cloudml.projects().jobs().create(body=job_spec, parent=project_id)
request.execute()
|
python
|
Launch job on ML Engine.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L173-L181
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/cloud_mlengine.py
|
_tar_and_copy
|
def _tar_and_copy(src_dir, target_dir):
"""Tar and gzip src_dir and copy to GCS target_dir."""
src_dir = src_dir.rstrip("/")
target_dir = target_dir.rstrip("/")
tmp_dir = tempfile.gettempdir().rstrip("/")
src_base = os.path.basename(src_dir)
shell_run(
"tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar.gz -C {src_dir} .",
src_dir=src_dir,
src_base=src_base,
tmp_dir=tmp_dir)
final_destination = "%s/%s.tar.gz" % (target_dir, src_base)
shell_run(
("gsutil cp {tmp_dir}/{src_base}.tar.gz "
"{final_destination}"),
tmp_dir=tmp_dir,
src_base=src_base,
final_destination=final_destination)
return final_destination
|
python
|
Tar and gzip src_dir and copy to GCS target_dir.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L184-L202
|
train
|
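shell_run here is a T2T helper that formats the command template and executes it; an equivalent standalone sketch with subprocess (behavior matches the two commands above, paths hypothetical):

import os
import subprocess
import tempfile

def tar_and_copy(src_dir, target_dir):
  src_dir, target_dir = src_dir.rstrip('/'), target_dir.rstrip('/')
  src_base = os.path.basename(src_dir)
  tarball = os.path.join(tempfile.gettempdir(), src_base + '.tar.gz')
  subprocess.check_call(
      ['tar', '--exclude=.git', '-zcf', tarball, '-C', src_dir, '.'])
  final_destination = '%s/%s.tar.gz' % (target_dir, src_base)
  subprocess.check_call(['gsutil', 'cp', tarball, final_destination])
  return final_destination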
tensorflow/tensor2tensor
|
tensor2tensor/utils/cloud_mlengine.py
|
tar_and_copy_t2t
|
def tar_and_copy_t2t(train_dir):
"""Tar Tensor2Tensor and cp to train_dir."""
tf.logging.info("Tarring and pushing local Tensor2Tensor package.")
output = text_encoder.native_to_unicode(shell_output(
"pip show tensor2tensor")).split("\n")
assert output[1].startswith("Version")
assert output[7].startswith("Location")
t2t_version = output[1].split(":")[1].strip()
t2t_dir = output[7].split(":")[1].strip()
# A local installation cloned from GitHub will have a setup.py file and a docs
# folder
is_local_t2t = all([
tf.gfile.Exists(os.path.join(t2t_dir, fname))
for fname in ["setup.py", "docs/cloud_mlengine.md"]
])
if is_local_t2t:
tf.logging.info("Found local T2T installation. Tarring directory %s",
t2t_dir)
else:
# PyPI installation
# Create a folder with just a setup.py file pointing to the right version
tf.logging.info("Found PyPI T2T installation. Launching tensor2tensor==%s",
t2t_version)
t2t_dir = os.path.join(tempfile.gettempdir(), "tensor2tensor_tmp")
shutil.rmtree(t2t_dir, ignore_errors=True)
os.mkdir(t2t_dir)
setup_fname = os.path.join(t2t_dir, "setup.py")
setup_file_str = get_setup_file(
name="DummyT2TPackage",
packages=["tensor2tensor==%s" % t2t_version]
)
with tf.gfile.Open(setup_fname, "w") as f:
f.write(setup_file_str)
t2t_tar = _tar_and_copy(t2t_dir, train_dir)
return t2t_tar
|
python
|
Tar Tensor2Tensor and cp to train_dir.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L205-L242
|
train
|
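Note that parsing pip show by fixed line indices (output[1], output[7]) is brittle across pip versions, which is why the asserts above guard the positions. A more defensive sketch that keys on field names instead:

def parse_pip_show(output):
  # Build a field -> value map instead of relying on line positions.
  fields = {}
  for line in output.splitlines():
    key, sep, value = line.partition(':')
    if sep:
      fields[key.strip()] = value.strip()
  return fields['Version'], fields['Location']

version, location = parse_pip_show(
    'Name: tensor2tensor\nVersion: 1.13.4\nLocation: /usr/lib/python')
assert version == '1.13.4' and location == '/usr/lib/python'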
tensorflow/tensor2tensor
|
tensor2tensor/utils/cloud_mlengine.py
|
tar_and_copy_usr_dir
|
def tar_and_copy_usr_dir(usr_dir, train_dir):
"""Package, tar, and copy usr_dir to GCS train_dir."""
tf.logging.info("Tarring and pushing t2t_usr_dir.")
usr_dir = os.path.abspath(os.path.expanduser(usr_dir))
# Copy usr dir to a temp location
top_dir = os.path.join(tempfile.gettempdir(), "t2t_usr_container")
tmp_usr_dir = os.path.join(top_dir, usr_dir_lib.INTERNAL_USR_DIR_PACKAGE)
shutil.rmtree(top_dir, ignore_errors=True)
shutil.copytree(usr_dir, tmp_usr_dir)
# Insert setup.py if one does not exist
top_setup_fname = os.path.join(top_dir, "setup.py")
setup_file_str = get_setup_file(
name="DummyUsrDirPackage",
packages=get_requirements(usr_dir)
)
with tf.gfile.Open(top_setup_fname, "w") as f:
f.write(setup_file_str)
usr_tar = _tar_and_copy(top_dir, train_dir)
return usr_tar
|
python
|
Package, tar, and copy usr_dir to GCS train_dir.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L245-L263
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/cloud_mlengine.py
|
validate_flags
|
def validate_flags():
"""Validates flags are set to acceptable values for CloudML Engine runs."""
assert not job_dir()
assert FLAGS.output_dir.startswith("gs://")
assert FLAGS.data_dir.startswith("gs://")
assert FLAGS.worker_replicas <= 1
assert FLAGS.ps_replicas <= 0
if FLAGS.hparams_range:
assert FLAGS.autotune_objective
if FLAGS.worker_gpu:
assert FLAGS.worker_gpu in [1, 4, 8]
if FLAGS.cloud_mlengine_master_type:
if FLAGS.worker_gpu:
if FLAGS.worker_gpu == 1:
assert FLAGS.cloud_mlengine_master_type in ["standard_gpu",
"standard_p100"]
elif FLAGS.worker_gpu == 4:
assert FLAGS.cloud_mlengine_master_type in ["complex_model_m_gpu",
"complex_model_m_p100"]
else:
assert FLAGS.cloud_mlengine_master_type == "complex_model_l_gpu"
else:
assert FLAGS.cloud_mlengine_master_type in ["standard", "large_model",
"complex_model_s",
"complex_model_m",
"complex_model_l"]
|
python
|
Validates flags are set to acceptable values for CloudML Engine runs.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L298-L323
|
train
|
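The validate_flags record above encodes the allowed pairings between worker_gpu counts and Cloud ML Engine machine types as nested asserts. Below is a minimal sketch of the same constraint table expressed as data; the names VALID_MASTER_TYPES and check_master_type are illustrative only and are not part of cloud_mlengine.py.

# Hedged sketch: the GPU-count -> machine-type constraints from
# validate_flags, as a lookup table. Illustrative names only.
VALID_MASTER_TYPES = {
    1: ["standard_gpu", "standard_p100"],
    4: ["complex_model_m_gpu", "complex_model_m_p100"],
    8: ["complex_model_l_gpu"],
    # No GPUs: plain CPU / memory-optimized machine types.
    0: ["standard", "large_model", "complex_model_s",
        "complex_model_m", "complex_model_l"],
}

def check_master_type(worker_gpu, master_type):
  """Returns True iff master_type is acceptable for this GPU count."""
  return master_type in VALID_MASTER_TYPES[worker_gpu or 0]

assert check_master_type(1, "standard_gpu")
assert not check_master_type(4, "standard_gpu")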
tensorflow/tensor2tensor
|
tensor2tensor/utils/cloud_mlengine.py
|
launch
|
def launch():
"""Launch t2t_trainer on Cloud ML Engine."""
validate_flags()
job_spec = configure_job()
job_name = job_spec["jobId"]
tf.logging.info("Launching job %s with ML Engine spec:\n%s", job_name,
pprint.pformat(job_spec))
assert confirm()
train_dir = FLAGS.output_dir
t2t_tar = tar_and_copy_t2t(train_dir)
configure_trainer_package(job_spec, t2t_tar)
if FLAGS.t2t_usr_dir:
usr_tar = tar_and_copy_usr_dir(FLAGS.t2t_usr_dir, train_dir)
configure_usr_dir(job_spec, usr_tar)
launch_job(job_spec)
tf.logging.info("Launched %s. See console to track: %s.", job_name,
CONSOLE_URL)
tf.logging.info("Interact with the training job from the command line:")
tf.logging.info("Abort job: gcloud ml-engine jobs cancel %s", job_name)
tf.logging.info("Stream logs: gcloud ml-engine jobs stream-logs %s", job_name)
tf.logging.info("Open tensorboard: tensorboard --logdir %s", train_dir)
|
python
|
def launch():
"""Launch t2t_trainer on Cloud ML Engine."""
validate_flags()
job_spec = configure_job()
job_name = job_spec["jobId"]
tf.logging.info("Launching job %s with ML Engine spec:\n%s", job_name,
pprint.pformat(job_spec))
assert confirm()
train_dir = FLAGS.output_dir
t2t_tar = tar_and_copy_t2t(train_dir)
configure_trainer_package(job_spec, t2t_tar)
if FLAGS.t2t_usr_dir:
usr_tar = tar_and_copy_usr_dir(FLAGS.t2t_usr_dir, train_dir)
configure_usr_dir(job_spec, usr_tar)
launch_job(job_spec)
tf.logging.info("Launched %s. See console to track: %s.", job_name,
CONSOLE_URL)
tf.logging.info("Interact with the training job from the command line:")
tf.logging.info("Abort job: gcloud ml-engine jobs cancel %s", job_name)
tf.logging.info("Stream logs: gcloud ml-engine jobs stream-logs %s", job_name)
tf.logging.info("Open tensorboard: tensorboard --logdir %s", train_dir)
|
[
"def",
"launch",
"(",
")",
":",
"validate_flags",
"(",
")",
"job_spec",
"=",
"configure_job",
"(",
")",
"job_name",
"=",
"job_spec",
"[",
"\"jobId\"",
"]",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Launching job %s with ML Engine spec:\\n%s\"",
",",
"job_name",
",",
"pprint",
".",
"pformat",
"(",
"job_spec",
")",
")",
"assert",
"confirm",
"(",
")",
"train_dir",
"=",
"FLAGS",
".",
"output_dir",
"t2t_tar",
"=",
"tar_and_copy_t2t",
"(",
"train_dir",
")",
"configure_trainer_package",
"(",
"job_spec",
",",
"t2t_tar",
")",
"if",
"FLAGS",
".",
"t2t_usr_dir",
":",
"usr_tar",
"=",
"tar_and_copy_usr_dir",
"(",
"FLAGS",
".",
"t2t_usr_dir",
",",
"train_dir",
")",
"configure_usr_dir",
"(",
"job_spec",
",",
"usr_tar",
")",
"launch_job",
"(",
"job_spec",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Launched %s. See console to track: %s.\"",
",",
"job_name",
",",
"CONSOLE_URL",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Interact with the training job from the command line:\"",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Abort job: gcloud ml-engine jobs cancel %s\"",
",",
"job_name",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Stream logs: gcloud ml-engine jobs stream-logs %s\"",
",",
"job_name",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Open tensorboard: tensorboard --logdir %s\"",
",",
"train_dir",
")"
] |
Launch t2t_trainer on Cloud ML Engine.
|
[
"Launch",
"t2t_trainer",
"on",
"Cloud",
"ML",
"Engine",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/cloud_mlengine.py#L331-L351
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/bayes.py
|
add_weight
|
def add_weight(cls):
"""Decorator for Layers, overriding add_weight for trainable initializers."""
@functools.wraps(cls.add_weight)
def _add_weight(self,
name=None,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
**kwargs):
"""Adds weight."""
if isinstance(initializer, tf.keras.layers.Layer):
weight = initializer(shape, dtype)
self._trainable_weights.extend(initializer.trainable_weights) # pylint: disable=protected-access
self._non_trainable_weights.extend(initializer.non_trainable_weights) # pylint: disable=protected-access
if regularizer is not None:
# TODO(trandustin): Replace need for this with
# Layer._handle_weight_regularization. For Eager compatibility, random
# variable __init__s cannot apply TF ops (cl/220898007).
def loss_fn():
"""Creates a regularization loss `Tensor`."""
with tf.name_scope(name + '/Regularizer'):
return regularizer(initializer(shape, dtype))
self.add_loss(loss_fn)
return weight
return super(cls, self).add_weight(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
**kwargs)
cls.add_weight = _add_weight
return cls
|
python
|
def add_weight(cls):
"""Decorator for Layers, overriding add_weight for trainable initializers."""
@functools.wraps(cls.add_weight)
def _add_weight(self,
name=None,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
**kwargs):
"""Adds weight."""
if isinstance(initializer, tf.keras.layers.Layer):
weight = initializer(shape, dtype)
self._trainable_weights.extend(initializer.trainable_weights) # pylint: disable=protected-access
self._non_trainable_weights.extend(initializer.non_trainable_weights) # pylint: disable=protected-access
if regularizer is not None:
# TODO(trandustin): Replace need for this with
# Layer._handle_weight_regularization. For Eager compatibility, random
# variable __init__s cannot apply TF ops (cl/220898007).
def loss_fn():
"""Creates a regularization loss `Tensor`."""
with tf.name_scope(name + '/Regularizer'):
return regularizer(initializer(shape, dtype))
self.add_loss(loss_fn)
return weight
return super(cls, self).add_weight(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
**kwargs)
cls.add_weight = _add_weight
return cls
|
[
"def",
"add_weight",
"(",
"cls",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"cls",
".",
"add_weight",
")",
"def",
"_add_weight",
"(",
"self",
",",
"name",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"initializer",
"=",
"None",
",",
"regularizer",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Adds weight.\"\"\"",
"if",
"isinstance",
"(",
"initializer",
",",
"tf",
".",
"keras",
".",
"layers",
".",
"Layer",
")",
":",
"weight",
"=",
"initializer",
"(",
"shape",
",",
"dtype",
")",
"self",
".",
"_trainable_weights",
".",
"extend",
"(",
"initializer",
".",
"trainable_weights",
")",
"# pylint: disable=protected-access",
"self",
".",
"_non_trainable_weights",
".",
"extend",
"(",
"initializer",
".",
"non_trainable_weights",
")",
"# pylint: disable=protected-access",
"if",
"regularizer",
"is",
"not",
"None",
":",
"# TODO(trandustin): Replace need for this with",
"# Layer._handle_weight_regularization. For Eager compatibility, random",
"# variable __init__s cannot apply TF ops (cl/220898007).",
"def",
"loss_fn",
"(",
")",
":",
"\"\"\"Creates a regularization loss `Tensor`.\"\"\"",
"with",
"tf",
".",
"name_scope",
"(",
"name",
"+",
"'/Regularizer'",
")",
":",
"return",
"regularizer",
"(",
"initializer",
"(",
"shape",
",",
"dtype",
")",
")",
"self",
".",
"add_loss",
"(",
"loss_fn",
")",
"return",
"weight",
"return",
"super",
"(",
"cls",
",",
"self",
")",
".",
"add_weight",
"(",
"name",
"=",
"name",
",",
"shape",
"=",
"shape",
",",
"dtype",
"=",
"dtype",
",",
"initializer",
"=",
"initializer",
",",
"regularizer",
"=",
"regularizer",
",",
"*",
"*",
"kwargs",
")",
"cls",
".",
"add_weight",
"=",
"_add_weight",
"return",
"cls"
] |
Decorator for Layers, overriding add_weight for trainable initializers.
|
[
"Decorator",
"for",
"Layers",
"overriding",
"add_weight",
"for",
"trainable",
"initializers",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/bayes.py#L32-L64
|
train
|
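The add_weight record above relies on a class-patching decorator: functools.wraps preserves the metadata of the original method while a wrapper intercepts the case where the initializer is itself a Layer. A stripped-down sketch of the same pattern without TensorFlow follows; patch_make_param, Model, and make_param are hypothetical names, and the real decorator defers to super(cls, self).add_weight rather than the class's own method.

import functools

# Hedged sketch of the class-patching pattern: a decorator swaps in a
# wrapper so that callable "initializers" are invoked rather than stored.
def patch_make_param(cls):
  original = cls.make_param
  @functools.wraps(original)
  def _make_param(self, name, value):
    if callable(value):      # analogue of the Layer-valued initializer case
      return value()         # invoke it to produce the parameter
    return original(self, name, value)  # otherwise keep original behavior
  cls.make_param = _make_param
  return cls

@patch_make_param
class Model(object):
  def make_param(self, name, value):
    return value

assert Model().make_param("w", lambda: 3.0) == 3.0
assert Model().make_param("b", 0.5) == 0.5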
tensorflow/tensor2tensor
|
tensor2tensor/models/video/base_vae.py
|
NextFrameBaseVae.get_beta
|
def get_beta(self, kl_loss=0.0):
"""Get the KL multiplier, either dynamically or schedule based.
    If hparams.latent_loss_multiplier_dynamic is set to true, then beta
    is adjusted to keep the KL divergence under
    hparams.latent_loss_multiplier_epsilon. To do so, beta is updated at each
    iteration by taking steps of size hparams.latent_loss_multiplier_alpha.
    The same formulation can be recovered by solving the Lagrangian
    with KL < epsilon as a constraint.
Args:
kl_loss: KL loss. Only used for dynamic adjustment.
Returns:
beta: the final value of beta.
"""
if self.hparams.latent_loss_multiplier_dynamic:
beta = tf.Variable(self.hparams.latent_loss_multiplier,
trainable=False, dtype=tf.float32)
alpha = self.hparams.latent_loss_multiplier_alpha
epsilon = self.hparams.latent_loss_multiplier_epsilon
shadow_beta = beta + alpha * (kl_loss - epsilon)
      # Capping the beta between 0 and 1. May need to change this later on.
shadow_beta = tf.maximum(shadow_beta, 0.0)
shadow_beta = tf.minimum(shadow_beta, 1.0)
update_op = tf.assign(beta, shadow_beta)
else:
beta = common_video.beta_schedule(
schedule=self.hparams.latent_loss_multiplier_schedule,
global_step=self.get_iteration_num(),
final_beta=self.hparams.latent_loss_multiplier,
decay_start=(self.hparams.num_iterations_1st_stage +
self.hparams.num_iterations_2nd_stage),
decay_end=self.hparams.anneal_end)
update_op = tf.identity(beta) # fake update for regular beta.
with tf.control_dependencies([update_op]):
tf.summary.scalar("beta", beta)
return beta
|
python
|
def get_beta(self, kl_loss=0.0):
"""Get the KL multiplier, either dynamically or schedule based.
    If hparams.latent_loss_multiplier_dynamic is set to true, then beta
    is adjusted to keep the KL divergence under
    hparams.latent_loss_multiplier_epsilon. To do so, beta is updated at each
    iteration by taking steps of size hparams.latent_loss_multiplier_alpha.
    The same formulation can be recovered by solving the Lagrangian
    with KL < epsilon as a constraint.
Args:
kl_loss: KL loss. Only used for dynamic adjustment.
Returns:
beta: the final value of beta.
"""
if self.hparams.latent_loss_multiplier_dynamic:
beta = tf.Variable(self.hparams.latent_loss_multiplier,
trainable=False, dtype=tf.float32)
alpha = self.hparams.latent_loss_multiplier_alpha
epsilon = self.hparams.latent_loss_multiplier_epsilon
shadow_beta = beta + alpha * (kl_loss - epsilon)
      # Capping the beta between 0 and 1. May need to change this later on.
shadow_beta = tf.maximum(shadow_beta, 0.0)
shadow_beta = tf.minimum(shadow_beta, 1.0)
update_op = tf.assign(beta, shadow_beta)
else:
beta = common_video.beta_schedule(
schedule=self.hparams.latent_loss_multiplier_schedule,
global_step=self.get_iteration_num(),
final_beta=self.hparams.latent_loss_multiplier,
decay_start=(self.hparams.num_iterations_1st_stage +
self.hparams.num_iterations_2nd_stage),
decay_end=self.hparams.anneal_end)
update_op = tf.identity(beta) # fake update for regular beta.
with tf.control_dependencies([update_op]):
tf.summary.scalar("beta", beta)
return beta
|
[
"def",
"get_beta",
"(",
"self",
",",
"kl_loss",
"=",
"0.0",
")",
":",
"if",
"self",
".",
"hparams",
".",
"latent_loss_multiplier_dynamic",
":",
"beta",
"=",
"tf",
".",
"Variable",
"(",
"self",
".",
"hparams",
".",
"latent_loss_multiplier",
",",
"trainable",
"=",
"False",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"alpha",
"=",
"self",
".",
"hparams",
".",
"latent_loss_multiplier_alpha",
"epsilon",
"=",
"self",
".",
"hparams",
".",
"latent_loss_multiplier_epsilon",
"shadow_beta",
"=",
"beta",
"+",
"alpha",
"*",
"(",
"kl_loss",
"-",
"epsilon",
")",
"# Caping the beta between 0 and 1. May need to change this later on.",
"shadow_beta",
"=",
"tf",
".",
"maximum",
"(",
"shadow_beta",
",",
"0.0",
")",
"shadow_beta",
"=",
"tf",
".",
"minimum",
"(",
"shadow_beta",
",",
"1.0",
")",
"update_op",
"=",
"tf",
".",
"assign",
"(",
"beta",
",",
"shadow_beta",
")",
"else",
":",
"beta",
"=",
"common_video",
".",
"beta_schedule",
"(",
"schedule",
"=",
"self",
".",
"hparams",
".",
"latent_loss_multiplier_schedule",
",",
"global_step",
"=",
"self",
".",
"get_iteration_num",
"(",
")",
",",
"final_beta",
"=",
"self",
".",
"hparams",
".",
"latent_loss_multiplier",
",",
"decay_start",
"=",
"(",
"self",
".",
"hparams",
".",
"num_iterations_1st_stage",
"+",
"self",
".",
"hparams",
".",
"num_iterations_2nd_stage",
")",
",",
"decay_end",
"=",
"self",
".",
"hparams",
".",
"anneal_end",
")",
"update_op",
"=",
"tf",
".",
"identity",
"(",
"beta",
")",
"# fake update for regular beta.",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"update_op",
"]",
")",
":",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"beta\"",
",",
"beta",
")",
"return",
"beta"
] |
Get the KL multiplier, either dynamically or schedule based.
If hparams.latent_loss_multiplier_dynamic is set to true, then beta
is adjusted to keep the KL divergence under
hparams.latent_loss_multiplier_epsilon. To do so, beta is updated at each
iteration by taking steps of size hparams.latent_loss_multiplier_alpha.
The same formulation can be recovered by solving the Lagrangian
with KL < epsilon as a constraint.
Args:
kl_loss: KL loss. Only used for dynamic adjustment.
Returns:
beta: the final value of beta.
|
[
"Get",
"the",
"KL",
"multiplier",
"either",
"dynamically",
"or",
"schedule",
"based",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/base_vae.py#L34-L72
|
train
|
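The dynamic branch of get_beta above is dual ascent on a Lagrange multiplier: beta moves by alpha * (kl_loss - epsilon) each step and is clipped to [0, 1]. A plain-Python sketch of that update rule; update_beta and the sample values are illustrative.

# Hedged sketch of the dynamic-beta update from get_beta, in plain
# Python. alpha and epsilon mirror hparams.latent_loss_multiplier_alpha
# and hparams.latent_loss_multiplier_epsilon.
def update_beta(beta, kl_loss, alpha=1e-4, epsilon=1.0):
  """One dual-ascent step on the Lagrange multiplier for KL < epsilon."""
  beta = beta + alpha * (kl_loss - epsilon)  # raise beta while KL too high
  return min(max(beta, 0.0), 1.0)            # keep beta in [0, 1]

beta = 0.0
for kl in [5.0, 3.0, 1.5, 0.8]:  # KL shrinking toward epsilon over training
  beta = update_beta(beta, kl)
  # beta rises while kl > epsilon, then decays once kl < epsilon.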
tensorflow/tensor2tensor
|
tensor2tensor/models/video/base_vae.py
|
NextFrameBaseVae.get_kl_loss
|
def get_kl_loss(self, means, log_vars, means_p=None, log_vars_p=None):
"""Get KL loss for all the predicted Gaussians."""
kl_loss = 0.0
if means_p is None:
means_p = tf.unstack(tf.zeros_like(means))
if log_vars_p is None:
log_vars_p = tf.unstack(tf.zeros_like(log_vars))
enumerated_inputs = enumerate(zip(means, log_vars, means_p, log_vars_p))
if self.is_training and self.hparams.stochastic_model:
for i, (mean, log_var, mean_p, log_var_p) in enumerated_inputs:
kl_loss += common_layers.kl_divergence(mean, log_var, mean_p, log_var_p)
tf.summary.histogram("posterior_mean_%d" % i, mean)
tf.summary.histogram("posterior_log_var_%d" % i, log_var)
tf.summary.histogram("prior_mean_%d" % i, mean_p)
tf.summary.histogram("prior_log_var_%d" % i, log_var_p)
tf.summary.scalar("kl_raw", tf.reduce_mean(kl_loss))
beta = self.get_beta(kl_loss)
# information capacity from "Understanding disentangling in beta-VAE"
if self.hparams.information_capacity > 0.0:
kl_loss = tf.abs(kl_loss - self.hparams.information_capacity)
return beta * kl_loss
|
python
|
def get_kl_loss(self, means, log_vars, means_p=None, log_vars_p=None):
"""Get KL loss for all the predicted Gaussians."""
kl_loss = 0.0
if means_p is None:
means_p = tf.unstack(tf.zeros_like(means))
if log_vars_p is None:
log_vars_p = tf.unstack(tf.zeros_like(log_vars))
enumerated_inputs = enumerate(zip(means, log_vars, means_p, log_vars_p))
if self.is_training and self.hparams.stochastic_model:
for i, (mean, log_var, mean_p, log_var_p) in enumerated_inputs:
kl_loss += common_layers.kl_divergence(mean, log_var, mean_p, log_var_p)
tf.summary.histogram("posterior_mean_%d" % i, mean)
tf.summary.histogram("posterior_log_var_%d" % i, log_var)
tf.summary.histogram("prior_mean_%d" % i, mean_p)
tf.summary.histogram("prior_log_var_%d" % i, log_var_p)
tf.summary.scalar("kl_raw", tf.reduce_mean(kl_loss))
beta = self.get_beta(kl_loss)
# information capacity from "Understanding disentangling in beta-VAE"
if self.hparams.information_capacity > 0.0:
kl_loss = tf.abs(kl_loss - self.hparams.information_capacity)
return beta * kl_loss
|
[
"def",
"get_kl_loss",
"(",
"self",
",",
"means",
",",
"log_vars",
",",
"means_p",
"=",
"None",
",",
"log_vars_p",
"=",
"None",
")",
":",
"kl_loss",
"=",
"0.0",
"if",
"means_p",
"is",
"None",
":",
"means_p",
"=",
"tf",
".",
"unstack",
"(",
"tf",
".",
"zeros_like",
"(",
"means",
")",
")",
"if",
"log_vars_p",
"is",
"None",
":",
"log_vars_p",
"=",
"tf",
".",
"unstack",
"(",
"tf",
".",
"zeros_like",
"(",
"log_vars",
")",
")",
"enumerated_inputs",
"=",
"enumerate",
"(",
"zip",
"(",
"means",
",",
"log_vars",
",",
"means_p",
",",
"log_vars_p",
")",
")",
"if",
"self",
".",
"is_training",
"and",
"self",
".",
"hparams",
".",
"stochastic_model",
":",
"for",
"i",
",",
"(",
"mean",
",",
"log_var",
",",
"mean_p",
",",
"log_var_p",
")",
"in",
"enumerated_inputs",
":",
"kl_loss",
"+=",
"common_layers",
".",
"kl_divergence",
"(",
"mean",
",",
"log_var",
",",
"mean_p",
",",
"log_var_p",
")",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"posterior_mean_%d\"",
"%",
"i",
",",
"mean",
")",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"posterior_log_var_%d\"",
"%",
"i",
",",
"log_var",
")",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"prior_mean_%d\"",
"%",
"i",
",",
"mean_p",
")",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"prior_log_var_%d\"",
"%",
"i",
",",
"log_var_p",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"kl_raw\"",
",",
"tf",
".",
"reduce_mean",
"(",
"kl_loss",
")",
")",
"beta",
"=",
"self",
".",
"get_beta",
"(",
"kl_loss",
")",
"# information capacity from \"Understanding disentangling in beta-VAE\"",
"if",
"self",
".",
"hparams",
".",
"information_capacity",
">",
"0.0",
":",
"kl_loss",
"=",
"tf",
".",
"abs",
"(",
"kl_loss",
"-",
"self",
".",
"hparams",
".",
"information_capacity",
")",
"return",
"beta",
"*",
"kl_loss"
] |
Get KL loss for all the predicted Gaussians.
|
[
"Get",
"KL",
"loss",
"for",
"all",
"the",
"predicted",
"Gaussians",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/base_vae.py#L74-L95
|
train
|
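get_kl_loss above sums common_layers.kl_divergence over per-frame posterior/prior Gaussian parameters, defaulting to a standard-normal prior when means_p and log_vars_p are omitted. A NumPy sketch of the closed-form KL between diagonal Gaussians that such a helper is assumed to compute, up to reduction details:

import numpy as np

# Hedged sketch: KL(N(mean, exp(log_var)) || N(mean_p, exp(log_var_p)))
# for diagonal Gaussians, summed over dimensions.
def gaussian_kl(mean, log_var, mean_p, log_var_p):
  return 0.5 * np.sum(
      log_var_p - log_var
      + (np.exp(log_var) + (mean - mean_p) ** 2) / np.exp(log_var_p)
      - 1.0)

# Against a standard-normal prior (the zeros default in get_kl_loss):
kl = gaussian_kl(np.array([0.5]), np.array([0.0]),
                 np.array([0.0]), np.array([0.0]))  # = 0.125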
tensorflow/tensor2tensor
|
tensor2tensor/models/video/base_vae.py
|
NextFrameBaseVae.construct_latent_tower
|
def construct_latent_tower(self, images, time_axis):
"""Create the latent tower."""
# No latent in the first phase
first_phase = tf.less(
self.get_iteration_num(), self.hparams.num_iterations_1st_stage)
# use all frames by default but this allows more
# predicted frames at inference time
latent_num_frames = self.hparams.latent_num_frames
tf.logging.info("Creating latent tower with %d frames." % latent_num_frames)
if latent_num_frames > 0:
images = images[:, :latent_num_frames]
return common_video.conv_latent_tower(
images=images,
time_axis=time_axis,
latent_channels=self.hparams.latent_channels,
min_logvar=self.hparams.latent_std_min,
is_training=self.is_training,
random_latent=first_phase,
tiny_mode=self.hparams.tiny_mode,
small_mode=self.hparams.small_mode)
|
python
|
def construct_latent_tower(self, images, time_axis):
"""Create the latent tower."""
# No latent in the first phase
first_phase = tf.less(
self.get_iteration_num(), self.hparams.num_iterations_1st_stage)
# use all frames by default but this allows more
# predicted frames at inference time
latent_num_frames = self.hparams.latent_num_frames
tf.logging.info("Creating latent tower with %d frames." % latent_num_frames)
if latent_num_frames > 0:
images = images[:, :latent_num_frames]
return common_video.conv_latent_tower(
images=images,
time_axis=time_axis,
latent_channels=self.hparams.latent_channels,
min_logvar=self.hparams.latent_std_min,
is_training=self.is_training,
random_latent=first_phase,
tiny_mode=self.hparams.tiny_mode,
small_mode=self.hparams.small_mode)
|
[
"def",
"construct_latent_tower",
"(",
"self",
",",
"images",
",",
"time_axis",
")",
":",
"# No latent in the first phase",
"first_phase",
"=",
"tf",
".",
"less",
"(",
"self",
".",
"get_iteration_num",
"(",
")",
",",
"self",
".",
"hparams",
".",
"num_iterations_1st_stage",
")",
"# use all frames by default but this allows more",
"# predicted frames at inference time",
"latent_num_frames",
"=",
"self",
".",
"hparams",
".",
"latent_num_frames",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Creating latent tower with %d frames.\"",
"%",
"latent_num_frames",
")",
"if",
"latent_num_frames",
">",
"0",
":",
"images",
"=",
"images",
"[",
":",
",",
":",
"latent_num_frames",
"]",
"return",
"common_video",
".",
"conv_latent_tower",
"(",
"images",
"=",
"images",
",",
"time_axis",
"=",
"time_axis",
",",
"latent_channels",
"=",
"self",
".",
"hparams",
".",
"latent_channels",
",",
"min_logvar",
"=",
"self",
".",
"hparams",
".",
"latent_std_min",
",",
"is_training",
"=",
"self",
".",
"is_training",
",",
"random_latent",
"=",
"first_phase",
",",
"tiny_mode",
"=",
"self",
".",
"hparams",
".",
"tiny_mode",
",",
"small_mode",
"=",
"self",
".",
"hparams",
".",
"small_mode",
")"
] |
Create the latent tower.
|
[
"Create",
"the",
"latent",
"tower",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/base_vae.py#L97-L118
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_encode
|
def transformer_encode(encoder_function, inputs, target_space, hparams,
attention_weights=None, features=None, losses=None,
**kwargs):
"""Encode transformer inputs.
Args:
encoder_function: the encoder function
inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which
will be flattened along the two spatial dimensions.
target_space: scalar, target space ID.
hparams: hyperparameters for model.
    attention_weights: dict into which to store attention weights.
features: optionally pass the entire features dictionary as well. This is
needed now for "packed" datasets.
losses: optional list onto which to append extra training losses
**kwargs: additional arguments to pass to encoder_function
Returns:
Tuple of:
encoder_output: Encoder representation.
[batch_size, input_length, hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for
encoder-decoder attention. [batch_size, input_length]
"""
inputs = common_layers.flatten4d3d(inputs)
encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
transformer_prepare_encoder(
inputs, target_space, hparams, features=features))
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=hparams.layer_prepostprocess_dropout,
hparams=hparams)
encoder_input = tf.nn.dropout(encoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
attn_bias_for_padding = None
# Otherwise the encoder will just use encoder_self_attention_bias.
if hparams.unidirectional_encoder:
attn_bias_for_padding = encoder_decoder_attention_bias
encoder_output = encoder_function(
encoder_input,
self_attention_bias,
hparams,
nonpadding=features_to_nonpadding(features, "inputs"),
save_weights_to=attention_weights,
make_image_summary=not common_layers.is_xla_compiled(),
losses=losses,
attn_bias_for_padding=attn_bias_for_padding,
**kwargs)
return encoder_output, encoder_decoder_attention_bias
|
python
|
def transformer_encode(encoder_function, inputs, target_space, hparams,
attention_weights=None, features=None, losses=None,
**kwargs):
"""Encode transformer inputs.
Args:
encoder_function: the encoder function
inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which
will be flattened along the two spatial dimensions.
target_space: scalar, target space ID.
hparams: hyperparameters for model.
    attention_weights: dict into which to store attention weights.
features: optionally pass the entire features dictionary as well. This is
needed now for "packed" datasets.
losses: optional list onto which to append extra training losses
**kwargs: additional arguments to pass to encoder_function
Returns:
Tuple of:
encoder_output: Encoder representation.
[batch_size, input_length, hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for
encoder-decoder attention. [batch_size, input_length]
"""
inputs = common_layers.flatten4d3d(inputs)
encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
transformer_prepare_encoder(
inputs, target_space, hparams, features=features))
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=hparams.layer_prepostprocess_dropout,
hparams=hparams)
encoder_input = tf.nn.dropout(encoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
attn_bias_for_padding = None
# Otherwise the encoder will just use encoder_self_attention_bias.
if hparams.unidirectional_encoder:
attn_bias_for_padding = encoder_decoder_attention_bias
encoder_output = encoder_function(
encoder_input,
self_attention_bias,
hparams,
nonpadding=features_to_nonpadding(features, "inputs"),
save_weights_to=attention_weights,
make_image_summary=not common_layers.is_xla_compiled(),
losses=losses,
attn_bias_for_padding=attn_bias_for_padding,
**kwargs)
return encoder_output, encoder_decoder_attention_bias
|
[
"def",
"transformer_encode",
"(",
"encoder_function",
",",
"inputs",
",",
"target_space",
",",
"hparams",
",",
"attention_weights",
"=",
"None",
",",
"features",
"=",
"None",
",",
"losses",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"inputs",
"=",
"common_layers",
".",
"flatten4d3d",
"(",
"inputs",
")",
"encoder_input",
",",
"self_attention_bias",
",",
"encoder_decoder_attention_bias",
"=",
"(",
"transformer_prepare_encoder",
"(",
"inputs",
",",
"target_space",
",",
"hparams",
",",
"features",
"=",
"features",
")",
")",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"MODEL_HP_LAYER_POSTPROCESS_DROPOUT",
",",
"value",
"=",
"hparams",
".",
"layer_prepostprocess_dropout",
",",
"hparams",
"=",
"hparams",
")",
"encoder_input",
"=",
"tf",
".",
"nn",
".",
"dropout",
"(",
"encoder_input",
",",
"1.0",
"-",
"hparams",
".",
"layer_prepostprocess_dropout",
")",
"attn_bias_for_padding",
"=",
"None",
"# Otherwise the encoder will just use encoder_self_attention_bias.",
"if",
"hparams",
".",
"unidirectional_encoder",
":",
"attn_bias_for_padding",
"=",
"encoder_decoder_attention_bias",
"encoder_output",
"=",
"encoder_function",
"(",
"encoder_input",
",",
"self_attention_bias",
",",
"hparams",
",",
"nonpadding",
"=",
"features_to_nonpadding",
"(",
"features",
",",
"\"inputs\"",
")",
",",
"save_weights_to",
"=",
"attention_weights",
",",
"make_image_summary",
"=",
"not",
"common_layers",
".",
"is_xla_compiled",
"(",
")",
",",
"losses",
"=",
"losses",
",",
"attn_bias_for_padding",
"=",
"attn_bias_for_padding",
",",
"*",
"*",
"kwargs",
")",
"return",
"encoder_output",
",",
"encoder_decoder_attention_bias"
] |
Encode transformer inputs.
Args:
encoder_function: the encoder function
inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which
will be flattened along the two spatial dimensions.
target_space: scalar, target space ID.
hparams: hyperparameters for model.
attention_weights: dict into which to store attention weights.
features: optionally pass the entire features dictionary as well. This is
needed now for "packed" datasets.
losses: optional list onto which to append extra training losses
**kwargs: additional arguments to pass to encoder_function
Returns:
Tuple of:
encoder_output: Encoder representation.
[batch_size, input_length, hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for
encoder-decoder attention. [batch_size, input_length]
|
[
"Encode",
"transformer",
"inputs",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L57-L111
|
train
|
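transformer_encode above first collapses its [batch_size, input_length, 1, hidden_dim] inputs to 3-D. A hedged NumPy sketch of what common_layers.flatten4d3d is assumed to do, merging the two spatial axes into one length axis:

import numpy as np

# Hedged sketch of the flattening transformer_encode applies first.
def flatten4d3d(x):
  b, l, h, d = x.shape
  return x.reshape(b, l * h, d)

x = np.zeros((2, 7, 1, 512))          # [batch, input_length, 1, hidden_dim]
assert flatten4d3d(x).shape == (2, 7, 512)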
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_decode
|
def transformer_decode(decoder_function,
decoder_input,
encoder_output,
encoder_decoder_attention_bias,
decoder_self_attention_bias,
hparams,
attention_weights=None,
cache=None,
decode_loop_step=None,
nonpadding=None,
losses=None,
**kwargs):
"""Decode Transformer outputs from encoder representation.
Args:
decoder_function: the decoder function
decoder_input: inputs to bottom of the model. [batch_size, decoder_length,
hidden_dim]
encoder_output: Encoder representation. [batch_size, input_length,
hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
attention. [batch_size, input_length]
decoder_self_attention_bias: Bias and mask weights for decoder
self-attention. [batch_size, decoder_length]
hparams: hyperparameters for model.
    attention_weights: dict into which to store attention weights.
cache: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop. Only used
for inference on TPU.
nonpadding: optional Tensor with shape [batch_size, decoder_length]
losses: optional list onto which to append extra training losses
**kwargs: additional arguments to pass to decoder_function
Returns:
Final decoder representation. [batch_size, decoder_length, hidden_dim]
"""
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=hparams.layer_prepostprocess_dropout,
hparams=hparams)
decoder_input = tf.nn.dropout(decoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
decoder_output = decoder_function(
decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
cache=cache,
decode_loop_step=decode_loop_step,
nonpadding=nonpadding,
save_weights_to=attention_weights,
losses=losses,
**kwargs)
if (common_layers.is_xla_compiled() and
hparams.mode == tf.estimator.ModeKeys.TRAIN):
# TPU does not react kindly to extra dimensions.
# TODO(noam): remove this once TPU is more forgiving of extra dims.
return decoder_output
else:
# Expand since t2t expects 4d tensors.
return tf.expand_dims(decoder_output, axis=2)
|
python
|
def transformer_decode(decoder_function,
decoder_input,
encoder_output,
encoder_decoder_attention_bias,
decoder_self_attention_bias,
hparams,
attention_weights=None,
cache=None,
decode_loop_step=None,
nonpadding=None,
losses=None,
**kwargs):
"""Decode Transformer outputs from encoder representation.
Args:
decoder_function: the decoder function
decoder_input: inputs to bottom of the model. [batch_size, decoder_length,
hidden_dim]
encoder_output: Encoder representation. [batch_size, input_length,
hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
attention. [batch_size, input_length]
decoder_self_attention_bias: Bias and mask weights for decoder
self-attention. [batch_size, decoder_length]
hparams: hyperparameters for model.
    attention_weights: dict into which to store attention weights.
cache: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop. Only used
for inference on TPU.
nonpadding: optional Tensor with shape [batch_size, decoder_length]
losses: optional list onto which to append extra training losses
**kwargs: additional arguments to pass to decoder_function
Returns:
Final decoder representation. [batch_size, decoder_length, hidden_dim]
"""
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=hparams.layer_prepostprocess_dropout,
hparams=hparams)
decoder_input = tf.nn.dropout(decoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
decoder_output = decoder_function(
decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
cache=cache,
decode_loop_step=decode_loop_step,
nonpadding=nonpadding,
save_weights_to=attention_weights,
losses=losses,
**kwargs)
if (common_layers.is_xla_compiled() and
hparams.mode == tf.estimator.ModeKeys.TRAIN):
# TPU does not react kindly to extra dimensions.
# TODO(noam): remove this once TPU is more forgiving of extra dims.
return decoder_output
else:
# Expand since t2t expects 4d tensors.
return tf.expand_dims(decoder_output, axis=2)
|
[
"def",
"transformer_decode",
"(",
"decoder_function",
",",
"decoder_input",
",",
"encoder_output",
",",
"encoder_decoder_attention_bias",
",",
"decoder_self_attention_bias",
",",
"hparams",
",",
"attention_weights",
"=",
"None",
",",
"cache",
"=",
"None",
",",
"decode_loop_step",
"=",
"None",
",",
"nonpadding",
"=",
"None",
",",
"losses",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"MODEL_HP_LAYER_POSTPROCESS_DROPOUT",
",",
"value",
"=",
"hparams",
".",
"layer_prepostprocess_dropout",
",",
"hparams",
"=",
"hparams",
")",
"decoder_input",
"=",
"tf",
".",
"nn",
".",
"dropout",
"(",
"decoder_input",
",",
"1.0",
"-",
"hparams",
".",
"layer_prepostprocess_dropout",
")",
"decoder_output",
"=",
"decoder_function",
"(",
"decoder_input",
",",
"encoder_output",
",",
"decoder_self_attention_bias",
",",
"encoder_decoder_attention_bias",
",",
"hparams",
",",
"cache",
"=",
"cache",
",",
"decode_loop_step",
"=",
"decode_loop_step",
",",
"nonpadding",
"=",
"nonpadding",
",",
"save_weights_to",
"=",
"attention_weights",
",",
"losses",
"=",
"losses",
",",
"*",
"*",
"kwargs",
")",
"if",
"(",
"common_layers",
".",
"is_xla_compiled",
"(",
")",
"and",
"hparams",
".",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
")",
":",
"# TPU does not react kindly to extra dimensions.",
"# TODO(noam): remove this once TPU is more forgiving of extra dims.",
"return",
"decoder_output",
"else",
":",
"# Expand since t2t expects 4d tensors.",
"return",
"tf",
".",
"expand_dims",
"(",
"decoder_output",
",",
"axis",
"=",
"2",
")"
] |
Decode Transformer outputs from encoder representation.
Args:
decoder_function: the decoder function
decoder_input: inputs to bottom of the model. [batch_size, decoder_length,
hidden_dim]
encoder_output: Encoder representation. [batch_size, input_length,
hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
attention. [batch_size, input_length]
decoder_self_attention_bias: Bias and mask weights for decoder
self-attention. [batch_size, decoder_length]
hparams: hyperparameters for model.
attention_weights: dict into which to store attention weights.
cache: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop. Only used
for inference on TPU.
nonpadding: optional Tensor with shape [batch_size, decoder_length]
losses: optional list onto which to append extra training losses
**kwargs: additional arguments to pass to decoder_function
Returns:
Final decoder representation. [batch_size, decoder_length, hidden_dim]
|
[
"Decode",
"Transformer",
"outputs",
"from",
"encoder",
"representation",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L114-L178
|
train
|
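The tail of transformer_decode above keeps a 3-D output on the XLA/TPU training path and otherwise re-expands to the 4-D layout the rest of t2t expects. A small illustrative sketch of that shape handling; finalize_decoder_output is a hypothetical name.

import numpy as np

# Hedged sketch of the output-shape handling at the end of
# transformer_decode: t2t generally expects [batch, length, 1, hidden],
# but the TPU training path keeps the 3-D [batch, length, hidden] form.
def finalize_decoder_output(decoder_output, on_tpu_train):
  if on_tpu_train:
    return decoder_output                    # [batch, length, hidden]
  return np.expand_dims(decoder_output, 2)   # [batch, length, 1, hidden]

out = finalize_decoder_output(np.zeros((2, 5, 512)), on_tpu_train=False)
assert out.shape == (2, 5, 1, 512)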
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
_init_transformer_cache
|
def _init_transformer_cache(cache, hparams, batch_size, attention_init_length,
encoder_output, encoder_decoder_attention_bias,
scope_prefix):
"""Create the initial cache for Transformer fast decoding."""
key_channels = hparams.attention_key_channels or hparams.hidden_size
value_channels = hparams.attention_value_channels or hparams.hidden_size
num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers
vars_3d_num_heads = (
hparams.num_heads if hparams.get("attention_variables_3d") else 0)
if cache is None:
cache = {}
cache.update({
"layer_%d" % layer: { # pylint: disable=g-complex-comprehension
"k":
common_attention.split_heads(
tf.zeros([batch_size,
attention_init_length,
key_channels]), hparams.num_heads),
"v":
common_attention.split_heads(
tf.zeros([batch_size,
attention_init_length,
value_channels]), hparams.num_heads),
} for layer in range(num_layers)
})
  # If `ffn_layer` is in `["dense_relu_dense", "conv_hidden_relu"]`, then the
  # cache key "f" won't be used, which means that the shape of `cache["f"]`
  # won't be changed to
  # `[beam_size*batch_size, decode_length, hparams.hidden_size]` and may cause
  # an error when applying the `nest.map_structure` reshape function on it.
if hparams.ffn_layer not in ["dense_relu_dense", "conv_hidden_relu"]:
for layer in range(num_layers):
cache["layer_%d" % layer]["f"] = tf.zeros(
[batch_size, 0, hparams.hidden_size])
if encoder_output is not None:
for layer in range(num_layers):
layer_name = "layer_%d" % layer
with tf.variable_scope(
"%sdecoder/%s/encdec_attention/multihead_attention" %
(scope_prefix, layer_name)):
k_encdec = common_attention.compute_attention_component(
encoder_output,
key_channels,
name="k",
vars_3d_num_heads=vars_3d_num_heads)
k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads)
v_encdec = common_attention.compute_attention_component(
encoder_output,
value_channels,
name="v",
vars_3d_num_heads=vars_3d_num_heads)
v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads)
cache[layer_name]["k_encdec"] = k_encdec
cache[layer_name]["v_encdec"] = v_encdec
cache["encoder_output"] = encoder_output
cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
return cache
|
python
|
def _init_transformer_cache(cache, hparams, batch_size, attention_init_length,
encoder_output, encoder_decoder_attention_bias,
scope_prefix):
"""Create the initial cache for Transformer fast decoding."""
key_channels = hparams.attention_key_channels or hparams.hidden_size
value_channels = hparams.attention_value_channels or hparams.hidden_size
num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers
vars_3d_num_heads = (
hparams.num_heads if hparams.get("attention_variables_3d") else 0)
if cache is None:
cache = {}
cache.update({
"layer_%d" % layer: { # pylint: disable=g-complex-comprehension
"k":
common_attention.split_heads(
tf.zeros([batch_size,
attention_init_length,
key_channels]), hparams.num_heads),
"v":
common_attention.split_heads(
tf.zeros([batch_size,
attention_init_length,
value_channels]), hparams.num_heads),
} for layer in range(num_layers)
})
  # If `ffn_layer` is in `["dense_relu_dense", "conv_hidden_relu"]`, then the
  # cache key "f" won't be used, which means that the shape of `cache["f"]`
  # won't be changed to
  # `[beam_size*batch_size, decode_length, hparams.hidden_size]` and may cause
  # an error when applying the `nest.map_structure` reshape function on it.
if hparams.ffn_layer not in ["dense_relu_dense", "conv_hidden_relu"]:
for layer in range(num_layers):
cache["layer_%d" % layer]["f"] = tf.zeros(
[batch_size, 0, hparams.hidden_size])
if encoder_output is not None:
for layer in range(num_layers):
layer_name = "layer_%d" % layer
with tf.variable_scope(
"%sdecoder/%s/encdec_attention/multihead_attention" %
(scope_prefix, layer_name)):
k_encdec = common_attention.compute_attention_component(
encoder_output,
key_channels,
name="k",
vars_3d_num_heads=vars_3d_num_heads)
k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads)
v_encdec = common_attention.compute_attention_component(
encoder_output,
value_channels,
name="v",
vars_3d_num_heads=vars_3d_num_heads)
v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads)
cache[layer_name]["k_encdec"] = k_encdec
cache[layer_name]["v_encdec"] = v_encdec
cache["encoder_output"] = encoder_output
cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
return cache
|
[
"def",
"_init_transformer_cache",
"(",
"cache",
",",
"hparams",
",",
"batch_size",
",",
"attention_init_length",
",",
"encoder_output",
",",
"encoder_decoder_attention_bias",
",",
"scope_prefix",
")",
":",
"key_channels",
"=",
"hparams",
".",
"attention_key_channels",
"or",
"hparams",
".",
"hidden_size",
"value_channels",
"=",
"hparams",
".",
"attention_value_channels",
"or",
"hparams",
".",
"hidden_size",
"num_layers",
"=",
"hparams",
".",
"num_decoder_layers",
"or",
"hparams",
".",
"num_hidden_layers",
"vars_3d_num_heads",
"=",
"(",
"hparams",
".",
"num_heads",
"if",
"hparams",
".",
"get",
"(",
"\"attention_variables_3d\"",
")",
"else",
"0",
")",
"if",
"cache",
"is",
"None",
":",
"cache",
"=",
"{",
"}",
"cache",
".",
"update",
"(",
"{",
"\"layer_%d\"",
"%",
"layer",
":",
"{",
"# pylint: disable=g-complex-comprehension",
"\"k\"",
":",
"common_attention",
".",
"split_heads",
"(",
"tf",
".",
"zeros",
"(",
"[",
"batch_size",
",",
"attention_init_length",
",",
"key_channels",
"]",
")",
",",
"hparams",
".",
"num_heads",
")",
",",
"\"v\"",
":",
"common_attention",
".",
"split_heads",
"(",
"tf",
".",
"zeros",
"(",
"[",
"batch_size",
",",
"attention_init_length",
",",
"value_channels",
"]",
")",
",",
"hparams",
".",
"num_heads",
")",
",",
"}",
"for",
"layer",
"in",
"range",
"(",
"num_layers",
")",
"}",
")",
"# If `ffn_layer` is in `[\"dense_relu_dense\" or \"conv_hidden_relu\"]`, then the",
"# cache key \"f\" won't be used, which means that the` shape of cache[\"f\"]`",
"# won't be changed to",
"# `[beamsize*batch_size, decode_length, hparams.hidden_size]` and may cause",
"# error when applying `nest.map reshape function` on it.",
"if",
"hparams",
".",
"ffn_layer",
"not",
"in",
"[",
"\"dense_relu_dense\"",
",",
"\"conv_hidden_relu\"",
"]",
":",
"for",
"layer",
"in",
"range",
"(",
"num_layers",
")",
":",
"cache",
"[",
"\"layer_%d\"",
"%",
"layer",
"]",
"[",
"\"f\"",
"]",
"=",
"tf",
".",
"zeros",
"(",
"[",
"batch_size",
",",
"0",
",",
"hparams",
".",
"hidden_size",
"]",
")",
"if",
"encoder_output",
"is",
"not",
"None",
":",
"for",
"layer",
"in",
"range",
"(",
"num_layers",
")",
":",
"layer_name",
"=",
"\"layer_%d\"",
"%",
"layer",
"with",
"tf",
".",
"variable_scope",
"(",
"\"%sdecoder/%s/encdec_attention/multihead_attention\"",
"%",
"(",
"scope_prefix",
",",
"layer_name",
")",
")",
":",
"k_encdec",
"=",
"common_attention",
".",
"compute_attention_component",
"(",
"encoder_output",
",",
"key_channels",
",",
"name",
"=",
"\"k\"",
",",
"vars_3d_num_heads",
"=",
"vars_3d_num_heads",
")",
"k_encdec",
"=",
"common_attention",
".",
"split_heads",
"(",
"k_encdec",
",",
"hparams",
".",
"num_heads",
")",
"v_encdec",
"=",
"common_attention",
".",
"compute_attention_component",
"(",
"encoder_output",
",",
"value_channels",
",",
"name",
"=",
"\"v\"",
",",
"vars_3d_num_heads",
"=",
"vars_3d_num_heads",
")",
"v_encdec",
"=",
"common_attention",
".",
"split_heads",
"(",
"v_encdec",
",",
"hparams",
".",
"num_heads",
")",
"cache",
"[",
"layer_name",
"]",
"[",
"\"k_encdec\"",
"]",
"=",
"k_encdec",
"cache",
"[",
"layer_name",
"]",
"[",
"\"v_encdec\"",
"]",
"=",
"v_encdec",
"cache",
"[",
"\"encoder_output\"",
"]",
"=",
"encoder_output",
"cache",
"[",
"\"encoder_decoder_attention_bias\"",
"]",
"=",
"encoder_decoder_attention_bias",
"return",
"cache"
] |
Create the initial cache for Transformer fast decoding.
|
[
"Create",
"the",
"initial",
"cache",
"for",
"Transformer",
"fast",
"decoding",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L832-L892
|
train
|
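_init_transformer_cache above pre-populates per-layer key/value tensors that are already split into attention heads. A NumPy sketch of the resulting layout, assuming common_attention.split_heads reshapes [batch, length, channels] into [batch, num_heads, length, channels // num_heads]:

import numpy as np

# Hedged sketch of the cache layout built by _init_transformer_cache.
def split_heads(x, num_heads):
  b, l, c = x.shape
  return x.reshape(b, l, num_heads, c // num_heads).transpose(0, 2, 1, 3)

batch, init_len, channels, heads = 2, 0, 512, 8
cache = {
    "layer_%d" % layer: {
        "k": split_heads(np.zeros((batch, init_len, channels)), heads),
        "v": split_heads(np.zeros((batch, init_len, channels)), heads),
    }
    for layer in range(6)
}
# Zero-length time axis grows as decoding proceeds (fixed-size on TPU).
assert cache["layer_0"]["k"].shape == (2, 8, 0, 64)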
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
fast_decode_tpu
|
def fast_decode_tpu(encoder_output,
encoder_decoder_attention_bias,
symbols_to_logits_fn,
hparams,
decode_length,
vocab_size,
init_cache_fn=_init_transformer_cache,
beam_size=1,
top_beams=1,
alpha=1.0,
sos_id=0,
eos_id=beam_search.EOS_ID,
batch_size=None,
force_decode_length=False,
scope_prefix="body/",
use_top_k_with_unique=True):
"""Given encoder output and a symbols to logits function, does fast decoding.
  Implements both greedy and beam search decoding for TPU; beam search is used
  iff beam_size > 1, otherwise beam-search-related arguments are ignored.
Args:
encoder_output: A tensor, output from encoder.
encoder_decoder_attention_bias: A tensor, bias for use in encoder-decoder
attention.
symbols_to_logits_fn: Incremental decoding, function mapping triple `(ids,
step, cache)` to symbol logits.
hparams: Run hyperparameters.
decode_length: An integer, how many additional timesteps to decode.
vocab_size: Output vocabulary size.
init_cache_fn: Function that returns the initial cache dict.
beam_size: An integer, number of beams.
top_beams: An integer, how many of the beams to return.
    alpha: A float that controls the length penalty. The larger the alpha,
      the stronger the preference for longer translations.
sos_id: Start-of-sequence symbol.
eos_id: End-of-sequence symbol.
batch_size: An integer, must be passed if there is no input.
force_decode_length: A bool, whether to force the full decode length, or if
False, stop when all beams hit eos_id.
scope_prefix: str, prefix for decoder layer variable scopes.
use_top_k_with_unique: bool, whether to use a fast (but decreased precision)
top_k during beam search.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if top_beams == 1 or
[batch_size, top_beams, <= decode_length] otherwise
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}.
Raises:
NotImplementedError: If beam size > 1 with partial targets.
"""
if encoder_output is not None:
batch_size = common_layers.shape_list(encoder_output)[0]
cache = init_cache_fn(None, hparams, batch_size, decode_length,
encoder_output, encoder_decoder_attention_bias,
scope_prefix)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_SEQ_BEAM_SEARCH,
value={
"vocab_size": vocab_size,
"batch_size": batch_size,
"beam_size": beam_size,
"alpha": alpha,
"max_decode_length": decode_length
},
hparams=hparams)
if beam_size > 1: # Beam Search
initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32)
decoded_ids, scores, _ = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
states=cache,
eos_id=eos_id,
stop_early=(top_beams == 1),
use_tpu=True,
use_top_k_with_unique=use_top_k_with_unique)
if top_beams == 1:
decoded_ids = decoded_ids[:, 0, 1:]
scores = scores[:, 0]
else:
decoded_ids = decoded_ids[:, :top_beams, 1:]
scores = scores[:, :top_beams]
else: # Greedy
def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob):
"""One step of greedy decoding."""
logits, cache = symbols_to_logits_fn(next_id, i, cache)
log_probs = common_layers.log_prob_from_logits(logits)
temperature = getattr(hparams, "sampling_temp", 0.0)
keep_top = getattr(hparams, "sampling_keep_top_k", -1)
if hparams.sampling_method == "argmax":
temperature = 0.0
next_id = common_layers.sample_with_temperature(
logits, temperature, keep_top)
hit_eos |= tf.equal(next_id, eos_id)
log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id],
axis=1)
log_prob += tf.gather_nd(log_probs, log_prob_indices)
next_id = tf.expand_dims(next_id, axis=1)
decoded_ids = tf.transpose(decoded_ids)
decoded_ids = inplace_ops.alias_inplace_update(
decoded_ids, i, tf.squeeze(next_id, axis=1))
decoded_ids = tf.transpose(decoded_ids)
return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob
def is_not_finished(i, hit_eos, *_):
finished = i >= decode_length
if not force_decode_length:
finished |= tf.reduce_all(hit_eos)
return tf.logical_not(finished)
decoded_ids = tf.zeros([batch_size, decode_length], dtype=tf.int64)
hit_eos = tf.fill([batch_size], False)
next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64)
initial_log_prob = tf.zeros([batch_size], dtype=tf.float32)
def compute_cache_shape_invariants(tensor):
return tf.TensorShape(tensor.shape.as_list())
_, _, _, decoded_ids, _, log_prob = tf.while_loop(
is_not_finished,
inner_loop, [
tf.constant(0), hit_eos, next_id, decoded_ids, cache,
initial_log_prob
],
shape_invariants=[
tf.TensorShape([]),
tf.TensorShape([batch_size]),
tf.TensorShape([batch_size, 1]),
tf.TensorShape([batch_size, decode_length]),
nest.map_structure(compute_cache_shape_invariants, cache),
tf.TensorShape([batch_size]),
])
scores = log_prob
return {"outputs": decoded_ids, "scores": scores}
|
python
|
def fast_decode_tpu(encoder_output,
encoder_decoder_attention_bias,
symbols_to_logits_fn,
hparams,
decode_length,
vocab_size,
init_cache_fn=_init_transformer_cache,
beam_size=1,
top_beams=1,
alpha=1.0,
sos_id=0,
eos_id=beam_search.EOS_ID,
batch_size=None,
force_decode_length=False,
scope_prefix="body/",
use_top_k_with_unique=True):
"""Given encoder output and a symbols to logits function, does fast decoding.
  Implements both greedy and beam search decoding for TPU; beam search is used
  iff beam_size > 1, otherwise beam-search-related arguments are ignored.
Args:
encoder_output: A tensor, output from encoder.
encoder_decoder_attention_bias: A tensor, bias for use in encoder-decoder
attention.
symbols_to_logits_fn: Incremental decoding, function mapping triple `(ids,
step, cache)` to symbol logits.
hparams: Run hyperparameters.
decode_length: An integer, how many additional timesteps to decode.
vocab_size: Output vocabulary size.
init_cache_fn: Function that returns the initial cache dict.
beam_size: An integer, number of beams.
top_beams: An integer, how many of the beams to return.
    alpha: A float that controls the length penalty. The larger the alpha,
      the stronger the preference for longer translations.
sos_id: Start-of-sequence symbol.
eos_id: End-of-sequence symbol.
batch_size: An integer, must be passed if there is no input.
force_decode_length: A bool, whether to force the full decode length, or if
False, stop when all beams hit eos_id.
scope_prefix: str, prefix for decoder layer variable scopes.
use_top_k_with_unique: bool, whether to use a fast (but decreased precision)
top_k during beam search.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if top_beams == 1 or
[batch_size, top_beams, <= decode_length] otherwise
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}.
Raises:
NotImplementedError: If beam size > 1 with partial targets.
"""
if encoder_output is not None:
batch_size = common_layers.shape_list(encoder_output)[0]
cache = init_cache_fn(None, hparams, batch_size, decode_length,
encoder_output, encoder_decoder_attention_bias,
scope_prefix)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_SEQ_BEAM_SEARCH,
value={
"vocab_size": vocab_size,
"batch_size": batch_size,
"beam_size": beam_size,
"alpha": alpha,
"max_decode_length": decode_length
},
hparams=hparams)
if beam_size > 1: # Beam Search
initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32)
decoded_ids, scores, _ = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
states=cache,
eos_id=eos_id,
stop_early=(top_beams == 1),
use_tpu=True,
use_top_k_with_unique=use_top_k_with_unique)
if top_beams == 1:
decoded_ids = decoded_ids[:, 0, 1:]
scores = scores[:, 0]
else:
decoded_ids = decoded_ids[:, :top_beams, 1:]
scores = scores[:, :top_beams]
else: # Greedy
def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob):
"""One step of greedy decoding."""
logits, cache = symbols_to_logits_fn(next_id, i, cache)
log_probs = common_layers.log_prob_from_logits(logits)
temperature = getattr(hparams, "sampling_temp", 0.0)
keep_top = getattr(hparams, "sampling_keep_top_k", -1)
if hparams.sampling_method == "argmax":
temperature = 0.0
next_id = common_layers.sample_with_temperature(
logits, temperature, keep_top)
hit_eos |= tf.equal(next_id, eos_id)
log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id],
axis=1)
log_prob += tf.gather_nd(log_probs, log_prob_indices)
next_id = tf.expand_dims(next_id, axis=1)
decoded_ids = tf.transpose(decoded_ids)
decoded_ids = inplace_ops.alias_inplace_update(
decoded_ids, i, tf.squeeze(next_id, axis=1))
decoded_ids = tf.transpose(decoded_ids)
return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob
def is_not_finished(i, hit_eos, *_):
finished = i >= decode_length
if not force_decode_length:
finished |= tf.reduce_all(hit_eos)
return tf.logical_not(finished)
decoded_ids = tf.zeros([batch_size, decode_length], dtype=tf.int64)
hit_eos = tf.fill([batch_size], False)
next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64)
initial_log_prob = tf.zeros([batch_size], dtype=tf.float32)
def compute_cache_shape_invariants(tensor):
return tf.TensorShape(tensor.shape.as_list())
_, _, _, decoded_ids, _, log_prob = tf.while_loop(
is_not_finished,
inner_loop, [
tf.constant(0), hit_eos, next_id, decoded_ids, cache,
initial_log_prob
],
shape_invariants=[
tf.TensorShape([]),
tf.TensorShape([batch_size]),
tf.TensorShape([batch_size, 1]),
tf.TensorShape([batch_size, decode_length]),
nest.map_structure(compute_cache_shape_invariants, cache),
tf.TensorShape([batch_size]),
])
scores = log_prob
return {"outputs": decoded_ids, "scores": scores}
|
[
"def",
"fast_decode_tpu",
"(",
"encoder_output",
",",
"encoder_decoder_attention_bias",
",",
"symbols_to_logits_fn",
",",
"hparams",
",",
"decode_length",
",",
"vocab_size",
",",
"init_cache_fn",
"=",
"_init_transformer_cache",
",",
"beam_size",
"=",
"1",
",",
"top_beams",
"=",
"1",
",",
"alpha",
"=",
"1.0",
",",
"sos_id",
"=",
"0",
",",
"eos_id",
"=",
"beam_search",
".",
"EOS_ID",
",",
"batch_size",
"=",
"None",
",",
"force_decode_length",
"=",
"False",
",",
"scope_prefix",
"=",
"\"body/\"",
",",
"use_top_k_with_unique",
"=",
"True",
")",
":",
"if",
"encoder_output",
"is",
"not",
"None",
":",
"batch_size",
"=",
"common_layers",
".",
"shape_list",
"(",
"encoder_output",
")",
"[",
"0",
"]",
"cache",
"=",
"init_cache_fn",
"(",
"None",
",",
"hparams",
",",
"batch_size",
",",
"decode_length",
",",
"encoder_output",
",",
"encoder_decoder_attention_bias",
",",
"scope_prefix",
")",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"MODEL_HP_SEQ_BEAM_SEARCH",
",",
"value",
"=",
"{",
"\"vocab_size\"",
":",
"vocab_size",
",",
"\"batch_size\"",
":",
"batch_size",
",",
"\"beam_size\"",
":",
"beam_size",
",",
"\"alpha\"",
":",
"alpha",
",",
"\"max_decode_length\"",
":",
"decode_length",
"}",
",",
"hparams",
"=",
"hparams",
")",
"if",
"beam_size",
">",
"1",
":",
"# Beam Search",
"initial_ids",
"=",
"sos_id",
"*",
"tf",
".",
"ones",
"(",
"[",
"batch_size",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"decoded_ids",
",",
"scores",
",",
"_",
"=",
"beam_search",
".",
"beam_search",
"(",
"symbols_to_logits_fn",
",",
"initial_ids",
",",
"beam_size",
",",
"decode_length",
",",
"vocab_size",
",",
"alpha",
",",
"states",
"=",
"cache",
",",
"eos_id",
"=",
"eos_id",
",",
"stop_early",
"=",
"(",
"top_beams",
"==",
"1",
")",
",",
"use_tpu",
"=",
"True",
",",
"use_top_k_with_unique",
"=",
"use_top_k_with_unique",
")",
"if",
"top_beams",
"==",
"1",
":",
"decoded_ids",
"=",
"decoded_ids",
"[",
":",
",",
"0",
",",
"1",
":",
"]",
"scores",
"=",
"scores",
"[",
":",
",",
"0",
"]",
"else",
":",
"decoded_ids",
"=",
"decoded_ids",
"[",
":",
",",
":",
"top_beams",
",",
"1",
":",
"]",
"scores",
"=",
"scores",
"[",
":",
",",
":",
"top_beams",
"]",
"else",
":",
"# Greedy",
"def",
"inner_loop",
"(",
"i",
",",
"hit_eos",
",",
"next_id",
",",
"decoded_ids",
",",
"cache",
",",
"log_prob",
")",
":",
"\"\"\"One step of greedy decoding.\"\"\"",
"logits",
",",
"cache",
"=",
"symbols_to_logits_fn",
"(",
"next_id",
",",
"i",
",",
"cache",
")",
"log_probs",
"=",
"common_layers",
".",
"log_prob_from_logits",
"(",
"logits",
")",
"temperature",
"=",
"getattr",
"(",
"hparams",
",",
"\"sampling_temp\"",
",",
"0.0",
")",
"keep_top",
"=",
"getattr",
"(",
"hparams",
",",
"\"sampling_keep_top_k\"",
",",
"-",
"1",
")",
"if",
"hparams",
".",
"sampling_method",
"==",
"\"argmax\"",
":",
"temperature",
"=",
"0.0",
"next_id",
"=",
"common_layers",
".",
"sample_with_temperature",
"(",
"logits",
",",
"temperature",
",",
"keep_top",
")",
"hit_eos",
"|=",
"tf",
".",
"equal",
"(",
"next_id",
",",
"eos_id",
")",
"log_prob_indices",
"=",
"tf",
".",
"stack",
"(",
"[",
"tf",
".",
"range",
"(",
"tf",
".",
"to_int64",
"(",
"batch_size",
")",
")",
",",
"next_id",
"]",
",",
"axis",
"=",
"1",
")",
"log_prob",
"+=",
"tf",
".",
"gather_nd",
"(",
"log_probs",
",",
"log_prob_indices",
")",
"next_id",
"=",
"tf",
".",
"expand_dims",
"(",
"next_id",
",",
"axis",
"=",
"1",
")",
"decoded_ids",
"=",
"tf",
".",
"transpose",
"(",
"decoded_ids",
")",
"decoded_ids",
"=",
"inplace_ops",
".",
"alias_inplace_update",
"(",
"decoded_ids",
",",
"i",
",",
"tf",
".",
"squeeze",
"(",
"next_id",
",",
"axis",
"=",
"1",
")",
")",
"decoded_ids",
"=",
"tf",
".",
"transpose",
"(",
"decoded_ids",
")",
"return",
"i",
"+",
"1",
",",
"hit_eos",
",",
"next_id",
",",
"decoded_ids",
",",
"cache",
",",
"log_prob",
"def",
"is_not_finished",
"(",
"i",
",",
"hit_eos",
",",
"*",
"_",
")",
":",
"finished",
"=",
"i",
">=",
"decode_length",
"if",
"not",
"force_decode_length",
":",
"finished",
"|=",
"tf",
".",
"reduce_all",
"(",
"hit_eos",
")",
"return",
"tf",
".",
"logical_not",
"(",
"finished",
")",
"decoded_ids",
"=",
"tf",
".",
"zeros",
"(",
"[",
"batch_size",
",",
"decode_length",
"]",
",",
"dtype",
"=",
"tf",
".",
"int64",
")",
"hit_eos",
"=",
"tf",
".",
"fill",
"(",
"[",
"batch_size",
"]",
",",
"False",
")",
"next_id",
"=",
"sos_id",
"*",
"tf",
".",
"ones",
"(",
"[",
"batch_size",
",",
"1",
"]",
",",
"dtype",
"=",
"tf",
".",
"int64",
")",
"initial_log_prob",
"=",
"tf",
".",
"zeros",
"(",
"[",
"batch_size",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"def",
"compute_cache_shape_invariants",
"(",
"tensor",
")",
":",
"return",
"tf",
".",
"TensorShape",
"(",
"tensor",
".",
"shape",
".",
"as_list",
"(",
")",
")",
"_",
",",
"_",
",",
"_",
",",
"decoded_ids",
",",
"_",
",",
"log_prob",
"=",
"tf",
".",
"while_loop",
"(",
"is_not_finished",
",",
"inner_loop",
",",
"[",
"tf",
".",
"constant",
"(",
"0",
")",
",",
"hit_eos",
",",
"next_id",
",",
"decoded_ids",
",",
"cache",
",",
"initial_log_prob",
"]",
",",
"shape_invariants",
"=",
"[",
"tf",
".",
"TensorShape",
"(",
"[",
"]",
")",
",",
"tf",
".",
"TensorShape",
"(",
"[",
"batch_size",
"]",
")",
",",
"tf",
".",
"TensorShape",
"(",
"[",
"batch_size",
",",
"1",
"]",
")",
",",
"tf",
".",
"TensorShape",
"(",
"[",
"batch_size",
",",
"decode_length",
"]",
")",
",",
"nest",
".",
"map_structure",
"(",
"compute_cache_shape_invariants",
",",
"cache",
")",
",",
"tf",
".",
"TensorShape",
"(",
"[",
"batch_size",
"]",
")",
",",
"]",
")",
"scores",
"=",
"log_prob",
"return",
"{",
"\"outputs\"",
":",
"decoded_ids",
",",
"\"scores\"",
":",
"scores",
"}"
] |
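The greedy branch above differs from the CPU/GPU fast_decode in one key way: TPU compilation requires static shapes, so decoded_ids is preallocated to [batch_size, decode_length] and row i is overwritten in place each step (the transpose / alias_inplace_update / transpose sequence) instead of growing by concatenation. A minimal numpy sketch of the same fixed-buffer idea, with a hypothetical toy_sample standing in for symbols_to_logits_fn plus sampling:

import numpy as np

batch_size, decode_length = 2, 5
# Preallocate the full output buffer; its shape never changes.
decoded_ids = np.zeros([batch_size, decode_length], dtype=np.int64)

def toy_sample(step):
  # Hypothetical sampler standing in for symbols_to_logits_fn + sampling.
  return np.full([batch_size], step + 1, dtype=np.int64)

for i in range(decode_length):
  # Equivalent of transpose -> alias_inplace_update(row i) -> transpose:
  decoded_ids[:, i] = toy_sample(i)

print(decoded_ids.shape)  # (2, 5) on every iteration, never (2, i)

This is also why the while_loop's shape_invariants above pin exact sizes like [batch_size, decode_length] rather than the [None, None] used in the non-TPU path.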
Given encoder output and a symbols to logits function, does fast decoding.
Implements both greedy and beam search decoding on TPU; beam search is used
iff beam_size > 1, otherwise beam-search-related arguments are ignored.
Args:
encoder_output: A tensor, output from encoder.
encoder_decoder_attention_bias: A tensor, bias for use in encoder-decoder
attention.
symbols_to_logits_fn: Incremental decoding, function mapping triple `(ids,
step, cache)` to symbol logits.
hparams: Run hyperparameters.
decode_length: An integer, how many additional timesteps to decode.
vocab_size: Output vocabulary size.
init_cache_fn: Function that returns the initial cache dict.
beam_size: An integer, number of beams.
top_beams: An integer, how many of the beams to return.
alpha: A float that controls the length penalty. The larger the alpha, the
  stronger the preference for longer translations.
sos_id: Start-of-sequence symbol.
eos_id: End-of-sequence symbol.
batch_size: An integer, must be passed if there is no input.
force_decode_length: A bool, whether to force the full decode length, or if
False, stop when all beams hit eos_id.
scope_prefix: str, prefix for decoder layer variable scopes.
use_top_k_with_unique: bool, whether to use a faster (but lower-precision)
  top_k during beam search.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if top_beams == 1 or
[batch_size, top_beams, <= decode_length] otherwise
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}.
Raises:
NotImplementedError: If beam size > 1 with partial targets.
|
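For reference, alpha enters through beam search's length normalization: hypothesis scores are divided by a penalty that grows with length. A small sketch; the constants follow the GNMT-style formula used by t2t's beam_search, but treat them as an assumption here rather than a spec:

def length_penalty(length, alpha):
  # Scores (sums of negative log probs) are divided by this normalizer.
  return ((5.0 + length) / 6.0) ** alpha

# Dividing a negative score by a larger penalty makes it less negative, so
# larger alpha means a stronger preference for longer translations.
for alpha in (0.0, 0.6, 1.0):
  print(alpha, [round(length_penalty(n, alpha), 3) for n in (5, 10, 20)])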
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L895-L1045
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
fast_decode
|
def fast_decode(encoder_output,
encoder_decoder_attention_bias,
symbols_to_logits_fn,
hparams,
decode_length,
vocab_size,
init_cache_fn=_init_transformer_cache,
beam_size=1,
top_beams=1,
alpha=1.0,
sos_id=0,
eos_id=beam_search.EOS_ID,
batch_size=None,
force_decode_length=False,
scope_prefix="body/",
cache=None):
"""Given encoder output and a symbols to logits function, does fast decoding.
Implements both greedy and beam search decoding; beam search is used iff
beam_size > 1, otherwise beam-search-related arguments are ignored.
Args:
encoder_output: Output from encoder.
encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
attention
symbols_to_logits_fn: Incremental decoding; function mapping triple `(ids,
step, cache)` to symbol logits.
hparams: run hyperparameters
decode_length: an integer. How many additional timesteps to decode.
vocab_size: Output vocabulary size.
init_cache_fn: Function that returns the initial cache dict.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. The larger the alpha, the
  stronger the preference for longer translations.
sos_id: Start-of-sequence symbol.
eos_id: End-of-sequence symbol in beam search.
batch_size: an integer scalar - must be passed if there is no input.
force_decode_length: bool, whether to force the full decode length, or if
False, stop when all beams hit eos_id.
scope_prefix: str, prefix for decoder layer variable scopes.
cache: cache dictionary for additional predictions.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if top_beams == 1 or
[batch_size, top_beams, <= decode_length] otherwise
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
Raises:
NotImplementedError: If beam size > 1 with partial targets.
"""
if encoder_output is not None:
batch_size = common_layers.shape_list(encoder_output)[0]
cache = init_cache_fn(
cache=cache,
hparams=hparams,
batch_size=batch_size,
attention_init_length=0,
encoder_output=encoder_output,
encoder_decoder_attention_bias=encoder_decoder_attention_bias,
scope_prefix=scope_prefix)
if beam_size > 1: # Beam Search
initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32)
decoded_ids, scores, cache = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
states=cache,
eos_id=eos_id,
stop_early=(top_beams == 1))
if top_beams == 1:
decoded_ids = decoded_ids[:, 0, 1:]
scores = scores[:, 0]
else:
decoded_ids = decoded_ids[:, :top_beams, 1:]
scores = scores[:, :top_beams]
else: # Greedy
def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob):
"""One step of greedy decoding."""
logits, cache = symbols_to_logits_fn(next_id, i, cache)
log_probs = common_layers.log_prob_from_logits(logits)
temperature = getattr(hparams, "sampling_temp", 0.0)
keep_top = getattr(hparams, "sampling_keep_top_k", -1)
if hparams.sampling_method == "argmax":
temperature = 0.0
next_id = common_layers.sample_with_temperature(
logits, temperature, keep_top)
hit_eos |= tf.equal(next_id, eos_id)
log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id],
axis=1)
log_prob += tf.gather_nd(log_probs, log_prob_indices)
next_id = tf.expand_dims(next_id, axis=1)
decoded_ids = tf.concat([decoded_ids, next_id], axis=1)
return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob
def is_not_finished(i, hit_eos, *_):
finished = i >= decode_length
if not force_decode_length:
finished |= tf.reduce_all(hit_eos)
return tf.logical_not(finished)
decoded_ids = tf.zeros([batch_size, 0], dtype=tf.int64)
hit_eos = tf.fill([batch_size], False)
next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64)
initial_log_prob = tf.zeros([batch_size], dtype=tf.float32)
_, _, _, decoded_ids, cache, log_prob = tf.while_loop(
is_not_finished,
inner_loop, [
tf.constant(0), hit_eos, next_id, decoded_ids, cache,
initial_log_prob
],
shape_invariants=[
tf.TensorShape([]),
tf.TensorShape([None]),
tf.TensorShape([None, None]),
tf.TensorShape([None, None]),
nest.map_structure(beam_search.get_state_shape_invariants, cache),
tf.TensorShape([None]),
])
scores = log_prob
return {"outputs": decoded_ids, "scores": scores, "cache": cache}
|
python
|
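The greedy branch of fast_decode is easiest to follow without the TF plumbing. Below is a minimal pure-Python sketch (toy names and data; an assumption, not the t2t API) of what inner_loop and is_not_finished do together: pick one token per step, accumulate its log prob, and stop early once every sequence in the batch has emitted eos_id, unless force_decode_length is set:

import numpy as np

def greedy_decode(logits_fn, batch_size, decode_length, eos_id,
                  force_decode_length=False):
  decoded, log_prob = [], np.zeros([batch_size])
  hit_eos = np.zeros([batch_size], dtype=bool)
  next_id = np.zeros([batch_size], dtype=np.int64)  # sos_id = 0
  for i in range(decode_length):
    logits = logits_fn(next_id, i)                  # [batch, vocab]
    log_probs = logits - np.logaddexp.reduce(logits, axis=-1, keepdims=True)
    next_id = np.argmax(logits, axis=-1)            # "argmax" sampling
    hit_eos |= next_id == eos_id
    log_prob += log_probs[np.arange(batch_size), next_id]
    decoded.append(next_id)
    if not force_decode_length and hit_eos.all():   # is_not_finished
      break
  return np.stack(decoded, axis=1), log_prob

# Hypothetical model that emits token 2, then 3, then eos for every sequence.
vocab, eos = 5, 1
def toy_logits_fn(ids, step):
  logits = np.zeros([3, vocab])
  logits[:, [2, 3, eos][min(step, 2)]] = 10.0
  return logits

outputs, scores = greedy_decode(toy_logits_fn, 3, decode_length=6, eos_id=eos)
print(outputs)  # [[2 3 1] [2 3 1] [2 3 1]] -- stopped early at eos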
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1048-L1182
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_prepare_decoder
|
def transformer_prepare_decoder(targets, hparams, features=None):
"""Prepare one shard of the model for the decoder.
Args:
targets: a Tensor.
hparams: run hyperparameters
features: optionally pass the entire features dictionary as well. This is
needed now for "packed" datasets.
Returns:
decoder_input: a Tensor, bottom of decoder stack
decoder_self_attention_bias: a bias tensor for use in decoder self-attention
"""
if hparams.causal_decoder_self_attention:
# Causal attention.
if hparams.prepend_mode == "prepend_inputs_full_attention":
decoder_self_attention_bias = (
common_attention.attention_bias_prepend_inputs_full_attention(
common_attention.embedding_to_padding(targets)))
else:
decoder_self_attention_bias = (
common_attention.attention_bias_lower_triangle(
common_layers.shape_list(targets)[1]))
else:
# Full attention.
decoder_padding = common_attention.embedding_to_padding(targets)
decoder_self_attention_bias = (
common_attention.attention_bias_ignore_padding(decoder_padding))
if features and "targets_segmentation" in features:
# "Packed" dataset - keep the examples from seeing each other.
targets_segmentation = features["targets_segmentation"]
targets_position = features["targets_position"]
decoder_self_attention_bias += common_attention.attention_bias_same_segment(
targets_segmentation, targets_segmentation)
else:
targets_position = None
if hparams.proximity_bias:
decoder_self_attention_bias += common_attention.attention_bias_proximal(
common_layers.shape_list(targets)[1])
decoder_input = common_layers.shift_right_3d(targets)
if hparams.pos == "timing":
if targets_position is not None:
decoder_input = common_attention.add_timing_signal_1d_given_position(
decoder_input, targets_position)
else:
decoder_input = common_attention.add_timing_signal_1d(decoder_input)
elif hparams.pos == "emb":
decoder_input = common_attention.add_positional_embedding(
decoder_input, hparams.max_length, "targets_positional_embedding",
targets_position)
if hparams.activation_dtype == "bfloat16":
decoder_self_attention_bias = tf.cast(decoder_self_attention_bias,
tf.bfloat16)
return (decoder_input, decoder_self_attention_bias)
|
python
|
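The causal branch above is the standard autoregressive mask. A minimal numpy sketch (the shape layout is an assumption, mirroring what common_attention.attention_bias_lower_triangle produces) of the bias this function returns: zero wherever attention is allowed, a large negative number wherever a position would look at the future, so softmax(logits + bias) zeroes those weights:

import numpy as np

def lower_triangle_bias(length, neg=-1e9):
  band = np.tril(np.ones([length, length]))   # 1 on and below the diagonal
  bias = neg * (1.0 - band)                   # large negative strictly above
  return bias.reshape(1, 1, length, length)   # broadcasts over batch & heads

print(lower_triangle_bias(4)[0, 0])

Combined with common_layers.shift_right_3d, which prepends a zero step to the targets and drops the last one, this guarantees position i is predicted only from positions before i.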
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1281-L1336
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_decoder
|
def transformer_decoder(decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
cache=None,
decode_loop_step=None,
name="decoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True,
losses=None,
layer_collection=None,
recurrent_memory_by_layer=None,
chunk_number=None,
):
"""A stack of transformer layers.
Args:
decoder_input: a Tensor
encoder_output: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention (see
common_attention.attention_bias())
encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
cache: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop. Only used
for inference on TPU.
name: a string
nonpadding: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This is used to mask out
padding in convolutional layers. We generally only need this mask for
"packed" datasets, because for ordinary datasets, no padding is ever
followed by nonpadding.
save_weights_to: an optional dictionary to capture attention weights for
visualization; the weights tensor will be appended there under a string
key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
losses: optional list onto which to append extra training losses
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
KFAC optimizer. Default is None.
recurrent_memory_by_layer: Optional dict, mapping layer names to instances
of transformer_memory.RecurrentMemory. Default is None.
chunk_number: an optional integer Tensor with shape [batch] used to operate
the recurrent_memory.
Returns:
y: a Tensor, the output of the decoder stack
"""
x = decoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,
value=hparams.num_decoder_layers or hparams.num_hidden_layers,
hparams=hparams)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT,
value=hparams.attention_dropout,
hparams=hparams)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DENSE,
value={
"use_bias": "false",
"num_heads": hparams.num_heads,
"hidden_size": hparams.hidden_size
},
hparams=hparams)
with tf.variable_scope(name):
for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers):
layer_name = "layer_%d" % layer
layer_cache = cache[layer_name] if cache is not None else None
if recurrent_memory_by_layer is not None:
recurrent_memory = recurrent_memory_by_layer[layer_name]
else:
recurrent_memory = None
if layer < hparams.get("num_area_layers", 0):
max_area_width = hparams.get("max_area_width", 1)
max_area_height = hparams.get("max_area_height", 1)
memory_height = hparams.get("max_area_height", 1)
else:
max_area_width = 1
max_area_height = 1
memory_height = 1
with tf.variable_scope(layer_name):
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(
x, hparams, layer_collection=layer_collection),
None,
decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
cache=layer_cache,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
decode_loop_step=decode_loop_step,
vars_3d=hparams.get("attention_variables_3d"),
activation_dtype=hparams.get("activation_dtype", "float32"),
weight_dtype=hparams.get("weight_dtype", "float32"),
layer_collection=layer_collection,
recurrent_memory=recurrent_memory,
chunk_number=chunk_number,
hard_attention_k=hparams.get("hard_attention_k", 0),
max_area_width=max_area_width,
max_area_height=max_area_height,
memory_height=memory_height,
area_key_mode=hparams.get("area_key_mode", "none"),
area_value_mode=hparams.get("area_value_mode", "none"),
training=(hparams.get("mode", tf.estimator.ModeKeys.TRAIN)
== tf.estimator.ModeKeys.TRAIN))
x = common_layers.layer_postprocess(x, y, hparams)
if encoder_output is not None:
with tf.variable_scope("encdec_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(
x, hparams, layer_collection=layer_collection),
encoder_output,
encoder_decoder_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
cache=layer_cache,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
vars_3d=hparams.get("attention_variables_3d"),
activation_dtype=hparams.get("activation_dtype", "float32"),
weight_dtype=hparams.get("weight_dtype", "float32"),
layer_collection=layer_collection,
hard_attention_k=hparams.get("hard_attention_k", 0),
max_area_width=max_area_width,
max_area_height=max_area_height,
memory_height=memory_height,
area_key_mode=hparams.get("area_key_mode", "none"),
area_value_mode=hparams.get("area_value_mode", "none"),
training=(hparams.get("mode", tf.estimator.ModeKeys.TRAIN)
== tf.estimator.ModeKeys.TRAIN))
x = common_layers.layer_postprocess(x, y, hparams)
with tf.variable_scope("ffn"):
y = transformer_ffn_layer(
common_layers.layer_preprocess(
x, hparams, layer_collection=layer_collection),
hparams,
conv_padding="LEFT",
nonpadding_mask=nonpadding,
losses=losses,
cache=layer_cache,
decode_loop_step=decode_loop_step,
layer_collection=layer_collection)
x = common_layers.layer_postprocess(x, y, hparams)
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NORM,
value={"hidden_size": hparams.hidden_size})
return common_layers.layer_preprocess(
x, hparams, layer_collection=layer_collection)
|
python
|
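Every sublayer in the stack above is wrapped identically: y = sublayer(layer_preprocess(x)), then x = layer_postprocess(x, y). A minimal sketch of that wiring (the toy layer norm is an assumption standing in for the t2t kernels), using the common sequences "n" (normalize before) and "da" (dropout, then residual add after); note the final layer_preprocess on the way out, echoing the comment in the code, since pre-norm residual outputs are sums of unnormalized sublayer outputs:

import numpy as np

def layer_norm(x, eps=1e-6):
  mean = x.mean(-1, keepdims=True)
  var = x.var(-1, keepdims=True)
  return (x - mean) / np.sqrt(var + eps)

def wrap_sublayer(x, sublayer):
  y = sublayer(layer_norm(x))  # layer_preprocess: sequence "n"
  return x + y                 # layer_postprocess: "da" (dropout omitted)

x = np.random.randn(2, 5, 8)
for _ in range(6):                         # six decoder layers' worth
  x = wrap_sublayer(x, lambda t: 0.5 * t)  # stand-in for attention / ffn
x = layer_norm(x)                          # final preprocess on the output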
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1339-L1520
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_base_v1
|
def transformer_base_v1():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.norm_type = "layer"
hparams.hidden_size = 512
hparams.batch_size = 4096
hparams.max_length = 256
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_schedule = "legacy"
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 4000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.num_sampled_classes = 0
hparams.label_smoothing = 0.1
hparams.shared_embedding_and_softmax_weights = True
hparams.symbol_modality_num_shards = 16
# Add new ones like this.
hparams.add_hparam("filter_size", 2048)
# Layer-related flags. If zero, these fall back on hparams.num_hidden_layers.
hparams.add_hparam("num_encoder_layers", 0)
hparams.add_hparam("num_decoder_layers", 0)
# Attention-related flags.
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("ffn_layer", "dense_relu_dense")
hparams.add_hparam("parameter_attention_key_channels", 0)
hparams.add_hparam("parameter_attention_value_channels", 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("attention_dropout_broadcast_dims", "")
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("relu_dropout_broadcast_dims", "")
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("nbr_decoder_problems", 1)
hparams.add_hparam("proximity_bias", False)
hparams.add_hparam("causal_decoder_self_attention", True)
hparams.add_hparam("use_pad_remover", True)
hparams.add_hparam("self_attention_type", "dot_product")
hparams.add_hparam("conv_first_kernel", 3)
hparams.add_hparam("attention_variables_3d", False)
hparams.add_hparam("use_target_space_embedding", True)
# These parameters are only used when ffn_layer=="local_moe_tpu"
hparams.add_hparam("moe_overhead_train", 1.0)
hparams.add_hparam("moe_overhead_eval", 2.0)
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-3
# If specified, use this value instead of problem name in metrics.py.
# This is useful for programs that can automatically compare experiments side
# by side based on the same metric names.
hparams.add_hparam("overload_eval_metric_name", "")
# For making a transformer encoder unidirectional by using masked
# attention.
hparams.add_hparam("unidirectional_encoder", False)
# For hard attention.
hparams.add_hparam("hard_attention_k", 0)
return hparams
|
python
|
def transformer_base_v1():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.norm_type = "layer"
hparams.hidden_size = 512
hparams.batch_size = 4096
hparams.max_length = 256
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_schedule = "legacy"
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 4000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.num_sampled_classes = 0
hparams.label_smoothing = 0.1
hparams.shared_embedding_and_softmax_weights = True
hparams.symbol_modality_num_shards = 16
# Add new ones like this.
hparams.add_hparam("filter_size", 2048)
# Layer-related flags. If zero, these fall back on hparams.num_hidden_layers.
hparams.add_hparam("num_encoder_layers", 0)
hparams.add_hparam("num_decoder_layers", 0)
# Attention-related flags.
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("ffn_layer", "dense_relu_dense")
hparams.add_hparam("parameter_attention_key_channels", 0)
hparams.add_hparam("parameter_attention_value_channels", 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("attention_dropout_broadcast_dims", "")
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("relu_dropout_broadcast_dims", "")
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("nbr_decoder_problems", 1)
hparams.add_hparam("proximity_bias", False)
hparams.add_hparam("causal_decoder_self_attention", True)
hparams.add_hparam("use_pad_remover", True)
hparams.add_hparam("self_attention_type", "dot_product")
hparams.add_hparam("conv_first_kernel", 3)
hparams.add_hparam("attention_variables_3d", False)
hparams.add_hparam("use_target_space_embedding", True)
# These parameters are only used when ffn_layer=="local_moe_tpu"
hparams.add_hparam("moe_overhead_train", 1.0)
hparams.add_hparam("moe_overhead_eval", 2.0)
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-3
# If specified, use this value instead of problem name in metrics.py.
# This is useful for programs that can automatically compare experiments side
# by side based on the same metric names.
hparams.add_hparam("overload_eval_metric_name", "")
# For making a transformer encoder unidirectional by using masked
# attention.
hparams.add_hparam("unidirectional_encoder", False)
# For hard attention.
hparams.add_hparam("hard_attention_k", 0)
return hparams
|
Set of hyperparameters.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1568-L1633
|
train
|
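Each function in this section builds an HParams object and is registered under its own name in tensor2tensor's registry, so a set is normally selected by name (e.g. via t2t-trainer's --hparams_set flag). As a quick sanity check, a set can also be imported and inspected directly; a minimal sketch, assuming tensor2tensor is installed:

# Minimal sketch: import an hparams set directly and inspect a few of the
# fields configured above.
from tensor2tensor.models import transformer

hparams = transformer.transformer_base_v1()
print(hparams.hidden_size)   # 512
print(hparams.num_heads)     # 8
print(hparams.filter_size)   # 2048
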
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_base_v2
|
def transformer_base_v2():
"""Set of hyperparameters."""
hparams = transformer_base_v1()
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.1
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
hparams.learning_rate_warmup_steps = 8000
hparams.learning_rate = 0.2
return hparams
|
python
|
Set of hyperparameters.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1637-L1647
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_base_vq_ada_32ex_packed
|
def transformer_base_vq_ada_32ex_packed():
"""Set of hyperparameters for lm1b packed following tpu params."""
hparams = transformer_base_v2()
expert_utils.update_hparams_for_vq_gating(hparams)
hparams.moe_num_experts = 32
hparams.gating_type = "vq"
# this gives us a batch size of 16 because each seq is len 256
hparams.batch_size = 5072
hparams.ffn_layer = "local_moe"
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_warmup_steps = 10000
# one epoch for languagemodel_lm1b32k_packed = 27200 steps w/ bsize 128
hparams.learning_rate_decay_steps = 27200
hparams.num_heads = 4
hparams.num_blocks = 1
hparams.moe_k = 1
hparams.num_decoder_layers = 6
hparams.label_smoothing = 0.
hparams.layer_prepostprocess_dropout = 0.1
hparams.layer_postprocess_sequence = "dan"
hparams.layer_preprocess_sequence = "none"
hparams.weight_decay = 1e-06
hparams.attention_dropout = 0.1
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
hparams.activation_dtype = "float32"
hparams.learning_rate = 0.1
hparams.learning_rate_constant = 1.0
return hparams
|
python
|
Set of hyperparameters for lm1b packed following tpu params.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1651-L1679
|
train
|
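For packed text problems, batch_size counts subword tokens rather than sequences, which is what the "batch size of 16 because each seq is len 256" comment in transformer_base_vq_ada_32ex_packed refers to. A back-of-envelope check (note that the 5072 actually set divides out closer to 20; the quoted 16 matches the more common 4096-token setting):

# Tokens-per-batch divided by packed sequence length gives sequences per
# batch: 4096 // 256 == 16, while the 5072 set above yields roughly 19.
for tokens_per_batch in (4096, 5072):
    print(tokens_per_batch, "->", tokens_per_batch // 256, "sequences")
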
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_base_vq1_16_nb1_packed_nda_b01_scales
|
def transformer_base_vq1_16_nb1_packed_nda_b01_scales():
"""Set of hyperparameters."""
hparams = transformer_base_vq_ada_32ex_packed()
hparams.use_scales = int(True)
hparams.moe_num_experts = 16
hparams.moe_k = 1
hparams.beta = 0.1
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.ema = False
return hparams
|
python
|
Set of hyperparameters.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1692-L1702
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_base_vq1_16_nb1_packed_dan_b01_scales
|
def transformer_base_vq1_16_nb1_packed_dan_b01_scales():
"""Set of hyperparameters."""
hparams = transformer_base_vq_ada_32ex_packed()
hparams.use_scales = int(True)
hparams.moe_num_experts = 16
hparams.moe_k = 1
hparams.beta = 0.1
hparams.ema = False
return hparams
|
python
|
Set of hyperparameters.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1706-L1714
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog
|
def transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog():
"""Set of hyperparameters."""
hparams = transformer_base_vq1_16_nb1_packed_nda_b01_scales()
hparams.batch_size = 2048
hparams.max_length = 1024
hparams.filter_size = 3072
return hparams
|
python
|
Set of hyperparameters.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1718-L1724
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_ada_lmpackedbase_dialog
|
def transformer_ada_lmpackedbase_dialog():
"""Set of hyperparameters."""
hparams = transformer_base_vq_ada_32ex_packed()
hparams.max_length = 1024
hparams.ffn_layer = "dense_relu_dense"
hparams.batch_size = 4096
return hparams
|
python
|
Set of hyperparameters.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1736-L1742
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_base_v3
|
def transformer_base_v3():
"""Base parameters for Transformer model."""
# Update parameters here, then occasionally cut a versioned set, e.g.
# transformer_base_v2.
hparams = transformer_base_v2()
hparams.optimizer_adam_beta2 = 0.997
# New way of specifying learning rate schedule.
# Equivalent to previous version.
hparams.learning_rate_schedule = (
"constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size")
hparams.learning_rate_constant = 2.0
return hparams
|
python
|
Base parameters for Transformer model.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1754-L1765
|
train
|
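transformer_base_v3 spells its schedule out as a product of named factors. A back-of-envelope sketch of how such a factored schedule evaluates (my reading of the tensor2tensor convention; the authoritative factor definitions live in tensor2tensor/utils/learning_rate.py):

import math

# Each "*"-separated term in the schedule string is one multiplicative
# factor of the learning rate at a given global step.
def factored_lr(step, constant=2.0, warmup=8000, hidden_size=512):
    linear_warmup = min(step / warmup, 1.0)
    rsqrt_decay = 1.0 / math.sqrt(max(step, warmup))
    rsqrt_hidden_size = 1.0 / math.sqrt(hidden_size)
    return constant * linear_warmup * rsqrt_decay * rsqrt_hidden_size

for step in (100, 8000, 100000):
    print(step, factored_lr(step))

Below warmup the product reduces to step * warmup**-1.5 and beyond it to step**-0.5, i.e. the familiar Noam schedule scaled by hidden_size**-0.5, which is why the comment calls it equivalent to the previous version.
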
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_big
|
def transformer_big():
"""HParams for transformer big model on WMT."""
hparams = transformer_base()
hparams.hidden_size = 1024
hparams.filter_size = 4096
# Reduce batch size to 2048 from 4096 to be able to train the model on a GPU
# with 12 GB memory. For example, NVIDIA TITAN V GPU.
hparams.batch_size = 2048
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
return hparams
|
python
|
HParams for transformer big model on WMT.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1776-L1786
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_tall
|
def transformer_tall():
"""Hparams for transformer on LM for pretraining/finetuning/mixing."""
hparams = transformer_base()
hparams.batch_size = 2048
hparams.hidden_size = 768
hparams.filter_size = 3072
hparams.num_hidden_layers = 12
hparams.num_heads = 12
hparams.label_smoothing = 0.0
hparams.max_length = 1024
hparams.eval_drop_long_sequences = True
hparams.multiproblem_mixing_schedule = "pretrain"
hparams.multiproblem_vocab_size = 65536
hparams.clip_grad_norm = 1.0
return hparams
|
python
|
Hparams for transformer on LM for pretraining/finetuning/mixing.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1790-L1804
|
train
|
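Since every variant here is defined as a delta on top of a parent set, it can be handy to diff two sets to see exactly what changed. A minimal sketch, assuming tensor2tensor is installed (HParams.values() returns a plain dict):

from tensor2tensor.models import transformer

base = transformer.transformer_base().values()
tall = transformer.transformer_tall().values()
for key in sorted(tall):
    if base.get(key) != tall[key]:
        print(f"{key}: {base.get(key)} -> {tall[key]}")
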
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_tall_finetune_tied
|
def transformer_tall_finetune_tied():
"""Tied means fine-tune CNN/DM summarization as LM."""
hparams = transformer_tall()
hparams.multiproblem_max_input_length = 750
hparams.multiproblem_max_target_length = 100
hparams.multiproblem_schedule_max_examples = 0
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.learning_rate_constant = 5e-5
hparams.learning_rate_warmup_steps = 100
# Set train steps to learning_rate_decay_steps or less
hparams.learning_rate_decay_steps = 80000
hparams.multiproblem_target_eval_only = True
hparams.multiproblem_reweight_label_loss = True
hparams.multiproblem_label_weight = 1.0
hparams.optimizer = "true_adam"
return hparams
|
python
|
Tied means fine-tune CNN/DM summarization as LM.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1808-L1823
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_tall_finetune_uniencdec
|
def transformer_tall_finetune_uniencdec():
"""Fine-tune CNN/DM with a unidirectional encoder and decoder."""
hparams = transformer_tall()
hparams.max_input_seq_length = 750
hparams.max_target_seq_length = 100
hparams.optimizer = "true_adam"
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.learning_rate_decay_steps = 80000
hparams.learning_rate_constant = 5e-5
hparams.learning_rate_warmup_steps = 100
hparams.unidirectional_encoder = True
return hparams
|
python
|
Fine-tune CNN/DM with a unidirectional encoder and decoder.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1846-L1857
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_tall_train_uniencdec
|
def transformer_tall_train_uniencdec():
"""Train CNN/DM with a unidirectional encoder and decoder."""
hparams = transformer_tall()
hparams.max_input_seq_length = 750
hparams.max_target_seq_length = 100
hparams.optimizer = "true_adam"
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.learning_rate_decay_steps = 150000
hparams.learning_rate_constant = 2e-4
hparams.unidirectional_encoder = True
return hparams
|
python
|
Train CNN/DM with a unidirectional encoder and decoder.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1861-L1871
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_tall_finetune_textclass
|
def transformer_tall_finetune_textclass():
"""Hparams for transformer on LM for finetuning on text class problems."""
hparams = transformer_tall()
hparams.learning_rate_constant = 6.25e-5
hparams.learning_rate_schedule = ("linear_warmup*constant*linear_decay")
hparams.multiproblem_schedule_max_examples = 0
hparams.multiproblem_target_eval_only = True
hparams.learning_rate_warmup_steps = 50
# Set train steps to learning_rate_decay_steps or less
hparams.learning_rate_decay_steps = 25000
hparams.multiproblem_reweight_label_loss = True
hparams.multiproblem_label_weight = 0.95
return hparams
|
python
|
Hparams for transformer on LM for finetuning on text class problems.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1875-L1887
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_tall_pretrain_lm
|
def transformer_tall_pretrain_lm():
"""Hparams for transformer on LM pretraining (with 64k vocab)."""
hparams = transformer_tall()
hparams.learning_rate_constant = 2e-4
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.optimizer = "adam_w"
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.999
hparams.optimizer_adam_epsilon = 1e-8
# Set max examples to something big when pretraining only the LM, definitely
# something an order of magnitude bigger than number of train steps.
hparams.multiproblem_schedule_max_examples = 5e8
# Set train steps to learning_rate_decay_steps or less
hparams.learning_rate_decay_steps = 5000000
return hparams
|
python
|
Hparams for transformer on LM pretraining (with 64k vocab).
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1891-L1905
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_tall_pretrain_lm_tpu_adafactor
|
def transformer_tall_pretrain_lm_tpu_adafactor():
"""Hparams for transformer on LM pretraining (with 64k vocab) on TPU."""
hparams = transformer_tall_pretrain_lm()
update_hparams_for_tpu(hparams)
hparams.max_length = 1024
# For multi-problem on TPU we need it in absolute examples.
hparams.batch_size = 8
hparams.multiproblem_vocab_size = 2**16
return hparams
|
python
|
Hparams for transformer on LM pretraining (with 64k vocab) on TPU.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1909-L1917
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_tall_pretrain_lm_tpu_adafactor_large
|
def transformer_tall_pretrain_lm_tpu_adafactor_large():
"""Hparams for transformer on LM pretraining on TPU, large model."""
hparams = transformer_tall_pretrain_lm_tpu_adafactor()
hparams.hidden_size = 1024
hparams.num_heads = 16
hparams.filter_size = 32768 # max fitting in 16G memory is 49152, batch 2
hparams.batch_size = 4
hparams.multiproblem_mixing_schedule = "constant"
# Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad.
hparams.multiproblem_per_task_threshold = "320,80,160,1,80,160,2,20,10,5"
return hparams
|
python
|
Hparams for transformer on LM pretraining on TPU, large model.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1921-L1931
|
train
|
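The multiproblem_per_task_threshold string in transformer_tall_pretrain_lm_tpu_adafactor_large is positional: the i-th weight applies to the i-th task in the commented order. A purely illustrative pairing (task names are taken from that comment, not from an API):

tasks = "lm en-de en-fr en-ro de-en fr-en ro-en cnndm mnli squad".split()
thresholds = [int(x) for x in "320,80,160,1,80,160,2,20,10,5".split(",")]
for task, weight in zip(tasks, thresholds):
    print(f"{task}: {weight}")
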
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_tall_pretrain_lm_tpu
|
def transformer_tall_pretrain_lm_tpu():
"""Hparams for transformer on LM pretraining on TPU with AdamW."""
hparams = transformer_tall_pretrain_lm_tpu_adafactor()
# Optimizer gets reset in update_hparams_for_tpu so we set it again here.
hparams.learning_rate_constant = 2e-4
hparams.learning_rate_schedule = ("linear_warmup * constant * cosdecay")
hparams.optimizer = "adam_w"
return hparams
|
python
|
Hparams for transformer on LM pretraining on TPU with AdamW.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1935-L1942
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_base_single_gpu
|
def transformer_base_single_gpu():
"""HParams for transformer base model for single GPU."""
hparams = transformer_base()
hparams.batch_size = 1024
hparams.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay"
hparams.learning_rate_constant = 0.1
hparams.learning_rate_warmup_steps = 16000
return hparams
|
python
|
HParams for transformer base model for single GPU.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1963-L1970
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_parsing_base
|
def transformer_parsing_base():
"""HParams for parsing on WSJ only."""
hparams = transformer_base()
hparams.attention_dropout = 0.2
hparams.layer_prepostprocess_dropout = 0.2
hparams.max_length = 512
hparams.learning_rate_warmup_steps = 16000
hparams.hidden_size = 1024
hparams.learning_rate = 0.05
hparams.shared_embedding_and_softmax_weights = False
return hparams
|
python
|
HParams for parsing on WSJ only.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1983-L1993
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_parsing_big
|
def transformer_parsing_big():
"""HParams for parsing on WSJ semi-supervised."""
hparams = transformer_big()
hparams.max_length = 512
hparams.shared_source_target_embedding = False
hparams.learning_rate_warmup_steps = 4000
hparams.layer_prepostprocess_dropout = 0.1
hparams.batch_size = 2048
hparams.learning_rate = 0.05
return hparams
|
python
|
HParams for parsing on WSJ semi-supervised.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1997-L2006
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_base_range
|
def transformer_base_range(rhp):
"""Small range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_discrete("learning_rate_warmup_steps",
[1000, 2000, 4000, 8000, 16000])
rhp.set_float("initializer_gain", 0.5, 2.0)
rhp.set_float("optimizer_adam_beta1", 0.85, 0.95)
rhp.set_float("optimizer_adam_beta2", 0.97, 0.99)
rhp.set_float("weight_decay", 0.0, 1e-4)
|
python
|
Small range of hyperparameters.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2250-L2259
|
train
|
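Unlike the other functions in this file, the *_range sets mutate a passed-in ranged-hparams object instead of returning HParams: they define a search space for hyperparameter tuning rather than a single configuration. A toy stand-in for rhp (hypothetical class; the real one is tensor2tensor's RangedHParams) just to make the search-space shape concrete:

import random

class ToyRangedHParams:
    """Records named dimensions of a hyperparameter search space."""
    LOG_SCALE = "log"

    def __init__(self):
        self.dims = {}

    def set_float(self, name, low, high, scale=None):
        self.dims[name] = ("float", low, high, scale)

    def set_discrete(self, name, values):
        self.dims[name] = ("discrete", values)

    def sample(self):
        # The toy samples uniformly and ignores LOG_SCALE; a real tuner
        # would sample log-scaled dimensions in log space.
        point = {}
        for name, dim in self.dims.items():
            if dim[0] == "float":
                point[name] = random.uniform(dim[1], dim[2])
            else:
                point[name] = random.choice(dim[1])
        return point

rhp = ToyRangedHParams()
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_discrete("learning_rate_warmup_steps", [1000, 2000, 4000, 8000, 16000])
print(rhp.sample())
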
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_relative
|
def transformer_relative():
"""Use relative position embeddings instead of absolute position encodings."""
hparams = transformer_base()
hparams.pos = None
hparams.self_attention_type = "dot_product_relative"
hparams.max_relative_position = 20
return hparams
|
python
|
Use relative position embeddings instead of absolute position encodings.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2263-L2269
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_mlperf_tpu
|
def transformer_mlperf_tpu():
"""HParams for Transformer model on TPU for MLPerf on TPU 2x2."""
hparams = transformer_base_v3()
hparams.mlperf_mode = True
hparams.symbol_modality_num_shards = 1
hparams.max_length = 256 # ignored when using "_packed" problems
hparams.batch_size = 2048 # per-chip batch size matches the reference model
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_heads = 16
hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads
hparams.relu_dropout_broadcast_dims = "1" # length
hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length
return hparams
|
python
|
HParams for Transformer model on TPU for MLPerf on TPU 2x2.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2300-L2313
|
train
|
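The *_dropout_broadcast_dims settings ("0,1" for batch and heads, "1" for length) trade dropout-mask independence for memory: one mask is sampled and broadcast along the named axes. A sketch of the underlying mechanism using TensorFlow's noise_shape (illustrative only, not the t2t implementation):

import tensorflow as tf

x = tf.ones([8, 16, 64, 32])  # [batch, heads, length, depth]
# A 1 in noise_shape makes TF sample a single dropout mask along that axis
# and broadcast it, so the materialized random mask here is 8 * 16 = 128x
# smaller than a fully independent one.
y = tf.nn.dropout(x, rate=0.1, noise_shape=[1, 1, 64, 32])
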
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
update_hparams_for_tpu
|
def update_hparams_for_tpu(hparams):
"""Change hparams to be compatible with TPU training."""
# Adafactor uses less memory than Adam.
# switch to Adafactor with its recommended learning rate scheme.
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
# Avoid an expensive concat on TPU.
# >1 shards helps with faster parameter distribution on multi-GPU machines
hparams.symbol_modality_num_shards = 1
# Adaptive batch sizes and sequence lengths are not supported on TPU.
# Instead, every batch has the same sequence length and the same batch size.
# Longer sequences are dropped and shorter ones are padded.
#
# It is therefore suggested to use a problem where examples have been combined
# to a longer length, e.g. the "_packed" problems.
#
# For problems with variable sequence lengths, this parameter controls the
# maximum sequence length. Shorter sequences are dropped and longer ones
# are padded.
#
# For problems with fixed sequence lengths - e.g. the "_packed" problems,
# this hyperparameter is ignored.
hparams.max_length = 64
# TPUs have less memory than GPUs, so decrease the batch size
hparams.batch_size = 2048
# Using noise broadcast in the dropout layers saves memory during training.
hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads
hparams.relu_dropout_broadcast_dims = "1" # length
hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length
return hparams
|
python
|
Change hparams to be compatible with TPU training.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2316-L2351
|
train
|
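update_hparams_for_tpu mutates the set in place (and also returns it), so it composes naturally with further overrides placed after the call, as transformer_tall_pretrain_lm_tpu_adafactor and transformer_tall_pretrain_lm_tpu above demonstrate. A hypothetical user-defined set following the same pattern (my_transformer_tpu is made up for illustration):

from tensor2tensor.models import transformer
from tensor2tensor.utils import registry

@registry.register_hparams
def my_transformer_tpu():  # hypothetical name, not part of the library
  hparams = transformer.transformer_base()
  transformer.update_hparams_for_tpu(hparams)
  # Any override placed after the TPU update wins, e.g. a smaller batch:
  hparams.batch_size = 1024
  return hparams
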
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_tpu_range
|
def transformer_tpu_range(rhp):
"""Small range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_discrete("learning_rate_warmup_steps",
[1000, 2000, 4000, 8000, 16000])
rhp.set_float("initializer_gain", 0.5, 2.0)
rhp.set_float("optimizer_adam_beta1", 0.85, 0.95)
rhp.set_float("optimizer_adam_beta2", 0.97, 0.99)
rhp.set_float("weight_decay", 0.0, 2.0)
|
python
|
Small range of hyperparameters.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2416-L2425
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_clean
|
def transformer_clean():
"""No dropout, label smoothing, max_length."""
hparams = transformer_base_v2()
hparams.label_smoothing = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.max_length = 0
return hparams
|
python
|
No dropout, label smoothing, max_length.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2441-L2449
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_lm_tpu_0
|
def transformer_lm_tpu_0():
"""HParams for training languagemodel_lm1b8k on tpu. 92M Params."""
hparams = transformer_clean_big()
update_hparams_for_tpu(hparams)
hparams.num_heads = 4 # Heads are expensive on TPUs.
hparams.batch_size = 4096
hparams.shared_embedding_and_softmax_weights = False
hparams.layer_prepostprocess_dropout = 0.1
return hparams
|
python
|
HParams for training languagemodel_lm1b8k on tpu. 92M Params.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2477-L2485
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_librispeech_v1
|
def transformer_librispeech_v1():
"""HParams for training ASR model on LibriSpeech V1."""
hparams = transformer_base()
hparams.num_heads = 4
hparams.filter_size = 1024
hparams.hidden_size = 256
hparams.num_encoder_layers = 5
hparams.num_decoder_layers = 3
hparams.learning_rate = 0.15
hparams.batch_size = 6000000
librispeech.set_librispeech_length_hparams(hparams)
return hparams
|
python
|
HParams for training ASR model on LibriSpeech V1.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2498-L2511
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_librispeech_v2
|
def transformer_librispeech_v2():
"""HParams for training ASR model on LibriSpeech V2."""
hparams = transformer_base()
hparams.max_length = 1240000
hparams.max_input_seq_length = 1550
hparams.max_target_seq_length = 350
hparams.batch_size = 16
hparams.num_decoder_layers = 4
hparams.num_encoder_layers = 6
hparams.hidden_size = 384
hparams.learning_rate = 0.15
hparams.daisy_chain_variables = False
hparams.filter_size = 1536
hparams.num_heads = 2
hparams.ffn_layer = "conv_relu_conv"
hparams.conv_first_kernel = 9
hparams.weight_decay = 0
hparams.layer_prepostprocess_dropout = 0.2
hparams.relu_dropout = 0.2
return hparams
|
python
|
HParams for training ASR model on LibriSpeech V2.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2515-L2536
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_librispeech_tpu_v1
|
def transformer_librispeech_tpu_v1():
"""HParams for training ASR model on Librispeech on TPU v1."""
hparams = transformer_librispeech_v1()
update_hparams_for_tpu(hparams)
hparams.batch_size = 16
librispeech.set_librispeech_length_hparams(hparams)
return hparams
|
python
|
HParams for training ASR model on Librispeech on TPU v1.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2540-L2547
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_librispeech_tpu_v2
|
def transformer_librispeech_tpu_v2():
"""HParams for training ASR model on Librispeech on TPU v2."""
hparams = transformer_librispeech_v2()
update_hparams_for_tpu(hparams)
hparams.batch_size = 16
librispeech.set_librispeech_length_hparams(hparams)
return hparams
|
python
|
HParams for training ASR model on Librispeech on TPU v2.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2551-L2558
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_tpu_1b
|
def transformer_tpu_1b():
"""Hparams for machine translation with ~1.1B parameters."""
hparams = transformer_tpu()
hparams.hidden_size = 2048
hparams.filter_size = 8192
hparams.num_hidden_layers = 8
# smaller batch size to avoid OOM
hparams.batch_size = 1024
hparams.activation_dtype = "bfloat16"
hparams.weight_dtype = "bfloat16"
# maximize number of parameters relative to computation by not sharing.
hparams.shared_embedding_and_softmax_weights = False
return hparams
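
A rough back-of-envelope count supports the "~1.1B parameters" claim in the docstring. This sketch assumes a ~32k subword vocabulary and three unshared embedding/softmax matrices, and it ignores layer norms and biases:

h, f, n_layers, vocab = 2048, 8192, 8, 32000  # vocab size is an assumption
enc_layer = 4 * h * h + 2 * h * f   # self-attention projections + feed-forward
dec_layer = 8 * h * h + 2 * h * f   # adds encoder-decoder attention
embeddings = 3 * vocab * h          # input, target, unshared softmax
total = n_layers * (enc_layer + dec_layer) + embeddings
print(total / 1e9)                  # ~1.14, consistent with ~1.1B
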
|
python
|
Hparams for machine translation with ~1.1B parameters.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2599-L2611
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_wikitext103_l4k_v0
|
def transformer_wikitext103_l4k_v0():
"""HParams for training languagemodel_wikitext103_l4k."""
hparams = transformer_big()
# Adafactor uses less memory than Adam.
# switch to Adafactor with its recommended learning rate scheme.
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.num_heads = 4
hparams.max_length = 4096
hparams.batch_size = 4096
hparams.shared_embedding_and_softmax_weights = False
hparams.num_hidden_layers = 8
hparams.attention_dropout = 0.1
hparams.layer_prepostprocess_dropout = 0.2
hparams.relu_dropout = 0.1
hparams.label_smoothing = 0.0
# Using noise broadcast in the dropout layers saves memory during training.
hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads
hparams.relu_dropout_broadcast_dims = "1" # length
hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length
# Avoid an expensive concat on TPU.
# >1 shards helps with faster parameter distribution on multi-GPU machines
hparams.symbol_modality_num_shards = 1
return hparams
|
python
|
HParams for training languagemodel_wikitext103_l4k.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2615-L2645
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_wikitext103_l4k_memory_v0
|
def transformer_wikitext103_l4k_memory_v0():
"""HParams for training languagemodel_wikitext103_l4k with memory."""
hparams = transformer_wikitext103_l4k_v0()
hparams.split_targets_chunk_length = 64
hparams.split_targets_max_chunks = 64
hparams.split_targets_strided_training = True
hparams.add_hparam("memory_type", "transformer_xl")
# The hparams specify batch size *before* chunking, but we want to have a
# consistent 4K batch size *after* chunking to fully utilize the hardware.
target_tokens_per_batch = 4096
hparams.batch_size = int(target_tokens_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length)) # 262144
hparams.pos = None
hparams.self_attention_type = "dot_product_relative"
hparams.max_relative_position = 2 * hparams.split_targets_chunk_length
hparams.add_hparam("unconditional", True)
hparams.add_hparam("recurrent_memory_batch_size", 0) # 0 = try to guess
# By default, cache one chunk only (like Transformer-XL)
hparams.add_hparam("num_memory_items", hparams.split_targets_chunk_length)
return hparams
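
The batch-size arithmetic in this function can be checked with plain Python (no TF needed); the result matches the inline "# 262144" comment:

target_tokens_per_batch = 4096
max_length = 4096
split_targets_chunk_length = 64
batch_size = int(target_tokens_per_batch *
                 (max_length / split_targets_chunk_length))
assert batch_size == 262144  # matches the inline comment above
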
|
python
|
HParams for training languagemodel_wikitext103_l4k with memory.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2649-L2673
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_wikitext103_l16k_memory_v0
|
def transformer_wikitext103_l16k_memory_v0():
"""HParams for training languagemodel_wikitext103_l16k with memory."""
hparams = transformer_wikitext103_l4k_memory_v0()
hparams.max_length = 16384
hparams.split_targets_chunk_length = 64
hparams.split_targets_max_chunks = int(
hparams.max_length / hparams.split_targets_chunk_length)
# The hparams specify batch size *before* chunking, but we want to have a
# consistent 4K batch size *after* chunking to fully utilize the hardware.
target_tokens_per_batch = 4096
hparams.batch_size = int(target_tokens_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length))
hparams.max_relative_position = 2 * hparams.split_targets_chunk_length
return hparams
|
python
|
HParams for training languagemodel_wikitext103_l16k with memory.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2677-L2694
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_cifar10_memory_v0
|
def transformer_cifar10_memory_v0():
"""HParams for training image_cifar10_plain_gen_flat_rev with memory."""
hparams = transformer_wikitext103_l4k_memory_v0()
hparams.num_hidden_layers = 6
hparams.max_length = 32 * 32 * 3
hparams.split_targets_chunk_length = 64 * 3
hparams.split_targets_max_chunks = int(
hparams.max_length / hparams.split_targets_chunk_length)
hparams.num_memory_items = 128 * 3
# Since this is an image problem, batch size refers to examples (not tokens)
target_images_per_batch = 4
hparams.batch_size = int(target_images_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length))
# The recurrent memory needs to know the actual batch size (in sequences)
hparams.recurrent_memory_batch_size = hparams.batch_size
hparams.max_relative_position = (
hparams.num_memory_items + hparams.split_targets_chunk_length)
return hparams
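
For CIFAR-10 the same chunking arithmetic works out as follows (plain Python; every value is derived from the assignments above):

max_length = 32 * 32 * 3                      # 3072 subpixels per image
chunk = 64 * 3                                # 192
batch_size = int(4 * (max_length / chunk))    # 64 chunked sequences per batch
max_relative_position = 128 * 3 + chunk       # 576
assert (batch_size, max_relative_position) == (64, 576)
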
|
python
|
HParams for training image_cifar10_plain_gen_flat_rev with memory.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2698-L2721
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/transformer.py
|
transformer_imagenet64_memory_v0
|
def transformer_imagenet64_memory_v0():
"""HParams for training image_imagenet64_gen_flat_rev with memory."""
hparams = transformer_cifar10_memory_v0()
hparams.max_length = 64 * 64 * 3
hparams.split_targets_chunk_length = 64 * 3
hparams.split_targets_max_chunks = int(
hparams.max_length / hparams.split_targets_chunk_length)
hparams.num_memory_items = 128 * 3
# Since this is an image problem, batch size refers to examples (not tokens)
target_images_per_batch = 2
hparams.batch_size = int(target_images_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length))
# The recurrent memory needs to know the actual batch size (in sequences)
hparams.recurrent_memory_batch_size = hparams.batch_size
hparams.max_relative_position = 3072
return hparams
|
python
|
HParams for training image_imagenet64_gen_flat_rev with memory.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2725-L2745
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_image_attention.py
|
maybe_reshape_4d_to_3d
|
def maybe_reshape_4d_to_3d(x):
"""Reshape input from 4D to 3D if necessary."""
x_shape = common_layers.shape_list(x)
is_4d = False
if len(x_shape) == 4:
x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], x_shape[3]])
is_4d = True
return x, x_shape, is_4d
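
A toy illustration of the reshape (assuming tensorflow and the function above are in scope; the shapes are hypothetical):

import tensorflow as tf

x = tf.zeros([2, 8, 8, 64])                   # [batch, height, width, depth]
x3d, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
# x3d has shape [2, 64, 64]: height and width merged into one length axis.
# x_shape keeps the original dims so callers can reshape back; is_4d is True.
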
|
python
|
Reshape input from 4D to 3D if necessary.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L72-L79
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_image_attention.py
|
local_attention_2d
|
def local_attention_2d(x, hparams, attention_type="local_attention_2d"):
"""Local 2d, self attention layer."""
# self-attention
with tf.variable_scope("local_2d_self_att"):
y = common_attention.multihead_attention_2d(
x,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
attention_type=attention_type,
query_shape=hparams.query_shape,
memory_flange=hparams.memory_flange,
name="self_attention")
return y
|
python
|
Local 2d self-attention layer.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L82-L97
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_image_attention.py
|
local_within_block_attention
|
def local_within_block_attention(x,
self_attention_bias,
hparams,
attention_type="local_within_block_mask_right",
q_padding="VALID",
kv_padding="VALID"):
"""Local within block self attention."""
x_new, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
with tf.variable_scope("local_within_block"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x_new, hparams),
None,
self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=attention_type,
block_width=hparams.block_width,
block_length=hparams.block_length,
q_padding=q_padding,
kv_padding=kv_padding,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
name="local_within_block")
if is_4d:
y = tf.reshape(y, x_shape)
return y
|
python
|
Local within block self attention.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L100-L128
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_image_attention.py
|
local_attention_1d
|
def local_attention_1d(x,
hparams,
attention_type="local_unmasked",
q_padding="VALID",
kv_padding="VALID"):
"""Local 1d self attention."""
# self-attention
x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
with tf.variable_scope("local_1d_self_att"):
y = common_attention.multihead_attention(
x,
None,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=attention_type,
shared_rel=hparams.shared_rel,
block_width=hparams.block_width,
block_length=hparams.block_length,
q_padding=q_padding,
kv_padding=kv_padding,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
make_image_summary=False,
name="self_attention")
if is_4d:
y = tf.reshape(y, x_shape)
return y
|
python
|
Local 1d self attention.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L131-L161
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_image_attention.py
|
get_dilated_1d_attention_mask
|
def get_dilated_1d_attention_mask(
num_heads, block_size,
num_blocks, memory_size, gap_size,
name="dilated_mask"):
"""Dilated attention with a masking strategy."""
  mask = np.ones((num_heads, block_size, 2*block_size), bool)
# now going over every row to do the right assignment of
# memory blocks
for i in range(block_size):
visible = 2*block_size - (block_size-i)
# You always attend to yourself, set the mask for that
mask[:, i, -(block_size - i)] = 0
# Maybe num_blocks can be automatically calculated?
for j in range(num_blocks):
for k in range(memory_size):
index = ((gap_size + memory_size)*j) + k
if index >= visible:
break
mask[:, i, -(index + block_size - i + 1)] = 0 # Verify
# adding a num blocks dimension
mask = np.expand_dims(mask, axis=1)
return tf.constant(mask, dtype=tf.int32, name=name)
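
An illustrative call with hypothetical sizes; the returned mask has shape [num_heads, 1, block_size, 2 * block_size], with 0 marking positions a query may attend to:

mask = get_dilated_1d_attention_mask(
    num_heads=4, block_size=8, num_blocks=2, memory_size=3, gap_size=2)
# mask.shape == (4, 1, 8, 16); dtype int32, as constructed above.
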
|
python
|
Dilated attention with a masking strategy.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L164-L187
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_image_attention.py
|
dilated_attention_1d
|
def dilated_attention_1d(x,
hparams,
attention_type="masked_dilated_1d",
q_padding="VALID",
kv_padding="VALID",
gap_size=2):
"""Dilated 1d self attention."""
# self-attention
x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
with tf.variable_scope("masked_dilated_1d"):
y = common_attention.multihead_attention(
x,
None,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=attention_type,
block_width=hparams.block_width,
block_length=hparams.block_length,
q_padding=q_padding,
kv_padding=kv_padding,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
gap_size=gap_size,
num_memory_blocks=hparams.num_memory_blocks,
name="self_attention")
if is_4d:
y = tf.reshape(y, x_shape)
y.set_shape([None, None, None, hparams.hidden_size])
return y
|
python
|
Dilated 1d self attention.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L190-L222
|
train
|