Dataset preview (one record per function). Fields: repo, path, func_name, code, language, sha, url, partition.
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/beam_search.py
func_name: top_k_with_unique
code:

def top_k_with_unique(inputs, k):
"""Finds the values and indices of the k largests entries.
Instead of doing sort like tf.nn.top_k, this function finds the max value
k times. The running time is proportional to k, which is be faster when k
is small. The current implementation supports only inputs of rank 2.
In addition, iota is used to replace the lower bits of each element, this
makes the selection more stable when there are equal elements. The
overhead is that output values are approximated.
Args:
inputs: A tensor with rank of 2. [batch_size, original_size].
k: An integer, number of top elements to select.
Returns:
top_values: A tensor, the k largest elements in sorted order.
[batch_size, k].
indices: A tensor, indices of the top_values. [batch_size, k].
"""
unique_inputs = _create_make_unique(tf.cast(inputs, tf.float32))
top_values, indices = _create_topk_unique(unique_inputs, k)
top_values = tf.cast(top_values, inputs.dtype)
return top_values, indices

language: python
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/beam_search.py#L273-L295
partition: train
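The docstring's key idea, finding the max k times instead of sorting, can be illustrated with a minimal NumPy sketch (a hypothetical helper, not the repo's `_create_topk_unique`):

```python
import numpy as np

def top_k_by_repeated_max(x, k):
  # Sketch of top-k via k argmax passes over a [batch, n] array, as the
  # docstring above describes: O(k * n) instead of a full O(n log n) sort.
  x = np.array(x, dtype=np.float32, copy=True)
  rows = np.arange(x.shape[0])
  values = np.empty((x.shape[0], k), np.float32)
  indices = np.empty((x.shape[0], k), np.int64)
  for j in range(k):
    idx = x.argmax(axis=1)       # one max pass over each row
    values[:, j] = x[rows, idx]
    indices[:, j] = idx
    x[rows, idx] = -np.inf       # mask the winner out of later passes
  return values, indices
```

The `_create_make_unique` step then makes ties impossible by overwriting the low bits of each float with its column index (iota), so repeated maxima are selected deterministically, at the cost of slightly approximated output values.
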
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/beam_search.py
func_name: compute_topk_scores_and_seq
code:

def compute_topk_scores_and_seq(sequences,
scores,
scores_to_gather,
flags,
beam_size,
batch_size,
prefix="default",
states_to_gather=None,
use_tpu=False,
use_top_k_with_unique=True):
"""Given sequences and scores, will gather the top k=beam size sequences.
This function is used to grow alive, and finished. It takes sequences,
scores, and flags, and returns the top k from sequences, scores_to_gather,
and flags based on the values in scores.
This method permits easy introspection using tfdbg. It adds three named ops
that are prefixed by `prefix`:
- _topk_seq: the tensor for topk_seq returned by this method.
- _topk_flags: the tensor for topk_finished_flags returned by this method.
- _topk_scores: the tensor for tokp_gathered_scores returned by this method.
Args:
sequences: Tensor of sequences that we need to gather from.
[batch_size, beam_size, seq_length]
scores: Tensor of scores for each sequence in sequences.
[batch_size, beam_size]. We will use these to compute the topk.
scores_to_gather: Tensor of scores for each sequence in sequences.
[batch_size, beam_size]. We will return the gathered scores from here.
Scores to gather is different from scores because for grow_alive, we will
need to return log_probs, while for grow_finished, we will need to return
the length penalized scores.
    flags: Tensor of bools for sequences that say whether a sequence has
      reached EOS or not.
beam_size: int
batch_size: int
prefix: string that will prefix unique names for the ops run.
states_to_gather: dict (possibly nested) of decoding states.
use_tpu: A bool, whether to compute topk scores and sequences on TPU.
use_top_k_with_unique: bool, whether to use a fast (but decreased precision)
top_k during TPU beam search.
  Returns:
    Tuple of
    (topk_seq [batch_size, beam_size, decode_length],
     topk_gathered_scores [batch_size, beam_size],
     topk_finished_flags [batch_size, beam_size],
     topk_gathered_states, with the same structure as states_to_gather)
  """
if not use_tpu:
_, topk_indexes = tf.nn.top_k(scores, k=beam_size)
# The next three steps are to create coordinates for tf.gather_nd to pull
# out the topk sequences from sequences based on scores.
# batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which
# batch the beam item is in. This will create the i of the i,j coordinate
# needed for the gather
batch_pos = compute_batch_indices(batch_size, beam_size)
# top coordinates will give us the actual coordinates to do the gather.
# stacking will create a tensor of dimension batch * beam * 2, where the
# last dimension contains the i,j gathering coordinates.
top_coordinates = tf.stack([batch_pos, topk_indexes], axis=2)
# Gather up the highest scoring sequences. For each operation added, give
# it a concrete name to simplify observing these operations with tfdbg.
# Clients can capture these tensors by watching these node names.
def gather(tensor, name):
return tf.gather_nd(tensor, top_coordinates, name=(prefix + name))
topk_seq = gather(sequences, "_topk_seq")
topk_flags = gather(flags, "_topk_flags")
topk_gathered_scores = gather(scores_to_gather, "_topk_scores")
if states_to_gather:
topk_gathered_states = nest.map_structure(
lambda state: gather(state, "_topk_states"), states_to_gather)
else:
topk_gathered_states = states_to_gather
else:
if use_top_k_with_unique:
_, topk_indexes = top_k_with_unique(scores, k=beam_size)
else:
_, topk_indexes = tf.nn.top_k(scores, k=beam_size)
# Gather up the highest scoring sequences. For each operation added, give
# it a concrete name to simplify observing these operations with tfdbg.
# Clients can capture these tensors by watching these node names.
topk_seq = fast_tpu_gather(sequences, topk_indexes, prefix + "_topk_seq")
topk_flags = fast_tpu_gather(flags, topk_indexes, prefix + "_topk_flags")
topk_gathered_scores = fast_tpu_gather(scores_to_gather, topk_indexes,
prefix + "_topk_scores")
if states_to_gather:
topk_gathered_states = nest.map_structure(
# pylint: disable=g-long-lambda
lambda state: fast_tpu_gather(state, topk_indexes,
prefix + "_topk_states"),
states_to_gather)
else:
topk_gathered_states = states_to_gather
return topk_seq, topk_gathered_scores, topk_flags, topk_gathered_states

language: python
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/beam_search.py#L298-L393
partition: train
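The coordinate construction in the non-TPU branch is easiest to see with toy numbers (hypothetical values, batch_size=2 and beam_size=3; a sketch, not repo code):

```python
import tensorflow as tf

# batch_pos supplies the i of each (i, j) gather coordinate; the top-k
# indexes supply the j (which beam slot to pull from).
batch_pos = tf.constant([[0, 0, 0], [1, 1, 1]])     # what compute_batch_indices(2, 3) yields
topk_indexes = tf.constant([[2, 0, 1], [1, 2, 0]])  # pretend top-k output
coordinates = tf.stack([batch_pos, topk_indexes], axis=2)  # shape [2, 3, 2]

sequences = tf.reshape(tf.range(2 * 3 * 4), [2, 3, 4])     # fake [batch, beam, length]
topk_seq = tf.gather_nd(sequences, coordinates)
# Row b of topk_seq is sequences[b] with its beams reordered to
# [2, 0, 1] for batch 0 and [1, 2, 0] for batch 1.
```
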
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/beam_search.py
func_name: beam_search
code:

def beam_search(symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
states=None,
eos_id=EOS_ID,
stop_early=True,
use_tpu=False,
use_top_k_with_unique=True):
"""Beam search with length penalties.
Requires a function that can take the currently decoded symbols and return
the logits for the next symbol. The implementation is inspired by
https://arxiv.org/abs/1609.08144.
When running, the beam search steps can be visualized by using tfdbg to watch
the operations generating the output ids for each beam step. These operations
have the pattern:
(alive|finished)_topk_(seq,scores)
Operations marked `alive` represent the new beam sequences that will be
processed in the next step. Operations marked `finished` represent the
completed beam sequences, which may be padded with 0s if no beams finished.
Operations marked `seq` store the full beam sequence for the time step.
Operations marked `scores` store the sequence's final log scores.
  The beam search steps will be processed sequentially in order, so when
  capturing tensors observed from these operations, clients can make
  assumptions about which step is being recorded.

  WARNING: Assumes the 2nd dimension of tensors in `states` is not invariant;
  this means that the shape of the 2nd dimension of these tensors will not be
  available (i.e. set to None) inside symbols_to_logits_fn.
Args:
    symbols_to_logits_fn: Interface to the model, to provide logits.
      Should take [batch_size, decoded_ids] and return [batch_size, vocab_size].
    initial_ids: Ids to start off the decoding; this will be the first thing
      handed to symbols_to_logits_fn (after expanding to beam size).
      [batch_size]
beam_size: Size of the beam.
decode_length: Number of steps to decode for.
vocab_size: Size of the vocab, must equal the size of the logits returned by
symbols_to_logits_fn
alpha: alpha for length penalty.
states: dict (possibly nested) of decoding states.
eos_id: ID for end of sentence.
stop_early: a boolean - stop once best sequence is provably determined.
use_tpu: A bool, whether to do beam search on TPU.
use_top_k_with_unique: bool, whether to use a fast (but decreased precision)
top_k during TPU beam search.
  Returns:
    Tuple of
    (decoded beams [batch_size, beam_size, decode_length],
     decoding probabilities [batch_size, beam_size],
     dict of final decoding states)
  """
batch_size = common_layers.shape_list(initial_ids)[0]
# Assume initial_ids are prob 1.0
initial_log_probs = tf.constant([[0.] + [-INF] * (beam_size - 1)])
# Expand to beam_size (batch_size, beam_size)
alive_log_probs = tf.tile(initial_log_probs, [batch_size, 1])
# Expand each batch and state to beam_size
alive_seq = _expand_to_beam_size(initial_ids, beam_size)
alive_seq = tf.expand_dims(alive_seq, axis=2) # (batch_size, beam_size, 1)
if use_tpu:
alive_seq = tf.tile(alive_seq, [1, 1, decode_length + 1])
if states:
states = nest.map_structure(
lambda state: _expand_to_beam_size(state, beam_size), states)
else:
states = {}
# Finished will keep track of all the sequences that have finished so far
# Finished log probs will be negative infinity in the beginning
# finished_flags will keep track of booleans
finished_seq = tf.zeros(common_layers.shape_list(alive_seq), tf.int32)
# Setting the scores of the initial to negative infinity.
finished_scores = tf.ones([batch_size, beam_size]) * -INF
finished_flags = tf.zeros([batch_size, beam_size], tf.bool)
def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq,
curr_scores, curr_finished):
"""Given sequences and scores, will gather the top k=beam size sequences.
Args:
finished_seq: Current finished sequences.
[batch_size, beam_size, current_decoded_length]
finished_scores: scores for each of these sequences.
[batch_size, beam_size]
finished_flags: finished bools for each of these sequences.
[batch_size, beam_size]
curr_seq: current topk sequence that has been grown by one position.
[batch_size, beam_size, current_decoded_length]
curr_scores: scores for each of these sequences. [batch_size, beam_size]
curr_finished: Finished flags for each of these sequences.
[batch_size, beam_size]
    Returns:
      Tuple of
      (Topk sequences based on scores,
       log probs of these sequences,
       Finished flags of these sequences,
       gathered states, which are None here since no states are passed)
    """
if not use_tpu:
      # First append a column of 0 ids to finished_seq so it has the same
      # length as curr_seq, which has grown by one position.
finished_seq = tf.concat(
[finished_seq,
tf.zeros([batch_size, beam_size, 1], tf.int32)], axis=2)
# Set the scores of the unfinished seq in curr_seq to large negative
# values
curr_scores += (1. - tf.to_float(curr_finished)) * -INF
# concatenating the sequences and scores along beam axis
curr_finished_seq = tf.concat([finished_seq, curr_seq], axis=1)
curr_finished_scores = tf.concat([finished_scores, curr_scores], axis=1)
curr_finished_flags = tf.concat([finished_flags, curr_finished], axis=1)
return compute_topk_scores_and_seq(
curr_finished_seq,
curr_finished_scores,
curr_finished_scores,
curr_finished_flags,
beam_size,
batch_size,
"grow_finished",
use_tpu=use_tpu,
use_top_k_with_unique=use_top_k_with_unique)
def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished, states):
"""Given sequences and scores, will gather the top k=beam size sequences.
Args:
curr_seq: current topk sequence that has been grown by one position.
[batch_size, beam_size, i+1]
curr_scores: scores for each of these sequences. [batch_size, beam_size]
curr_log_probs: log probs for each of these sequences.
[batch_size, beam_size]
curr_finished: Finished flags for each of these sequences.
[batch_size, beam_size]
states: dict (possibly nested) of decoding states.
    Returns:
      Tuple of
      (Topk sequences based on scores,
       log probs of these sequences,
       Finished flags of these sequences,
       dict of gathered decoding states)
    """
# Set the scores of the finished seq in curr_seq to large negative
# values
curr_scores += tf.to_float(curr_finished) * -INF
return compute_topk_scores_and_seq(curr_seq, curr_scores, curr_log_probs,
curr_finished, beam_size, batch_size,
"grow_alive", states, use_tpu=use_tpu)
def grow_topk(i, alive_seq, alive_log_probs, states):
r"""Inner beam search loop.
    This function takes the current alive sequences and grows them to topk
    sequences where k = 2*beam. We use 2*beam because all beam_size sequences
    could hit <EOS>, leaving no alive sequences to continue. With 2*beam_size,
    this will not happen. This relies on the assumption that the vocab size is
    greater than the beam size; if so, we will have at least beam_size
    non-<EOS> extensions among the top 2*beam words.

    Length penalty is given by ((5 + len(decode)) / 6) ^ alpha, and scores are
    the log probs divided by this penalty. Please refer to
    https://arxiv.org/abs/1609.08144.
Args:
i: loop index
alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
alive_log_probs: probabilities of these sequences. [batch_size, beam_size]
states: dict (possibly nested) of decoding states.
Returns:
Tuple of
(Topk sequences extended by the next word,
The log probs of these sequences,
The scores with length penalty of these sequences,
Flags indicating which of these sequences have finished decoding,
dict of transformed decoding states)
"""
# Get the logits for all the possible next symbols
if use_tpu and states:
flat_ids = tf.reshape(
tf.slice(alive_seq, [0, 0, i], [batch_size, beam_size, 1]),
[batch_size * beam_size, -1])
else:
flat_ids = tf.reshape(alive_seq, [batch_size * beam_size, -1])
# (batch_size * beam_size, decoded_length)
if states:
flat_states = nest.map_structure(_merge_beam_dim, states)
flat_logits, flat_states = symbols_to_logits_fn(flat_ids, i, flat_states)
states = nest.map_structure(
lambda t: _unmerge_beam_dim(t, batch_size, beam_size), flat_states)
elif use_tpu:
flat_logits = symbols_to_logits_fn(flat_ids, i)
else:
flat_logits = symbols_to_logits_fn(flat_ids)
logits = tf.reshape(flat_logits, [batch_size, beam_size, -1])
# Convert logits to normalized log probs
candidate_log_probs = common_layers.log_prob_from_logits(logits)
# Multiply the probabilities by the current probabilities of the beam.
# (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1)
log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
length_penalty = tf.pow(((5. + tf.to_float(i + 1)) / 6.), alpha)
curr_scores = log_probs / length_penalty
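    # For example, with alpha = 0.6: penalty(1) = 1.0, penalty(10) ~= 1.73,
    # penalty(50) ~= 3.78. Dividing the (negative) log probs by a larger
    # penalty raises the score of longer hypotheses.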
    # Flatten out (beam_size, vocab_size) probs into a list of possibilities
flat_curr_scores = tf.reshape(curr_scores, [-1, beam_size * vocab_size])
if use_tpu and use_top_k_with_unique:
topk_scores, topk_ids = top_k_with_unique(
flat_curr_scores, k=beam_size * 2)
else:
topk_scores, topk_ids = tf.nn.top_k(flat_curr_scores, k=beam_size * 2)
# Recovering the log probs because we will need to send them back
topk_log_probs = topk_scores * length_penalty
# Work out what beam the top probs are in.
topk_beam_index = topk_ids // vocab_size
topk_ids %= vocab_size # Unflatten the ids
if not use_tpu:
# The next three steps are to create coordinates for tf.gather_nd to pull
# out the correct sequences from id's that we need to grow.
# We will also use the coordinates to gather the booleans of the beam
# items that survived.
batch_pos = compute_batch_indices(batch_size, beam_size * 2)
# top beams will give us the actual coordinates to do the gather.
# stacking will create a tensor of dimension batch * beam * 2, where the
# last dimension contains the i,j gathering coordinates.
topk_coordinates = tf.stack([batch_pos, topk_beam_index], axis=2)
# Gather up the most probable 2*beams both for the ids and
# finished_in_alive bools
topk_seq = tf.gather_nd(alive_seq, topk_coordinates)
if states:
states = nest.map_structure(
lambda state: tf.gather_nd(state, topk_coordinates), states)
# Append the most probable alive
topk_seq = tf.concat([topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2)
else:
# Gather up the most probable 2*beams both for the ids and
# finished_in_alive bools
topk_seq = fast_tpu_gather(alive_seq, topk_beam_index)
if states:
states = nest.map_structure(
lambda state: fast_tpu_gather(state, topk_beam_index), states)
# Update the most probable alive
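      # alias_inplace_update writes along the leading axis, so move the time
      # axis to the front, write the new ids at step i + 1, and move it back.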
topk_seq = tf.transpose(topk_seq, perm=[2, 0, 1])
topk_seq = inplace_ops.alias_inplace_update(topk_seq, i + 1, topk_ids)
topk_seq = tf.transpose(topk_seq, perm=[1, 2, 0])
topk_finished = tf.equal(topk_ids, eos_id)
return topk_seq, topk_log_probs, topk_scores, topk_finished, states
def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags, states):
"""Inner beam search loop.
    There are three groups of tensors: alive, finished, and topk.
    The alive group contains information about the current alive sequences.
    The topk group contains information about alive + topk current decoded words.
    The finished group contains information about finished sentences, that is,
    the ones that have decoded to <EOS>. These are what we return.

    The general beam search algorithm is as follows:
    While we haven't terminated (see the termination condition):
      1. Grow the current alive set to get beam*2 topk sequences.
      2. Among the topk, keep the top beam_size ones that haven't reached EOS
         in alive.
      3. Among the topk, keep the top beam_size ones that have reached EOS in
         finished.
    Repeat.

    To keep things simple with fixed-size tensors, we initially end up
    inserting unfinished sequences into finished. To neutralize that, we add
    -INF to the score of each unfinished sequence, so that when a true
    finished sequence appears, it will have a higher score than all the
    unfinished ones.
Args:
i: loop index
alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
alive_log_probs: probabilities of the beams. [batch_size, beam_size]
finished_seq: Current finished sequences.
[batch_size, beam_size, i+1]
finished_scores: scores for each of these sequences.
[batch_size, beam_size]
finished_flags: finished bools for each of these sequences.
[batch_size, beam_size]
states: dict (possibly nested) of decoding states.
Returns:
Tuple of
(Incremented loop index
New alive sequences,
Log probs of the alive sequences,
New finished sequences,
Scores of the new finished sequences,
         Flags indicating which sequences in finished have reached EOS,
dict of final decoding states)
"""
    # Each inner loop, we carry out three steps:
    # 1. Get the current topk items.
    # 2. Extract the ones that have finished and the ones that haven't.
    # 3. Recompute the contents of finished based on scores.
topk_seq, topk_log_probs, topk_scores, topk_finished, states = grow_topk(
i, alive_seq, alive_log_probs, states)
alive_seq, alive_log_probs, _, states = grow_alive(
topk_seq, topk_scores, topk_log_probs, topk_finished, states)
finished_seq, finished_scores, finished_flags, _ = grow_finished(
finished_seq, finished_scores, finished_flags, topk_seq, topk_scores,
topk_finished)
return (i + 1, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags, states)
def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq,
finished_scores, unused_finished_in_finished, unused_states):
"""Checking termination condition.
We terminate when we decoded up to decode_length or the lowest scoring item
in finished has a greater score that the highest prob item in alive divided
by the max length penalty
Args:
i: loop index
alive_log_probs: probabilities of the beams. [batch_size, beam_size]
finished_scores: scores for each of these sequences.
[batch_size, beam_size]
Returns:
Bool.
"""
max_length_penalty = tf.pow(((5. + tf.to_float(decode_length)) / 6.), alpha)
# The best possible score of the most likely alive sequence.
lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty
if not stop_early:
# by considering the min score (in the top N beams) we ensure that
# the decoder will keep decoding until there is at least one beam
# (in the top N) that can be improved (w.r.t. the alive beams).
# any unfinished beam will have score -INF - thus the min
# will always be -INF if there is at least one unfinished beam -
# which means the bound_is_met condition cannot be true in this case.
lowest_score_of_finished_in_finished = tf.reduce_min(finished_scores)
else:
      # by taking the max score we only care about the first beam;
      # as soon as this first beam cannot be beaten by the alive beams
      # the beam decoder can stop.
      # similarly to the above, if the top beam is not completed, its
      # finished_score is -INF, thus it will not activate the
      # bound_is_met condition (i.e., the decoder will keep going).
      # note we need to find the max for every sequence separately, so we
      # need to keep the batch dimension (see axis=1)
lowest_score_of_finished_in_finished = tf.reduce_max(finished_scores,
axis=1)
bound_is_met = tf.reduce_all(
tf.greater(lowest_score_of_finished_in_finished,
lower_bound_alive_scores))
return tf.logical_and(
tf.less(i, decode_length), tf.logical_not(bound_is_met))
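  # Off TPU the decoded length grows every step, so the shape invariant for
  # the sequence tensors leaves every dimension unknown; on TPU all shapes
  # must be static, hence the fully specified invariant.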
inner_shape = tf.TensorShape([None, None, None])
if use_tpu:
inner_shape = tf.TensorShape([batch_size, beam_size, decode_length + 1])
if use_tpu:
state_struc = nest.map_structure(lambda state: state.get_shape(), states)
else:
state_struc = nest.map_structure(get_state_shape_invariants, states)
(_, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags, states) = tf.while_loop(
_is_finished,
inner_loop, [
tf.constant(0), alive_seq, alive_log_probs, finished_seq,
finished_scores, finished_flags, states
],
shape_invariants=[
tf.TensorShape([]),
inner_shape,
alive_log_probs.get_shape(),
inner_shape,
finished_scores.get_shape(),
finished_flags.get_shape(),
state_struc
],
parallel_iterations=1,
back_prop=False)
alive_seq.set_shape((None, beam_size, None))
finished_seq.set_shape((None, beam_size, None))
  # Accounting for corner case: it's possible that no sequence in alive for a
  # particular batch item ever reached EOS. In that case, we should just copy
  # the contents of alive for that batch item. If tf.reduce_any(finished_flags,
  # 1) is False, no sequence for that batch index reached EOS. We do the same
  # for the scores as well.
finished_seq = tf.where(
tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)
finished_scores = tf.where(
tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)
return finished_seq, finished_scores, states

language: python
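For orientation, a hypothetical toy invocation (a sketch assuming the module's surrounding definitions such as EOS_ID; the fake model always favors token 3, so no beam emits EOS and decoding runs all decode_length steps):

```python
import tensorflow as tf

def toy_symbols_to_logits_fn(ids):
  # ids: [batch_size * beam_size, decoded_length]; return logits over a
  # made-up vocab of size 4 that always favors token 3.
  return tf.one_hot(tf.fill([tf.shape(ids)[0]], 3), depth=4) * 5.0

finished_seq, finished_scores, _ = beam_search(
    toy_symbols_to_logits_fn,
    initial_ids=tf.zeros([2], tf.int32),  # batch_size = 2
    beam_size=3,
    decode_length=5,
    vocab_size=4,
    alpha=0.6)
# finished_seq: [2, 3, 6] (the initial id plus 5 decoded steps). Since no
# beam finished, the corner-case tf.where at the end copies the alive
# sequences and their raw log probs into the returned values.
```
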
def beam_search(symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
states=None,
eos_id=EOS_ID,
stop_early=True,
use_tpu=False,
use_top_k_with_unique=True):
"""Beam search with length penalties.
Requires a function that can take the currently decoded symbols and return
the logits for the next symbol. The implementation is inspired by
https://arxiv.org/abs/1609.08144.
When running, the beam search steps can be visualized by using tfdbg to watch
the operations generating the output ids for each beam step. These operations
have the pattern:
(alive|finished)_topk_(seq,scores)
Operations marked `alive` represent the new beam sequences that will be
processed in the next step. Operations marked `finished` represent the
completed beam sequences, which may be padded with 0s if no beams finished.
Operations marked `seq` store the full beam sequence for the time step.
Operations marked `scores` store the sequence's final log scores.
The beam search steps will be processed sequentially in order, so when
capturing observed from these operations, tensors, clients can make
assumptions about which step is being recorded.
WARNING: Assumes 2nd dimension of tensors in `states` and not invariant, this
means that the shape of the 2nd dimension of these tensors will not be
available (i.e. set to None) inside symbols_to_logits_fn.
Args:
symbols_to_logits_fn: Interface to the model, to provide logits.
Shoud take [batch_size, decoded_ids] and return [batch_size, vocab_size]
initial_ids: Ids to start off the decoding, this will be the first thing
handed to symbols_to_logits_fn (after expanding to beam size)
[batch_size]
beam_size: Size of the beam.
decode_length: Number of steps to decode for.
vocab_size: Size of the vocab, must equal the size of the logits returned by
symbols_to_logits_fn
alpha: alpha for length penalty.
states: dict (possibly nested) of decoding states.
eos_id: ID for end of sentence.
stop_early: a boolean - stop once best sequence is provably determined.
use_tpu: A bool, whether to do beam search on TPU.
use_top_k_with_unique: bool, whether to use a fast (but decreased precision)
top_k during TPU beam search.
Returns:
Tuple of
(decoded beams [batch_size, beam_size, decode_length]
decoding probabilities [batch_size, beam_size])
"""
batch_size = common_layers.shape_list(initial_ids)[0]
# Assume initial_ids are prob 1.0
initial_log_probs = tf.constant([[0.] + [-INF] * (beam_size - 1)])
# Expand to beam_size (batch_size, beam_size)
alive_log_probs = tf.tile(initial_log_probs, [batch_size, 1])
# Expand each batch and state to beam_size
alive_seq = _expand_to_beam_size(initial_ids, beam_size)
alive_seq = tf.expand_dims(alive_seq, axis=2) # (batch_size, beam_size, 1)
if use_tpu:
alive_seq = tf.tile(alive_seq, [1, 1, decode_length + 1])
if states:
states = nest.map_structure(
lambda state: _expand_to_beam_size(state, beam_size), states)
else:
states = {}
# Finished will keep track of all the sequences that have finished so far
# Finished log probs will be negative infinity in the beginning
# finished_flags will keep track of booleans
finished_seq = tf.zeros(common_layers.shape_list(alive_seq), tf.int32)
# Setting the scores of the initial to negative infinity.
finished_scores = tf.ones([batch_size, beam_size]) * -INF
finished_flags = tf.zeros([batch_size, beam_size], tf.bool)
def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq,
curr_scores, curr_finished):
"""Given sequences and scores, will gather the top k=beam size sequences.
Args:
finished_seq: Current finished sequences.
[batch_size, beam_size, current_decoded_length]
finished_scores: scores for each of these sequences.
[batch_size, beam_size]
finished_flags: finished bools for each of these sequences.
[batch_size, beam_size]
curr_seq: current topk sequence that has been grown by one position.
[batch_size, beam_size, current_decoded_length]
curr_scores: scores for each of these sequences. [batch_size, beam_size]
curr_finished: Finished flags for each of these sequences.
[batch_size, beam_size]
Returns:
Tuple of
(Topk sequences based on scores,
log probs of these sequences,
Finished flags of these sequences)
"""
if not use_tpu:
# First append a column of 0'ids to finished to make the same length with
# finished scores
finished_seq = tf.concat(
[finished_seq,
tf.zeros([batch_size, beam_size, 1], tf.int32)], axis=2)
# Set the scores of the unfinished seq in curr_seq to large negative
# values
curr_scores += (1. - tf.to_float(curr_finished)) * -INF
# concatenating the sequences and scores along beam axis
curr_finished_seq = tf.concat([finished_seq, curr_seq], axis=1)
curr_finished_scores = tf.concat([finished_scores, curr_scores], axis=1)
curr_finished_flags = tf.concat([finished_flags, curr_finished], axis=1)
return compute_topk_scores_and_seq(
curr_finished_seq,
curr_finished_scores,
curr_finished_scores,
curr_finished_flags,
beam_size,
batch_size,
"grow_finished",
use_tpu=use_tpu,
use_top_k_with_unique=use_top_k_with_unique)
def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished, states):
"""Given sequences and scores, will gather the top k=beam size sequences.
Args:
curr_seq: current topk sequence that has been grown by one position.
[batch_size, beam_size, i+1]
curr_scores: scores for each of these sequences. [batch_size, beam_size]
curr_log_probs: log probs for each of these sequences.
[batch_size, beam_size]
curr_finished: Finished flags for each of these sequences.
[batch_size, beam_size]
states: dict (possibly nested) of decoding states.
Returns:
Tuple of
(Topk sequences based on scores,
log probs of these sequences,
Finished flags of these sequences)
"""
# Set the scores of the finished seq in curr_seq to large negative
# values
curr_scores += tf.to_float(curr_finished) * -INF
return compute_topk_scores_and_seq(curr_seq, curr_scores, curr_log_probs,
curr_finished, beam_size, batch_size,
"grow_alive", states, use_tpu=use_tpu)
def grow_topk(i, alive_seq, alive_log_probs, states):
r"""Inner beam search loop.
This function takes the current alive sequences, and grows them to topk
sequences where k = 2*beam. We use 2*beam because, we could have beam_size
number of sequences that might hit <EOS> and there will be no alive
sequences to continue. With 2*beam_size, this will not happen. This relies
on the assumption the vocab size is > beam size. If this is true, we'll
have at least beam_size non <EOS> extensions if we extract the next top
2*beam words.
Length penalty is given by = (5+len(decode)/6) ^ -\alpha. Pls refer to
https://arxiv.org/abs/1609.08144.
Args:
i: loop index
alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
alive_log_probs: probabilities of these sequences. [batch_size, beam_size]
states: dict (possibly nested) of decoding states.
Returns:
Tuple of
(Topk sequences extended by the next word,
The log probs of these sequences,
The scores with length penalty of these sequences,
Flags indicating which of these sequences have finished decoding,
dict of transformed decoding states)
"""
# Get the logits for all the possible next symbols
if use_tpu and states:
flat_ids = tf.reshape(
tf.slice(alive_seq, [0, 0, i], [batch_size, beam_size, 1]),
[batch_size * beam_size, -1])
else:
flat_ids = tf.reshape(alive_seq, [batch_size * beam_size, -1])
# (batch_size * beam_size, decoded_length)
if states:
flat_states = nest.map_structure(_merge_beam_dim, states)
flat_logits, flat_states = symbols_to_logits_fn(flat_ids, i, flat_states)
states = nest.map_structure(
lambda t: _unmerge_beam_dim(t, batch_size, beam_size), flat_states)
elif use_tpu:
flat_logits = symbols_to_logits_fn(flat_ids, i)
else:
flat_logits = symbols_to_logits_fn(flat_ids)
logits = tf.reshape(flat_logits, [batch_size, beam_size, -1])
# Convert logits to normalized log probs
candidate_log_probs = common_layers.log_prob_from_logits(logits)
# Multiply the probabilities by the current probabilities of the beam.
# (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1)
log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
length_penalty = tf.pow(((5. + tf.to_float(i + 1)) / 6.), alpha)
curr_scores = log_probs / length_penalty
# Flatten out (beam_size, vocab_size) probs in to a list of possibilities
flat_curr_scores = tf.reshape(curr_scores, [-1, beam_size * vocab_size])
if use_tpu and use_top_k_with_unique:
topk_scores, topk_ids = top_k_with_unique(
flat_curr_scores, k=beam_size * 2)
else:
topk_scores, topk_ids = tf.nn.top_k(flat_curr_scores, k=beam_size * 2)
# Recovering the log probs because we will need to send them back
topk_log_probs = topk_scores * length_penalty
# Work out what beam the top probs are in.
topk_beam_index = topk_ids // vocab_size
topk_ids %= vocab_size # Unflatten the ids
if not use_tpu:
# The next three steps are to create coordinates for tf.gather_nd to pull
# out the correct sequences from id's that we need to grow.
# We will also use the coordinates to gather the booleans of the beam
# items that survived.
batch_pos = compute_batch_indices(batch_size, beam_size * 2)
# top beams will give us the actual coordinates to do the gather.
# stacking will create a tensor of dimension batch * beam * 2, where the
# last dimension contains the i,j gathering coordinates.
topk_coordinates = tf.stack([batch_pos, topk_beam_index], axis=2)
# Gather up the most probable 2*beams both for the ids and
# finished_in_alive bools
topk_seq = tf.gather_nd(alive_seq, topk_coordinates)
if states:
states = nest.map_structure(
lambda state: tf.gather_nd(state, topk_coordinates), states)
# Append the most probable alive
topk_seq = tf.concat([topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2)
else:
# Gather up the most probable 2*beams both for the ids and
# finished_in_alive bools
topk_seq = fast_tpu_gather(alive_seq, topk_beam_index)
if states:
states = nest.map_structure(
lambda state: fast_tpu_gather(state, topk_beam_index), states)
# Update the most probable alive
topk_seq = tf.transpose(topk_seq, perm=[2, 0, 1])
topk_seq = inplace_ops.alias_inplace_update(topk_seq, i + 1, topk_ids)
topk_seq = tf.transpose(topk_seq, perm=[1, 2, 0])
topk_finished = tf.equal(topk_ids, eos_id)
return topk_seq, topk_log_probs, topk_scores, topk_finished, states
def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags, states):
"""Inner beam search loop.
There are three groups of tensors, alive, finished, and topk.
The alive group contains information about the current alive sequences
The topk group contains information about alive + topk current decoded words
the finished group contains information about finished sentences, that is,
the ones that have decoded to <EOS>. These are what we return.
The general beam search algorithm is as follows:
While we haven't terminated (pls look at termination condition)
1. Grow the current alive to get beam*2 topk sequences
2. Among the topk, keep the top beam_size ones that haven't reached EOS
into alive
3. Among the topk, keep the top beam_size ones have reached EOS into
finished
Repeat
To make things simple with using fixed size tensors, we will end
up inserting unfinished sequences into finished in the beginning. To stop
that we add -ve INF to the score of the unfinished sequence so that when a
true finished sequence does appear, it will have a higher score than all the
unfinished ones.
Args:
i: loop index
alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
alive_log_probs: probabilities of the beams. [batch_size, beam_size]
finished_seq: Current finished sequences.
[batch_size, beam_size, i+1]
finished_scores: scores for each of these sequences.
[batch_size, beam_size]
finished_flags: finished bools for each of these sequences.
[batch_size, beam_size]
states: dict (possibly nested) of decoding states.
Returns:
Tuple of
(Incremented loop index
New alive sequences,
Log probs of the alive sequences,
New finished sequences,
Scores of the new finished sequences,
Flags indicating which sequence in finished as reached EOS,
dict of final decoding states)
"""
# Each inner loop, we carry out three steps:
# 1. Get the current topk items.
# 2. Extract the ones that have finished and haven't finished
# 3. Recompute the contents of finished based on scores.
topk_seq, topk_log_probs, topk_scores, topk_finished, states = grow_topk(
i, alive_seq, alive_log_probs, states)
alive_seq, alive_log_probs, _, states = grow_alive(
topk_seq, topk_scores, topk_log_probs, topk_finished, states)
finished_seq, finished_scores, finished_flags, _ = grow_finished(
finished_seq, finished_scores, finished_flags, topk_seq, topk_scores,
topk_finished)
return (i + 1, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags, states)
def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq,
finished_scores, unused_finished_in_finished, unused_states):
"""Checking termination condition.
We terminate when we decoded up to decode_length or the lowest scoring item
in finished has a greater score that the highest prob item in alive divided
by the max length penalty
Args:
i: loop index
alive_log_probs: probabilities of the beams. [batch_size, beam_size]
finished_scores: scores for each of these sequences.
[batch_size, beam_size]
Returns:
Bool.
"""
max_length_penalty = tf.pow(((5. + tf.to_float(decode_length)) / 6.), alpha)
# The best possible score of the most likely alive sequence.
lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty
if not stop_early:
# by considering the min score (in the top N beams) we ensure that
# the decoder will keep decoding until there is at least one beam
# (in the top N) that can be improved (w.r.t. the alive beams).
# any unfinished beam will have score -INF - thus the min
# will always be -INF if there is at least one unfinished beam -
# which means the bound_is_met condition cannot be true in this case.
lowest_score_of_finished_in_finished = tf.reduce_min(finished_scores)
else:
# by taking the max score we only care about the first beam;
# as soon as this first beam cannot be beaten from the alive beams
# the beam decoder can stop.
# similarly to the above, if the top beam is not completed, its
# finished_score is -INF, thus it will not activate the
# bound_is_met condition. (i.e., decoder will keep going on).
# note we need to find the max for every sequence eparately - so, we need
# to keep the batch dimension (see axis=1)
lowest_score_of_finished_in_finished = tf.reduce_max(finished_scores,
axis=1)
bound_is_met = tf.reduce_all(
tf.greater(lowest_score_of_finished_in_finished,
lower_bound_alive_scores))
return tf.logical_and(
tf.less(i, decode_length), tf.logical_not(bound_is_met))
inner_shape = tf.TensorShape([None, None, None])
if use_tpu:
inner_shape = tf.TensorShape([batch_size, beam_size, decode_length + 1])
if use_tpu:
state_struc = nest.map_structure(lambda state: state.get_shape(), states)
else:
state_struc = nest.map_structure(get_state_shape_invariants, states)
(_, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags, states) = tf.while_loop(
_is_finished,
inner_loop, [
tf.constant(0), alive_seq, alive_log_probs, finished_seq,
finished_scores, finished_flags, states
],
shape_invariants=[
tf.TensorShape([]),
inner_shape,
alive_log_probs.get_shape(),
inner_shape,
finished_scores.get_shape(),
finished_flags.get_shape(),
state_struc
],
parallel_iterations=1,
back_prop=False)
alive_seq.set_shape((None, beam_size, None))
finished_seq.set_shape((None, beam_size, None))
# Accounting for corner case: It's possible that no sequence in alive for a
# particular batch item ever reached EOS. In that case, we should just copy
# the contents of alive for that batch item. tf.reduce_any(finished_flags, 1)
# if 0, means that no sequence for that batch index had reached EOS. We need
# to do the same for the scores as well.
finished_seq = tf.where(
tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)
finished_scores = tf.where(
tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)
return finished_seq, finished_scores, states
|
[
"def",
"beam_search",
"(",
"symbols_to_logits_fn",
",",
"initial_ids",
",",
"beam_size",
",",
"decode_length",
",",
"vocab_size",
",",
"alpha",
",",
"states",
"=",
"None",
",",
"eos_id",
"=",
"EOS_ID",
",",
"stop_early",
"=",
"True",
",",
"use_tpu",
"=",
"False",
",",
"use_top_k_with_unique",
"=",
"True",
")",
":",
"batch_size",
"=",
"common_layers",
".",
"shape_list",
"(",
"initial_ids",
")",
"[",
"0",
"]",
"# Assume initial_ids are prob 1.0",
"initial_log_probs",
"=",
"tf",
".",
"constant",
"(",
"[",
"[",
"0.",
"]",
"+",
"[",
"-",
"INF",
"]",
"*",
"(",
"beam_size",
"-",
"1",
")",
"]",
")",
"# Expand to beam_size (batch_size, beam_size)",
"alive_log_probs",
"=",
"tf",
".",
"tile",
"(",
"initial_log_probs",
",",
"[",
"batch_size",
",",
"1",
"]",
")",
"# Expand each batch and state to beam_size",
"alive_seq",
"=",
"_expand_to_beam_size",
"(",
"initial_ids",
",",
"beam_size",
")",
"alive_seq",
"=",
"tf",
".",
"expand_dims",
"(",
"alive_seq",
",",
"axis",
"=",
"2",
")",
"# (batch_size, beam_size, 1)",
"if",
"use_tpu",
":",
"alive_seq",
"=",
"tf",
".",
"tile",
"(",
"alive_seq",
",",
"[",
"1",
",",
"1",
",",
"decode_length",
"+",
"1",
"]",
")",
"if",
"states",
":",
"states",
"=",
"nest",
".",
"map_structure",
"(",
"lambda",
"state",
":",
"_expand_to_beam_size",
"(",
"state",
",",
"beam_size",
")",
",",
"states",
")",
"else",
":",
"states",
"=",
"{",
"}",
"# Finished will keep track of all the sequences that have finished so far",
"# Finished log probs will be negative infinity in the beginning",
"# finished_flags will keep track of booleans",
"finished_seq",
"=",
"tf",
".",
"zeros",
"(",
"common_layers",
".",
"shape_list",
"(",
"alive_seq",
")",
",",
"tf",
".",
"int32",
")",
"# Setting the scores of the initial to negative infinity.",
"finished_scores",
"=",
"tf",
".",
"ones",
"(",
"[",
"batch_size",
",",
"beam_size",
"]",
")",
"*",
"-",
"INF",
"finished_flags",
"=",
"tf",
".",
"zeros",
"(",
"[",
"batch_size",
",",
"beam_size",
"]",
",",
"tf",
".",
"bool",
")",
"def",
"grow_finished",
"(",
"finished_seq",
",",
"finished_scores",
",",
"finished_flags",
",",
"curr_seq",
",",
"curr_scores",
",",
"curr_finished",
")",
":",
"\"\"\"Given sequences and scores, will gather the top k=beam size sequences.\n\n Args:\n finished_seq: Current finished sequences.\n [batch_size, beam_size, current_decoded_length]\n finished_scores: scores for each of these sequences.\n [batch_size, beam_size]\n finished_flags: finished bools for each of these sequences.\n [batch_size, beam_size]\n curr_seq: current topk sequence that has been grown by one position.\n [batch_size, beam_size, current_decoded_length]\n curr_scores: scores for each of these sequences. [batch_size, beam_size]\n curr_finished: Finished flags for each of these sequences.\n [batch_size, beam_size]\n Returns:\n Tuple of\n (Topk sequences based on scores,\n log probs of these sequences,\n Finished flags of these sequences)\n \"\"\"",
"if",
"not",
"use_tpu",
":",
"# First append a column of 0'ids to finished to make the same length with",
"# finished scores",
"finished_seq",
"=",
"tf",
".",
"concat",
"(",
"[",
"finished_seq",
",",
"tf",
".",
"zeros",
"(",
"[",
"batch_size",
",",
"beam_size",
",",
"1",
"]",
",",
"tf",
".",
"int32",
")",
"]",
",",
"axis",
"=",
"2",
")",
"# Set the scores of the unfinished seq in curr_seq to large negative",
"# values",
"curr_scores",
"+=",
"(",
"1.",
"-",
"tf",
".",
"to_float",
"(",
"curr_finished",
")",
")",
"*",
"-",
"INF",
"# concatenating the sequences and scores along beam axis",
"curr_finished_seq",
"=",
"tf",
".",
"concat",
"(",
"[",
"finished_seq",
",",
"curr_seq",
"]",
",",
"axis",
"=",
"1",
")",
"curr_finished_scores",
"=",
"tf",
".",
"concat",
"(",
"[",
"finished_scores",
",",
"curr_scores",
"]",
",",
"axis",
"=",
"1",
")",
"curr_finished_flags",
"=",
"tf",
".",
"concat",
"(",
"[",
"finished_flags",
",",
"curr_finished",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"compute_topk_scores_and_seq",
"(",
"curr_finished_seq",
",",
"curr_finished_scores",
",",
"curr_finished_scores",
",",
"curr_finished_flags",
",",
"beam_size",
",",
"batch_size",
",",
"\"grow_finished\"",
",",
"use_tpu",
"=",
"use_tpu",
",",
"use_top_k_with_unique",
"=",
"use_top_k_with_unique",
")",
"def",
"grow_alive",
"(",
"curr_seq",
",",
"curr_scores",
",",
"curr_log_probs",
",",
"curr_finished",
",",
"states",
")",
":",
"\"\"\"Given sequences and scores, will gather the top k=beam size sequences.\n\n Args:\n curr_seq: current topk sequence that has been grown by one position.\n [batch_size, beam_size, i+1]\n curr_scores: scores for each of these sequences. [batch_size, beam_size]\n curr_log_probs: log probs for each of these sequences.\n [batch_size, beam_size]\n curr_finished: Finished flags for each of these sequences.\n [batch_size, beam_size]\n states: dict (possibly nested) of decoding states.\n Returns:\n Tuple of\n (Topk sequences based on scores,\n log probs of these sequences,\n Finished flags of these sequences)\n \"\"\"",
"# Set the scores of the finished seq in curr_seq to large negative",
"# values",
"curr_scores",
"+=",
"tf",
".",
"to_float",
"(",
"curr_finished",
")",
"*",
"-",
"INF",
"return",
"compute_topk_scores_and_seq",
"(",
"curr_seq",
",",
"curr_scores",
",",
"curr_log_probs",
",",
"curr_finished",
",",
"beam_size",
",",
"batch_size",
",",
"\"grow_alive\"",
",",
"states",
",",
"use_tpu",
"=",
"use_tpu",
")",
"def",
"grow_topk",
"(",
"i",
",",
"alive_seq",
",",
"alive_log_probs",
",",
"states",
")",
":",
"r\"\"\"Inner beam search loop.\n\n This function takes the current alive sequences, and grows them to topk\n sequences where k = 2*beam. We use 2*beam because, we could have beam_size\n number of sequences that might hit <EOS> and there will be no alive\n sequences to continue. With 2*beam_size, this will not happen. This relies\n on the assumption the vocab size is > beam size. If this is true, we'll\n have at least beam_size non <EOS> extensions if we extract the next top\n 2*beam words.\n Length penalty is given by = (5+len(decode)/6) ^ -\\alpha. Pls refer to\n https://arxiv.org/abs/1609.08144.\n\n Args:\n i: loop index\n alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]\n alive_log_probs: probabilities of these sequences. [batch_size, beam_size]\n states: dict (possibly nested) of decoding states.\n Returns:\n Tuple of\n (Topk sequences extended by the next word,\n The log probs of these sequences,\n The scores with length penalty of these sequences,\n Flags indicating which of these sequences have finished decoding,\n dict of transformed decoding states)\n \"\"\"",
"# Get the logits for all the possible next symbols",
"if",
"use_tpu",
"and",
"states",
":",
"flat_ids",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"slice",
"(",
"alive_seq",
",",
"[",
"0",
",",
"0",
",",
"i",
"]",
",",
"[",
"batch_size",
",",
"beam_size",
",",
"1",
"]",
")",
",",
"[",
"batch_size",
"*",
"beam_size",
",",
"-",
"1",
"]",
")",
"else",
":",
"flat_ids",
"=",
"tf",
".",
"reshape",
"(",
"alive_seq",
",",
"[",
"batch_size",
"*",
"beam_size",
",",
"-",
"1",
"]",
")",
"# (batch_size * beam_size, decoded_length)",
"if",
"states",
":",
"flat_states",
"=",
"nest",
".",
"map_structure",
"(",
"_merge_beam_dim",
",",
"states",
")",
"flat_logits",
",",
"flat_states",
"=",
"symbols_to_logits_fn",
"(",
"flat_ids",
",",
"i",
",",
"flat_states",
")",
"states",
"=",
"nest",
".",
"map_structure",
"(",
"lambda",
"t",
":",
"_unmerge_beam_dim",
"(",
"t",
",",
"batch_size",
",",
"beam_size",
")",
",",
"flat_states",
")",
"elif",
"use_tpu",
":",
"flat_logits",
"=",
"symbols_to_logits_fn",
"(",
"flat_ids",
",",
"i",
")",
"else",
":",
"flat_logits",
"=",
"symbols_to_logits_fn",
"(",
"flat_ids",
")",
"logits",
"=",
"tf",
".",
"reshape",
"(",
"flat_logits",
",",
"[",
"batch_size",
",",
"beam_size",
",",
"-",
"1",
"]",
")",
"# Convert logits to normalized log probs",
"candidate_log_probs",
"=",
"common_layers",
".",
"log_prob_from_logits",
"(",
"logits",
")",
"# Multiply the probabilities by the current probabilities of the beam.",
"# (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1)",
"log_probs",
"=",
"candidate_log_probs",
"+",
"tf",
".",
"expand_dims",
"(",
"alive_log_probs",
",",
"axis",
"=",
"2",
")",
"length_penalty",
"=",
"tf",
".",
"pow",
"(",
"(",
"(",
"5.",
"+",
"tf",
".",
"to_float",
"(",
"i",
"+",
"1",
")",
")",
"/",
"6.",
")",
",",
"alpha",
")",
"curr_scores",
"=",
"log_probs",
"/",
"length_penalty",
"# Flatten out (beam_size, vocab_size) probs in to a list of possibilities",
"flat_curr_scores",
"=",
"tf",
".",
"reshape",
"(",
"curr_scores",
",",
"[",
"-",
"1",
",",
"beam_size",
"*",
"vocab_size",
"]",
")",
"if",
"use_tpu",
"and",
"use_top_k_with_unique",
":",
"topk_scores",
",",
"topk_ids",
"=",
"top_k_with_unique",
"(",
"flat_curr_scores",
",",
"k",
"=",
"beam_size",
"*",
"2",
")",
"else",
":",
"topk_scores",
",",
"topk_ids",
"=",
"tf",
".",
"nn",
".",
"top_k",
"(",
"flat_curr_scores",
",",
"k",
"=",
"beam_size",
"*",
"2",
")",
"# Recovering the log probs because we will need to send them back",
"topk_log_probs",
"=",
"topk_scores",
"*",
"length_penalty",
"# Work out what beam the top probs are in.",
"topk_beam_index",
"=",
"topk_ids",
"//",
"vocab_size",
"topk_ids",
"%=",
"vocab_size",
"# Unflatten the ids",
"if",
"not",
"use_tpu",
":",
"# The next three steps are to create coordinates for tf.gather_nd to pull",
"# out the correct sequences from id's that we need to grow.",
"# We will also use the coordinates to gather the booleans of the beam",
"# items that survived.",
"batch_pos",
"=",
"compute_batch_indices",
"(",
"batch_size",
",",
"beam_size",
"*",
"2",
")",
"# top beams will give us the actual coordinates to do the gather.",
"# stacking will create a tensor of dimension batch * beam * 2, where the",
"# last dimension contains the i,j gathering coordinates.",
"topk_coordinates",
"=",
"tf",
".",
"stack",
"(",
"[",
"batch_pos",
",",
"topk_beam_index",
"]",
",",
"axis",
"=",
"2",
")",
"# Gather up the most probable 2*beams both for the ids and",
"# finished_in_alive bools",
"topk_seq",
"=",
"tf",
".",
"gather_nd",
"(",
"alive_seq",
",",
"topk_coordinates",
")",
"if",
"states",
":",
"states",
"=",
"nest",
".",
"map_structure",
"(",
"lambda",
"state",
":",
"tf",
".",
"gather_nd",
"(",
"state",
",",
"topk_coordinates",
")",
",",
"states",
")",
"# Append the most probable alive",
"topk_seq",
"=",
"tf",
".",
"concat",
"(",
"[",
"topk_seq",
",",
"tf",
".",
"expand_dims",
"(",
"topk_ids",
",",
"axis",
"=",
"2",
")",
"]",
",",
"axis",
"=",
"2",
")",
"else",
":",
"# Gather up the most probable 2*beams both for the ids and",
"# finished_in_alive bools",
"topk_seq",
"=",
"fast_tpu_gather",
"(",
"alive_seq",
",",
"topk_beam_index",
")",
"if",
"states",
":",
"states",
"=",
"nest",
".",
"map_structure",
"(",
"lambda",
"state",
":",
"fast_tpu_gather",
"(",
"state",
",",
"topk_beam_index",
")",
",",
"states",
")",
"# Update the most probable alive",
"topk_seq",
"=",
"tf",
".",
"transpose",
"(",
"topk_seq",
",",
"perm",
"=",
"[",
"2",
",",
"0",
",",
"1",
"]",
")",
"topk_seq",
"=",
"inplace_ops",
".",
"alias_inplace_update",
"(",
"topk_seq",
",",
"i",
"+",
"1",
",",
"topk_ids",
")",
"topk_seq",
"=",
"tf",
".",
"transpose",
"(",
"topk_seq",
",",
"perm",
"=",
"[",
"1",
",",
"2",
",",
"0",
"]",
")",
"topk_finished",
"=",
"tf",
".",
"equal",
"(",
"topk_ids",
",",
"eos_id",
")",
"return",
"topk_seq",
",",
"topk_log_probs",
",",
"topk_scores",
",",
"topk_finished",
",",
"states",
"def",
"inner_loop",
"(",
"i",
",",
"alive_seq",
",",
"alive_log_probs",
",",
"finished_seq",
",",
"finished_scores",
",",
"finished_flags",
",",
"states",
")",
":",
"\"\"\"Inner beam search loop.\n\n There are three groups of tensors, alive, finished, and topk.\n The alive group contains information about the current alive sequences\n The topk group contains information about alive + topk current decoded words\n the finished group contains information about finished sentences, that is,\n the ones that have decoded to <EOS>. These are what we return.\n The general beam search algorithm is as follows:\n While we haven't terminated (pls look at termination condition)\n 1. Grow the current alive to get beam*2 topk sequences\n 2. Among the topk, keep the top beam_size ones that haven't reached EOS\n into alive\n 3. Among the topk, keep the top beam_size ones have reached EOS into\n finished\n Repeat\n To make things simple with using fixed size tensors, we will end\n up inserting unfinished sequences into finished in the beginning. To stop\n that we add -ve INF to the score of the unfinished sequence so that when a\n true finished sequence does appear, it will have a higher score than all the\n unfinished ones.\n\n Args:\n i: loop index\n alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]\n alive_log_probs: probabilities of the beams. [batch_size, beam_size]\n finished_seq: Current finished sequences.\n [batch_size, beam_size, i+1]\n finished_scores: scores for each of these sequences.\n [batch_size, beam_size]\n finished_flags: finished bools for each of these sequences.\n [batch_size, beam_size]\n states: dict (possibly nested) of decoding states.\n\n Returns:\n Tuple of\n (Incremented loop index\n New alive sequences,\n Log probs of the alive sequences,\n New finished sequences,\n Scores of the new finished sequences,\n Flags indicating which sequence in finished as reached EOS,\n dict of final decoding states)\n \"\"\"",
"# Each inner loop, we carry out three steps:",
"# 1. Get the current topk items.",
"# 2. Extract the ones that have finished and haven't finished",
"# 3. Recompute the contents of finished based on scores.",
"topk_seq",
",",
"topk_log_probs",
",",
"topk_scores",
",",
"topk_finished",
",",
"states",
"=",
"grow_topk",
"(",
"i",
",",
"alive_seq",
",",
"alive_log_probs",
",",
"states",
")",
"alive_seq",
",",
"alive_log_probs",
",",
"_",
",",
"states",
"=",
"grow_alive",
"(",
"topk_seq",
",",
"topk_scores",
",",
"topk_log_probs",
",",
"topk_finished",
",",
"states",
")",
"finished_seq",
",",
"finished_scores",
",",
"finished_flags",
",",
"_",
"=",
"grow_finished",
"(",
"finished_seq",
",",
"finished_scores",
",",
"finished_flags",
",",
"topk_seq",
",",
"topk_scores",
",",
"topk_finished",
")",
"return",
"(",
"i",
"+",
"1",
",",
"alive_seq",
",",
"alive_log_probs",
",",
"finished_seq",
",",
"finished_scores",
",",
"finished_flags",
",",
"states",
")",
"def",
"_is_finished",
"(",
"i",
",",
"unused_alive_seq",
",",
"alive_log_probs",
",",
"unused_finished_seq",
",",
"finished_scores",
",",
"unused_finished_in_finished",
",",
"unused_states",
")",
":",
"\"\"\"Checking termination condition.\n\n We terminate when we decoded up to decode_length or the lowest scoring item\n in finished has a greater score that the highest prob item in alive divided\n by the max length penalty\n\n Args:\n i: loop index\n alive_log_probs: probabilities of the beams. [batch_size, beam_size]\n finished_scores: scores for each of these sequences.\n [batch_size, beam_size]\n\n Returns:\n Bool.\n \"\"\"",
"max_length_penalty",
"=",
"tf",
".",
"pow",
"(",
"(",
"(",
"5.",
"+",
"tf",
".",
"to_float",
"(",
"decode_length",
")",
")",
"/",
"6.",
")",
",",
"alpha",
")",
"# The best possible score of the most likely alive sequence.",
"lower_bound_alive_scores",
"=",
"alive_log_probs",
"[",
":",
",",
"0",
"]",
"/",
"max_length_penalty",
"if",
"not",
"stop_early",
":",
"# by considering the min score (in the top N beams) we ensure that",
"# the decoder will keep decoding until there is at least one beam",
"# (in the top N) that can be improved (w.r.t. the alive beams).",
"# any unfinished beam will have score -INF - thus the min",
"# will always be -INF if there is at least one unfinished beam -",
"# which means the bound_is_met condition cannot be true in this case.",
"lowest_score_of_finished_in_finished",
"=",
"tf",
".",
"reduce_min",
"(",
"finished_scores",
")",
"else",
":",
"# by taking the max score we only care about the first beam;",
"# as soon as this first beam cannot be beaten from the alive beams",
"# the beam decoder can stop.",
"# similarly to the above, if the top beam is not completed, its",
"# finished_score is -INF, thus it will not activate the",
"# bound_is_met condition. (i.e., decoder will keep going on).",
"# note we need to find the max for every sequence eparately - so, we need",
"# to keep the batch dimension (see axis=1)",
"lowest_score_of_finished_in_finished",
"=",
"tf",
".",
"reduce_max",
"(",
"finished_scores",
",",
"axis",
"=",
"1",
")",
"bound_is_met",
"=",
"tf",
".",
"reduce_all",
"(",
"tf",
".",
"greater",
"(",
"lowest_score_of_finished_in_finished",
",",
"lower_bound_alive_scores",
")",
")",
"return",
"tf",
".",
"logical_and",
"(",
"tf",
".",
"less",
"(",
"i",
",",
"decode_length",
")",
",",
"tf",
".",
"logical_not",
"(",
"bound_is_met",
")",
")",
"inner_shape",
"=",
"tf",
".",
"TensorShape",
"(",
"[",
"None",
",",
"None",
",",
"None",
"]",
")",
"if",
"use_tpu",
":",
"inner_shape",
"=",
"tf",
".",
"TensorShape",
"(",
"[",
"batch_size",
",",
"beam_size",
",",
"decode_length",
"+",
"1",
"]",
")",
"if",
"use_tpu",
":",
"state_struc",
"=",
"nest",
".",
"map_structure",
"(",
"lambda",
"state",
":",
"state",
".",
"get_shape",
"(",
")",
",",
"states",
")",
"else",
":",
"state_struc",
"=",
"nest",
".",
"map_structure",
"(",
"get_state_shape_invariants",
",",
"states",
")",
"(",
"_",
",",
"alive_seq",
",",
"alive_log_probs",
",",
"finished_seq",
",",
"finished_scores",
",",
"finished_flags",
",",
"states",
")",
"=",
"tf",
".",
"while_loop",
"(",
"_is_finished",
",",
"inner_loop",
",",
"[",
"tf",
".",
"constant",
"(",
"0",
")",
",",
"alive_seq",
",",
"alive_log_probs",
",",
"finished_seq",
",",
"finished_scores",
",",
"finished_flags",
",",
"states",
"]",
",",
"shape_invariants",
"=",
"[",
"tf",
".",
"TensorShape",
"(",
"[",
"]",
")",
",",
"inner_shape",
",",
"alive_log_probs",
".",
"get_shape",
"(",
")",
",",
"inner_shape",
",",
"finished_scores",
".",
"get_shape",
"(",
")",
",",
"finished_flags",
".",
"get_shape",
"(",
")",
",",
"state_struc",
"]",
",",
"parallel_iterations",
"=",
"1",
",",
"back_prop",
"=",
"False",
")",
"alive_seq",
".",
"set_shape",
"(",
"(",
"None",
",",
"beam_size",
",",
"None",
")",
")",
"finished_seq",
".",
"set_shape",
"(",
"(",
"None",
",",
"beam_size",
",",
"None",
")",
")",
"# Accounting for corner case: It's possible that no sequence in alive for a",
"# particular batch item ever reached EOS. In that case, we should just copy",
"# the contents of alive for that batch item. tf.reduce_any(finished_flags, 1)",
"# if 0, means that no sequence for that batch index had reached EOS. We need",
"# to do the same for the scores as well.",
"finished_seq",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"reduce_any",
"(",
"finished_flags",
",",
"1",
")",
",",
"finished_seq",
",",
"alive_seq",
")",
"finished_scores",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"reduce_any",
"(",
"finished_flags",
",",
"1",
")",
",",
"finished_scores",
",",
"alive_log_probs",
")",
"return",
"finished_seq",
",",
"finished_scores",
",",
"states"
] |
Beam search with length penalties.
Requires a function that can take the currently decoded symbols and return
the logits for the next symbol. The implementation is inspired by
https://arxiv.org/abs/1609.08144.
When running, the beam search steps can be visualized by using tfdbg to watch
the operations generating the output ids for each beam step. These operations
have the pattern:
(alive|finished)_topk_(seq,scores)
Operations marked `alive` represent the new beam sequences that will be
processed in the next step. Operations marked `finished` represent the
completed beam sequences, which may be padded with 0s if no beams finished.
Operations marked `seq` store the full beam sequence for the time step.
Operations marked `scores` store the sequence's final log scores.
The beam search steps will be processed sequentially in order, so when
capturing tensors observed from these operations, clients can make
assumptions about which step is being recorded.
WARNING: Assumes the 2nd dimension of tensors in `states` is not invariant; this
means that the shape of the 2nd dimension of these tensors will not be
available (i.e. set to None) inside symbols_to_logits_fn.
Args:
symbols_to_logits_fn: Interface to the model, to provide logits.
Should take [batch_size, decoded_ids] and return [batch_size, vocab_size].
initial_ids: Ids to start off the decoding; this will be the first thing
handed to symbols_to_logits_fn (after expanding to beam size)
[batch_size]
beam_size: Size of the beam.
decode_length: Number of steps to decode for.
vocab_size: Size of the vocab, must equal the size of the logits returned by
symbols_to_logits_fn
alpha: alpha for length penalty.
states: dict (possibly nested) of decoding states.
eos_id: ID for end of sentence.
stop_early: a boolean - stop once best sequence is provably determined.
use_tpu: A bool, whether to do beam search on TPU.
use_top_k_with_unique: bool, whether to use a fast (but decreased precision)
top_k during TPU beam search.
Returns:
Tuple of
(decoded beams [batch_size, beam_size, decode_length],
decoding probabilities [batch_size, beam_size],
dict of final decoding states)
|
[
"Beam",
"search",
"with",
"length",
"penalties",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/beam_search.py#L396-L813
|
train
|
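A minimal usage sketch for the beam_search record above. This is an assumption-laden sketch, not part of the record: the uniform symbols_to_logits_fn, all shapes, and the hyperparameter values are made up, and the TF1 graph-mode API plus an importable tensor2tensor.utils.beam_search module are assumed.

import tensorflow as tf
from tensor2tensor.utils import beam_search

batch_size, beam_size, vocab_size = 2, 4, 10

def symbols_to_logits_fn(ids):
  # ids: [batch_size * beam_size, decoded_length]. Zero logits make every
  # next symbol equally likely -- purely for demonstration.
  return tf.zeros([tf.shape(ids)[0], vocab_size])

initial_ids = tf.zeros([batch_size], dtype=tf.int32)
seqs, scores, _ = beam_search.beam_search(
    symbols_to_logits_fn, initial_ids, beam_size, decode_length=5,
    vocab_size=vocab_size, alpha=0.6, eos_id=1)
# seqs: [batch_size, beam_size, <= decode_length + 1]; scores are per-beam
# log probs divided by the length penalty ((5 + length) / 6) ** alpha.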
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/video_utils.py
|
video_augmentation
|
def video_augmentation(features, hue=False, saturate=False, contrast=False):
"""Augments video with optional hue, saturation and constrast.
Args:
features: dict, with keys "inputs", "targets".
features["inputs"], 4-D Tensor, shape=(THWC)
features["targets"], 4-D Tensor, shape=(THWC)
hue: bool, apply hue transform.
saturate: bool, apply saturation transform.
contrast: bool, apply contrast transform.
Returns:
augment_features: dict with transformed "inputs" and "targets".
"""
inputs, targets = features["inputs"], features["targets"]
in_steps = common_layers.shape_list(inputs)[0]
# makes sure that the same augmentation is applied to both input and targets.
# if input is 4-D, then tf.image applies the same transform across the batch.
video = tf.concat((inputs, targets), axis=0)
if hue:
video = tf.image.random_hue(video, max_delta=0.2)
if saturate:
video = tf.image.random_saturation(video, lower=0.5, upper=1.5)
if contrast:
video = tf.image.random_contrast(video, lower=0.5, upper=1.5)
features["inputs"], features["targets"] = video[:in_steps], video[in_steps:]
return features
|
python
|
def video_augmentation(features, hue=False, saturate=False, contrast=False):
"""Augments video with optional hue, saturation and constrast.
Args:
features: dict, with keys "inputs", "targets".
features["inputs"], 4-D Tensor, shape=(THWC)
features["targets"], 4-D Tensor, shape=(THWC)
hue: bool, apply hue transform.
saturate: bool, apply saturation transform.
contrast: bool, apply contrast transform.
Returns:
augment_features: dict with transformed "inputs" and "targets".
"""
inputs, targets = features["inputs"], features["targets"]
in_steps = common_layers.shape_list(inputs)[0]
# makes sure that the same augmentation is applied to both input and targets.
# if input is 4-D, then tf.image applies the same transform across the batch.
video = tf.concat((inputs, targets), axis=0)
if hue:
video = tf.image.random_hue(video, max_delta=0.2)
if saturate:
video = tf.image.random_saturation(video, lower=0.5, upper=1.5)
if contrast:
video = tf.image.random_contrast(video, lower=0.5, upper=1.5)
features["inputs"], features["targets"] = video[:in_steps], video[in_steps:]
return features
|
[
"def",
"video_augmentation",
"(",
"features",
",",
"hue",
"=",
"False",
",",
"saturate",
"=",
"False",
",",
"contrast",
"=",
"False",
")",
":",
"inputs",
",",
"targets",
"=",
"features",
"[",
"\"inputs\"",
"]",
",",
"features",
"[",
"\"targets\"",
"]",
"in_steps",
"=",
"common_layers",
".",
"shape_list",
"(",
"inputs",
")",
"[",
"0",
"]",
"# makes sure that the same augmentation is applied to both input and targets.",
"# if input is 4-D, then tf.image applies the same transform across the batch.",
"video",
"=",
"tf",
".",
"concat",
"(",
"(",
"inputs",
",",
"targets",
")",
",",
"axis",
"=",
"0",
")",
"if",
"hue",
":",
"video",
"=",
"tf",
".",
"image",
".",
"random_hue",
"(",
"video",
",",
"max_delta",
"=",
"0.2",
")",
"if",
"saturate",
":",
"video",
"=",
"tf",
".",
"image",
".",
"random_saturation",
"(",
"video",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"if",
"contrast",
":",
"video",
"=",
"tf",
".",
"image",
".",
"random_contrast",
"(",
"video",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"features",
"[",
"\"inputs\"",
"]",
",",
"features",
"[",
"\"targets\"",
"]",
"=",
"video",
"[",
":",
"in_steps",
"]",
",",
"video",
"[",
"in_steps",
":",
"]",
"return",
"features"
] |
Augments video with optional hue, saturation and contrast.
Args:
features: dict, with keys "inputs", "targets".
features["inputs"], 4-D Tensor, shape=(THWC)
features["targets"], 4-D Tensor, shape=(THWC)
hue: bool, apply hue transform.
saturate: bool, apply saturation transform.
contrast: bool, apply contrast transform.
Returns:
augment_features: dict with transformed "inputs" and "targets".
|
[
"Augments",
"video",
"with",
"optional",
"hue",
"saturation",
"and",
"constrast",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L52-L78
|
train
|
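A short usage sketch for video_augmentation above; it assumes the function (and its common_layers dependency) is importable from tensor2tensor.data_generators.video_utils and uses the TF1 Session API. Shapes follow the (THWC) convention from the docstring; the zero-filled frames are illustrative.

import tensorflow as tf
from tensor2tensor.data_generators import video_utils

features = {
    "inputs": tf.zeros([3, 16, 16, 3], dtype=tf.float32),   # 3 input frames
    "targets": tf.zeros([5, 16, 16, 3], dtype=tf.float32),  # 5 target frames
}
augmented = video_utils.video_augmentation(features, hue=True, saturate=True)
with tf.Session() as sess:
  out = sess.run(augmented)
# Inputs and targets receive the same random transform because they are
# concatenated along time before augmentation and split back afterwards.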
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/video_utils.py
|
create_border
|
def create_border(video, color="blue", border_percent=2):
"""Creates a border around each frame to differentiate input and target.
Args:
video: 5-D NumPy array.
color: string, "blue", "red" or "green".
border_percent: Percentage of the frame covered by the border.
Returns:
video: 5-D NumPy array.
"""
# Do not create border if the video is not in RGB format
if video.shape[-1] != 3:
return video
color_to_axis = {"blue": 2, "red": 0, "green": 1}
axis = color_to_axis[color]
_, _, height, width, _ = video.shape
border_height = np.ceil(border_percent * height / 100.0).astype(np.int)
border_width = np.ceil(border_percent * width / 100.0).astype(np.int)
video[:, :, :border_height, :, axis] = 255
video[:, :, -border_height:, :, axis] = 255
video[:, :, :, :border_width, axis] = 255
video[:, :, :, -border_width:, axis] = 255
return video
|
python
|
def create_border(video, color="blue", border_percent=2):
"""Creates a border around each frame to differentiate input and target.
Args:
video: 5-D NumPy array.
color: string, "blue", "red" or "green".
border_percent: Percentage of the frame covered by the border.
Returns:
video: 5-D NumPy array.
"""
# Do not create border if the video is not in RGB format
if video.shape[-1] != 3:
return video
color_to_axis = {"blue": 2, "red": 0, "green": 1}
axis = color_to_axis[color]
_, _, height, width, _ = video.shape
border_height = np.ceil(border_percent * height / 100.0).astype(np.int)
border_width = np.ceil(border_percent * width / 100.0).astype(np.int)
video[:, :, :border_height, :, axis] = 255
video[:, :, -border_height:, :, axis] = 255
video[:, :, :, :border_width, axis] = 255
video[:, :, :, -border_width:, axis] = 255
return video
|
[
"def",
"create_border",
"(",
"video",
",",
"color",
"=",
"\"blue\"",
",",
"border_percent",
"=",
"2",
")",
":",
"# Do not create border if the video is not in RGB format",
"if",
"video",
".",
"shape",
"[",
"-",
"1",
"]",
"!=",
"3",
":",
"return",
"video",
"color_to_axis",
"=",
"{",
"\"blue\"",
":",
"2",
",",
"\"red\"",
":",
"0",
",",
"\"green\"",
":",
"1",
"}",
"axis",
"=",
"color_to_axis",
"[",
"color",
"]",
"_",
",",
"_",
",",
"height",
",",
"width",
",",
"_",
"=",
"video",
".",
"shape",
"border_height",
"=",
"np",
".",
"ceil",
"(",
"border_percent",
"*",
"height",
"/",
"100.0",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
"border_width",
"=",
"np",
".",
"ceil",
"(",
"border_percent",
"*",
"width",
"/",
"100.0",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
"video",
"[",
":",
",",
":",
",",
":",
"border_height",
",",
":",
",",
"axis",
"]",
"=",
"255",
"video",
"[",
":",
",",
":",
",",
"-",
"border_height",
":",
",",
":",
",",
"axis",
"]",
"=",
"255",
"video",
"[",
":",
",",
":",
",",
":",
",",
":",
"border_width",
",",
"axis",
"]",
"=",
"255",
"video",
"[",
":",
",",
":",
",",
":",
",",
"-",
"border_width",
":",
",",
"axis",
"]",
"=",
"255",
"return",
"video"
] |
Creates a border around each frame to differentiate input and target.
Args:
video: 5-D NumPy array.
color: string, "blue", "red" or "green".
border_percent: Percentage of the frame covered by the border.
Returns:
video: 5-D NumPy array.
|
[
"Creates",
"a",
"border",
"around",
"each",
"frame",
"to",
"differentiate",
"input",
"and",
"target",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L81-L103
|
train
|
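A small NumPy sketch of create_border above. The toy shape (N, T, H, W, C) is inferred from the function's indexing, and an older NumPy release where np.int is still valid is assumed.

import numpy as np
from tensor2tensor.data_generators import video_utils

video = np.zeros([1, 2, 50, 50, 3], dtype=np.uint8)  # (N, T, H, W, C)
# Pass a copy: create_border writes the border into its argument in place.
bordered = video_utils.create_border(video.copy(), color="red",
                                     border_percent=2)
# ceil(2% of 50) = 1, so a 1-pixel red frame is drawn on every frame.
assert (bordered[0, 0, 0, :, 0] == 255).all()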
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/video_utils.py
|
convert_videos_to_summaries
|
def convert_videos_to_summaries(input_videos, output_videos, target_videos,
tag, decode_hparams,
display_ground_truth=False):
"""Converts input, output and target videos into video summaries.
Args:
input_videos: 5-D NumPy array, (NTHWC) conditioning frames.
output_videos: 5-D NumPy array, (NTHWC) model predictions.
target_videos: 5-D NumPy array, (NTHWC) target frames.
tag: tf summary tag.
decode_hparams: HParams.
display_ground_truth: Whether or not to display ground truth videos.
Returns:
summaries: a list of tf frame-by-frame and video summaries.
"""
fps = decode_hparams.frames_per_second
border_percent = decode_hparams.border_percent
max_outputs = decode_hparams.max_display_outputs
target_steps = target_videos.shape[1]
all_summaries = []
input_videos = create_border(
input_videos, color="blue", border_percent=border_percent)
target_videos = create_border(
target_videos, color="red", border_percent=border_percent)
output_videos = create_border(
output_videos, color="red", border_percent=border_percent)
all_input = np.concatenate((input_videos, target_videos), axis=1)
all_output = np.concatenate((input_videos, output_videos), axis=1)
output_summ_vals, _ = common_video.py_gif_summary(
"%s/output" % tag, all_output, max_outputs=max_outputs, fps=fps,
return_summary_value=True)
all_summaries.extend(output_summ_vals)
# Optionally display ground truth.
if display_ground_truth:
input_summ_vals, _ = common_video.py_gif_summary(
"%s/input" % tag, all_input, max_outputs=max_outputs, fps=fps,
return_summary_value=True)
all_summaries.extend(input_summ_vals)
# Frame-by-frame summaries
iterable = zip(output_videos[:max_outputs, :target_steps],
target_videos[:max_outputs])
for ind, (input_video, output_video) in enumerate(iterable):
t, h, w, c = input_video.shape
# Tile vertically
input_frames = np.reshape(input_video, (t*h, w, c))
output_frames = np.reshape(output_video, (t*h, w, c))
# Concat across width.
all_frames = np.concatenate((input_frames, output_frames), axis=1)
tag = "input/output/%s_sample_%d" % (tag, ind)
frame_by_frame_summ = image_utils.image_to_tf_summary_value(
all_frames, tag=tag)
all_summaries.append(frame_by_frame_summ)
return all_summaries
|
python
|
def convert_videos_to_summaries(input_videos, output_videos, target_videos,
tag, decode_hparams,
display_ground_truth=False):
"""Converts input, output and target videos into video summaries.
Args:
input_videos: 5-D NumPy array, (NTHWC) conditioning frames.
output_videos: 5-D NumPy array, (NTHWC) model predictions.
target_videos: 5-D NumPy array, (NTHWC) target frames.
tag: tf summary tag.
decode_hparams: HParams.
display_ground_truth: Whether or not to display ground truth videos.
Returns:
summaries: a list of tf frame-by-frame and video summaries.
"""
fps = decode_hparams.frames_per_second
border_percent = decode_hparams.border_percent
max_outputs = decode_hparams.max_display_outputs
target_steps = target_videos.shape[1]
all_summaries = []
input_videos = create_border(
input_videos, color="blue", border_percent=border_percent)
target_videos = create_border(
target_videos, color="red", border_percent=border_percent)
output_videos = create_border(
output_videos, color="red", border_percent=border_percent)
all_input = np.concatenate((input_videos, target_videos), axis=1)
all_output = np.concatenate((input_videos, output_videos), axis=1)
output_summ_vals, _ = common_video.py_gif_summary(
"%s/output" % tag, all_output, max_outputs=max_outputs, fps=fps,
return_summary_value=True)
all_summaries.extend(output_summ_vals)
# Optionally display ground truth.
if display_ground_truth:
input_summ_vals, _ = common_video.py_gif_summary(
"%s/input" % tag, all_input, max_outputs=max_outputs, fps=fps,
return_summary_value=True)
all_summaries.extend(input_summ_vals)
# Frame-by-frame summaries
iterable = zip(output_videos[:max_outputs, :target_steps],
target_videos[:max_outputs])
for ind, (input_video, output_video) in enumerate(iterable):
t, h, w, c = input_video.shape
# Tile vertically
input_frames = np.reshape(input_video, (t*h, w, c))
output_frames = np.reshape(output_video, (t*h, w, c))
# Concat across width.
all_frames = np.concatenate((input_frames, output_frames), axis=1)
tag = "input/output/%s_sample_%d" % (tag, ind)
frame_by_frame_summ = image_utils.image_to_tf_summary_value(
all_frames, tag=tag)
all_summaries.append(frame_by_frame_summ)
return all_summaries
|
[
"def",
"convert_videos_to_summaries",
"(",
"input_videos",
",",
"output_videos",
",",
"target_videos",
",",
"tag",
",",
"decode_hparams",
",",
"display_ground_truth",
"=",
"False",
")",
":",
"fps",
"=",
"decode_hparams",
".",
"frames_per_second",
"border_percent",
"=",
"decode_hparams",
".",
"border_percent",
"max_outputs",
"=",
"decode_hparams",
".",
"max_display_outputs",
"target_steps",
"=",
"target_videos",
".",
"shape",
"[",
"1",
"]",
"all_summaries",
"=",
"[",
"]",
"input_videos",
"=",
"create_border",
"(",
"input_videos",
",",
"color",
"=",
"\"blue\"",
",",
"border_percent",
"=",
"border_percent",
")",
"target_videos",
"=",
"create_border",
"(",
"target_videos",
",",
"color",
"=",
"\"red\"",
",",
"border_percent",
"=",
"border_percent",
")",
"output_videos",
"=",
"create_border",
"(",
"output_videos",
",",
"color",
"=",
"\"red\"",
",",
"border_percent",
"=",
"border_percent",
")",
"all_input",
"=",
"np",
".",
"concatenate",
"(",
"(",
"input_videos",
",",
"target_videos",
")",
",",
"axis",
"=",
"1",
")",
"all_output",
"=",
"np",
".",
"concatenate",
"(",
"(",
"input_videos",
",",
"output_videos",
")",
",",
"axis",
"=",
"1",
")",
"output_summ_vals",
",",
"_",
"=",
"common_video",
".",
"py_gif_summary",
"(",
"\"%s/output\"",
"%",
"tag",
",",
"all_output",
",",
"max_outputs",
"=",
"max_outputs",
",",
"fps",
"=",
"fps",
",",
"return_summary_value",
"=",
"True",
")",
"all_summaries",
".",
"extend",
"(",
"output_summ_vals",
")",
"# Optionally display ground truth.",
"if",
"display_ground_truth",
":",
"input_summ_vals",
",",
"_",
"=",
"common_video",
".",
"py_gif_summary",
"(",
"\"%s/input\"",
"%",
"tag",
",",
"all_input",
",",
"max_outputs",
"=",
"max_outputs",
",",
"fps",
"=",
"fps",
",",
"return_summary_value",
"=",
"True",
")",
"all_summaries",
".",
"extend",
"(",
"input_summ_vals",
")",
"# Frame-by-frame summaries",
"iterable",
"=",
"zip",
"(",
"output_videos",
"[",
":",
"max_outputs",
",",
":",
"target_steps",
"]",
",",
"target_videos",
"[",
":",
"max_outputs",
"]",
")",
"for",
"ind",
",",
"(",
"input_video",
",",
"output_video",
")",
"in",
"enumerate",
"(",
"iterable",
")",
":",
"t",
",",
"h",
",",
"w",
",",
"c",
"=",
"input_video",
".",
"shape",
"# Tile vertically",
"input_frames",
"=",
"np",
".",
"reshape",
"(",
"input_video",
",",
"(",
"t",
"*",
"h",
",",
"w",
",",
"c",
")",
")",
"output_frames",
"=",
"np",
".",
"reshape",
"(",
"output_video",
",",
"(",
"t",
"*",
"h",
",",
"w",
",",
"c",
")",
")",
"# Concat across width.",
"all_frames",
"=",
"np",
".",
"concatenate",
"(",
"(",
"input_frames",
",",
"output_frames",
")",
",",
"axis",
"=",
"1",
")",
"tag",
"=",
"\"input/output/%s_sample_%d\"",
"%",
"(",
"tag",
",",
"ind",
")",
"frame_by_frame_summ",
"=",
"image_utils",
".",
"image_to_tf_summary_value",
"(",
"all_frames",
",",
"tag",
"=",
"tag",
")",
"all_summaries",
".",
"append",
"(",
"frame_by_frame_summ",
")",
"return",
"all_summaries"
] |
Converts input, output and target videos into video summaries.
Args:
input_videos: 5-D NumPy array, (NTHWC) conditioning frames.
output_videos: 5-D NumPy array, (NTHWC) model predictions.
target_videos: 5-D NumPy array, (NTHWC) target frames.
tag: tf summary tag.
decode_hparams: HParams.
display_ground_truth: Whether or not to display ground truth videos.
Returns:
summaries: a list of tf frame-by-frame and video summaries.
|
[
"Converts",
"input",
"output",
"and",
"target",
"videos",
"into",
"video",
"summaries",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L106-L162
|
train
|
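A hypothetical call sketch for convert_videos_to_summaries above. The SimpleNamespace stands in for the real decode HParams and carries only the three fields the function reads; a working tensor2tensor install (with ffmpeg available for the GIF summaries) is assumed. Note, as a reading aid, that the frame-by-frame loop reassigns `tag`, so later samples get nested tag names.

import types
import numpy as np
from tensor2tensor.data_generators import video_utils

decode_hparams = types.SimpleNamespace(
    frames_per_second=10, border_percent=2, max_display_outputs=1)
inputs = np.zeros([1, 3, 32, 32, 3], dtype=np.uint8)   # (N, T, H, W, C)
outputs = np.zeros([1, 5, 32, 32, 3], dtype=np.uint8)
targets = np.zeros([1, 5, 32, 32, 3], dtype=np.uint8)
summaries = video_utils.convert_videos_to_summaries(
    inputs, outputs, targets, tag="demo", decode_hparams=decode_hparams)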
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/video_utils.py
|
display_video_hooks
|
def display_video_hooks(hook_args):
"""Hooks to display videos at decode time."""
predictions = hook_args.predictions
max_outputs = hook_args.decode_hparams.max_display_outputs
max_decodes = hook_args.decode_hparams.max_display_decodes
with tf.Graph().as_default():
_, best_decodes = video_metrics.compute_video_metrics_from_predictions(
predictions, decode_hparams=hook_args.decode_hparams)
all_summaries = []
# Displays decodes corresponding to the best/worst metric,
for metric, metric_decode_inds in best_decodes.items():
curr_metric_inds = metric_decode_inds[:max_outputs]
best_inputs, best_outputs, best_targets = [], [], []
for sample_ind, decode_ind in enumerate(curr_metric_inds):
curr_decode = predictions[decode_ind][sample_ind]
best_inputs.append(curr_decode["inputs"])
best_outputs.append(curr_decode["outputs"])
best_targets.append(curr_decode["targets"])
best_inputs = np.array(best_inputs, dtype=np.uint8)
best_outputs = np.array(best_outputs, dtype=np.uint8)
best_targets = np.array(best_targets, dtype=np.uint8)
summaries = convert_videos_to_summaries(
best_inputs, best_outputs, best_targets,
tag=metric, decode_hparams=hook_args.decode_hparams)
all_summaries.extend(summaries)
# Display random decodes for ten conditioning frames.
for decode_ind, decode in enumerate(predictions[: max_decodes]):
target_videos = video_metrics.stack_data_given_key(decode, "targets")
output_videos = video_metrics.stack_data_given_key(decode, "outputs")
input_videos = video_metrics.stack_data_given_key(decode, "inputs")
target_videos = np.asarray(target_videos, dtype=np.uint8)
output_videos = np.asarray(output_videos, dtype=np.uint8)
input_videos = np.asarray(input_videos, dtype=np.uint8)
summaries = convert_videos_to_summaries(
input_videos, output_videos, target_videos,
tag="decode_%d" % decode_ind, decode_hparams=hook_args.decode_hparams,
display_ground_truth=decode_ind == 0)
all_summaries.extend(summaries)
return all_summaries
|
python
|
def display_video_hooks(hook_args):
"""Hooks to display videos at decode time."""
predictions = hook_args.predictions
max_outputs = hook_args.decode_hparams.max_display_outputs
max_decodes = hook_args.decode_hparams.max_display_decodes
with tf.Graph().as_default():
_, best_decodes = video_metrics.compute_video_metrics_from_predictions(
predictions, decode_hparams=hook_args.decode_hparams)
all_summaries = []
# Displays decodes corresponding to the best/worst metric,
for metric, metric_decode_inds in best_decodes.items():
curr_metric_inds = metric_decode_inds[:max_outputs]
best_inputs, best_outputs, best_targets = [], [], []
for sample_ind, decode_ind in enumerate(curr_metric_inds):
curr_decode = predictions[decode_ind][sample_ind]
best_inputs.append(curr_decode["inputs"])
best_outputs.append(curr_decode["outputs"])
best_targets.append(curr_decode["targets"])
best_inputs = np.array(best_inputs, dtype=np.uint8)
best_outputs = np.array(best_outputs, dtype=np.uint8)
best_targets = np.array(best_targets, dtype=np.uint8)
summaries = convert_videos_to_summaries(
best_inputs, best_outputs, best_targets,
tag=metric, decode_hparams=hook_args.decode_hparams)
all_summaries.extend(summaries)
# Display random decodes for ten conditioning frames.
for decode_ind, decode in enumerate(predictions[: max_decodes]):
target_videos = video_metrics.stack_data_given_key(decode, "targets")
output_videos = video_metrics.stack_data_given_key(decode, "outputs")
input_videos = video_metrics.stack_data_given_key(decode, "inputs")
target_videos = np.asarray(target_videos, dtype=np.uint8)
output_videos = np.asarray(output_videos, dtype=np.uint8)
input_videos = np.asarray(input_videos, dtype=np.uint8)
summaries = convert_videos_to_summaries(
input_videos, output_videos, target_videos,
tag="decode_%d" % decode_ind, decode_hparams=hook_args.decode_hparams,
display_ground_truth=decode_ind == 0)
all_summaries.extend(summaries)
return all_summaries
|
[
"def",
"display_video_hooks",
"(",
"hook_args",
")",
":",
"predictions",
"=",
"hook_args",
".",
"predictions",
"max_outputs",
"=",
"hook_args",
".",
"decode_hparams",
".",
"max_display_outputs",
"max_decodes",
"=",
"hook_args",
".",
"decode_hparams",
".",
"max_display_decodes",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"_",
",",
"best_decodes",
"=",
"video_metrics",
".",
"compute_video_metrics_from_predictions",
"(",
"predictions",
",",
"decode_hparams",
"=",
"hook_args",
".",
"decode_hparams",
")",
"all_summaries",
"=",
"[",
"]",
"# Displays decodes corresponding to the best/worst metric,",
"for",
"metric",
",",
"metric_decode_inds",
"in",
"best_decodes",
".",
"items",
"(",
")",
":",
"curr_metric_inds",
"=",
"metric_decode_inds",
"[",
":",
"max_outputs",
"]",
"best_inputs",
",",
"best_outputs",
",",
"best_targets",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"sample_ind",
",",
"decode_ind",
"in",
"enumerate",
"(",
"curr_metric_inds",
")",
":",
"curr_decode",
"=",
"predictions",
"[",
"decode_ind",
"]",
"[",
"sample_ind",
"]",
"best_inputs",
".",
"append",
"(",
"curr_decode",
"[",
"\"inputs\"",
"]",
")",
"best_outputs",
".",
"append",
"(",
"curr_decode",
"[",
"\"outputs\"",
"]",
")",
"best_targets",
".",
"append",
"(",
"curr_decode",
"[",
"\"targets\"",
"]",
")",
"best_inputs",
"=",
"np",
".",
"array",
"(",
"best_inputs",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"best_outputs",
"=",
"np",
".",
"array",
"(",
"best_outputs",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"best_targets",
"=",
"np",
".",
"array",
"(",
"best_targets",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"summaries",
"=",
"convert_videos_to_summaries",
"(",
"best_inputs",
",",
"best_outputs",
",",
"best_targets",
",",
"tag",
"=",
"metric",
",",
"decode_hparams",
"=",
"hook_args",
".",
"decode_hparams",
")",
"all_summaries",
".",
"extend",
"(",
"summaries",
")",
"# Display random decodes for ten conditioning frames.",
"for",
"decode_ind",
",",
"decode",
"in",
"enumerate",
"(",
"predictions",
"[",
":",
"max_decodes",
"]",
")",
":",
"target_videos",
"=",
"video_metrics",
".",
"stack_data_given_key",
"(",
"decode",
",",
"\"targets\"",
")",
"output_videos",
"=",
"video_metrics",
".",
"stack_data_given_key",
"(",
"decode",
",",
"\"outputs\"",
")",
"input_videos",
"=",
"video_metrics",
".",
"stack_data_given_key",
"(",
"decode",
",",
"\"inputs\"",
")",
"target_videos",
"=",
"np",
".",
"asarray",
"(",
"target_videos",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"output_videos",
"=",
"np",
".",
"asarray",
"(",
"output_videos",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"input_videos",
"=",
"np",
".",
"asarray",
"(",
"input_videos",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"summaries",
"=",
"convert_videos_to_summaries",
"(",
"input_videos",
",",
"output_videos",
",",
"target_videos",
",",
"tag",
"=",
"\"decode_%d\"",
"%",
"decode_ind",
",",
"decode_hparams",
"=",
"hook_args",
".",
"decode_hparams",
",",
"display_ground_truth",
"=",
"decode_ind",
"==",
"0",
")",
"all_summaries",
".",
"extend",
"(",
"summaries",
")",
"return",
"all_summaries"
] |
Hooks to display videos at decode time.
|
[
"Hooks",
"to",
"display",
"videos",
"at",
"decode",
"time",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L165-L206
|
train
|
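A NumPy-only sketch of the best-decode gathering performed in display_video_hooks above. The predictions structure (a list over decodes of lists over samples of feature dicts) and the metric indices are hypothetical stand-ins.

import numpy as np

predictions = [[{"inputs": np.zeros((2, 4, 4, 3), np.uint8),
                 "outputs": np.zeros((3, 4, 4, 3), np.uint8),
                 "targets": np.zeros((3, 4, 4, 3), np.uint8)}]]
curr_metric_inds = [0]  # decode index chosen per sample by some metric
best_outputs = [predictions[decode_ind][sample_ind]["outputs"]
                for sample_ind, decode_ind in enumerate(curr_metric_inds)]
best_outputs = np.array(best_outputs, dtype=np.uint8)  # (N, T, H, W, C)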
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/video_utils.py
|
summarize_video_metrics
|
def summarize_video_metrics(hook_args):
"""Computes video metrics summaries using the decoder output."""
problem_name = hook_args.problem.name
current_problem = hook_args.problem
hparams = hook_args.hparams
output_dirs = hook_args.output_dirs
predictions = hook_args.predictions
frame_shape = [
current_problem.frame_height, current_problem.frame_width,
current_problem.num_channels
]
metrics_graph = tf.Graph()
with metrics_graph.as_default():
if predictions:
metrics_results, _ = video_metrics.compute_video_metrics_from_predictions(
predictions, decode_hparams=hook_args.decode_hparams)
else:
metrics_results, _ = video_metrics.compute_video_metrics_from_png_files(
output_dirs, problem_name, hparams.video_num_target_frames,
frame_shape)
summary_values = []
for name, array in six.iteritems(metrics_results):
for ind, val in enumerate(array):
tag = "metric_{}/{}".format(name, ind)
summary_values.append(tf.Summary.Value(tag=tag, simple_value=val))
return summary_values
|
python
|
def summarize_video_metrics(hook_args):
"""Computes video metrics summaries using the decoder output."""
problem_name = hook_args.problem.name
current_problem = hook_args.problem
hparams = hook_args.hparams
output_dirs = hook_args.output_dirs
predictions = hook_args.predictions
frame_shape = [
current_problem.frame_height, current_problem.frame_width,
current_problem.num_channels
]
metrics_graph = tf.Graph()
with metrics_graph.as_default():
if predictions:
metrics_results, _ = video_metrics.compute_video_metrics_from_predictions(
predictions, decode_hparams=hook_args.decode_hparams)
else:
metrics_results, _ = video_metrics.compute_video_metrics_from_png_files(
output_dirs, problem_name, hparams.video_num_target_frames,
frame_shape)
summary_values = []
for name, array in six.iteritems(metrics_results):
for ind, val in enumerate(array):
tag = "metric_{}/{}".format(name, ind)
summary_values.append(tf.Summary.Value(tag=tag, simple_value=val))
return summary_values
|
[
"def",
"summarize_video_metrics",
"(",
"hook_args",
")",
":",
"problem_name",
"=",
"hook_args",
".",
"problem",
".",
"name",
"current_problem",
"=",
"hook_args",
".",
"problem",
"hparams",
"=",
"hook_args",
".",
"hparams",
"output_dirs",
"=",
"hook_args",
".",
"output_dirs",
"predictions",
"=",
"hook_args",
".",
"predictions",
"frame_shape",
"=",
"[",
"current_problem",
".",
"frame_height",
",",
"current_problem",
".",
"frame_width",
",",
"current_problem",
".",
"num_channels",
"]",
"metrics_graph",
"=",
"tf",
".",
"Graph",
"(",
")",
"with",
"metrics_graph",
".",
"as_default",
"(",
")",
":",
"if",
"predictions",
":",
"metrics_results",
",",
"_",
"=",
"video_metrics",
".",
"compute_video_metrics_from_predictions",
"(",
"predictions",
",",
"decode_hparams",
"=",
"hook_args",
".",
"decode_hparams",
")",
"else",
":",
"metrics_results",
",",
"_",
"=",
"video_metrics",
".",
"compute_video_metrics_from_png_files",
"(",
"output_dirs",
",",
"problem_name",
",",
"hparams",
".",
"video_num_target_frames",
",",
"frame_shape",
")",
"summary_values",
"=",
"[",
"]",
"for",
"name",
",",
"array",
"in",
"six",
".",
"iteritems",
"(",
"metrics_results",
")",
":",
"for",
"ind",
",",
"val",
"in",
"enumerate",
"(",
"array",
")",
":",
"tag",
"=",
"\"metric_{}/{}\"",
".",
"format",
"(",
"name",
",",
"ind",
")",
"summary_values",
".",
"append",
"(",
"tf",
".",
"Summary",
".",
"Value",
"(",
"tag",
"=",
"tag",
",",
"simple_value",
"=",
"val",
")",
")",
"return",
"summary_values"
] |
Computes video metrics summaries using the decoder output.
|
[
"Computes",
"video",
"metrics",
"summaries",
"using",
"the",
"decoder",
"output",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L209-L235
|
train
|
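A minimal sketch of the per-frame summary naming scheme used by summarize_video_metrics above (TF1 Summary protos); the metric name and values are made up.

import tensorflow as tf

metrics_results = {"PSNR": [31.2, 30.8]}  # hypothetical per-frame metric
summary_values = [
    tf.Summary.Value(tag="metric_{}/{}".format(name, ind), simple_value=val)
    for name, array in metrics_results.items()
    for ind, val in enumerate(array)]
# Yields tags metric_PSNR/0 and metric_PSNR/1, one scalar per frame index.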
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/video_utils.py
|
debug_video_writer_factory
|
def debug_video_writer_factory(output_dir):
"""Creates a VideoWriter for debug videos."""
if FLAGS.disable_ffmpeg:
return common_video.IndividualFrameWriter(output_dir)
else:
output_path = os.path.join(output_dir, "video.avi")
return common_video.WholeVideoWriter(
fps=10, output_path=output_path, file_format="avi"
)
|
python
|
def debug_video_writer_factory(output_dir):
"""Creates a VideoWriter for debug videos."""
if FLAGS.disable_ffmpeg:
return common_video.IndividualFrameWriter(output_dir)
else:
output_path = os.path.join(output_dir, "video.avi")
return common_video.WholeVideoWriter(
fps=10, output_path=output_path, file_format="avi"
)
|
[
"def",
"debug_video_writer_factory",
"(",
"output_dir",
")",
":",
"if",
"FLAGS",
".",
"disable_ffmpeg",
":",
"return",
"common_video",
".",
"IndividualFrameWriter",
"(",
"output_dir",
")",
"else",
":",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"\"video.avi\"",
")",
"return",
"common_video",
".",
"WholeVideoWriter",
"(",
"fps",
"=",
"10",
",",
"output_path",
"=",
"output_path",
",",
"file_format",
"=",
"\"avi\"",
")"
] |
Creates a VideoWriter for debug videos.
|
[
"Creates",
"a",
"VideoWriter",
"for",
"debug",
"videos",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L238-L246
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/video_utils.py
|
VideoProblem.preprocess_example
|
def preprocess_example(self, example, mode, hparams):
"""Runtime preprocessing, e.g., resize example["frame"]."""
if getattr(hparams, "preprocess_resize_frames", None) is not None:
example["frame"] = tf.image.resize_images(
example["frame"], hparams.preprocess_resize_frames,
tf.image.ResizeMethod.BILINEAR)
return example
|
python
|
def preprocess_example(self, example, mode, hparams):
"""Runtime preprocessing, e.g., resize example["frame"]."""
if getattr(hparams, "preprocess_resize_frames", None) is not None:
example["frame"] = tf.image.resize_images(
example["frame"], hparams.preprocess_resize_frames,
tf.image.ResizeMethod.BILINEAR)
return example
|
[
"def",
"preprocess_example",
"(",
"self",
",",
"example",
",",
"mode",
",",
"hparams",
")",
":",
"if",
"getattr",
"(",
"hparams",
",",
"\"preprocess_resize_frames\"",
",",
"None",
")",
"is",
"not",
"None",
":",
"example",
"[",
"\"frame\"",
"]",
"=",
"tf",
".",
"image",
".",
"resize_images",
"(",
"example",
"[",
"\"frame\"",
"]",
",",
"hparams",
".",
"preprocess_resize_frames",
",",
"tf",
".",
"image",
".",
"ResizeMethod",
".",
"BILINEAR",
")",
"return",
"example"
] |
Runtime preprocessing, e.g., resize example["frame"].
|
[
"Runtime",
"preprocessing",
"e",
".",
"g",
".",
"resize",
"example",
"[",
"frame",
"]",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L346-L352
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/video_utils.py
|
VideoProblem.serving_input_fn
|
def serving_input_fn(self, hparams):
"""For serving/predict, assume that only video frames are provided."""
video_input_frames = tf.placeholder(
dtype=tf.float32,
shape=[
None, hparams.video_num_input_frames, self.frame_width,
self.frame_height, self.num_channels
])
# TODO(michalski): add support for passing input_action and input_reward.
return tf.estimator.export.ServingInputReceiver(
features={"inputs": video_input_frames},
receiver_tensors=video_input_frames)
|
python
|
def serving_input_fn(self, hparams):
"""For serving/predict, assume that only video frames are provided."""
video_input_frames = tf.placeholder(
dtype=tf.float32,
shape=[
None, hparams.video_num_input_frames, self.frame_width,
self.frame_height, self.num_channels
])
# TODO(michalski): add support for passing input_action and input_reward.
return tf.estimator.export.ServingInputReceiver(
features={"inputs": video_input_frames},
receiver_tensors=video_input_frames)
|
[
"def",
"serving_input_fn",
"(",
"self",
",",
"hparams",
")",
":",
"video_input_frames",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"float32",
",",
"shape",
"=",
"[",
"None",
",",
"hparams",
".",
"video_num_input_frames",
",",
"self",
".",
"frame_width",
",",
"self",
".",
"frame_height",
",",
"self",
".",
"num_channels",
"]",
")",
"# TODO(michalski): add support for passing input_action and input_reward.",
"return",
"tf",
".",
"estimator",
".",
"export",
".",
"ServingInputReceiver",
"(",
"features",
"=",
"{",
"\"inputs\"",
":",
"video_input_frames",
"}",
",",
"receiver_tensors",
"=",
"video_input_frames",
")"
] |
For serving/predict, assume that only video frames are provided.
|
[
"For",
"serving",
"/",
"predict",
"assume",
"that",
"only",
"video",
"frames",
"are",
"provided",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L397-L409
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/video_utils.py
|
VideoProblem.generate_encoded_samples
|
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
"""Generate samples of the encoded frames with possible extra data.
By default this function just encodes the numpy array returned as "frame"
from `self.generate_samples` into a PNG image. Override this function to
get other encodings on disk.
Args:
data_dir: final data directory. Typically only used in this method to copy
over user-supplied vocab files if there are extra fields needing them.
tmp_dir: temporary directory that you can use for downloading and scratch.
dataset_split: problem.DatasetSplit, which data split to generate samples
for (for example, training and evaluation).
Yields:
Sample: dict<str feature_name, feature value> which is in disk encoding.
Raises:
ValueError: if the frame has a different number of channels than required.
"""
writer = None
with tf.Graph().as_default():
image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
encoded_image_t = tf.image.encode_png(image_t)
with tf.Session() as sess:
for features in self.generate_samples(data_dir, tmp_dir, dataset_split):
unencoded_frame = features.pop("frame")
self.validate_frame(unencoded_frame)
height, width, _ = unencoded_frame.shape
encoded_frame = sess.run(
encoded_image_t, feed_dict={image_t: unencoded_frame})
features["image/encoded"] = [encoded_frame]
features["image/format"] = ["png"]
features["image/height"] = [height]
features["image/width"] = [width]
has_debug_image = "image/debug" in features
if has_debug_image:
unencoded_debug = features.pop("image/debug")
encoded_debug = sess.run(
encoded_image_t, feed_dict={image_t: unencoded_debug})
features["image/encoded_debug"] = [encoded_debug]
if self.debug_dump_frames_path:
# Defer creating debug writer until we know debug_dump_frames_path.
if writer is None:
if not tf.gfile.Exists(self.debug_dump_frames_path):
tf.gfile.MkDir(self.debug_dump_frames_path)
writer = debug_video_writer_factory(self.debug_dump_frames_path)
img = unencoded_debug if has_debug_image else unencoded_frame
encoded_img = encoded_debug if has_debug_image else encoded_frame
writer.write(img, encoded_img)
yield features
if self.debug_dump_frames_path:
writer.finish_to_disk()
|
python
|
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
"""Generate samples of the encoded frames with possible extra data.
By default this function just encodes the numpy array returned as "frame"
from `self.generate_samples` into a PNG image. Override this function to
get other encodings on disk.
Args:
data_dir: final data directory. Typically only used in this method to copy
over user-supplied vocab files if there are extra fields needing them.
tmp_dir: temporary directory that you can use for downloading and scratch.
dataset_split: problem.DatasetSplit, which data split to generate samples
for (for example, training and evaluation).
Yields:
Sample: dict<str feature_name, feature value> which is in disk encoding.
Raises:
ValueError: if the frame has a different number of channels than required.
"""
writer = None
with tf.Graph().as_default():
image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
encoded_image_t = tf.image.encode_png(image_t)
with tf.Session() as sess:
for features in self.generate_samples(data_dir, tmp_dir, dataset_split):
unencoded_frame = features.pop("frame")
self.validate_frame(unencoded_frame)
height, width, _ = unencoded_frame.shape
encoded_frame = sess.run(
encoded_image_t, feed_dict={image_t: unencoded_frame})
features["image/encoded"] = [encoded_frame]
features["image/format"] = ["png"]
features["image/height"] = [height]
features["image/width"] = [width]
has_debug_image = "image/debug" in features
if has_debug_image:
unencoded_debug = features.pop("image/debug")
encoded_debug = sess.run(
encoded_image_t, feed_dict={image_t: unencoded_debug})
features["image/encoded_debug"] = [encoded_debug]
if self.debug_dump_frames_path:
# Defer creating debug writer until we know debug_dump_frames_path.
if writer is None:
if not tf.gfile.Exists(self.debug_dump_frames_path):
tf.gfile.MkDir(self.debug_dump_frames_path)
writer = debug_video_writer_factory(self.debug_dump_frames_path)
img = unencoded_debug if has_debug_image else unencoded_frame
encoded_img = encoded_debug if has_debug_image else encoded_frame
writer.write(img, encoded_img)
yield features
if self.debug_dump_frames_path:
writer.finish_to_disk()
|
[
"def",
"generate_encoded_samples",
"(",
"self",
",",
"data_dir",
",",
"tmp_dir",
",",
"dataset_split",
")",
":",
"writer",
"=",
"None",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"image_t",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"uint8",
",",
"shape",
"=",
"(",
"None",
",",
"None",
",",
"None",
")",
")",
"encoded_image_t",
"=",
"tf",
".",
"image",
".",
"encode_png",
"(",
"image_t",
")",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"for",
"features",
"in",
"self",
".",
"generate_samples",
"(",
"data_dir",
",",
"tmp_dir",
",",
"dataset_split",
")",
":",
"unencoded_frame",
"=",
"features",
".",
"pop",
"(",
"\"frame\"",
")",
"self",
".",
"validate_frame",
"(",
"unencoded_frame",
")",
"height",
",",
"width",
",",
"_",
"=",
"unencoded_frame",
".",
"shape",
"encoded_frame",
"=",
"sess",
".",
"run",
"(",
"encoded_image_t",
",",
"feed_dict",
"=",
"{",
"image_t",
":",
"unencoded_frame",
"}",
")",
"features",
"[",
"\"image/encoded\"",
"]",
"=",
"[",
"encoded_frame",
"]",
"features",
"[",
"\"image/format\"",
"]",
"=",
"[",
"\"png\"",
"]",
"features",
"[",
"\"image/height\"",
"]",
"=",
"[",
"height",
"]",
"features",
"[",
"\"image/width\"",
"]",
"=",
"[",
"width",
"]",
"has_debug_image",
"=",
"\"image/debug\"",
"in",
"features",
"if",
"has_debug_image",
":",
"unencoded_debug",
"=",
"features",
".",
"pop",
"(",
"\"image/debug\"",
")",
"encoded_debug",
"=",
"sess",
".",
"run",
"(",
"encoded_image_t",
",",
"feed_dict",
"=",
"{",
"image_t",
":",
"unencoded_debug",
"}",
")",
"features",
"[",
"\"image/encoded_debug\"",
"]",
"=",
"[",
"encoded_debug",
"]",
"if",
"self",
".",
"debug_dump_frames_path",
":",
"# Defer creating debug writer until we know debug_dump_frames_path.",
"if",
"writer",
"is",
"None",
":",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"self",
".",
"debug_dump_frames_path",
")",
":",
"tf",
".",
"gfile",
".",
"MkDir",
"(",
"self",
".",
"debug_dump_frames_path",
")",
"writer",
"=",
"debug_video_writer_factory",
"(",
"self",
".",
"debug_dump_frames_path",
")",
"img",
"=",
"unencoded_debug",
"if",
"has_debug_image",
"else",
"unencoded_frame",
"encoded_img",
"=",
"encoded_debug",
"if",
"has_debug_image",
"else",
"encoded_frame",
"writer",
".",
"write",
"(",
"img",
",",
"encoded_img",
")",
"yield",
"features",
"if",
"self",
".",
"debug_dump_frames_path",
":",
"writer",
".",
"finish_to_disk",
"(",
")"
] |
Generate samples of the encoded frames with possible extra data.
By default this function just encodes the numpy array returned as "frame"
from `self.generate_samples` into a PNG image. Override this function to
get other encodings on disk.
Args:
data_dir: final data directory. Typically only used in this method to copy
over user-supplied vocab files if there are extra fields needing them.
tmp_dir: temporary directory that you can use for downloading and scratch.
dataset_split: problem.DatasetSplit, which data split to generate samples
for (for example, training and evaluation).
Yields:
Sample: dict<str feature_name, feature value> which is in disk encoding.
Raises:
ValueError: if the frame has a different number of channels than required.
|
[
"Generate",
"samples",
"of",
"the",
"encoded",
"frames",
"with",
"possible",
"extra",
"data",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L573-L630
|
train
|
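A standalone sketch of the per-frame PNG encoding performed inside generate_encoded_samples above (TF1 Session API); the random 4x4 frame is illustrative only.

import numpy as np
import tensorflow as tf

frame = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
with tf.Graph().as_default():
  image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None))
  encoded_t = tf.image.encode_png(image_t)
  with tf.Session() as sess:
    encoded = sess.run(encoded_t, feed_dict={image_t: frame})
features = {
    "image/encoded": [encoded],  # PNG bytes
    "image/format": ["png"],
    "image/height": [4],
    "image/width": [4],
}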
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/video_utils.py
|
VideoProblem.generate_data
|
def generate_data(self, data_dir, tmp_dir, task_id=-1):
"""The function generating the data."""
filepath_fns = {
problem.DatasetSplit.TRAIN: self.training_filepaths,
problem.DatasetSplit.EVAL: self.dev_filepaths,
problem.DatasetSplit.TEST: self.test_filepaths,
}
# We set shuffled=True as we don't want to shuffle on disk later.
split_paths = [(split["split"], filepath_fns[split["split"]](
data_dir, split["shards"], shuffled=True))
for split in self.dataset_splits]
all_paths = []
for _, paths in split_paths:
all_paths.extend(paths)
if self.is_generate_per_split:
for split, paths in split_paths:
generator_utils.generate_files(
self.generate_encoded_samples(data_dir, tmp_dir, split),
paths,
cycle_every_n=self.total_number_of_frames // len(paths))
else:
generator_utils.generate_files(
self.generate_encoded_samples(data_dir, tmp_dir,
problem.DatasetSplit.TRAIN),
all_paths,
cycle_every_n=self.total_number_of_frames // len(all_paths))
|
python
|
def generate_data(self, data_dir, tmp_dir, task_id=-1):
"""The function generating the data."""
filepath_fns = {
problem.DatasetSplit.TRAIN: self.training_filepaths,
problem.DatasetSplit.EVAL: self.dev_filepaths,
problem.DatasetSplit.TEST: self.test_filepaths,
}
# We set shuffled=True as we don't want to shuffle on disk later.
split_paths = [(split["split"], filepath_fns[split["split"]](
data_dir, split["shards"], shuffled=True))
for split in self.dataset_splits]
all_paths = []
for _, paths in split_paths:
all_paths.extend(paths)
if self.is_generate_per_split:
for split, paths in split_paths:
generator_utils.generate_files(
self.generate_encoded_samples(data_dir, tmp_dir, split),
paths,
cycle_every_n=self.total_number_of_frames // len(paths))
else:
generator_utils.generate_files(
self.generate_encoded_samples(data_dir, tmp_dir,
problem.DatasetSplit.TRAIN),
all_paths,
cycle_every_n=self.total_number_of_frames // len(all_paths))
|
[
"def",
"generate_data",
"(",
"self",
",",
"data_dir",
",",
"tmp_dir",
",",
"task_id",
"=",
"-",
"1",
")",
":",
"filepath_fns",
"=",
"{",
"problem",
".",
"DatasetSplit",
".",
"TRAIN",
":",
"self",
".",
"training_filepaths",
",",
"problem",
".",
"DatasetSplit",
".",
"EVAL",
":",
"self",
".",
"dev_filepaths",
",",
"problem",
".",
"DatasetSplit",
".",
"TEST",
":",
"self",
".",
"test_filepaths",
",",
"}",
"# We set shuffled=True as we don't want to shuffle on disk later.",
"split_paths",
"=",
"[",
"(",
"split",
"[",
"\"split\"",
"]",
",",
"filepath_fns",
"[",
"split",
"[",
"\"split\"",
"]",
"]",
"(",
"data_dir",
",",
"split",
"[",
"\"shards\"",
"]",
",",
"shuffled",
"=",
"True",
")",
")",
"for",
"split",
"in",
"self",
".",
"dataset_splits",
"]",
"all_paths",
"=",
"[",
"]",
"for",
"_",
",",
"paths",
"in",
"split_paths",
":",
"all_paths",
".",
"extend",
"(",
"paths",
")",
"if",
"self",
".",
"is_generate_per_split",
":",
"for",
"split",
",",
"paths",
"in",
"split_paths",
":",
"generator_utils",
".",
"generate_files",
"(",
"self",
".",
"generate_encoded_samples",
"(",
"data_dir",
",",
"tmp_dir",
",",
"split",
")",
",",
"paths",
",",
"cycle_every_n",
"=",
"self",
".",
"total_number_of_frames",
"//",
"len",
"(",
"paths",
")",
")",
"else",
":",
"generator_utils",
".",
"generate_files",
"(",
"self",
".",
"generate_encoded_samples",
"(",
"data_dir",
",",
"tmp_dir",
",",
"problem",
".",
"DatasetSplit",
".",
"TRAIN",
")",
",",
"all_paths",
",",
"cycle_every_n",
"=",
"self",
".",
"total_number_of_frames",
"//",
"len",
"(",
"all_paths",
")",
")"
] |
The function generating the data.
|
[
"The",
"function",
"generating",
"the",
"data",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/video_utils.py#L632-L659
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
add_scope
|
def add_scope(scope=None, scope_fn=None):
"""Return a decorator which add a TF name/variable scope to a function.
Note that the function returned by the decorator accept an additional 'name'
parameter, which can overwrite the name scope given when the function is
created.
Args:
scope (str): name of the scope. If None, the function name is used.
scope_fn (fct): Either tf.name_scope or tf.variable_scope
Returns:
fct: the add_scope decorator
"""
def decorator(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
name = kwargs.pop("name", None) # Python 2 hack for keyword only args
with scope_fn(name or scope or f.__name__):
return f(*args, **kwargs)
return decorated
return decorator
|
python
|
def add_scope(scope=None, scope_fn=None):
"""Return a decorator which add a TF name/variable scope to a function.
Note that the function returned by the decorator accept an additional 'name'
parameter, which can overwrite the name scope given when the function is
created.
Args:
scope (str): name of the scope. If None, the function name is used.
scope_fn (fct): Either tf.name_scope or tf.variable_scope
Returns:
fct: the add_scope decorator
"""
def decorator(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
name = kwargs.pop("name", None) # Python 2 hack for keyword only args
with scope_fn(name or scope or f.__name__):
return f(*args, **kwargs)
return decorated
return decorator
|
[
"def",
"add_scope",
"(",
"scope",
"=",
"None",
",",
"scope_fn",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"decorated",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"kwargs",
".",
"pop",
"(",
"\"name\"",
",",
"None",
")",
"# Python 2 hack for keyword only args",
"with",
"scope_fn",
"(",
"name",
"or",
"scope",
"or",
"f",
".",
"__name__",
")",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorated",
"return",
"decorator"
] |
Return a decorator which adds a TF name/variable scope to a function.
Note that the function returned by the decorator accepts an additional 'name'
parameter, which can overwrite the name scope given when the function is
created.
Args:
scope (str): name of the scope. If None, the function name is used.
scope_fn (fct): Either tf.name_scope or tf.variable_scope
Returns:
fct: the add_scope decorator
|
[
"Return",
"a",
"decorator",
"which",
"add",
"a",
"TF",
"name",
"/",
"variable",
"scope",
"to",
"a",
"function",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L40-L64
|
train
|
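A minimal usage sketch for add_scope, not part of the source: it assumes TF 1.x graph mode and that expert_utils is importable from tensor2tensor.utils; the decorated function `double` is a hypothetical example.

import tensorflow as tf
from tensor2tensor.utils import expert_utils

@expert_utils.add_scope(scope_fn=tf.name_scope)
def double(x):  # hypothetical function, used only to show the decorator
  return x * 2

a = double(tf.constant(1.0))              # ops are created under the "double/" name scope
b = double(tf.constant(2.0), name="dbl")  # the extra 'name' kwarg overrides the scope name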
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
_add_variable_proxy_methods
|
def _add_variable_proxy_methods(var, proxy_tensor):
"""Proxy methods of underlying variable.
This enables our custom getters to still work with, e.g., batch norm.
Args:
var: Variable to proxy
proxy_tensor: Tensor that is identity of var
"""
proxy_tensor.read_value = lambda: tf.identity(proxy_tensor)
proxy_tensor.assign_sub = var.assign_sub
proxy_tensor.assign = var.assign
proxy_tensor.initialized_value = var.initialized_value
|
python
|
def _add_variable_proxy_methods(var, proxy_tensor):
"""Proxy methods of underlying variable.
This enables our custom getters to still work with, e.g., batch norm.
Args:
var: Variable to proxy
proxy_tensor: Tensor that is identity of var
"""
proxy_tensor.read_value = lambda: tf.identity(proxy_tensor)
proxy_tensor.assign_sub = var.assign_sub
proxy_tensor.assign = var.assign
proxy_tensor.initialized_value = var.initialized_value
|
[
"def",
"_add_variable_proxy_methods",
"(",
"var",
",",
"proxy_tensor",
")",
":",
"proxy_tensor",
".",
"read_value",
"=",
"lambda",
":",
"tf",
".",
"identity",
"(",
"proxy_tensor",
")",
"proxy_tensor",
".",
"assign_sub",
"=",
"var",
".",
"assign_sub",
"proxy_tensor",
".",
"assign",
"=",
"var",
".",
"assign",
"proxy_tensor",
".",
"initialized_value",
"=",
"var",
".",
"initialized_value"
] |
Proxy methods of underlying variable.
This enables our custom getters to still work with, e.g., batch norm.
Args:
var: Variable to proxy
proxy_tensor: Tensor that is identity of var
|
[
"Proxy",
"methods",
"of",
"underlying",
"variable",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L75-L87
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
_rowwise_unsorted_segment_sum
|
def _rowwise_unsorted_segment_sum(values, indices, n):
"""UnsortedSegmentSum on each row.
Args:
values: a `Tensor` with shape `[batch_size, k]`.
indices: an integer `Tensor` with shape `[batch_size, k]`.
n: an integer.
Returns:
A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
"""
batch, k = tf.unstack(tf.shape(indices), num=2)
indices_flat = tf.reshape(indices, [-1]) + tf.div(tf.range(batch * k), k) * n
ret_flat = tf.unsorted_segment_sum(
tf.reshape(values, [-1]), indices_flat, batch * n)
return tf.reshape(ret_flat, [batch, n])
|
python
|
def _rowwise_unsorted_segment_sum(values, indices, n):
"""UnsortedSegmentSum on each row.
Args:
values: a `Tensor` with shape `[batch_size, k]`.
indices: an integer `Tensor` with shape `[batch_size, k]`.
n: an integer.
Returns:
A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
"""
batch, k = tf.unstack(tf.shape(indices), num=2)
indices_flat = tf.reshape(indices, [-1]) + tf.div(tf.range(batch * k), k) * n
ret_flat = tf.unsorted_segment_sum(
tf.reshape(values, [-1]), indices_flat, batch * n)
return tf.reshape(ret_flat, [batch, n])
|
[
"def",
"_rowwise_unsorted_segment_sum",
"(",
"values",
",",
"indices",
",",
"n",
")",
":",
"batch",
",",
"k",
"=",
"tf",
".",
"unstack",
"(",
"tf",
".",
"shape",
"(",
"indices",
")",
",",
"num",
"=",
"2",
")",
"indices_flat",
"=",
"tf",
".",
"reshape",
"(",
"indices",
",",
"[",
"-",
"1",
"]",
")",
"+",
"tf",
".",
"div",
"(",
"tf",
".",
"range",
"(",
"batch",
"*",
"k",
")",
",",
"k",
")",
"*",
"n",
"ret_flat",
"=",
"tf",
".",
"unsorted_segment_sum",
"(",
"tf",
".",
"reshape",
"(",
"values",
",",
"[",
"-",
"1",
"]",
")",
",",
"indices_flat",
",",
"batch",
"*",
"n",
")",
"return",
"tf",
".",
"reshape",
"(",
"ret_flat",
",",
"[",
"batch",
",",
"n",
"]",
")"
] |
UnsortedSegmentSum on each row.
Args:
values: a `Tensor` with shape `[batch_size, k]`.
indices: an integer `Tensor` with shape `[batch_size, k]`.
n: an integer.
Returns:
A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
|
[
"UnsortedSegmentSum",
"on",
"each",
"row",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L267-L281
|
train
|
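A worked numeric sketch of _rowwise_unsorted_segment_sum (my own example; it assumes a TF 1.x session and that calling the module-private helper directly is acceptable for illustration):

import tensorflow as tf
from tensor2tensor.utils import expert_utils

values = tf.constant([[1., 2.], [3., 4.]])  # [batch_size=2, k=2]
indices = tf.constant([[0, 2], [1, 1]])     # per-row segment ids in [0, n)
out = expert_utils._rowwise_unsorted_segment_sum(values, indices, n=3)
with tf.Session() as sess:
  print(sess.run(out))  # [[1. 0. 2.]
                        #  [0. 7. 0.]]  (3. and 4. both land in column 1)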
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
_prob_in_top_k
|
def _prob_in_top_k(
clean_values, noisy_values, noise_stddev, noisy_top_values, k):
"""Helper function to NoisyTopKGating.
Computes the probability that a value is in the top k, given different random noise.
This gives us a way of backpropagating from a loss that balances the number
of times each expert is in the top k experts per example.
In the case of no noise, pass in None for noise_stddev, and the result will
not be differentiable.
Args:
clean_values: a `Tensor` of shape [batch, n].
noisy_values: a `Tensor` of shape [batch, n]. Equal to clean values plus
normally distributed noise with standard deviation noise_stddev.
noise_stddev: a `Tensor` of shape [batch, n], or None
noisy_top_values: a `Tensor` of shape [batch, m].
"values" Output of tf.top_k(noisy_top_values, m). m >= k+1
k: an integer.
Returns:
a `Tensor` of shape [batch, n].
"""
batch = tf.shape(clean_values)[0]
m = tf.shape(noisy_top_values)[1]
top_values_flat = tf.reshape(noisy_top_values, [-1])
# we want to compute the threshold that a particular value would have to
# exceed in order to make the top k. This computation differs depending
# on whether the value is already in the top k.
threshold_positions_if_in = tf.range(batch) * m + k
threshold_if_in = tf.expand_dims(
tf.gather(top_values_flat, threshold_positions_if_in), 1)
is_in = tf.greater(noisy_values, threshold_if_in)
if noise_stddev is None:
return tf.to_float(is_in)
threshold_positions_if_out = threshold_positions_if_in - 1
threshold_if_out = tf.expand_dims(
tf.gather(top_values_flat, threshold_positions_if_out), 1)
# is each value currently in the top k.
prob_if_in = _normal_distribution_cdf(clean_values - threshold_if_in,
noise_stddev)
prob_if_out = _normal_distribution_cdf(clean_values - threshold_if_out,
noise_stddev)
prob = tf.where(is_in, prob_if_in, prob_if_out)
return prob
|
python
|
def _prob_in_top_k(
clean_values, noisy_values, noise_stddev, noisy_top_values, k):
"""Helper function to NoisyTopKGating.
Computes the probability that a value is in the top k, given different random noise.
This gives us a way of backpropagating from a loss that balances the number
of times each expert is in the top k experts per example.
In the case of no noise, pass in None for noise_stddev, and the result will
not be differentiable.
Args:
clean_values: a `Tensor` of shape [batch, n].
noisy_values: a `Tensor` of shape [batch, n]. Equal to clean values plus
normally distributed noise with standard deviation noise_stddev.
noise_stddev: a `Tensor` of shape [batch, n], or None
noisy_top_values: a `Tensor` of shape [batch, m].
"values" Output of tf.top_k(noisy_top_values, m). m >= k+1
k: an integer.
Returns:
a `Tensor` of shape [batch, n].
"""
batch = tf.shape(clean_values)[0]
m = tf.shape(noisy_top_values)[1]
top_values_flat = tf.reshape(noisy_top_values, [-1])
# we want to compute the threshold that a particular value would have to
# exceed in order to make the top k. This computation differs depending
# on whether the value is already in the top k.
threshold_positions_if_in = tf.range(batch) * m + k
threshold_if_in = tf.expand_dims(
tf.gather(top_values_flat, threshold_positions_if_in), 1)
is_in = tf.greater(noisy_values, threshold_if_in)
if noise_stddev is None:
return tf.to_float(is_in)
threshold_positions_if_out = threshold_positions_if_in - 1
threshold_if_out = tf.expand_dims(
tf.gather(top_values_flat, threshold_positions_if_out), 1)
# is each value currently in the top k.
prob_if_in = _normal_distribution_cdf(clean_values - threshold_if_in,
noise_stddev)
prob_if_out = _normal_distribution_cdf(clean_values - threshold_if_out,
noise_stddev)
prob = tf.where(is_in, prob_if_in, prob_if_out)
return prob
|
[
"def",
"_prob_in_top_k",
"(",
"clean_values",
",",
"noisy_values",
",",
"noise_stddev",
",",
"noisy_top_values",
",",
"k",
")",
":",
"batch",
"=",
"tf",
".",
"shape",
"(",
"clean_values",
")",
"[",
"0",
"]",
"m",
"=",
"tf",
".",
"shape",
"(",
"noisy_top_values",
")",
"[",
"1",
"]",
"top_values_flat",
"=",
"tf",
".",
"reshape",
"(",
"noisy_top_values",
",",
"[",
"-",
"1",
"]",
")",
"# we want to compute the threshold that a particular value would have to",
"# exceed in order to make the top k. This computation differs depending",
"# on whether the value is already in the top k.",
"threshold_positions_if_in",
"=",
"tf",
".",
"range",
"(",
"batch",
")",
"*",
"m",
"+",
"k",
"threshold_if_in",
"=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"gather",
"(",
"top_values_flat",
",",
"threshold_positions_if_in",
")",
",",
"1",
")",
"is_in",
"=",
"tf",
".",
"greater",
"(",
"noisy_values",
",",
"threshold_if_in",
")",
"if",
"noise_stddev",
"is",
"None",
":",
"return",
"tf",
".",
"to_float",
"(",
"is_in",
")",
"threshold_positions_if_out",
"=",
"threshold_positions_if_in",
"-",
"1",
"threshold_if_out",
"=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"gather",
"(",
"top_values_flat",
",",
"threshold_positions_if_out",
")",
",",
"1",
")",
"# is each value currently in the top k.",
"prob_if_in",
"=",
"_normal_distribution_cdf",
"(",
"clean_values",
"-",
"threshold_if_in",
",",
"noise_stddev",
")",
"prob_if_out",
"=",
"_normal_distribution_cdf",
"(",
"clean_values",
"-",
"threshold_if_out",
",",
"noise_stddev",
")",
"prob",
"=",
"tf",
".",
"where",
"(",
"is_in",
",",
"prob_if_in",
",",
"prob_if_out",
")",
"return",
"prob"
] |
Helper function to NoisyTopKGating.
Computes the probability that a value is in the top k, given different random noise.
This gives us a way of backpropagating from a loss that balances the number
of times each expert is in the top k experts per example.
In the case of no noise, pass in None for noise_stddev, and the result will
not be differentiable.
Args:
clean_values: a `Tensor` of shape [batch, n].
noisy_values: a `Tensor` of shape [batch, n]. Equal to clean values plus
normally distributed noise with standard deviation noise_stddev.
noise_stddev: a `Tensor` of shape [batch, n], or None
noisy_top_values: a `Tensor` of shape [batch, m].
"values" Output of tf.top_k(noisy_top_values, m). m >= k+1
k: an integer.
Returns:
a `Tensor` of shape [batch, n].
|
[
"Helper",
"function",
"to",
"NoisyTopKGating",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L303-L348
|
train
|
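A hedged sketch of how _prob_in_top_k gets wired, mirroring the way noisy_top_k_gating calls it (the constants are made up; TF 1.x graph mode and direct access to the private helper are assumptions on my part):

import tensorflow as tf
from tensor2tensor.utils import expert_utils

k = 2
clean = tf.constant([[1.0, 2.0, 3.0, 4.0]])                # [batch=1, n=4]
stddev = tf.fill(tf.shape(clean), 0.1)                     # constant noise stddev
noisy = clean + tf.random_normal(tf.shape(clean)) * stddev
top_vals, _ = tf.nn.top_k(noisy, k + 1)                    # m = k + 1, as the docstring requires
prob = expert_utils._prob_in_top_k(clean, noisy, stddev, top_vals, k)
# prob has shape [1, 4]: the per-position probability of landing in the top k,
# and it is differentiable w.r.t. clean, so it can feed a load-balancing loss.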
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
cv_squared
|
def cv_squared(x):
"""The squared coefficient of variation of a sample.
Useful as a loss to encourage a positive distribution to be more uniform.
Epsilons added for numerical stability.
Returns 0 for an empty Tensor.
Args:
x: a `Tensor`.
Returns:
a `Scalar`.
"""
epsilon = 1e-10
float_size = tf.to_float(tf.size(x)) + epsilon
mean = tf.reduce_sum(x) / float_size
variance = tf.reduce_sum(tf.squared_difference(x, mean)) / float_size
return variance / (tf.square(mean) + epsilon)
|
python
|
def cv_squared(x):
"""The squared coefficient of variation of a sample.
Useful as a loss to encourage a positive distribution to be more uniform.
Epsilons added for numerical stability.
Returns 0 for an empty Tensor.
Args:
x: a `Tensor`.
Returns:
a `Scalar`.
"""
epsilon = 1e-10
float_size = tf.to_float(tf.size(x)) + epsilon
mean = tf.reduce_sum(x) / float_size
variance = tf.reduce_sum(tf.squared_difference(x, mean)) / float_size
return variance / (tf.square(mean) + epsilon)
|
[
"def",
"cv_squared",
"(",
"x",
")",
":",
"epsilon",
"=",
"1e-10",
"float_size",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"size",
"(",
"x",
")",
")",
"+",
"epsilon",
"mean",
"=",
"tf",
".",
"reduce_sum",
"(",
"x",
")",
"/",
"float_size",
"variance",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"squared_difference",
"(",
"x",
",",
"mean",
")",
")",
"/",
"float_size",
"return",
"variance",
"/",
"(",
"tf",
".",
"square",
"(",
"mean",
")",
"+",
"epsilon",
")"
] |
The squared coefficient of variation of a sample.
Useful as a loss to encourage a positive distribution to be more uniform.
Epsilons added for numerical stability.
Returns 0 for an empty Tensor.
Args:
x: a `Tensor`.
Returns:
a `Scalar`.
|
[
"The",
"squared",
"coefficient",
"of",
"variation",
"of",
"a",
"sample",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L351-L368
|
train
|
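A small numeric check of cv_squared (my own example, assuming a TF 1.x session): a uniform load gives roughly 0, while a fully skewed one gives variance/mean^2 = 3/1 = 3.

import tensorflow as tf
from tensor2tensor.utils import expert_utils

uniform = tf.constant([1., 1., 1., 1.])  # perfectly balanced expert load
skewed = tf.constant([4., 0., 0., 0.])   # one expert takes everything
with tf.Session() as sess:
  print(sess.run(expert_utils.cv_squared(uniform)))  # ~0.0
  print(sess.run(expert_utils.cv_squared(skewed)))   # ~3.0  (variance 3, mean 1)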
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
update_hparams_for_vq_gating
|
def update_hparams_for_vq_gating(hparams):
"""VQ Gating hparams."""
hparams.add_hparam("z_size", 4)
hparams.add_hparam("noise_dev", 0.5)
# Bottleneck kinds supported: dense, vae, dvq.
hparams.add_hparam("bottleneck_kind", "dvq")
hparams.add_hparam("num_blocks", 1)
hparams.add_hparam("num_residuals", 1)
# Reshape method for DVQ: slice, project
hparams.add_hparam("beta", 0.25)
hparams.add_hparam("epsilon", 1e-5)
hparams.add_hparam("decay", 0.999)
hparams.add_hparam("ema", False) # default is false until ema is implemented
hparams.add_hparam("random_top_k", 1)
hparams.add_hparam("soft_em", False)
hparams.add_hparam("num_samples", 10)
hparams.add_hparam("gating_type", "vq")
hparams.add_hparam("use_scales", int(True))
hparams.add_hparam("residual_centroids", int(False))
|
python
|
def update_hparams_for_vq_gating(hparams):
"""VQ Gating hparams."""
hparams.add_hparam("z_size", 4)
hparams.add_hparam("noise_dev", 0.5)
# Bottleneck kinds supported: dense, vae, dvq.
hparams.add_hparam("bottleneck_kind", "dvq")
hparams.add_hparam("num_blocks", 1)
hparams.add_hparam("num_residuals", 1)
# Reshape method for DVQ: slice, project
hparams.add_hparam("beta", 0.25)
hparams.add_hparam("epsilon", 1e-5)
hparams.add_hparam("decay", 0.999)
hparams.add_hparam("ema", False) # default is false until ema is implemented
hparams.add_hparam("random_top_k", 1)
hparams.add_hparam("soft_em", False)
hparams.add_hparam("num_samples", 10)
hparams.add_hparam("gating_type", "vq")
hparams.add_hparam("use_scales", int(True))
hparams.add_hparam("residual_centroids", int(False))
|
[
"def",
"update_hparams_for_vq_gating",
"(",
"hparams",
")",
":",
"hparams",
".",
"add_hparam",
"(",
"\"z_size\"",
",",
"4",
")",
"hparams",
".",
"add_hparam",
"(",
"\"noise_dev\"",
",",
"0.5",
")",
"# Bottleneck kinds supported: dense, vae, dvq.",
"hparams",
".",
"add_hparam",
"(",
"\"bottleneck_kind\"",
",",
"\"dvq\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_blocks\"",
",",
"1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_residuals\"",
",",
"1",
")",
"# Reshape method for DVQ: slice, project",
"hparams",
".",
"add_hparam",
"(",
"\"beta\"",
",",
"0.25",
")",
"hparams",
".",
"add_hparam",
"(",
"\"epsilon\"",
",",
"1e-5",
")",
"hparams",
".",
"add_hparam",
"(",
"\"decay\"",
",",
"0.999",
")",
"hparams",
".",
"add_hparam",
"(",
"\"ema\"",
",",
"False",
")",
"# default is false until ema is implemented",
"hparams",
".",
"add_hparam",
"(",
"\"random_top_k\"",
",",
"1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"soft_em\"",
",",
"False",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_samples\"",
",",
"10",
")",
"hparams",
".",
"add_hparam",
"(",
"\"gating_type\"",
",",
"\"vq\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"use_scales\"",
",",
"int",
"(",
"True",
")",
")",
"hparams",
".",
"add_hparam",
"(",
"\"residual_centroids\"",
",",
"int",
"(",
"False",
")",
")"
] |
VQ Gating hparams.
|
[
"VQ",
"Gating",
"hparams",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L384-L402
|
train
|
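A usage sketch for the hparams updater (an assumption on my part: TF 1.x, where tf.contrib.training.HParams provides the add_hparam method used above):

import tensorflow as tf
from tensor2tensor.utils import expert_utils

hparams = tf.contrib.training.HParams()  # start from an empty hparams object
expert_utils.update_hparams_for_vq_gating(hparams)
print(hparams.bottleneck_kind, hparams.z_size, hparams.gating_type)  # dvq 4 vq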
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
_my_top_k
|
def _my_top_k(x, k):
"""GPU-compatible version of top-k that works for very small constant k.
Calls argmax repeatedly.
tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense,
seems not to be, so if we use tf.nn.top_k, then both the top_k and its
gradient go on cpu. Once this is not an issue, this function becomes
obsolete and should be replaced by tf.nn.top_k.
Args:
x: a 2d Tensor.
k: a small integer.
Returns:
values: a Tensor of shape [batch_size, k]
indices: an int32 Tensor of shape [batch_size, k]
"""
if k > 10:
return tf.nn.top_k(x, k)
values = []
indices = []
depth = tf.shape(x)[1]
for i in range(k):
values.append(tf.reduce_max(x, 1))
argmax = tf.argmax(x, 1)
indices.append(argmax)
if i + 1 < k:
x += tf.one_hot(argmax, depth, -1e9)
return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1))
|
python
|
def _my_top_k(x, k):
"""GPU-compatible version of top-k that works for very small constant k.
Calls argmax repeatedly.
tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense,
seems not to be, so if we use tf.nn.top_k, then both the top_k and its
gradient go on cpu. Once this is not an issue, this function becomes
obsolete and should be replaced by tf.nn.top_k.
Args:
x: a 2d Tensor.
k: a small integer.
Returns:
values: a Tensor of shape [batch_size, k]
indices: an int32 Tensor of shape [batch_size, k]
"""
if k > 10:
return tf.nn.top_k(x, k)
values = []
indices = []
depth = tf.shape(x)[1]
for i in range(k):
values.append(tf.reduce_max(x, 1))
argmax = tf.argmax(x, 1)
indices.append(argmax)
if i + 1 < k:
x += tf.one_hot(argmax, depth, -1e9)
return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1))
|
[
"def",
"_my_top_k",
"(",
"x",
",",
"k",
")",
":",
"if",
"k",
">",
"10",
":",
"return",
"tf",
".",
"nn",
".",
"top_k",
"(",
"x",
",",
"k",
")",
"values",
"=",
"[",
"]",
"indices",
"=",
"[",
"]",
"depth",
"=",
"tf",
".",
"shape",
"(",
"x",
")",
"[",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"k",
")",
":",
"values",
".",
"append",
"(",
"tf",
".",
"reduce_max",
"(",
"x",
",",
"1",
")",
")",
"argmax",
"=",
"tf",
".",
"argmax",
"(",
"x",
",",
"1",
")",
"indices",
".",
"append",
"(",
"argmax",
")",
"if",
"i",
"+",
"1",
"<",
"k",
":",
"x",
"+=",
"tf",
".",
"one_hot",
"(",
"argmax",
",",
"depth",
",",
"-",
"1e9",
")",
"return",
"tf",
".",
"stack",
"(",
"values",
",",
"axis",
"=",
"1",
")",
",",
"tf",
".",
"to_int32",
"(",
"tf",
".",
"stack",
"(",
"indices",
",",
"axis",
"=",
"1",
")",
")"
] |
GPU-compatible version of top-k that works for very small constant k.
Calls argmax repeatedly.
tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense,
seems not to be, so if we use tf.nn.top_k, then both the top_k and its
gradient go on cpu. Once this is not an issue, this function becomes
obsolete and should be replaced by tf.nn.top_k.
Args:
x: a 2d Tensor.
k: a small integer.
Returns:
values: a Tensor of shape [batch_size, k]
indices: an int32 Tensor of shape [batch_size, k]
|
[
"GPU",
"-",
"compatible",
"version",
"of",
"top",
"-",
"k",
"that",
"works",
"for",
"very",
"small",
"constant",
"k",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L405-L434
|
train
|
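A quick check that _my_top_k matches tf.nn.top_k for small k (my own example; private-helper access and a TF 1.x session are assumed):

import tensorflow as tf
from tensor2tensor.utils import expert_utils

x = tf.constant([[0.1, 0.5, 0.3],
                 [0.9, 0.2, 0.4]])
values, indices = expert_utils._my_top_k(x, 2)  # k <= 10, so the repeated-argmax path runs
with tf.Session() as sess:
  v, i = sess.run([values, indices])
  # v -> [[0.5, 0.3], [0.9, 0.4]], i -> [[1, 2], [0, 2]], same as tf.nn.top_k(x, 2)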
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
vq_gating
|
def vq_gating(x,
num_experts,
k,
bneck,
hparams=None,
name="vq_gating"):
"""VQ gating.
Args:
x: input Tensor with shape [batch_size, input_size]
num_experts: an integer
k: an integer - number of experts per example
bneck: a bottleneck object
hparams: optional hparams
name: an optional string
Returns:
gates: a Tensor with shape [batch_size, num_experts]
extra_loss: a scalar auxiliary loss (the bottleneck loss, plus the scale loss
when hparams.use_scales)
centroids: the embedded centroids if hparams.residual_centroids, else None
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if hparams.use_scales:
scales = tf.get_variable(
"scales", [num_experts],
tf.float32,
initializer=tf.ones_initializer())
scales = tf.nn.softmax(scales)
hparams.scales = scales
input_size = x.get_shape().as_list()[-1]
batch_size = common_layers.shape_list(x)[0]
if k > 1:
# first project into two dense layers, chop and discretize, and gate
# TODO(avaswani): Maybe scale the embeddings flowing out of the experts.
# We might want to do this to match the computation being done by topk
x = tf.layers.dense(x, input_size * k)
# x goes from [batch_size, input_size*k] to [batch_size*k, input_size]
x = tf.reshape(x, [batch_size * k, input_size])
inputs = tf.expand_dims(x, axis=1)
inputs = tf.expand_dims(inputs, axis=1)
# VQ hparams
hparams.z_size = int(math.log(num_experts, 2))
hparams.hidden_size = input_size
hparams.top_k = k
d = bneck.discrete_bottleneck(inputs)
centroids = None
exp_discrete = d["discrete"]
embed_lookup = d["embed"]
extra_loss = d["loss"]
if hparams.residual_centroids:
centroids = embed_lookup(exp_discrete) # gives the centroids
top_k_indices = tf.squeeze(exp_discrete, axis=1)
tf.summary.histogram("discrete_counts", top_k_indices)
# if k > 1, then we need to reshape top_k_indices from [batch_size*k, 1]
# to [batch_size, k]
if k > 1:
top_k_indices = tf.reshape(top_k_indices, [batch_size, k])
# get the top k gates
top_k_gates = tf.ones([batch_size, k])
# This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the
# positions corresponding to all but the top k experts per example.
gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices,
num_experts)
# Compute count per expert from the gates.
# gates has shape [batch_size, num_experts]
# count per expert has shape [num_experts, 1]
count_per_expert = tf.reduce_sum(gates, axis=0)
if hparams.use_scales:
scale_loss = tf.reduce_mean(tf.to_float(count_per_expert) * scales)
extra_loss += scale_loss
if common_layers.should_generate_summaries():
tf.summary.histogram("vq_loss", extra_loss)
tf.summary.histogram("scale_loss", scale_loss)
return gates, extra_loss, centroids
|
python
|
def vq_gating(x,
num_experts,
k,
bneck,
hparams=None,
name="vq_gating"):
"""VQ gating.
Args:
x: input Tensor with shape [batch_size, input_size]
num_experts: an integer
k: an integer - number of experts per example
bneck: a bottleneck object
hparams: optional hparams
name: an optional string
Returns:
gates: a Tensor with shape [batch_size, num_experts]
extra_loss: a scalar auxiliary loss (the bottleneck loss, plus the scale loss
when hparams.use_scales)
centroids: the embedded centroids if hparams.residual_centroids, else None
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if hparams.use_scales:
scales = tf.get_variable(
"scales", [num_experts],
tf.float32,
initializer=tf.ones_initializer())
scales = tf.nn.softmax(scales)
hparams.scales = scales
input_size = x.get_shape().as_list()[-1]
batch_size = common_layers.shape_list(x)[0]
if k > 1:
# first project into two dense layers, chop and discretize, and gate
# TODO(avaswani): Maybe scale the embeddings flowing out of the experts.
# We might want to do this to match the computation being done by topk
x = tf.layers.dense(x, input_size * k)
# x goes from [batch_size, input_size*k] to [batch_size*k, input_size]
x = tf.reshape(x, [batch_size * k, input_size])
inputs = tf.expand_dims(x, axis=1)
inputs = tf.expand_dims(inputs, axis=1)
# VQ hparams
hparams.z_size = int(math.log(num_experts, 2))
hparams.hidden_size = input_size
hparams.top_k = k
d = bneck.discrete_bottleneck(inputs)
centroids = None
exp_discrete = d["discrete"]
embed_lookup = d["embed"]
extra_loss = d["loss"]
if hparams.residual_centroids:
centroids = embed_lookup(exp_discrete) # gives the centroids
top_k_indices = tf.squeeze(exp_discrete, axis=1)
tf.summary.histogram("discrete_counts", top_k_indices)
# if k > 1, then we need to reshape top_k_indices from [batch_size*k, 1]
# to [batch_size, k]
if k > 1:
top_k_indices = tf.reshape(top_k_indices, [batch_size, k])
# get the top k gates
top_k_gates = tf.ones([batch_size, k])
# This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the
# positions corresponding to all but the top k experts per example.
gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices,
num_experts)
# Compute count per expert from the gates.
# gates has shape [batch_size, num_experts]
# count per expert has shape [num_experts, 1]
count_per_expert = tf.reduce_sum(gates, axis=0)
if hparams.use_scales:
scale_loss = tf.reduce_mean(tf.to_float(count_per_expert) * scales)
extra_loss += scale_loss
if common_layers.should_generate_summaries():
tf.summary.histogram("vq_loss", extra_loss)
tf.summary.histogram("scale_loss", scale_loss)
return gates, extra_loss, centroids
|
[
"def",
"vq_gating",
"(",
"x",
",",
"num_experts",
",",
"k",
",",
"bneck",
",",
"hparams",
"=",
"None",
",",
"name",
"=",
"\"vq_gating\"",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"if",
"hparams",
".",
"use_scales",
":",
"scales",
"=",
"tf",
".",
"get_variable",
"(",
"\"scales\"",
",",
"[",
"num_experts",
"]",
",",
"tf",
".",
"float32",
",",
"initializer",
"=",
"tf",
".",
"ones_initializer",
"(",
")",
")",
"scales",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"scales",
")",
"hparams",
".",
"scales",
"=",
"scales",
"input_size",
"=",
"x",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"-",
"1",
"]",
"batch_size",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"[",
"0",
"]",
"if",
"k",
">",
"1",
":",
"# first project into two dense layers, chop and discretize, and gate",
"# TODO(avaswani): Maybe scale the embeddings flowing out of the experts.",
"# We might want to do this to match the computation being done by topk",
"x",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"x",
",",
"input_size",
"*",
"k",
")",
"# x goes from [batch_size, input_size*k] to [batch_size*k, input_size]",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"batch_size",
"*",
"k",
",",
"input_size",
"]",
")",
"inputs",
"=",
"tf",
".",
"expand_dims",
"(",
"x",
",",
"axis",
"=",
"1",
")",
"inputs",
"=",
"tf",
".",
"expand_dims",
"(",
"inputs",
",",
"axis",
"=",
"1",
")",
"# VQ hparams",
"hparams",
".",
"z_size",
"=",
"int",
"(",
"math",
".",
"log",
"(",
"num_experts",
",",
"2",
")",
")",
"hparams",
".",
"hidden_size",
"=",
"input_size",
"hparams",
".",
"top_k",
"=",
"k",
"d",
"=",
"bneck",
".",
"discrete_bottleneck",
"(",
"inputs",
")",
"centroids",
"=",
"None",
"exp_discrete",
"=",
"d",
"[",
"\"discrete\"",
"]",
"embed_lookup",
"=",
"d",
"[",
"\"embed\"",
"]",
"extra_loss",
"=",
"d",
"[",
"\"loss\"",
"]",
"if",
"hparams",
".",
"residual_centroids",
":",
"centroids",
"=",
"embed_lookup",
"(",
"exp_discrete",
")",
"# gives the centroids",
"top_k_indices",
"=",
"tf",
".",
"squeeze",
"(",
"exp_discrete",
",",
"axis",
"=",
"1",
")",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"discrete_counts\"",
",",
"top_k_indices",
")",
"# if k > 1, then we need to reshape top_k_indices from [batch_size*k, 1]",
"# to [batch_size, k]",
"if",
"k",
">",
"1",
":",
"top_k_indices",
"=",
"tf",
".",
"reshape",
"(",
"top_k_indices",
",",
"[",
"batch_size",
",",
"k",
"]",
")",
"# get the top k gates",
"top_k_gates",
"=",
"tf",
".",
"ones",
"(",
"[",
"batch_size",
",",
"k",
"]",
")",
"# This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the",
"# positions corresponding to all but the top k experts per example.",
"gates",
"=",
"_rowwise_unsorted_segment_sum",
"(",
"top_k_gates",
",",
"top_k_indices",
",",
"num_experts",
")",
"# Compute count per expert from the gates.",
"# gates has shape [batch_size, num_experts]",
"# count per expert has shape [num_experts, 1]",
"count_per_expert",
"=",
"tf",
".",
"reduce_sum",
"(",
"gates",
",",
"axis",
"=",
"0",
")",
"if",
"hparams",
".",
"use_scales",
":",
"scale_loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"to_float",
"(",
"count_per_expert",
")",
"*",
"scales",
")",
"extra_loss",
"+=",
"scale_loss",
"if",
"common_layers",
".",
"should_generate_summaries",
"(",
")",
":",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"vq_loss\"",
",",
"extra_loss",
")",
"tf",
".",
"summary",
".",
"historgram",
"(",
"\"scale_loss\"",
",",
"scale_loss",
")",
"return",
"gates",
",",
"extra_loss",
",",
"centroids"
] |
VQ gating.
Args:
x: input Tensor with shape [batch_size, input_size]
num_experts: an integer
k: an integer - number of experts per example
bneck: a bottleneck object
hparams: optional hparams
name: an optional string
Returns:
gates: a Tensor with shape [batch_size, num_experts]
extra_loss: a scalar auxiliary loss (the bottleneck loss, plus the scale loss
when hparams.use_scales)
centroids: the embedded centroids if hparams.residual_centroids, else None
|
[
"VQ",
"gating",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L437-L511
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
noisy_top_k_gating
|
def noisy_top_k_gating(x,
num_experts,
train,
k=2,
initializer=tf.zeros_initializer(),
noisy_gating=True,
noise_epsilon=1e-2,
name=None):
"""Noisy top-k gating.
See paper: https://arxiv.org/abs/1701.06538.
Args:
x: input Tensor with shape [batch_size, input_size]
num_experts: an integer
train: a boolean - we only add noise at training time.
k: an integer - number of experts per example
initializer: an initializer
noisy_gating: a boolean
noise_epsilon: a float
name: an optional string
Returns:
gates: a Tensor with shape [batch_size, num_experts]
load: a Tensor with shape [num_experts]
"""
with tf.variable_scope(name, default_name="noisy_top_k_gating"):
input_size = x.get_shape().as_list()[-1]
w_gate = tf.get_variable(
"w_gate", [input_size, num_experts], tf.float32, initializer)
if noisy_gating:
w_noise = tf.get_variable("w_noise",
[input_size, num_experts], tf.float32,
initializer)
clean_logits = tf.matmul(x, w_gate)
if noisy_gating:
raw_noise_stddev = tf.matmul(x, w_noise)
noise_stddev = ((tf.nn.softplus(raw_noise_stddev) + noise_epsilon) *
(tf.to_float(train)))
noisy_logits = clean_logits + (
tf.random_normal(tf.shape(clean_logits)) * noise_stddev)
logits = noisy_logits
if common_layers.should_generate_summaries():
tf.summary.histogram("noisy_logits", noisy_logits)
tf.summary.histogram("noise_stddev", noise_stddev)
else:
logits = clean_logits
top_logits, top_indices = _my_top_k(logits, min(k + 1, num_experts))
# top k logits has shape [batch, k]
top_k_logits = tf.slice(top_logits, [0, 0], [-1, k])
top_k_indices = tf.slice(top_indices, [0, 0], [-1, k])
top_k_gates = tf.nn.softmax(top_k_logits)
# This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the
# positions corresponding to all but the top k experts per example.
gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices,
num_experts)
if noisy_gating and k < num_experts:
load = tf.reduce_sum(
_prob_in_top_k(clean_logits, noisy_logits, noise_stddev, top_logits,
k), 0)
else:
load = _gates_to_load(gates)
if common_layers.should_generate_summaries():
tf.summary.histogram("importance", tf.reduce_sum(gates, 0))
tf.summary.histogram("load", load)
return gates, load
|
python
|
def noisy_top_k_gating(x,
num_experts,
train,
k=2,
initializer=tf.zeros_initializer(),
noisy_gating=True,
noise_epsilon=1e-2,
name=None):
"""Noisy top-k gating.
See paper: https://arxiv.org/abs/1701.06538.
Args:
x: input Tensor with shape [batch_size, input_size]
num_experts: an integer
train: a boolean - we only add noise at training time.
k: an integer - number of experts per example
initializer: an initializer
noisy_gating: a boolean
noise_epsilon: a float
name: an optional string
Returns:
gates: a Tensor with shape [batch_size, num_experts]
load: a Tensor with shape [num_experts]
"""
with tf.variable_scope(name, default_name="noisy_top_k_gating"):
input_size = x.get_shape().as_list()[-1]
w_gate = tf.get_variable(
"w_gate", [input_size, num_experts], tf.float32, initializer)
if noisy_gating:
w_noise = tf.get_variable("w_noise",
[input_size, num_experts], tf.float32,
initializer)
clean_logits = tf.matmul(x, w_gate)
if noisy_gating:
raw_noise_stddev = tf.matmul(x, w_noise)
noise_stddev = ((tf.nn.softplus(raw_noise_stddev) + noise_epsilon) *
(tf.to_float(train)))
noisy_logits = clean_logits + (
tf.random_normal(tf.shape(clean_logits)) * noise_stddev)
logits = noisy_logits
if common_layers.should_generate_summaries():
tf.summary.histogram("noisy_logits", noisy_logits)
tf.summary.histogram("noise_stddev", noise_stddev)
else:
logits = clean_logits
top_logits, top_indices = _my_top_k(logits, min(k + 1, num_experts))
# top k logits has shape [batch, k]
top_k_logits = tf.slice(top_logits, [0, 0], [-1, k])
top_k_indices = tf.slice(top_indices, [0, 0], [-1, k])
top_k_gates = tf.nn.softmax(top_k_logits)
# This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the
# positions corresponding to all but the top k experts per example.
gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices,
num_experts)
if noisy_gating and k < num_experts:
load = tf.reduce_sum(
_prob_in_top_k(clean_logits, noisy_logits, noise_stddev, top_logits,
k), 0)
else:
load = _gates_to_load(gates)
if common_layers.should_generate_summaries():
tf.summary.histogram("importance", tf.reduce_sum(gates, 0))
tf.summary.histogram("load", load)
return gates, load
|
[
"def",
"noisy_top_k_gating",
"(",
"x",
",",
"num_experts",
",",
"train",
",",
"k",
"=",
"2",
",",
"initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
",",
"noisy_gating",
"=",
"True",
",",
"noise_epsilon",
"=",
"1e-2",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"default_name",
"=",
"\"noisy_top_k_gating\"",
")",
":",
"input_size",
"=",
"x",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"-",
"1",
"]",
"w_gate",
"=",
"tf",
".",
"get_variable",
"(",
"\"w_gate\"",
",",
"[",
"input_size",
",",
"num_experts",
"]",
",",
"tf",
".",
"float32",
",",
"initializer",
")",
"if",
"noisy_gating",
":",
"w_noise",
"=",
"tf",
".",
"get_variable",
"(",
"\"w_noise\"",
",",
"[",
"input_size",
",",
"num_experts",
"]",
",",
"tf",
".",
"float32",
",",
"initializer",
")",
"clean_logits",
"=",
"tf",
".",
"matmul",
"(",
"x",
",",
"w_gate",
")",
"if",
"noisy_gating",
":",
"raw_noise_stddev",
"=",
"tf",
".",
"matmul",
"(",
"x",
",",
"w_noise",
")",
"noise_stddev",
"=",
"(",
"(",
"tf",
".",
"nn",
".",
"softplus",
"(",
"raw_noise_stddev",
")",
"+",
"noise_epsilon",
")",
"*",
"(",
"tf",
".",
"to_float",
"(",
"train",
")",
")",
")",
"noisy_logits",
"=",
"clean_logits",
"+",
"(",
"tf",
".",
"random_normal",
"(",
"tf",
".",
"shape",
"(",
"clean_logits",
")",
")",
"*",
"noise_stddev",
")",
"logits",
"=",
"noisy_logits",
"if",
"common_layers",
".",
"should_generate_summaries",
"(",
")",
":",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"noisy_logits\"",
",",
"noisy_logits",
")",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"noise_stddev\"",
",",
"noise_stddev",
")",
"else",
":",
"logits",
"=",
"clean_logits",
"top_logits",
",",
"top_indices",
"=",
"_my_top_k",
"(",
"logits",
",",
"min",
"(",
"k",
"+",
"1",
",",
"num_experts",
")",
")",
"# top k logits has shape [batch, k]",
"top_k_logits",
"=",
"tf",
".",
"slice",
"(",
"top_logits",
",",
"[",
"0",
",",
"0",
"]",
",",
"[",
"-",
"1",
",",
"k",
"]",
")",
"top_k_indices",
"=",
"tf",
".",
"slice",
"(",
"top_indices",
",",
"[",
"0",
",",
"0",
"]",
",",
"[",
"-",
"1",
",",
"k",
"]",
")",
"top_k_gates",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"top_k_logits",
")",
"# This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the",
"# positions corresponding to all but the top k experts per example.",
"gates",
"=",
"_rowwise_unsorted_segment_sum",
"(",
"top_k_gates",
",",
"top_k_indices",
",",
"num_experts",
")",
"if",
"noisy_gating",
"and",
"k",
"<",
"num_experts",
":",
"load",
"=",
"tf",
".",
"reduce_sum",
"(",
"_prob_in_top_k",
"(",
"clean_logits",
",",
"noisy_logits",
",",
"noise_stddev",
",",
"top_logits",
",",
"k",
")",
",",
"0",
")",
"else",
":",
"load",
"=",
"_gates_to_load",
"(",
"gates",
")",
"if",
"common_layers",
".",
"should_generate_summaries",
"(",
")",
":",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"importance\"",
",",
"tf",
".",
"reduce_sum",
"(",
"gates",
",",
"0",
")",
")",
"tf",
".",
"summary",
".",
"histogram",
"(",
"\"load\"",
",",
"load",
")",
"return",
"gates",
",",
"load"
] |
Noisy top-k gating.
See paper: https://arxiv.org/abs/1701.06538.
Args:
x: input Tensor with shape [batch_size, input_size]
num_experts: an integer
train: a boolean - we only add noise at training time.
k: an integer - number of experts per example
initializer: an initializer
noisy_gating: a boolean
noise_epsilon: a float
name: an optional string
Returns:
gates: a Tensor with shape [batch_size, num_experts]
load: a Tensor with shape [num_experts]
|
[
"Noisy",
"top",
"-",
"k",
"gating",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L514-L579
|
train
|
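A minimal graph-mode sketch of noisy_top_k_gating (the sizes are made up; TF 1.x is assumed):

import tensorflow as tf
from tensor2tensor.utils import expert_utils

x = tf.random_normal([8, 16])  # hypothetical batch of 8 examples, 16 features
gates, load = expert_utils.noisy_top_k_gating(x, num_experts=4, train=True, k=2)
# gates: [8, 4] with at most 2 nonzeros per row; load: [4] and differentiable,
# so cv_squared(load) can serve as a balancing loss, as local_moe does below.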
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
map_ids
|
def map_ids(x, indices, map_fn):
"""Apply a function to each coordinate ids of a multidimensional tensor.
This allows to process each sequence of a batch independently. This is
similar to tf.map_fn but with tensor where the batch dim has been flatten.
Warning: The indices ids have to be contiguous and ordered in memory as the
output vector for each of the ids are simply concatenated after being
processed.
Ex: if your indices are [0,2,2,1,2,0], the output will contains the processed
rows in the following order: [0,0,1,2,2,2]
Args:
x (Tensor): The tensor to be dispatched of shape [length,...]
indices (Tensor): A int32 tensor of size [length, 1] containing the batch
coordinate of x
map_fn (fct): Function called for every ids of the original tensor. Take
as input a tensor of same rank than x and from shape [length_id,...] with
length_id <= length. Isn't called if length_id == 0
Returns:
a tensor of same shape as x, where each elements has been processed
"""
indices = tf.reshape(indices, [-1])
t_i = tf.constant(0)
# batch_coordinates start at 0
t_batch_size = tf.reduce_max(indices) + 1
# ta_stack_out will store the intermediate results for each individual id
# As alternative to tf.TensorArray, scatter_update could potentially be used
# but that would require an additional mutable tensor.
ta_stack_out = tf.TensorArray(
x.dtype,
size=t_batch_size,
)
# Then we iterate over each sequence individually and compute the
# transformation for each id
while_condition = lambda t_i, *args: tf.less(t_i, t_batch_size)
def body(t_i, ta_stack_out):
"""Loop body."""
# Gather the ids
current_ids = tf.to_int32(tf.where(tf.equal(indices, t_i)))
t_row = tf.gather_nd(x, indices=current_ids)
# TODO(epot): Should not call map_fn if t_row size is 0
# Apply transformation to each id
# Restore batch_dim=1 as most function expect [batch_dim, length, ...] as
# input
t_row = tf.expand_dims(t_row, axis=0)
t_row = map_fn(t_row)
t_row = tf.squeeze(t_row, axis=0) # Squeeze for concatenation
ta_stack_out = ta_stack_out.write(t_i, t_row)
return [tf.add(t_i, 1), ta_stack_out] # ++i
# Run the loop, equivalent to:
# stack_out = []
# while i < batch_size:
# stack_out.expand(map_fn(x[indices==i]))
_, ta_stack_out = tf.while_loop(while_condition, body, [t_i, ta_stack_out])
# Merge all results
return ta_stack_out.concat()
|
python
|
def map_ids(x, indices, map_fn):
"""Apply a function to each coordinate ids of a multidimensional tensor.
This allows to process each sequence of a batch independently. This is
similar to tf.map_fn but with tensor where the batch dim has been flatten.
Warning: The indices ids have to be contiguous and ordered in memory as the
output vector for each of the ids are simply concatenated after being
processed.
Ex: if your indices are [0,2,2,1,2,0], the output will contains the processed
rows in the following order: [0,0,1,2,2,2]
Args:
x (Tensor): The tensor to be dispatched of shape [length,...]
indices (Tensor): A int32 tensor of size [length, 1] containing the batch
coordinate of x
map_fn (fct): Function called for every ids of the original tensor. Take
as input a tensor of same rank than x and from shape [length_id,...] with
length_id <= length. Isn't called if length_id == 0
Returns:
a tensor of same shape as x, where each elements has been processed
"""
indices = tf.reshape(indices, [-1])
t_i = tf.constant(0)
# batch_coordinates start at 0
t_batch_size = tf.reduce_max(indices) + 1
# ta_stack_out will store the intermediate results for each individual id
# As alternative to tf.TensorArray, scatter_update could potentially be used
# but that would require an additional mutable tensor.
ta_stack_out = tf.TensorArray(
x.dtype,
size=t_batch_size,
)
# Then we iterate over each sequence individually and compute the
# transformation for each id
while_condition = lambda t_i, *args: tf.less(t_i, t_batch_size)
def body(t_i, ta_stack_out):
"""Loop body."""
# Gather the ids
current_ids = tf.to_int32(tf.where(tf.equal(indices, t_i)))
t_row = tf.gather_nd(x, indices=current_ids)
# TODO(epot): Should not call map_fn if t_row size is 0
# Apply transformation to each id
# Restore batch_dim=1 as most function expect [batch_dim, length, ...] as
# input
t_row = tf.expand_dims(t_row, axis=0)
t_row = map_fn(t_row)
t_row = tf.squeeze(t_row, axis=0) # Squeeze for concatenation
ta_stack_out = ta_stack_out.write(t_i, t_row)
return [tf.add(t_i, 1), ta_stack_out] # ++i
# Run the loop, equivalent to:
# stack_out = []
# while i < batch_size:
# stack_out.expand(map_fn(x[indices==i]))
_, ta_stack_out = tf.while_loop(while_condition, body, [t_i, ta_stack_out])
# Merge all results
return ta_stack_out.concat()
|
[
"def",
"map_ids",
"(",
"x",
",",
"indices",
",",
"map_fn",
")",
":",
"indices",
"=",
"tf",
".",
"reshape",
"(",
"indices",
",",
"[",
"-",
"1",
"]",
")",
"t_i",
"=",
"tf",
".",
"constant",
"(",
"0",
")",
"# batch_coordinates start at 0",
"t_batch_size",
"=",
"tf",
".",
"reduce_max",
"(",
"indices",
")",
"+",
"1",
"# ta_stack_out will store the intermediate results for each individual id",
"# As alternative to tf.TensorArray, scatter_update could potentially be used",
"# but that would require an additional mutable tensor.",
"ta_stack_out",
"=",
"tf",
".",
"TensorArray",
"(",
"x",
".",
"dtype",
",",
"size",
"=",
"t_batch_size",
",",
")",
"# Then we iterate over each sequence individually and compute the",
"# transformation for each id",
"while_condition",
"=",
"lambda",
"t_i",
",",
"*",
"args",
":",
"tf",
".",
"less",
"(",
"t_i",
",",
"t_batch_size",
")",
"def",
"body",
"(",
"t_i",
",",
"ta_stack_out",
")",
":",
"\"\"\"Loop body.\"\"\"",
"# Gather the ids",
"current_ids",
"=",
"tf",
".",
"to_int32",
"(",
"tf",
".",
"where",
"(",
"tf",
".",
"equal",
"(",
"indices",
",",
"t_i",
")",
")",
")",
"t_row",
"=",
"tf",
".",
"gather_nd",
"(",
"x",
",",
"indices",
"=",
"current_ids",
")",
"# TODO(epot): Should not call map_fn if t_row size is 0",
"# Apply transformation to each id",
"# Restore batch_dim=1 as most function expect [batch_dim, length, ...] as",
"# input",
"t_row",
"=",
"tf",
".",
"expand_dims",
"(",
"t_row",
",",
"axis",
"=",
"0",
")",
"t_row",
"=",
"map_fn",
"(",
"t_row",
")",
"t_row",
"=",
"tf",
".",
"squeeze",
"(",
"t_row",
",",
"axis",
"=",
"0",
")",
"# Squeeze for concatenation",
"ta_stack_out",
"=",
"ta_stack_out",
".",
"write",
"(",
"t_i",
",",
"t_row",
")",
"return",
"[",
"tf",
".",
"add",
"(",
"t_i",
",",
"1",
")",
",",
"ta_stack_out",
"]",
"# ++i",
"# Run the loop, equivalent to:",
"# stack_out = []",
"# while i < batch_size:",
"# stack_out.expand(map_fn(x[indices==i]))",
"_",
",",
"ta_stack_out",
"=",
"tf",
".",
"while_loop",
"(",
"while_condition",
",",
"body",
",",
"[",
"t_i",
",",
"ta_stack_out",
"]",
")",
"# Merge all results",
"return",
"ta_stack_out",
".",
"concat",
"(",
")"
] |
Apply a function to each coordinate id of a multidimensional tensor.
This allows processing each sequence of a batch independently. It is
similar to tf.map_fn, but for a tensor whose batch dim has been flattened.
Warning: the ids have to be contiguous (every id in [0, max] must occur), as
the output vectors for each of the ids are simply concatenated, in id order,
after being processed.
Ex: if your indices are [0,2,2,1,2,0], the output will contain the processed
rows in the following order: [0,0,1,2,2,2]
Args:
x (Tensor): The tensor to be dispatched, of shape [length,...]
indices (Tensor): An int32 tensor of size [length, 1] containing the batch
coordinate of x
map_fn (fct): Function called for every id of the original tensor. Takes
as input a tensor of the same rank as x, of shape [length_id,...] with
length_id <= length. Isn't called if length_id == 0
Returns:
a tensor of the same shape as x, where each element has been processed
|
[
"Apply",
"a",
"function",
"to",
"each",
"coordinate",
"ids",
"of",
"a",
"multidimensional",
"tensor",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L665-L730
|
train
|
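A worked sketch of the docstring's [0,2,2,1,2,0] example (a TF 1.x session is assumed; the map_fn here just scales rows so the regrouping is visible):

import tensorflow as tf
from tensor2tensor.utils import expert_utils

x = tf.constant([[0.], [1.], [2.], [3.], [4.], [5.]])      # [length=6, 1]
indices = tf.constant([[0], [2], [2], [1], [2], [0]])      # batch coordinate per row
out = expert_utils.map_ids(x, indices, lambda t: t * 10.)  # shape-preserving map_fn
with tf.Session() as sess:
  # rows regrouped by id 0,0,1,2,2,2:
  print(sess.run(out))  # [[0.], [50.], [30.], [10.], [20.], [40.]]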
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
ffn_expert_fn
|
def ffn_expert_fn(input_size,
hidden_sizes,
output_size,
hidden_activation=tf.nn.relu):
"""Returns a function that creates a feed-forward network.
Use this function to create the expert_fn argument to distributed_moe.
Args:
input_size: an integer
hidden_sizes: a list of integers
output_size: an integer
hidden_activation: a unary function.
Returns:
a unary function
"""
def my_fn(x):
layer_sizes = [input_size] + hidden_sizes + [output_size]
for i in range(1 + len(hidden_sizes)):
w = tf.get_variable("w_%d" % i, layer_sizes[i:i+2], tf.float32)
x = tf.matmul(x, w)
if i < len(hidden_sizes):
x = hidden_activation(x)
if layer_sizes[i] != input_size:
x *= (layer_sizes[i] / float(input_size))**-0.5
return x
return my_fn
|
python
|
def ffn_expert_fn(input_size,
hidden_sizes,
output_size,
hidden_activation=tf.nn.relu):
"""Returns a function that creates a feed-forward network.
Use this function to create the expert_fn argument to distributed_moe.
Args:
input_size: an integer
hidden_sizes: a list of integers
output_size: an integer
hidden_activation: a unary function.
Returns:
a unary function
"""
def my_fn(x):
layer_sizes = [input_size] + hidden_sizes + [output_size]
for i in range(1 + len(hidden_sizes)):
w = tf.get_variable("w_%d" % i, layer_sizes[i:i+2], tf.float32)
x = tf.matmul(x, w)
if i < len(hidden_sizes):
x = hidden_activation(x)
if layer_sizes[i] != input_size:
x *= (layer_sizes[i] / float(input_size))**-0.5
return x
return my_fn
|
[
"def",
"ffn_expert_fn",
"(",
"input_size",
",",
"hidden_sizes",
",",
"output_size",
",",
"hidden_activation",
"=",
"tf",
".",
"nn",
".",
"relu",
")",
":",
"def",
"my_fn",
"(",
"x",
")",
":",
"layer_sizes",
"=",
"[",
"input_size",
"]",
"+",
"hidden_sizes",
"+",
"[",
"output_size",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
"+",
"len",
"(",
"hidden_sizes",
")",
")",
":",
"w",
"=",
"tf",
".",
"get_variable",
"(",
"\"w_%d\"",
"%",
"i",
",",
"layer_sizes",
"[",
"i",
":",
"i",
"+",
"2",
"]",
",",
"tf",
".",
"float32",
")",
"x",
"=",
"tf",
".",
"matmul",
"(",
"x",
",",
"w",
")",
"if",
"i",
"<",
"len",
"(",
"hidden_sizes",
")",
":",
"x",
"=",
"hidden_activation",
"(",
"x",
")",
"if",
"layer_sizes",
"[",
"i",
"]",
"!=",
"input_size",
":",
"x",
"*=",
"(",
"layer_sizes",
"[",
"i",
"]",
"/",
"float",
"(",
"input_size",
")",
")",
"**",
"-",
"0.5",
"return",
"x",
"return",
"my_fn"
] |
Returns a function that creates a feed-forward network.
Use this function to create the expert_fn argument to distributed_moe.
Args:
input_size: an integer
hidden_sizes: a list of integers
output_size: an integer
hidden_activation: a unary function.
Returns:
a unary function
|
[
"Returns",
"a",
"function",
"that",
"creates",
"a",
"feed",
"-",
"forward",
"network",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L956-L983
|
train
|
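A usage sketch for ffn_expert_fn (my own sizes; wrapping each call in its own variable scope is assumed so the w_%d variables don't collide across experts):

import tensorflow as tf
from tensor2tensor.utils import expert_utils

expert = expert_utils.ffn_expert_fn(input_size=16, hidden_sizes=[32], output_size=16)
x = tf.random_normal([4, 16])
with tf.variable_scope("expert_0"):  # give each expert its own variable scope
  y = expert(x)                      # y: [4, 16], one relu hidden layer of width 32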
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
flatten_all_but_last
|
def flatten_all_but_last(a):
"""Flatten all dimensions of a except the last."""
ret = tf.reshape(a, [-1, tf.shape(a)[-1]])
if not tf.executing_eagerly():
ret.set_shape([None] + a.get_shape().as_list()[-1:])
return ret
|
python
|
def flatten_all_but_last(a):
"""Flatten all dimensions of a except the last."""
ret = tf.reshape(a, [-1, tf.shape(a)[-1]])
if not tf.executing_eagerly():
ret.set_shape([None] + a.get_shape().as_list()[-1:])
return ret
|
[
"def",
"flatten_all_but_last",
"(",
"a",
")",
":",
"ret",
"=",
"tf",
".",
"reshape",
"(",
"a",
",",
"[",
"-",
"1",
",",
"tf",
".",
"shape",
"(",
"a",
")",
"[",
"-",
"1",
"]",
"]",
")",
"if",
"not",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"ret",
".",
"set_shape",
"(",
"[",
"None",
"]",
"+",
"a",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"-",
"1",
":",
"]",
")",
"return",
"ret"
] |
Flatten all dimensions of a except the last.
|
[
"Flatten",
"all",
"dimensions",
"of",
"a",
"except",
"the",
"last",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L986-L991
|
train
|
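A one-liner sketch of flatten_all_but_last (TF 1.x graph mode assumed, so the static shape is set):

import tensorflow as tf
from tensor2tensor.utils import expert_utils

a = tf.zeros([2, 3, 8])
b = expert_utils.flatten_all_but_last(a)
print(b.get_shape().as_list())  # [None, 8] statically; the runtime shape is [6, 8]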
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
local_moe
|
def local_moe(x,
train,
expert_fn,
num_experts,
k=1,
loss_coef=1e-2,
hparams=None,
pass_x=True,
pass_gates=False,
additional_dispatch_params=None,
name=None):
"""Call a local mixture of experts.
Args:
x: a tensor with shape [..., input_size]
train: a boolean scalar.
expert_fn: a function.
num_experts: an integer - number of experts
k: an integer - how many experts to use for each batch element
loss_coef: a scalar - multiplier on load-balancing losses
hparams: optional hparams for vq gating
pass_x: a boolean. If true, x will also be dispatched to the experts.
pass_gates: a boolean. If true, gates will be passed to experts. Might be
necessary when dealing with sparse encoder-encoder decoder attention
additional_dispatch_params: The extra tensors that need to be sent to each
expert. Examples include batch coordinates (see
common_attention.local_expert_attention)
name: a string
Returns:
y: a tensor. Has the same shape as x, except for the last dimension,
which is output_size.
extra_training_loss: a scalar. This should be added into the overall
training loss of the model. The backpropagation of this loss
encourages all experts to be approximately equally used across a batch.
"""
bneck = DiscreteBottleneck(hparams)
with tf.variable_scope(name, default_name="local_moe"):
centroids = None
x_flat = flatten_all_but_last(x)
if hparams.gating_type == "topk":
tf.logging.info("Using noisy top_k with k = {}".format(k))
# The gates indicate which batch elements go to which tensors.
# load is a measure of approximately how many examples go to each expert
gates, load = noisy_top_k_gating(
x_flat,
num_experts,
train,
k,
initializer=tf.zeros_initializer(),
noisy_gating=True,
noise_epsilon=1e-2)
importance = tf.reduce_sum(gates, 0)
loss = loss_coef * (cv_squared(importance) + cv_squared(load))
else:
assert hparams.gating_type == "vq"
tf.logging.info("Using VQ gating")
gates, loss, centroids = vq_gating(
x_flat, num_experts, k, bneck, hparams=hparams)
loss *= loss_coef
# Shuffle data between datashards and experts.
dispatcher = SparseDispatcher(num_experts, gates)
# Set up expert_fn arguments
expert_kwargs = {}
if pass_x:
expert_kwargs["x"] = dispatcher.dispatch(x_flat)
if pass_gates:
expert_kwargs["gates"] = dispatcher.expert_to_gates()
for key, val in six.iteritems(additional_dispatch_params or {}):
val = flatten_all_but_last(val)
expert_kwargs[key] = dispatcher.dispatch(val)
ep = Parallelism([DEFAULT_DEV_STRING] * num_experts, reuse=None)
expert_outputs = ep(expert_fn, **expert_kwargs)
y_flat = dispatcher.combine(expert_outputs)
if centroids is not None:
centroids = tf.squeeze(centroids, axis=[1, 2])
y_flat += centroids
y = common_layers.reshape_like(y_flat, x)
return y, loss
|
python
|
def local_moe(x,
train,
expert_fn,
num_experts,
k=1,
loss_coef=1e-2,
hparams=None,
pass_x=True,
pass_gates=False,
additional_dispatch_params=None,
name=None):
"""Call a local mixture of experts.
Args:
x: a tensor with shape [..., input_size]
train: a boolean scalar.
expert_fn: a function.
num_experts: an integer - number of experts
k: an integer - how many experts to use for each batch element
loss_coef: a scalar - multiplier on load-balancing losses
hparams: optional hparams for vq gating
pass_x: a boolean. If true, x will also be dispatched to the experts.
pass_gates: a boolean. If true, gates will be passed to experts. Might be
necessary when dealing with sparse encoder-encoder decoder attention
additional_dispatch_params: The extra tensors that need to be sent to each
expert. Examples include batch coordinates (see
common_attention.local_expert_attention)
name: a string
Returns:
y: a tensor. Has the same shape as x, except for the last dimension,
which is output_size.
extra_training_loss: a scalar. This should be added into the overall
training loss of the model. The backpropagation of this loss
encourages all experts to be approximately equally used across a batch.
"""
bneck = DiscreteBottleneck(hparams)
with tf.variable_scope(name, default_name="local_moe"):
centroids = None
x_flat = flatten_all_but_last(x)
if hparams.gating_type == "topk":
tf.logging.info("Using noisy top_k with k = {}".format(k))
# The gates indicate which batch elements go to which tensors.
# load is a measure of approximately how many examples go to each expert
gates, load = noisy_top_k_gating(
x_flat,
num_experts,
train,
k,
initializer=tf.zeros_initializer(),
noisy_gating=True,
noise_epsilon=1e-2)
importance = tf.reduce_sum(gates, 0)
loss = loss_coef * (cv_squared(importance) + cv_squared(load))
else:
assert hparams.gating_type == "vq"
tf.logging.info("Using VQ gating")
gates, loss, centroids = vq_gating(
x_flat, num_experts, k, bneck, hparams=hparams)
loss *= loss_coef
# Shuffle data between datashards and experts.
dispatcher = SparseDispatcher(num_experts, gates)
# Set up expert_fn arguments
expert_kwargs = {}
if pass_x:
expert_kwargs["x"] = dispatcher.dispatch(x_flat)
if pass_gates:
expert_kwargs["gates"] = dispatcher.expert_to_gates()
for key, val in six.iteritems(additional_dispatch_params or {}):
val = flatten_all_but_last(val)
expert_kwargs[key] = dispatcher.dispatch(val)
ep = Parallelism([DEFAULT_DEV_STRING] * num_experts, reuse=None)
expert_outputs = ep(expert_fn, **expert_kwargs)
y_flat = dispatcher.combine(expert_outputs)
if centroids is not None:
centroids = tf.squeeze(centroids, axis=[1, 2])
y_flat += centroids
y = common_layers.reshape_like(y_flat, x)
return y, loss
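The load-balancing term in the "topk" branch above is built from cv_squared, the squared coefficient of variation of the per-expert importance and load. A minimal NumPy sketch (illustrative only, not the library API; the array values are hypothetical) shows why this penalizes skewed expert usage:

import numpy as np

def cv_squared(v):
  # Squared coefficient of variation: Var(v) / Mean(v)^2.
  # Zero when every expert receives the same total gate weight.
  return np.var(v) / (np.mean(v) ** 2)

balanced = np.array([10., 10., 10., 10.])  # equal usage across 4 experts
skewed = np.array([37., 1., 1., 1.])       # one expert dominates
print(cv_squared(balanced))  # 0.0
print(cv_squared(skewed))    # 2.43 -> larger balancing penalty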
|
[
"def",
"local_moe",
"(",
"x",
",",
"train",
",",
"expert_fn",
",",
"num_experts",
",",
"k",
"=",
"1",
",",
"loss_coef",
"=",
"1e-2",
",",
"hparams",
"=",
"None",
",",
"pass_x",
"=",
"True",
",",
"pass_gates",
"=",
"False",
",",
"additional_dispatch_params",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"bneck",
"=",
"DiscreteBottleneck",
"(",
"hparams",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"default_name",
"=",
"\"local_moe\"",
")",
":",
"centroids",
"=",
"None",
"x_flat",
"=",
"flatten_all_but_last",
"(",
"x",
")",
"if",
"hparams",
".",
"gating_type",
"==",
"\"topk\"",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Using noisy top_k with k = {}\"",
".",
"format",
"(",
"k",
")",
")",
"# The gates indicate which batch elements go to which tensors.",
"# load is a measure of approximately how many examples go to each expert",
"gates",
",",
"load",
"=",
"noisy_top_k_gating",
"(",
"x_flat",
",",
"num_experts",
",",
"train",
",",
"k",
",",
"initializer",
"=",
"tf",
".",
"zeros_initializer",
"(",
")",
",",
"noisy_gating",
"=",
"True",
",",
"noise_epsilon",
"=",
"1e-2",
")",
"importance",
"=",
"tf",
".",
"reduce_sum",
"(",
"gates",
",",
"0",
")",
"loss",
"=",
"loss_coef",
"*",
"(",
"cv_squared",
"(",
"importance",
")",
"+",
"cv_squared",
"(",
"load",
")",
")",
"else",
":",
"assert",
"hparams",
".",
"gating_type",
"==",
"\"vq\"",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Using VQ gating\"",
")",
"gates",
",",
"loss",
",",
"centroids",
"=",
"vq_gating",
"(",
"x_flat",
",",
"num_experts",
",",
"k",
",",
"bneck",
",",
"hparams",
"=",
"hparams",
")",
"loss",
"*=",
"loss_coef",
"# Shuffle data between datashards and experts.",
"dispatcher",
"=",
"SparseDispatcher",
"(",
"num_experts",
",",
"gates",
")",
"# Set up expert_fn arguments",
"expert_kwargs",
"=",
"{",
"}",
"if",
"pass_x",
":",
"expert_kwargs",
"[",
"\"x\"",
"]",
"=",
"dispatcher",
".",
"dispatch",
"(",
"x_flat",
")",
"if",
"pass_gates",
":",
"expert_kwargs",
"[",
"\"gates\"",
"]",
"=",
"dispatcher",
".",
"expert_to_gates",
"(",
")",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"additional_dispatch_params",
"or",
"{",
"}",
")",
":",
"val",
"=",
"flatten_all_but_last",
"(",
"val",
")",
"expert_kwargs",
"[",
"key",
"]",
"=",
"dispatcher",
".",
"dispatch",
"(",
"val",
")",
"ep",
"=",
"Parallelism",
"(",
"[",
"DEFAULT_DEV_STRING",
"]",
"*",
"num_experts",
",",
"reuse",
"=",
"None",
")",
"expert_outputs",
"=",
"ep",
"(",
"expert_fn",
",",
"*",
"*",
"expert_kwargs",
")",
"y_flat",
"=",
"dispatcher",
".",
"combine",
"(",
"expert_outputs",
")",
"if",
"centroids",
"is",
"not",
"None",
":",
"centroids",
"=",
"tf",
".",
"squeeze",
"(",
"centroids",
",",
"axis",
"=",
"[",
"1",
",",
"2",
"]",
")",
"y_flat",
"+=",
"centroids",
"y",
"=",
"common_layers",
".",
"reshape_like",
"(",
"y_flat",
",",
"x",
")",
"return",
"y",
",",
"loss"
] |
Call a local mixture of experts.
Args:
  x: a tensor with shape [... , input_size]
train: a boolean scalar.
expert_fn: a function.
num_experts: an integer - number of experts
k: an integer - how many experts to use for each batch element
loss_coef: a scalar - multiplier on load-balancing losses
hparams: optional hparams for vq gating
pass_x: a boolean. If true, x will also be dispatched to the experts.
pass_gates: a boolean. If true, gates will be passed to experts. Might be
necessary when dealing with sparse encoder-encoder decoder attention
additional_dispatch_params: The extra tensors that need to be sent to each
    expert. Examples include batch coordinates (see
common_attention.local_expert_attention)
name: a string
Returns:
y: a tensor. Has the same shape as x, except for the last dimension,
which is output_size.
extra_training_loss: a scalar. This should be added into the overall
training loss of the model. The backpropagation of this loss
encourages all experts to be approximately equally used across a batch.
|
[
"Call",
"a",
"local",
"mixture",
"of",
"experts",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L994-L1074
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
local_moe_tpu
|
def local_moe_tpu(inputs,
hidden_size,
output_size,
num_experts,
loss_coef=1e-3,
overhead=1.0):
"""Local mixture of experts that works well on TPU.
See https://arxiv.org/abs/1701.06538
There are num_experts expert networks, each containing a relu-activated
hidden layer of size hidden_size, followed by an output projection.
The number of parameters is thus:
num_experts * (input_size * hidden_size + hidden_size * output_size)
The input is 3d: [batch, length, depth], consisting of the representations
of all positions in a batch of sequences.
Each position of each sequence is sent to 0-2 experts. The expert
choices and the combination weights are determined by a learned gating
function.
This function returns a small auxiliary loss that should be added to the
training loss of the model. This loss helps to balance expert usage.
Without the loss, it is very likely that a few experts will be trained and
the rest will starve.
Several hacks are necessary to get around current TPU limitations:
- To ensure static shapes, we enforce (by truncation/padding)
    that each sequence sends the same number of elements to each expert.
It would make more sense to enforce this equality over the entire batch,
as opposed to on individual sequences. This would allow more freedom
for individual sequences to be unbalanced. Unfortunately, that would
slow down our hacked-up gather-by-matmul implementation.
TODO(noam): There is no real reason for a single sequence to be the unit
of equal allocation. Reshaping the inputs would allow us to pick a
different unit of equal allocation.
TODO(noam): Factor this code better. We want to be able to substitute
different code for the experts themselves. We also want to integrate this
gating/dispatching logic into multi-device mixtures-of-experts.
Args:
inputs: a Tensor with shape [batch, length, depth]
hidden_size: an integer
output_size: an integer
num_experts: an integer
loss_coef: a float scalar
overhead: multiplicative factor of how much spare capacity to assign
Returns:
outputs: a Tensor with shape [batch, length, output_size]
loss: a scalar
"""
batch, length, input_size = common_layers.shape_list(inputs)[:]
# Each sequence sends expert_capacity positions to each expert.
if isinstance(length, int):
expert_capacity = min(
length, int((length * 2 * overhead) / num_experts))
else:
expert_capacity = tf.minimum(
length, tf.to_int32(
tf.to_float(length) * 2 * overhead / num_experts))
expert_capacity_f = tf.to_float(expert_capacity)
# This is the learned gating function.
gates = tf.nn.softmax(
tf.to_float(common_layers.dense(inputs, num_experts, name="logits")))
# Find the top expert for each position.
gate_1, index_1 = common_layers.top_1_tpu(gates)
# [batch, length, num_experts]
mask_1 = tf.one_hot(index_1, num_experts)
# [batch, length, num_experts]
# This is the position within the expert's mini-batch for this sequence
position_in_expert_1 = common_layers.cumsum(
mask_1, axis=1, exclusive=True) * mask_1
# Remove the elements that don't fit.
mask_1 *= tf.to_float(tf.less(position_in_expert_1, expert_capacity_f))
# [batch, 1, num_experts]
# How many examples in this sequence go to this expert
mask_1_count = tf.reduce_sum(mask_1, axis=1, keepdims=True)
# [batch, length] - mostly ones, but zeros where something didn't fit
mask_1_flat = tf.reduce_sum(mask_1, axis=2)
position_in_expert_1 = tf.reduce_sum(position_in_expert_1, axis=2)
# Weight assigned to first expert.
gate_1 *= mask_1_flat
# Pick a second-place expert for each position.
# We first mask out the experts that we expect to be over-capacity
space_remaining = expert_capacity_f - mask_1_count
use_rate = (mask_1_count + 1.0) / tf.to_float(length)
# At what point in the sequence do we expect the expert to be full.
expected_exhaustion_pos = space_remaining / use_rate
# A Tensor with shape [batch, length, num_experts] representing a boolean
# - whether we expect that the expert will already be full.
expected_exhausted = tf.to_float(tf.greater(
tf.reshape(tf.to_float(tf.range(length)), [1, length, 1]),
expected_exhaustion_pos))
masked_gates = gates - mask_1 - expected_exhausted
# This section is similar to the section above.
gate_2, index_2 = common_layers.top_1_tpu(masked_gates)
# [batch, length, num_experts]
mask_2 = tf.one_hot(index_2, num_experts)
position_in_expert_2 = (
common_layers.cumsum(mask_2, axis=1, exclusive=True) + mask_1_count)
position_in_expert_2 *= mask_2
mask_2 *= tf.to_float(tf.less(position_in_expert_2, expert_capacity_f))
mask_2_count = tf.reduce_sum(mask_2, axis=1, keepdims=True)
mask_2_flat = tf.reduce_sum(mask_2, axis=2)
position_in_expert_2 = tf.reduce_sum(position_in_expert_2, axis=2)
gate_2 *= mask_2_flat
# What fraction didn't fit - show summaries
miss_rate_1 = 1.0 - tf.reduce_sum(mask_1_count) / tf.to_float(batch * length)
miss_rate_2 = 1.0 - tf.reduce_sum(mask_2_count) / tf.to_float(batch * length)
tf.summary.scalar("miss_rate_1", miss_rate_1)
tf.summary.scalar("miss_rate_2", miss_rate_2)
# renormalize the two gate values to add up to 1
denom = gate_1 + gate_2 + 1e-9
gate_1 /= denom
gate_2 /= denom
# inputs: [batch, length, input_size]
# forward_assignment: [batch, length, num_experts * expert_capacity]
# expert_inputs: [batch, num_experts * expert_capacity, input_size]
segment_ids_forward_1 = (
(index_1 * expert_capacity) +
tf.to_int32(position_in_expert_1) +
tf.to_int32(1.0 - mask_1_flat) * (num_experts * expert_capacity))
segment_ids_forward_2 = (
(index_2 * expert_capacity) +
tf.to_int32(position_in_expert_2) +
tf.to_int32(1.0 - mask_2_flat) * (num_experts * expert_capacity))
# Gather and scatter are painfully slow on TPU.
# We will use one_hot and matmul instead.
# [batch, length, num_experts * expert_capacity]
one_hot_1 = tf.one_hot(
segment_ids_forward_1, num_experts * expert_capacity, dtype=inputs.dtype)
one_hot_2 = tf.one_hot(
segment_ids_forward_2, num_experts * expert_capacity, dtype=inputs.dtype)
forward_assignment = (one_hot_1 + one_hot_2)
# [batch, num_experts * expert_capacity, input_size]
expert_inputs = tf.matmul(forward_assignment, inputs, transpose_a=True)
# [batch, num_experts, expert_capacity, input_size]
expert_inputs = tf.reshape(
expert_inputs, [batch, num_experts, expert_capacity, input_size])
# [num_experts, batch, expert_capacity, input_size]
expert_inputs = tf.transpose(expert_inputs, [1, 0, 2, 3])
# [num_experts, batch * expert_capacity, input_size]
expert_inputs = tf.reshape(
expert_inputs, [num_experts, batch * expert_capacity, input_size])
# Now feed the expert inputs through the experts.
h = common_layers.batch_dense(
expert_inputs, hidden_size, activation=tf.nn.relu, name="x0")
expert_output = common_layers.batch_dense(h, output_size, name="x1")
expert_output = tf.reshape(
expert_output, [num_experts, batch, expert_capacity, output_size])
# [batch, num_experts, expert_capacity, output_size]
expert_output = tf.transpose(expert_output, [1, 0, 2, 3])
expert_output = tf.reshape(
expert_output, [batch, num_experts * expert_capacity, output_size])
# Again, use matmul instead of unsorted_segment_sum. This time, we need
# to multiply by the combination weights gate_1 and gate_2.
# expert_output: [batch, num_experts * expert_capacity, output_size]
  # backward_assignment: [batch, length, num_experts * expert_capacity]
  # output: [batch, length, output_size]
  backward_assignment = (
      one_hot_1 * tf.cast(tf.expand_dims(gate_1, 2), inputs.dtype) +
      one_hot_2 * tf.cast(tf.expand_dims(gate_2, 2), inputs.dtype))
  output = tf.matmul(backward_assignment, expert_output)
  # Compute a loss equal to the coefficient of variation of the
  # total gate value per expert per sequence.
  # This loss causes the experts to be used about equally per sequence.
importance = tf.reduce_sum(gates * (mask_1 + mask_2), 1)
loss = loss_coef * cv_squared(importance)
return output, loss
|
python
|
def local_moe_tpu(inputs,
hidden_size,
output_size,
num_experts,
loss_coef=1e-3,
overhead=1.0):
"""Local mixture of experts that works well on TPU.
See https://arxiv.org/abs/1701.06538
There are num_experts expert networks, each containing a relu-activated
hidden layer of size hidden_size, followed by an output projection.
The number of parameters is thus:
num_experts * (input_size * hidden_size + hidden_size * output_size)
The input is 3d: [batch, length, depth], consisting of the representations
of all positions in a batch of sequences.
Each position of each sequence is sent to 0-2 experts. The expert
choices and the combination weights are determined by a learned gating
function.
This function returns a small auxiliary loss that should be added to the
training loss of the model. This loss helps to balance expert usage.
Without the loss, it is very likely that a few experts will be trained and
the rest will starve.
Several hacks are necessary to get around current TPU limitations:
- To ensure static shapes, we enforce (by truncation/padding)
    that each sequence sends the same number of elements to each expert.
It would make more sense to enforce this equality over the entire batch,
as opposed to on individual sequences. This would allow more freedom
for individual sequences to be unbalanced. Unfortunately, that would
slow down our hacked-up gather-by-matmul implementation.
TODO(noam): There is no real reason for a single sequence to be the unit
of equal allocation. Reshaping the inputs would allow us to pick a
different unit of equal allocation.
TODO(noam): Factor this code better. We want to be able to substitute
different code for the experts themselves. We also want to integrate this
gating/dispatching logic into multi-device mixtures-of-experts.
Args:
inputs: a Tensor with shape [batch, length, depth]
hidden_size: an integer
output_size: an integer
num_experts: an integer
loss_coef: a float scalar
overhead: multiplicative factor of how much spare capacity to assign
Returns:
outputs: a Tensor with shape [batch, length, output_size]
loss: a scalar
"""
batch, length, input_size = common_layers.shape_list(inputs)[:]
# Each sequence sends expert_capacity positions to each expert.
if isinstance(length, int):
expert_capacity = min(
length, int((length * 2 * overhead) / num_experts))
else:
expert_capacity = tf.minimum(
length, tf.to_int32(
tf.to_float(length) * 2 * overhead / num_experts))
expert_capacity_f = tf.to_float(expert_capacity)
# This is the learned gating function.
gates = tf.nn.softmax(
tf.to_float(common_layers.dense(inputs, num_experts, name="logits")))
# Find the top expert for each position.
gate_1, index_1 = common_layers.top_1_tpu(gates)
# [batch, length, num_experts]
mask_1 = tf.one_hot(index_1, num_experts)
# [batch, length, num_experts]
# This is the position within the expert's mini-batch for this sequence
position_in_expert_1 = common_layers.cumsum(
mask_1, axis=1, exclusive=True) * mask_1
# Remove the elements that don't fit.
mask_1 *= tf.to_float(tf.less(position_in_expert_1, expert_capacity_f))
# [batch, 1, num_experts]
# How many examples in this sequence go to this expert
mask_1_count = tf.reduce_sum(mask_1, axis=1, keepdims=True)
# [batch, length] - mostly ones, but zeros where something didn't fit
mask_1_flat = tf.reduce_sum(mask_1, axis=2)
position_in_expert_1 = tf.reduce_sum(position_in_expert_1, axis=2)
# Weight assigned to first expert.
gate_1 *= mask_1_flat
# Pick a second-place expert for each position.
# We first mask out the experts that we expect to be over-capacity
space_remaining = expert_capacity_f - mask_1_count
use_rate = (mask_1_count + 1.0) / tf.to_float(length)
# At what point in the sequence do we expect the expert to be full.
expected_exhaustion_pos = space_remaining / use_rate
# A Tensor with shape [batch, length, num_experts] representing a boolean
# - whether we expect that the expert will already be full.
expected_exhausted = tf.to_float(tf.greater(
tf.reshape(tf.to_float(tf.range(length)), [1, length, 1]),
expected_exhaustion_pos))
masked_gates = gates - mask_1 - expected_exhausted
# This section is similar to the section above.
gate_2, index_2 = common_layers.top_1_tpu(masked_gates)
# [batch, length, num_experts]
mask_2 = tf.one_hot(index_2, num_experts)
position_in_expert_2 = (
common_layers.cumsum(mask_2, axis=1, exclusive=True) + mask_1_count)
position_in_expert_2 *= mask_2
mask_2 *= tf.to_float(tf.less(position_in_expert_2, expert_capacity_f))
mask_2_count = tf.reduce_sum(mask_2, axis=1, keepdims=True)
mask_2_flat = tf.reduce_sum(mask_2, axis=2)
position_in_expert_2 = tf.reduce_sum(position_in_expert_2, axis=2)
gate_2 *= mask_2_flat
# What fraction didn't fit - show summaries
miss_rate_1 = 1.0 - tf.reduce_sum(mask_1_count) / tf.to_float(batch * length)
miss_rate_2 = 1.0 - tf.reduce_sum(mask_2_count) / tf.to_float(batch * length)
tf.summary.scalar("miss_rate_1", miss_rate_1)
tf.summary.scalar("miss_rate_2", miss_rate_2)
# renormalize the two gate values to add up to 1
denom = gate_1 + gate_2 + 1e-9
gate_1 /= denom
gate_2 /= denom
# inputs: [batch, length, input_size]
# forward_assignment: [batch, length, num_experts * expert_capacity]
# expert_inputs: [batch, num_experts * expert_capacity, input_size]
segment_ids_forward_1 = (
(index_1 * expert_capacity) +
tf.to_int32(position_in_expert_1) +
tf.to_int32(1.0 - mask_1_flat) * (num_experts * expert_capacity))
segment_ids_forward_2 = (
(index_2 * expert_capacity) +
tf.to_int32(position_in_expert_2) +
tf.to_int32(1.0 - mask_2_flat) * (num_experts * expert_capacity))
# Gather and scatter are painfully slow on TPU.
# We will use one_hot and matmul instead.
# [batch, length, num_experts * expert_capacity]
one_hot_1 = tf.one_hot(
segment_ids_forward_1, num_experts * expert_capacity, dtype=inputs.dtype)
one_hot_2 = tf.one_hot(
segment_ids_forward_2, num_experts * expert_capacity, dtype=inputs.dtype)
forward_assignment = (one_hot_1 + one_hot_2)
# [batch, num_experts * expert_capacity, input_size]
expert_inputs = tf.matmul(forward_assignment, inputs, transpose_a=True)
# [batch, num_experts, expert_capacity, input_size]
expert_inputs = tf.reshape(
expert_inputs, [batch, num_experts, expert_capacity, input_size])
# [num_experts, batch, expert_capacity, input_size]
expert_inputs = tf.transpose(expert_inputs, [1, 0, 2, 3])
# [num_experts, batch * expert_capacity, input_size]
expert_inputs = tf.reshape(
expert_inputs, [num_experts, batch * expert_capacity, input_size])
# Now feed the expert inputs through the experts.
h = common_layers.batch_dense(
expert_inputs, hidden_size, activation=tf.nn.relu, name="x0")
expert_output = common_layers.batch_dense(h, output_size, name="x1")
expert_output = tf.reshape(
expert_output, [num_experts, batch, expert_capacity, output_size])
# [batch, num_experts, expert_capacity, output_size]
expert_output = tf.transpose(expert_output, [1, 0, 2, 3])
expert_output = tf.reshape(
expert_output, [batch, num_experts * expert_capacity, output_size])
# Again, use matmul instead of unsorted_segment_sum. This time, we need
# to multiply by the combination weights gate_1 and gate_2.
# expert_output: [batch, num_experts * expert_capacity, output_size]
  # backward_assignment: [batch, length, num_experts * expert_capacity]
  # output: [batch, length, output_size]
  backward_assignment = (
      one_hot_1 * tf.cast(tf.expand_dims(gate_1, 2), inputs.dtype) +
      one_hot_2 * tf.cast(tf.expand_dims(gate_2, 2), inputs.dtype))
  output = tf.matmul(backward_assignment, expert_output)
  # Compute a loss equal to the coefficient of variation of the
  # total gate value per expert per sequence.
  # This loss causes the experts to be used about equally per sequence.
importance = tf.reduce_sum(gates * (mask_1 + mask_2), 1)
loss = loss_coef * cv_squared(importance)
return output, loss
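The capacity arithmetic in the static-shape branch is easy to check by hand: with two expert choices per position there are 2 * length slots per sequence, divided evenly across experts and scaled by overhead. A pure-Python sketch with hypothetical sizes:

# Hypothetical sizes; mirrors the `isinstance(length, int)` branch above.
length, num_experts, overhead = 1024, 16, 1.0
expert_capacity = min(length, int((length * 2 * overhead) / num_experts))
print(expert_capacity)  # 128 positions per expert per sequence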
|
[
"def",
"local_moe_tpu",
"(",
"inputs",
",",
"hidden_size",
",",
"output_size",
",",
"num_experts",
",",
"loss_coef",
"=",
"1e-3",
",",
"overhead",
"=",
"1.0",
")",
":",
"batch",
",",
"length",
",",
"input_size",
"=",
"common_layers",
".",
"shape_list",
"(",
"inputs",
")",
"[",
":",
"]",
"# Each sequence sends expert_capacity positions to each expert.",
"if",
"isinstance",
"(",
"length",
",",
"int",
")",
":",
"expert_capacity",
"=",
"min",
"(",
"length",
",",
"int",
"(",
"(",
"length",
"*",
"2",
"*",
"overhead",
")",
"/",
"num_experts",
")",
")",
"else",
":",
"expert_capacity",
"=",
"tf",
".",
"minimum",
"(",
"length",
",",
"tf",
".",
"to_int32",
"(",
"tf",
".",
"to_float",
"(",
"length",
")",
"*",
"2",
"*",
"overhead",
"/",
"num_experts",
")",
")",
"expert_capacity_f",
"=",
"tf",
".",
"to_float",
"(",
"expert_capacity",
")",
"# This is the learned gating function.",
"gates",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"tf",
".",
"to_float",
"(",
"common_layers",
".",
"dense",
"(",
"inputs",
",",
"num_experts",
",",
"name",
"=",
"\"logits\"",
")",
")",
")",
"# Find the top expert for each position.",
"gate_1",
",",
"index_1",
"=",
"common_layers",
".",
"top_1_tpu",
"(",
"gates",
")",
"# [batch, length, num_experts]",
"mask_1",
"=",
"tf",
".",
"one_hot",
"(",
"index_1",
",",
"num_experts",
")",
"# [batch, length, num_experts]",
"# This is the position within the expert's mini-batch for this sequence",
"position_in_expert_1",
"=",
"common_layers",
".",
"cumsum",
"(",
"mask_1",
",",
"axis",
"=",
"1",
",",
"exclusive",
"=",
"True",
")",
"*",
"mask_1",
"# Remove the elements that don't fit.",
"mask_1",
"*=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"less",
"(",
"position_in_expert_1",
",",
"expert_capacity_f",
")",
")",
"# [batch, 1, num_experts]",
"# How many examples in this sequence go to this expert",
"mask_1_count",
"=",
"tf",
".",
"reduce_sum",
"(",
"mask_1",
",",
"axis",
"=",
"1",
",",
"keepdims",
"=",
"True",
")",
"# [batch, length] - mostly ones, but zeros where something didn't fit",
"mask_1_flat",
"=",
"tf",
".",
"reduce_sum",
"(",
"mask_1",
",",
"axis",
"=",
"2",
")",
"position_in_expert_1",
"=",
"tf",
".",
"reduce_sum",
"(",
"position_in_expert_1",
",",
"axis",
"=",
"2",
")",
"# Weight assigned to first expert.",
"gate_1",
"*=",
"mask_1_flat",
"# Pick a second-place expert for each position.",
"# We first mask out the experts that we expect to be over-capacity",
"space_remaining",
"=",
"expert_capacity_f",
"-",
"mask_1_count",
"use_rate",
"=",
"(",
"mask_1_count",
"+",
"1.0",
")",
"/",
"tf",
".",
"to_float",
"(",
"length",
")",
"# At what point in the sequence do we expect the expert to be full.",
"expected_exhaustion_pos",
"=",
"space_remaining",
"/",
"use_rate",
"# A Tensor with shape [batch, length, num_experts] representing a boolean",
"# - whether we expect that the expert will already be full.",
"expected_exhausted",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"greater",
"(",
"tf",
".",
"reshape",
"(",
"tf",
".",
"to_float",
"(",
"tf",
".",
"range",
"(",
"length",
")",
")",
",",
"[",
"1",
",",
"length",
",",
"1",
"]",
")",
",",
"expected_exhaustion_pos",
")",
")",
"masked_gates",
"=",
"gates",
"-",
"mask_1",
"-",
"expected_exhausted",
"# This section is similar to the section above.",
"gate_2",
",",
"index_2",
"=",
"common_layers",
".",
"top_1_tpu",
"(",
"masked_gates",
")",
"# [batch, length, num_experts]",
"mask_2",
"=",
"tf",
".",
"one_hot",
"(",
"index_2",
",",
"num_experts",
")",
"position_in_expert_2",
"=",
"(",
"common_layers",
".",
"cumsum",
"(",
"mask_2",
",",
"axis",
"=",
"1",
",",
"exclusive",
"=",
"True",
")",
"+",
"mask_1_count",
")",
"position_in_expert_2",
"*=",
"mask_2",
"mask_2",
"*=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"less",
"(",
"position_in_expert_2",
",",
"expert_capacity_f",
")",
")",
"mask_2_count",
"=",
"tf",
".",
"reduce_sum",
"(",
"mask_2",
",",
"axis",
"=",
"1",
",",
"keepdims",
"=",
"True",
")",
"mask_2_flat",
"=",
"tf",
".",
"reduce_sum",
"(",
"mask_2",
",",
"axis",
"=",
"2",
")",
"position_in_expert_2",
"=",
"tf",
".",
"reduce_sum",
"(",
"position_in_expert_2",
",",
"axis",
"=",
"2",
")",
"gate_2",
"*=",
"mask_2_flat",
"# What fraction didn't fit - show summaries",
"miss_rate_1",
"=",
"1.0",
"-",
"tf",
".",
"reduce_sum",
"(",
"mask_1_count",
")",
"/",
"tf",
".",
"to_float",
"(",
"batch",
"*",
"length",
")",
"miss_rate_2",
"=",
"1.0",
"-",
"tf",
".",
"reduce_sum",
"(",
"mask_2_count",
")",
"/",
"tf",
".",
"to_float",
"(",
"batch",
"*",
"length",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"miss_rate_1\"",
",",
"miss_rate_1",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"miss_rate_2\"",
",",
"miss_rate_2",
")",
"# renormalize the two gate values to add up to 1",
"denom",
"=",
"gate_1",
"+",
"gate_2",
"+",
"1e-9",
"gate_1",
"/=",
"denom",
"gate_2",
"/=",
"denom",
"# inputs: [batch, length, input_size]",
"# forward_assignment: [batch, length, num_experts * expert_capacity]",
"# expert_inputs: [batch, num_experts * expert_capacity, input_size]",
"segment_ids_forward_1",
"=",
"(",
"(",
"index_1",
"*",
"expert_capacity",
")",
"+",
"tf",
".",
"to_int32",
"(",
"position_in_expert_1",
")",
"+",
"tf",
".",
"to_int32",
"(",
"1.0",
"-",
"mask_1_flat",
")",
"*",
"(",
"num_experts",
"*",
"expert_capacity",
")",
")",
"segment_ids_forward_2",
"=",
"(",
"(",
"index_2",
"*",
"expert_capacity",
")",
"+",
"tf",
".",
"to_int32",
"(",
"position_in_expert_2",
")",
"+",
"tf",
".",
"to_int32",
"(",
"1.0",
"-",
"mask_2_flat",
")",
"*",
"(",
"num_experts",
"*",
"expert_capacity",
")",
")",
"# Gather and scatter are painfully slow on TPU.",
"# We will use one_hot and matmul instead.",
"# [batch, length, num_experts * expert_capacity]",
"one_hot_1",
"=",
"tf",
".",
"one_hot",
"(",
"segment_ids_forward_1",
",",
"num_experts",
"*",
"expert_capacity",
",",
"dtype",
"=",
"inputs",
".",
"dtype",
")",
"one_hot_2",
"=",
"tf",
".",
"one_hot",
"(",
"segment_ids_forward_2",
",",
"num_experts",
"*",
"expert_capacity",
",",
"dtype",
"=",
"inputs",
".",
"dtype",
")",
"forward_assignment",
"=",
"(",
"one_hot_1",
"+",
"one_hot_2",
")",
"# [batch, num_experts * expert_capacity, input_size]",
"expert_inputs",
"=",
"tf",
".",
"matmul",
"(",
"forward_assignment",
",",
"inputs",
",",
"transpose_a",
"=",
"True",
")",
"# [batch, num_experts, expert_capacity, input_size]",
"expert_inputs",
"=",
"tf",
".",
"reshape",
"(",
"expert_inputs",
",",
"[",
"batch",
",",
"num_experts",
",",
"expert_capacity",
",",
"input_size",
"]",
")",
"# [num_experts, batch, expert_capacity, input_size]",
"expert_inputs",
"=",
"tf",
".",
"transpose",
"(",
"expert_inputs",
",",
"[",
"1",
",",
"0",
",",
"2",
",",
"3",
"]",
")",
"# [num_experts, batch * expert_capacity, input_size]",
"expert_inputs",
"=",
"tf",
".",
"reshape",
"(",
"expert_inputs",
",",
"[",
"num_experts",
",",
"batch",
"*",
"expert_capacity",
",",
"input_size",
"]",
")",
"# Now feed the expert inputs through the experts.",
"h",
"=",
"common_layers",
".",
"batch_dense",
"(",
"expert_inputs",
",",
"hidden_size",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"name",
"=",
"\"x0\"",
")",
"expert_output",
"=",
"common_layers",
".",
"batch_dense",
"(",
"h",
",",
"output_size",
",",
"name",
"=",
"\"x1\"",
")",
"expert_output",
"=",
"tf",
".",
"reshape",
"(",
"expert_output",
",",
"[",
"num_experts",
",",
"batch",
",",
"expert_capacity",
",",
"output_size",
"]",
")",
"# [batch, num_experts, expert_capacity, output_size]",
"expert_output",
"=",
"tf",
".",
"transpose",
"(",
"expert_output",
",",
"[",
"1",
",",
"0",
",",
"2",
",",
"3",
"]",
")",
"expert_output",
"=",
"tf",
".",
"reshape",
"(",
"expert_output",
",",
"[",
"batch",
",",
"num_experts",
"*",
"expert_capacity",
",",
"output_size",
"]",
")",
"# Again, use matmul instead of unsorted_segment_sum. This time, we need",
"# to multiply by the combination weights gate_1 and gate_2.",
"# expert_output: [batch, num_experts * expert_capacity, output_size]",
"# backward_assigmnent: [batch, length, num_experts * expert_capacity]",
"# output: [batch, length, output_size]",
"backward_assigmnent",
"=",
"(",
"one_hot_1",
"*",
"tf",
".",
"cast",
"(",
"tf",
".",
"expand_dims",
"(",
"gate_1",
",",
"2",
")",
",",
"inputs",
".",
"dtype",
")",
"+",
"one_hot_2",
"*",
"tf",
".",
"cast",
"(",
"tf",
".",
"expand_dims",
"(",
"gate_2",
",",
"2",
")",
",",
"inputs",
".",
"dtype",
")",
")",
"output",
"=",
"tf",
".",
"matmul",
"(",
"backward_assigmnent",
",",
"expert_output",
")",
"# Compute a loss equal to the coefficient ov variation of the",
"# total gate value per expert per sequence.",
"# This loss causes the experts to be used about equally used per sequence.",
"importance",
"=",
"tf",
".",
"reduce_sum",
"(",
"gates",
"*",
"(",
"mask_1",
"+",
"mask_2",
")",
",",
"1",
")",
"loss",
"=",
"loss_coef",
"*",
"cv_squared",
"(",
"importance",
")",
"return",
"output",
",",
"loss"
] |
Local mixture of experts that works well on TPU.
See https://arxiv.org/abs/1701.06538
There are num_experts expert networks, each containing a relu-activated
hidden layer of size hidden_size, followed by an output projection.
The number of parameters is thus:
num_experts * (input_size * hidden_size + hidden_size * output_size)
The input is 3d: [batch, length, depth], consisting of the representations
of all positions in a batch of sequences.
Each position of each sequence is sent to 0-2 experts. The expert
choices and the combination weights are determined by a learned gating
function.
This function returns a small auxiliary loss that should be added to the
training loss of the model. This loss helps to balance expert usage.
Without the loss, it is very likely that a few experts will be trained and
the rest will starve.
Several hacks are necessary to get around current TPU limitations:
- To ensure static shapes, we enforce (by truncation/padding)
that each sequence sends the same number of elements to each expert.
It would make more sense to enforce this equality over the entire batch,
as opposed to on individual sequences. This would allow more freedom
for individual sequences to be unbalanced. Unfortunately, that would
slow down our hacked-up gather-by-matmul implementation.
TODO(noam): There is no real reason for a single sequence to be the unit
of equal allocation. Reshaping the inputs would allow us to pick a
different unit of equal allocation.
TODO(noam): Factor this code better. We want to be able to substitute
different code for the experts themselves. We also want to integrate this
gating/dispatching logic into multi-device mixtures-of-experts.
Args:
inputs: a Tensor with shape [batch, length, depth]
hidden_size: an integer
output_size: an integer
num_experts: an integer
loss_coef: a float scalar
overhead: multiplicative factor of how much spare capacity to assign
Returns:
outputs: a Tensor with shape [batch, length, output_size]
loss: a scalar
|
[
"Local",
"mixture",
"of",
"experts",
"that",
"works",
"well",
"on",
"TPU",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L1217-L1411
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
reduce_by_device
|
def reduce_by_device(parallelism, data, reduce_fn):
"""Reduces data per device.
This can be useful, for example, if we want to all-reduce n tensors on k<n
devices (like during eval when we have only one device). We call
reduce_by_device() to first sum the tensors per device, then call our usual
all-reduce operation to create one sum per device, followed by
expand_by_device, to create the appropriate number of pointers to these
results. See all_reduce_ring() below for an example of how this is used.
Args:
    parallelism: an expert_utils.Parallelism object
    data: a list of Tensors with length parallelism.n
    reduce_fn: a function taking a list of Tensors, e.g. tf.add_n
Returns:
device_parallelism: a Parallelism object with each device listed only once.
reduced_data: A list of Tensors, one per device.
"""
unique_devices = []
device_to_data = {}
for dev, datum in zip(parallelism.devices, data):
if dev not in device_to_data:
unique_devices.append(dev)
device_to_data[dev] = [datum]
else:
device_to_data[dev].append(datum)
device_parallelism = Parallelism(unique_devices)
grouped_data = [device_to_data[dev] for dev in unique_devices]
return device_parallelism, device_parallelism(reduce_fn, grouped_data)
|
python
|
def reduce_by_device(parallelism, data, reduce_fn):
"""Reduces data per device.
This can be useful, for example, if we want to all-reduce n tensors on k<n
devices (like during eval when we have only one device). We call
reduce_by_device() to first sum the tensors per device, then call our usual
all-reduce operation to create one sum per device, followed by
expand_by_device, to create the appropriate number of pointers to these
results. See all_reduce_ring() below for an example of how this is used.
Args:
    parallelism: an expert_utils.Parallelism object
    data: a list of Tensors with length parallelism.n
    reduce_fn: a function taking a list of Tensors, e.g. tf.add_n
Returns:
device_parallelism: a Parallelism object with each device listed only once.
reduced_data: A list of Tensors, one per device.
"""
unique_devices = []
device_to_data = {}
for dev, datum in zip(parallelism.devices, data):
if dev not in device_to_data:
unique_devices.append(dev)
device_to_data[dev] = [datum]
else:
device_to_data[dev].append(datum)
device_parallelism = Parallelism(unique_devices)
grouped_data = [device_to_data[dev] for dev in unique_devices]
return device_parallelism, device_parallelism(reduce_fn, grouped_data)
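A pure-Python sketch of the grouping step (device strings and floats stand in for tf devices and Tensors; sum() stands in for reduce_fn=tf.add_n):

devices = ["gpu:0", "gpu:1", "gpu:0", "gpu:1"]
data = [1.0, 2.0, 3.0, 4.0]
unique_devices, device_to_data = [], {}
for dev, datum in zip(devices, data):
  if dev not in device_to_data:
    unique_devices.append(dev)
    device_to_data[dev] = [datum]
  else:
    device_to_data[dev].append(datum)
print([(d, sum(device_to_data[d])) for d in unique_devices])
# [('gpu:0', 4.0), ('gpu:1', 6.0)] -- one reduced value per device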
|
[
"def",
"reduce_by_device",
"(",
"parallelism",
",",
"data",
",",
"reduce_fn",
")",
":",
"unique_devices",
"=",
"[",
"]",
"device_to_data",
"=",
"{",
"}",
"for",
"dev",
",",
"datum",
"in",
"zip",
"(",
"parallelism",
".",
"devices",
",",
"data",
")",
":",
"if",
"dev",
"not",
"in",
"device_to_data",
":",
"unique_devices",
".",
"append",
"(",
"dev",
")",
"device_to_data",
"[",
"dev",
"]",
"=",
"[",
"datum",
"]",
"else",
":",
"device_to_data",
"[",
"dev",
"]",
".",
"append",
"(",
"datum",
")",
"device_parallelism",
"=",
"Parallelism",
"(",
"unique_devices",
")",
"grouped_data",
"=",
"[",
"device_to_data",
"[",
"dev",
"]",
"for",
"dev",
"in",
"unique_devices",
"]",
"return",
"device_parallelism",
",",
"device_parallelism",
"(",
"reduce_fn",
",",
"grouped_data",
")"
] |
Reduces data per device.
This can be useful, for example, if we want to all-reduce n tensors on k<n
devices (like during eval when we have only one device). We call
reduce_by_device() to first sum the tensors per device, then call our usual
all-reduce operation to create one sum per device, followed by
expand_by_device, to create the appropriate number of pointers to these
results. See all_reduce_ring() below for an example of how this is used.
Args:
parallelism: an expert_utils.Parallelism object
data: a list of Tensors with length parallelism.n
reduce_fn: a function taking a list of Tensors, e.g. tf.add_n
Returns:
device_parallelism: a Parallelism object with each device listed only once.
reduced_data: A list of Tensors, one per device.
|
[
"Reduces",
"data",
"per",
"device",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L1414-L1443
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
expand_by_device
|
def expand_by_device(original_parallelism, device_parallelism, data):
"""Opposite of reduce_by_device().
Args:
    original_parallelism: an expert_utils.Parallelism object.
    device_parallelism: an expert_utils.Parallelism object.
data: a list of tensors with length device_parallelism.n
Returns:
a list of Tensors with length original_parallelism.n
"""
device_to_datum = {
device_parallelism.devices[i]: data[i]
for i in range(device_parallelism.n)}
return [device_to_datum[d] for d in original_parallelism.devices]
|
python
|
def expand_by_device(original_parallelism, device_parallelism, data):
"""Opposite of reduce_by_device().
Args:
    original_parallelism: an expert_utils.Parallelism object.
    device_parallelism: an expert_utils.Parallelism object.
data: a list of tensors with length device_parallelism.n
Returns:
a list of Tensors with length original_parallelism.n
"""
device_to_datum = {
device_parallelism.devices[i]: data[i]
for i in range(device_parallelism.n)}
return [device_to_datum[d] for d in original_parallelism.devices]
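Continuing the toy from reduce_by_device above (plain Python, illustrative values): each original slot gets back a pointer to its device's reduced value.

original_devices = ["gpu:0", "gpu:1", "gpu:0", "gpu:1"]
reduced = {"gpu:0": 4.0, "gpu:1": 6.0}  # output of the per-device reduce
print([reduced[d] for d in original_devices])  # [4.0, 6.0, 4.0, 6.0]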
|
[
"def",
"expand_by_device",
"(",
"original_parallelism",
",",
"device_parallelism",
",",
"data",
")",
":",
"device_to_datum",
"=",
"{",
"device_parallelism",
".",
"devices",
"[",
"i",
"]",
":",
"data",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"device_parallelism",
".",
"n",
")",
"}",
"return",
"[",
"device_to_datum",
"[",
"d",
"]",
"for",
"d",
"in",
"original_parallelism",
".",
"devices",
"]"
] |
Opposite of reduce_by_device().
Args:
original_parallelism: an expert_utils.Parallelism object.
device_parallelism: an expert_utils.Parallelism object.
data: a list of tensors with length device_parallelism.n
Returns:
a list of Tensors with length original_parallelism.n
|
[
"Opposite",
"of",
"reduce_by_device",
"()",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L1446-L1460
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
all_reduce_ring
|
def all_reduce_ring(x, parallelism, maybe_reduce=True, use_bfloat16=True):
"""Compute the sum of all Tensors and put the result everywhere.
Assumes that the devices are connected in a ring.
Args:
x: a list of Tensors with length parallelism.n
    parallelism: an expert_utils.Parallelism object.
maybe_reduce: a boolean - first reduce per device.
use_bfloat16: a boolean - saves bandwidth but loses precision
Returns:
a list of Tensors with length parallelism.n
"""
if parallelism.n == 1:
return x
if maybe_reduce:
original_parallelism = parallelism
parallelism, x = reduce_by_device(parallelism, x, tf.add_n)
if parallelism.n == 1:
y = x
else:
# first shard the input:
x_flat = parallelism(tf.reshape, x, [[-1]] * parallelism.n)
# [device, shard]
x_split = parallelism(
common_layers.approximate_split, x_flat, parallelism.n, 0)
def _step(source_replica, target_replica, x_split, op="plus_eq"):
"""Helper function - one step of summing or copying.
If op == "plus_eq", then adds source_replica into target_replica
If op == "copy", then copies source_replica onto target_replica
These operations happen for all shards. The replica numbers are offset
by the shard numbers to keep all physical links busy.
Args:
source_replica: an integer
target_replica: an integer
x_split: a list of lists of tensors
op: a string
"""
for shard in range(parallelism.n):
source_device = (shard + source_replica) % parallelism.n
target_device = (shard + target_replica) % parallelism.n
source = x_split[source_device][shard]
if use_bfloat16:
with tf.device(parallelism.devices[source_device]):
source = tf.to_bfloat16(source)
with tf.device(parallelism.devices[target_device]):
source = tf.to_float(source)
if op == "plus_eq":
x_split[target_device][shard] += source
else:
assert op == "copy"
x_split[target_device][shard] = tf.identity(source)
center = parallelism.n // 2
# accumulate everything towards the center.
for i in reversed(range(center, parallelism.n - 1)):
_step(i + 1, i, x_split, op="plus_eq")
for i in range(center):
_step(i, i + 1, x_split, op="plus_eq")
# copy everything away from the center.
for i in range(center, parallelism.n - 1):
_step(i, i + 1, x_split, op="copy")
for i in reversed(range(center)):
_step(i + 1, i, x_split, op="copy")
x_concat = parallelism(tf.concat, x_split, 0)
y = parallelism(common_layers.reshape_like_all_dims, x_concat, x)
if maybe_reduce:
y = expand_by_device(original_parallelism, parallelism, y)
return y
|
python
|
def all_reduce_ring(x, parallelism, maybe_reduce=True, use_bfloat16=True):
"""Compute the sum of all Tensors and put the result everywhere.
Assumes that the devices are connected in a ring.
Args:
x: a list of Tensors with length parallelism.n
    parallelism: an expert_utils.Parallelism object.
maybe_reduce: a boolean - first reduce per device.
use_bfloat16: a boolean - saves bandwidth but loses precision
Returns:
a list of Tensors with length parallelism.n
"""
if parallelism.n == 1:
return x
if maybe_reduce:
original_parallelism = parallelism
parallelism, x = reduce_by_device(parallelism, x, tf.add_n)
if parallelism.n == 1:
y = x
else:
# first shard the input:
x_flat = parallelism(tf.reshape, x, [[-1]] * parallelism.n)
# [device, shard]
x_split = parallelism(
common_layers.approximate_split, x_flat, parallelism.n, 0)
def _step(source_replica, target_replica, x_split, op="plus_eq"):
"""Helper function - one step of summing or copying.
If op == "plus_eq", then adds source_replica into target_replica
If op == "copy", then copies source_replica onto target_replica
These operations happen for all shards. The replica numbers are offset
by the shard numbers to keep all physical links busy.
Args:
source_replica: an integer
target_replica: an integer
x_split: a list of lists of tensors
op: a string
"""
for shard in range(parallelism.n):
source_device = (shard + source_replica) % parallelism.n
target_device = (shard + target_replica) % parallelism.n
source = x_split[source_device][shard]
if use_bfloat16:
with tf.device(parallelism.devices[source_device]):
source = tf.to_bfloat16(source)
with tf.device(parallelism.devices[target_device]):
source = tf.to_float(source)
if op == "plus_eq":
x_split[target_device][shard] += source
else:
assert op == "copy"
x_split[target_device][shard] = tf.identity(source)
center = parallelism.n // 2
# accumulate everything towards the center.
for i in reversed(range(center, parallelism.n - 1)):
_step(i + 1, i, x_split, op="plus_eq")
for i in range(center):
_step(i, i + 1, x_split, op="plus_eq")
# copy everything away from the center.
for i in range(center, parallelism.n - 1):
_step(i, i + 1, x_split, op="copy")
for i in reversed(range(center)):
_step(i + 1, i, x_split, op="copy")
x_concat = parallelism(tf.concat, x_split, 0)
y = parallelism(common_layers.reshape_like_all_dims, x_concat, x)
if maybe_reduce:
y = expand_by_device(original_parallelism, parallelism, y)
return y
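The step schedule can be traced in plain Python. This sketch follows one shard across n = 4 replicas (ignoring the shard-offset rotation and the bfloat16 casts), showing how the partial sums flow toward the center replicas and the full sum is then copied back out:

n = 4
center = n // 2
vals = [1.0, 2.0, 3.0, 4.0]  # one value per replica

def step(src, dst, op):
  vals[dst] = vals[dst] + vals[src] if op == "plus_eq" else vals[src]

for i in reversed(range(center, n - 1)):  # accumulate toward the center
  step(i + 1, i, "plus_eq")
for i in range(center):
  step(i, i + 1, "plus_eq")
for i in range(center, n - 1):            # copy away from the center
  step(i, i + 1, "copy")
for i in reversed(range(center)):
  step(i + 1, i, "copy")
print(vals)  # [10.0, 10.0, 10.0, 10.0] -- every replica holds the full sum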
|
[
"def",
"all_reduce_ring",
"(",
"x",
",",
"parallelism",
",",
"maybe_reduce",
"=",
"True",
",",
"use_bfloat16",
"=",
"True",
")",
":",
"if",
"parallelism",
".",
"n",
"==",
"1",
":",
"return",
"x",
"if",
"maybe_reduce",
":",
"original_parallelism",
"=",
"parallelism",
"parallelism",
",",
"x",
"=",
"reduce_by_device",
"(",
"parallelism",
",",
"x",
",",
"tf",
".",
"add_n",
")",
"if",
"parallelism",
".",
"n",
"==",
"1",
":",
"y",
"=",
"x",
"else",
":",
"# first shard the input:",
"x_flat",
"=",
"parallelism",
"(",
"tf",
".",
"reshape",
",",
"x",
",",
"[",
"[",
"-",
"1",
"]",
"]",
"*",
"parallelism",
".",
"n",
")",
"# [device, shard]",
"x_split",
"=",
"parallelism",
"(",
"common_layers",
".",
"approximate_split",
",",
"x_flat",
",",
"parallelism",
".",
"n",
",",
"0",
")",
"def",
"_step",
"(",
"source_replica",
",",
"target_replica",
",",
"x_split",
",",
"op",
"=",
"\"plus_eq\"",
")",
":",
"\"\"\"Helper function - one step of summing or copying.\n\n If op == \"plus_eq\", then adds source_replica into target_replica\n If op == \"copy\", then copies source_replica onto target_replica\n\n These operations happen for all shards. The replica numbers are offset\n by the shard numbers to keep all physical links busy.\n\n Args:\n source_replica: an integer\n target_replica: an integer\n x_split: a list of lists of tensors\n op: a string\n \"\"\"",
"for",
"shard",
"in",
"range",
"(",
"parallelism",
".",
"n",
")",
":",
"source_device",
"=",
"(",
"shard",
"+",
"source_replica",
")",
"%",
"parallelism",
".",
"n",
"target_device",
"=",
"(",
"shard",
"+",
"target_replica",
")",
"%",
"parallelism",
".",
"n",
"source",
"=",
"x_split",
"[",
"source_device",
"]",
"[",
"shard",
"]",
"if",
"use_bfloat16",
":",
"with",
"tf",
".",
"device",
"(",
"parallelism",
".",
"devices",
"[",
"source_device",
"]",
")",
":",
"source",
"=",
"tf",
".",
"to_bfloat16",
"(",
"source",
")",
"with",
"tf",
".",
"device",
"(",
"parallelism",
".",
"devices",
"[",
"target_device",
"]",
")",
":",
"source",
"=",
"tf",
".",
"to_float",
"(",
"source",
")",
"if",
"op",
"==",
"\"plus_eq\"",
":",
"x_split",
"[",
"target_device",
"]",
"[",
"shard",
"]",
"+=",
"source",
"else",
":",
"assert",
"op",
"==",
"\"copy\"",
"x_split",
"[",
"target_device",
"]",
"[",
"shard",
"]",
"=",
"tf",
".",
"identity",
"(",
"source",
")",
"center",
"=",
"parallelism",
".",
"n",
"//",
"2",
"# accumulate everything towards the center.",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"center",
",",
"parallelism",
".",
"n",
"-",
"1",
")",
")",
":",
"_step",
"(",
"i",
"+",
"1",
",",
"i",
",",
"x_split",
",",
"op",
"=",
"\"plus_eq\"",
")",
"for",
"i",
"in",
"range",
"(",
"center",
")",
":",
"_step",
"(",
"i",
",",
"i",
"+",
"1",
",",
"x_split",
",",
"op",
"=",
"\"plus_eq\"",
")",
"# copy everything away from the center.",
"for",
"i",
"in",
"range",
"(",
"center",
",",
"parallelism",
".",
"n",
"-",
"1",
")",
":",
"_step",
"(",
"i",
",",
"i",
"+",
"1",
",",
"x_split",
",",
"op",
"=",
"\"copy\"",
")",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"center",
")",
")",
":",
"_step",
"(",
"i",
"+",
"1",
",",
"i",
",",
"x_split",
",",
"op",
"=",
"\"copy\"",
")",
"x_concat",
"=",
"parallelism",
"(",
"tf",
".",
"concat",
",",
"x_split",
",",
"0",
")",
"y",
"=",
"parallelism",
"(",
"common_layers",
".",
"reshape_like_all_dims",
",",
"x_concat",
",",
"x",
")",
"if",
"maybe_reduce",
":",
"y",
"=",
"expand_by_device",
"(",
"original_parallelism",
",",
"parallelism",
",",
"y",
")",
"return",
"y"
] |
Compute the sum of all Tensors and put the result everywhere.
Assumes that the devices are connected in a ring.
Args:
x: a list of Tensors with length parallelism.n
parallelism: an expert_utils.Parallelism object.
maybe_reduce: a boolean - first reduce per device.
use_bfloat16: a boolean - saves bandwidth but loses precision
Returns:
a list of Tensors with length parallelism.n
|
[
"Compute",
"the",
"sum",
"of",
"all",
"Tensors",
"and",
"put",
"the",
"result",
"everywhere",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L1463-L1537
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
Parallelism._maybe_repeat
|
def _maybe_repeat(self, x):
"""Utility function for processing arguments that are singletons or lists.
Args:
x: either a list of self.n elements, or not a list.
Returns:
a list of self.n elements.
"""
if isinstance(x, list):
assert len(x) == self.n
return x
else:
return [x] * self.n
|
python
|
def _maybe_repeat(self, x):
"""Utility function for processing arguments that are singletons or lists.
Args:
x: either a list of self.n elements, or not a list.
Returns:
a list of self.n elements.
"""
if isinstance(x, list):
assert len(x) == self.n
return x
else:
return [x] * self.n
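A plain-Python illustration with n = 3 (hypothetical values): singletons are broadcast, while lists pass through unchanged and must already have length n.

n = 3
def maybe_repeat(x):
  return x if isinstance(x, list) else [x] * n
print(maybe_repeat(7))          # [7, 7, 7]
print(maybe_repeat([1, 2, 3]))  # [1, 2, 3]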
|
[
"def",
"_maybe_repeat",
"(",
"self",
",",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"list",
")",
":",
"assert",
"len",
"(",
"x",
")",
"==",
"self",
".",
"n",
"return",
"x",
"else",
":",
"return",
"[",
"x",
"]",
"*",
"self",
".",
"n"
] |
Utility function for processing arguments that are singletons or lists.
Args:
x: either a list of self.n elements, or not a list.
Returns:
a list of self.n elements.
|
[
"Utility",
"function",
"for",
"processing",
"arguments",
"that",
"are",
"singletons",
"or",
"lists",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L251-L264
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
PadRemover.remove
|
def remove(self, x):
"""Remove padding from the given tensor.
Args:
x (tf.Tensor): of shape [dim_origin,...]
Returns:
a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
"""
with tf.name_scope("pad_reduce/remove"):
x_shape = x.get_shape().as_list()
x = tf.gather_nd(
x,
indices=self.nonpad_ids,
)
if not tf.executing_eagerly():
      # This is a hack, but for some reason gather_nd returns a tensor of
      # undefined shape, so the shape is set manually.
x.set_shape([None] + x_shape[1:])
return x
|
python
|
def remove(self, x):
"""Remove padding from the given tensor.
Args:
x (tf.Tensor): of shape [dim_origin,...]
Returns:
a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
"""
with tf.name_scope("pad_reduce/remove"):
x_shape = x.get_shape().as_list()
x = tf.gather_nd(
x,
indices=self.nonpad_ids,
)
if not tf.executing_eagerly():
      # This is a hack, but for some reason gather_nd returns a tensor of
      # undefined shape, so the shape is set manually.
x.set_shape([None] + x_shape[1:])
return x
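A NumPy stand-in for the gather above (illustrative; nonpad_ids plays the role the class derives from the padding mask):

import numpy as np
x = np.array([[1., 1.], [0., 0.], [2., 2.], [0., 0.]])  # rows 1, 3 are padding
nonpad_ids = np.array([0, 2])
print(x[nonpad_ids])  # [[1. 1.] [2. 2.]] -- shape [dim_compressed, ...]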
|
[
"def",
"remove",
"(",
"self",
",",
"x",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"pad_reduce/remove\"",
")",
":",
"x_shape",
"=",
"x",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"x",
"=",
"tf",
".",
"gather_nd",
"(",
"x",
",",
"indices",
"=",
"self",
".",
"nonpad_ids",
",",
")",
"if",
"not",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"# This is a hack but for some reason, gather_nd return a tensor of",
"# undefined shape, so the shape is set up manually",
"x",
".",
"set_shape",
"(",
"[",
"None",
"]",
"+",
"x_shape",
"[",
"1",
":",
"]",
")",
"return",
"x"
] |
Remove padding from the given tensor.
Args:
x (tf.Tensor): of shape [dim_origin,...]
Returns:
a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
|
[
"Remove",
"padding",
"from",
"the",
"given",
"tensor",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L624-L643
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
PadRemover.restore
|
def restore(self, x):
"""Add padding back to the given tensor.
Args:
x (tf.Tensor): of shape [dim_compressed,...]
Returns:
    a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
    dim is restored from the original reference tensor
"""
with tf.name_scope("pad_reduce/restore"):
x = tf.scatter_nd(
indices=self.nonpad_ids,
updates=x,
shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
)
return x
|
python
|
def restore(self, x):
"""Add padding back to the given tensor.
Args:
x (tf.Tensor): of shape [dim_compressed,...]
Returns:
    a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
    dim is restored from the original reference tensor
"""
with tf.name_scope("pad_reduce/restore"):
x = tf.scatter_nd(
indices=self.nonpad_ids,
updates=x,
shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
)
return x
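The inverse scatter, again as a NumPy stand-in with the same toy values as in remove() above: compressed rows go back to their original positions and the padding rows reappear as zeros.

import numpy as np
compressed = np.array([[1., 1.], [2., 2.]])
nonpad_ids = np.array([0, 2])
restored = np.zeros((4, 2))  # dim_origin rows
restored[nonpad_ids] = compressed
print(restored)  # rows 1 and 3 are zero padding again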
|
[
"def",
"restore",
"(",
"self",
",",
"x",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"pad_reduce/restore\"",
")",
":",
"x",
"=",
"tf",
".",
"scatter_nd",
"(",
"indices",
"=",
"self",
".",
"nonpad_ids",
",",
"updates",
"=",
"x",
",",
"shape",
"=",
"tf",
".",
"concat",
"(",
"[",
"self",
".",
"dim_origin",
",",
"tf",
".",
"shape",
"(",
"x",
")",
"[",
"1",
":",
"]",
"]",
",",
"axis",
"=",
"0",
")",
",",
")",
"return",
"x"
] |
Add padding back to the given tensor.
Args:
x (tf.Tensor): of shape [dim_compressed,...]
Returns:
a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
dim is restored from the original reference tensor
|
[
"Add",
"padding",
"back",
"to",
"the",
"given",
"tensor",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L645-L661
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
SparseDispatcher.dispatch
|
def dispatch(self, inp):
"""Create one input Tensor for each expert.
    The `Tensor` for an expert `i` contains the slices of `inp` corresponding
    to the batch elements `b` where `gates[b, i] > 0`.
    Args:
      inp: a `Tensor` of shape `[batch_size, <extra_input_dims>]`
Returns:
a list of `num_experts` `Tensor`s with shapes
`[expert_batch_size_i, <extra_input_dims>]`.
"""
inp = tf.gather(inp, self._batch_index)
return tf.split(inp, self._part_sizes_tensor, 0, num=self._num_experts)
|
python
|
def dispatch(self, inp):
"""Create one input Tensor for each expert.
    The `Tensor` for an expert `i` contains the slices of `inp` corresponding
    to the batch elements `b` where `gates[b, i] > 0`.
    Args:
      inp: a `Tensor` of shape `[batch_size, <extra_input_dims>]`
Returns:
a list of `num_experts` `Tensor`s with shapes
`[expert_batch_size_i, <extra_input_dims>]`.
"""
inp = tf.gather(inp, self._batch_index)
return tf.split(inp, self._part_sizes_tensor, 0, num=self._num_experts)
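A NumPy sketch of the dispatch bookkeeping for 2 experts (illustrative gate values; batch_index and part_sizes are recomputed here from the gates rather than taken from the class internals):

import numpy as np
gates = np.array([[0.7, 0.0],
                  [0.0, 0.9],
                  [0.4, 0.6]])        # example 2 goes to both experts
_, batch_index = np.nonzero(gates.T)  # example rows sorted by expert
part_sizes = (gates > 0).sum(axis=0)  # [2, 2] examples per expert
inp = np.array([[10.], [20.], [30.]])
splits = np.split(inp[batch_index], np.cumsum(part_sizes)[:-1])
print([s.ravel().tolist() for s in splits])  # [[10.0, 30.0], [20.0, 30.0]]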
|
[
"def",
"dispatch",
"(",
"self",
",",
"inp",
")",
":",
"inp",
"=",
"tf",
".",
"gather",
"(",
"inp",
",",
"self",
".",
"_batch_index",
")",
"return",
"tf",
".",
"split",
"(",
"inp",
",",
"self",
".",
"_part_sizes_tensor",
",",
"0",
",",
"num",
"=",
"self",
".",
"_num_experts",
")"
] |
Create one input Tensor for each expert.
The `Tensor` for an expert `i` contains the slices of `inp` corresponding
to the batch elements `b` where `gates[b, i] > 0`.
Args:
  inp: a `Tensor` of shape `[batch_size, <extra_input_dims>]`
Returns:
a list of `num_experts` `Tensor`s with shapes
`[expert_batch_size_i, <extra_input_dims>]`.
|
[
"Create",
"one",
"input",
"Tensor",
"for",
"each",
"expert",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L794-L807
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
SparseDispatcher.combine
|
def combine(self, expert_out, multiply_by_gates=True):
"""Sum together the expert output, weighted by the gates.
The slice corresponding to a particular batch element `b` is computed
as the sum over all experts `i` of the expert output, weighted by the
corresponding gate values. If `multiply_by_gates` is set to False, the
gate values are ignored.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean
Returns:
a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
"""
# see comments on convert_gradient_to_tensor
stitched = common_layers.convert_gradient_to_tensor(
tf.concat(expert_out, 0))
if multiply_by_gates:
stitched *= tf.expand_dims(self._nonzero_gates, 1)
combined = tf.unsorted_segment_sum(stitched, self._batch_index,
tf.shape(self._gates)[0])
return combined
|
python
|
def combine(self, expert_out, multiply_by_gates=True):
"""Sum together the expert output, weighted by the gates.
The slice corresponding to a particular batch element `b` is computed
as the sum over all experts `i` of the expert output, weighted by the
corresponding gate values. If `multiply_by_gates` is set to False, the
gate values are ignored.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean
Returns:
a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
"""
# see comments on convert_gradient_to_tensor
stitched = common_layers.convert_gradient_to_tensor(
tf.concat(expert_out, 0))
if multiply_by_gates:
stitched *= tf.expand_dims(self._nonzero_gates, 1)
combined = tf.unsorted_segment_sum(stitched, self._batch_index,
tf.shape(self._gates)[0])
return combined
|
[
"def",
"combine",
"(",
"self",
",",
"expert_out",
",",
"multiply_by_gates",
"=",
"True",
")",
":",
"# see comments on convert_gradient_to_tensor",
"stitched",
"=",
"common_layers",
".",
"convert_gradient_to_tensor",
"(",
"tf",
".",
"concat",
"(",
"expert_out",
",",
"0",
")",
")",
"if",
"multiply_by_gates",
":",
"stitched",
"*=",
"tf",
".",
"expand_dims",
"(",
"self",
".",
"_nonzero_gates",
",",
"1",
")",
"combined",
"=",
"tf",
".",
"unsorted_segment_sum",
"(",
"stitched",
",",
"self",
".",
"_batch_index",
",",
"tf",
".",
"shape",
"(",
"self",
".",
"_gates",
")",
"[",
"0",
"]",
")",
"return",
"combined"
] |
Sum together the expert output, weighted by the gates.
The slice corresponding to a particular batch element `b` is computed
as the sum over all experts `i` of the expert output, weighted by the
corresponding gate values. If `multiply_by_gates` is set to False, the
gate values are ignored.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean
Returns:
a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
|
[
"Sum",
"together",
"the",
"expert",
"output",
"weighted",
"by",
"the",
"gates",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L810-L833
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
SparseDispatcher.expert_to_gates
|
def expert_to_gates(self):
"""Gate values corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32`
and shapes `[expert_batch_size_i]`
"""
return tf.split(
self._nonzero_gates, self._part_sizes_tensor, 0, num=self._num_experts)
|
python
|
def expert_to_gates(self):
"""Gate values corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32`
and shapes `[expert_batch_size_i]`
"""
return tf.split(
self._nonzero_gates, self._part_sizes_tensor, 0, num=self._num_experts)
|
[
"def",
"expert_to_gates",
"(",
"self",
")",
":",
"return",
"tf",
".",
"split",
"(",
"self",
".",
"_nonzero_gates",
",",
"self",
".",
"_part_sizes_tensor",
",",
"0",
",",
"num",
"=",
"self",
".",
"_num_experts",
")"
] |
Gate values corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32`
and shapes `[expert_batch_size_i]`
|
[
"Gate",
"values",
"corresponding",
"to",
"the",
"examples",
"in",
"the",
"per",
"-",
"expert",
"Tensor",
"s",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L835-L843
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
SparseDispatcher.expert_to_batch_indices
|
def expert_to_batch_indices(self):
"""Batch indices corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s with type `tf.int64`
and shapes `[expert_batch_size_i]`
"""
return tf.split(
self._batch_index, self._part_sizes_tensor, 0, num=self._num_experts)
|
python
|
def expert_to_batch_indices(self):
"""Batch indices corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s with type `tf.int64`
and shapes `[expert_batch_size_i]`
"""
return tf.split(
self._batch_index, self._part_sizes_tensor, 0, num=self._num_experts)
|
[
"def",
"expert_to_batch_indices",
"(",
"self",
")",
":",
"return",
"tf",
".",
"split",
"(",
"self",
".",
"_batch_index",
",",
"self",
".",
"_part_sizes_tensor",
",",
"0",
",",
"num",
"=",
"self",
".",
"_num_experts",
")"
] |
Batch indices corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s with type `tf.int64`
and shapes `[expert_batch_size_i]`
|
[
"Batch",
"indices",
"corresponding",
"to",
"the",
"examples",
"in",
"the",
"per",
"-",
"expert",
"Tensor",
"s",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L845-L853
|
train
|
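A sketch of how the two bookkeeping accessors above (expert_to_gates and expert_to_batch_indices) line up with the dispatched slices, under the same assumed `SparseDispatcher(num_experts, gates)` constructor; the gate values are illustrative:

import tensorflow as tf
from tensor2tensor.utils import expert_utils

gates = tf.constant([[0.9, 0.0],
                     [0.0, 0.4],
                     [0.2, 0.8]])  # [batch=3, num_experts=2]
dispatcher = expert_utils.SparseDispatcher(2, gates)

gate_slices = dispatcher.expert_to_gates()           # list of 2 float32 Tensors
index_slices = dispatcher.expert_to_batch_indices()  # list of 2 int64 Tensors
# index_slices[i][k] is the batch row behind the k-th example sent to
# expert i, and gate_slices[i][k] is the gate value it will be weighted by.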
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
DistributedSparseDispatcher.dispatch
|
def dispatch(self, inp):
"""Create one input Tensor for each expert.
Args:
inp: a list of length num_datashards `Tensor`s with shapes
`[batch_size[d], <extra_input_dims>]`.
Returns:
a list of `num_experts` `Tensor`s with shapes
`[num_examples[i], <extra_input_dims>]`.
"""
dispatched = self._dp(lambda a, b: a.dispatch(b), self._dispatchers, inp)
ret = self._ep(tf.concat, transpose_list_of_lists(dispatched), 0)
if ret[0].dtype == tf.float32:
# see comments on common_layers.convert_gradient_to_tensor
ret = self._ep(common_layers.convert_gradient_to_tensor, ret)
return ret
|
python
|
def dispatch(self, inp):
"""Create one input Tensor for each expert.
Args:
inp: a list of length num_datashards `Tensor`s with shapes
`[batch_size[d], <extra_input_dims>]`.
Returns:
a list of `num_experts` `Tensor`s with shapes
`[num_examples[i], <extra_input_dims>]`.
"""
dispatched = self._dp(lambda a, b: a.dispatch(b), self._dispatchers, inp)
ret = self._ep(tf.concat, transpose_list_of_lists(dispatched), 0)
if ret[0].dtype == tf.float32:
# see comments on common_layers.convert_gradient_to_tensor
ret = self._ep(common_layers.convert_gradient_to_tensor, ret)
return ret
|
[
"def",
"dispatch",
"(",
"self",
",",
"inp",
")",
":",
"dispatched",
"=",
"self",
".",
"_dp",
"(",
"lambda",
"a",
",",
"b",
":",
"a",
".",
"dispatch",
"(",
"b",
")",
",",
"self",
".",
"_dispatchers",
",",
"inp",
")",
"ret",
"=",
"self",
".",
"_ep",
"(",
"tf",
".",
"concat",
",",
"transpose_list_of_lists",
"(",
"dispatched",
")",
",",
"0",
")",
"if",
"ret",
"[",
"0",
"]",
".",
"dtype",
"==",
"tf",
".",
"float32",
":",
"# see comments on common_layers.convert_gradient_to_tensor",
"ret",
"=",
"self",
".",
"_ep",
"(",
"common_layers",
".",
"convert_gradient_to_tensor",
",",
"ret",
")",
"return",
"ret"
] |
Create one input Tensor for each expert.
Args:
inp: a list of length num_datashards `Tensor`s with shapes
`[batch_size[d], <extra_input_dims>]`.
Returns:
a list of `num_experts` `Tensor`s with shapes
`[num_examples[i], <extra_input_dims>]`.
|
[
"Create",
"one",
"input",
"Tensor",
"for",
"each",
"expert",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L890-L905
|
train
|
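The helper transpose_list_of_lists used above flips the nesting from [datashard][expert] to [expert][datashard], so each expert can concatenate its parts from all datashards. A standalone sketch of that behaviour with plain Python lists (the implementation and values here are illustrative, not the library's own):

def transpose_list_of_lists(lol):
  # Flip an [outer][inner] list of lists into [inner][outer].
  return [list(col) for col in zip(*lol)]

dispatched = [["d0e0", "d0e1"],  # datashard 0: one part per expert
              ["d1e0", "d1e1"]]  # datashard 1
assert transpose_list_of_lists(dispatched) == [["d0e0", "d1e0"],
                                               ["d0e1", "d1e1"]]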
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
DistributedSparseDispatcher.combine
|
def combine(self, expert_out, multiply_by_gates=True):
"""Sum together the expert output, multiplied by the corresponding gates.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean.
Returns:
a list of num_datashards `Tensor`s with shapes
`[batch_size[d], <extra_output_dims>]`.
"""
expert_part_sizes = tf.unstack(
tf.stack([d.part_sizes for d in self._dispatchers]),
num=self._ep.n,
axis=1)
# list of lists of shape [num_experts][num_datashards]
expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes)
expert_output_parts_t = transpose_list_of_lists(expert_output_parts)
def my_combine(dispatcher, parts):
return dispatcher.combine(
common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)),
multiply_by_gates=multiply_by_gates)
return self._dp(my_combine, self._dispatchers, expert_output_parts_t)
|
python
|
def combine(self, expert_out, multiply_by_gates=True):
"""Sum together the expert output, multiplied by the corresponding gates.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean.
Returns:
a list of num_datashards `Tensor`s with shapes
`[batch_size[d], <extra_output_dims>]`.
"""
expert_part_sizes = tf.unstack(
tf.stack([d.part_sizes for d in self._dispatchers]),
num=self._ep.n,
axis=1)
# list of lists of shape [num_experts][num_datashards]
expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes)
expert_output_parts_t = transpose_list_of_lists(expert_output_parts)
def my_combine(dispatcher, parts):
return dispatcher.combine(
common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)),
multiply_by_gates=multiply_by_gates)
return self._dp(my_combine, self._dispatchers, expert_output_parts_t)
|
[
"def",
"combine",
"(",
"self",
",",
"expert_out",
",",
"multiply_by_gates",
"=",
"True",
")",
":",
"expert_part_sizes",
"=",
"tf",
".",
"unstack",
"(",
"tf",
".",
"stack",
"(",
"[",
"d",
".",
"part_sizes",
"for",
"d",
"in",
"self",
".",
"_dispatchers",
"]",
")",
",",
"num",
"=",
"self",
".",
"_ep",
".",
"n",
",",
"axis",
"=",
"1",
")",
"# list of lists of shape [num_experts][num_datashards]",
"expert_output_parts",
"=",
"self",
".",
"_ep",
"(",
"tf",
".",
"split",
",",
"expert_out",
",",
"expert_part_sizes",
")",
"expert_output_parts_t",
"=",
"transpose_list_of_lists",
"(",
"expert_output_parts",
")",
"def",
"my_combine",
"(",
"dispatcher",
",",
"parts",
")",
":",
"return",
"dispatcher",
".",
"combine",
"(",
"common_layers",
".",
"convert_gradient_to_tensor",
"(",
"tf",
".",
"concat",
"(",
"parts",
",",
"0",
")",
")",
",",
"multiply_by_gates",
"=",
"multiply_by_gates",
")",
"return",
"self",
".",
"_dp",
"(",
"my_combine",
",",
"self",
".",
"_dispatchers",
",",
"expert_output_parts_t",
")"
] |
Sum together the expert output, multiplied by the corresponding gates.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean.
Returns:
a list of num_datashards `Tensor`s with shapes
`[batch_size[d], <extra_output_dims>]`.
|
[
"Sum",
"together",
"the",
"expert",
"output",
"multiplied",
"by",
"the",
"corresponding",
"gates",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L907-L930
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
DistributedSparseDispatcher.expert_to_gates
|
def expert_to_gates(self):
"""Gate values corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s of type `tf.float32`.
"""
return self._ep(
tf.concat,
transpose_list_of_lists(
self._dp(lambda d: d.expert_to_gates(), self._dispatchers)), 0)
|
python
|
def expert_to_gates(self):
"""Gate values corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s of type `tf.float32`.
"""
return self._ep(
tf.concat,
transpose_list_of_lists(
self._dp(lambda d: d.expert_to_gates(), self._dispatchers)), 0)
|
[
"def",
"expert_to_gates",
"(",
"self",
")",
":",
"return",
"self",
".",
"_ep",
"(",
"tf",
".",
"concat",
",",
"transpose_list_of_lists",
"(",
"self",
".",
"_dp",
"(",
"lambda",
"d",
":",
"d",
".",
"expert_to_gates",
"(",
")",
",",
"self",
".",
"_dispatchers",
")",
")",
",",
"0",
")"
] |
Gate values corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s of type `tf.float32`.
|
[
"Gate",
"values",
"corresponding",
"to",
"the",
"examples",
"in",
"the",
"per",
"-",
"expert",
"Tensor",
"s",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L932-L941
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
TruncatingDispatcher.dispatch
|
def dispatch(self, inp):
"""Send the inputs to the experts.
Args:
      inp: a `Tensor` of shape `[batch, length, depth]`
Returns:
a tensor with shape [batch, num_experts, expert_capacity, depth]
"""
inp = tf.reshape(inp, [self._batch * self._length, -1])
# [batch, num_experts, expert_capacity, depth]
ret = tf.gather(inp, self._flat_indices)
return ret
|
python
|
def dispatch(self, inp):
"""Send the inputs to the experts.
Args:
      inp: a `Tensor` of shape `[batch, length, depth]`
Returns:
a tensor with shape [batch, num_experts, expert_capacity, depth]
"""
inp = tf.reshape(inp, [self._batch * self._length, -1])
# [batch, num_experts, expert_capacity, depth]
ret = tf.gather(inp, self._flat_indices)
return ret
|
[
"def",
"dispatch",
"(",
"self",
",",
"inp",
")",
":",
"inp",
"=",
"tf",
".",
"reshape",
"(",
"inp",
",",
"[",
"self",
".",
"_batch",
"*",
"self",
".",
"_length",
",",
"-",
"1",
"]",
")",
"# [batch, num_experts, expert_capacity, depth]",
"ret",
"=",
"tf",
".",
"gather",
"(",
"inp",
",",
"self",
".",
"_flat_indices",
")",
"return",
"ret"
] |
Send the inputs to the experts.
Args:
  inp: a `Tensor` of shape `[batch, length, depth]`
Returns:
a tensor with shape [batch, num_experts, expert_capacity, depth]
|
[
"Send",
"the",
"inputs",
"to",
"the",
"experts",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L1158-L1169
|
train
|
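A toy sketch of the flatten-then-gather step in TruncatingDispatcher.dispatch above; the index values stand in for the precomputed `_flat_indices` and are made up, only the shapes matter:

import tensorflow as tf

batch, length, depth = 2, 3, 2
inp = tf.reshape(tf.range(batch * length * depth, dtype=tf.float32),
                 [batch, length, depth])
flat = tf.reshape(inp, [batch * length, -1])
# Hypothetical [batch, num_experts=1, expert_capacity=2] positions into the
# flattened batch*length axis.
flat_indices = tf.constant([[[0, 1]], [[3, 5]]])
dispatched = tf.gather(flat, flat_indices)  # [2, 1, 2, depth]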
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
TruncatingDispatcher.combine
|
def combine(self, x):
"""Return the output from the experts.
When one example goes to multiple experts, the outputs are summed.
Args:
x: a Tensor with shape [batch, num_experts, expert_capacity, depth]
Returns:
      a `Tensor` with shape `[batch, length, depth]`
"""
depth = tf.shape(x)[-1]
x *= tf.expand_dims(self._nonpadding, -1)
ret = tf.unsorted_segment_sum(
x, self._flat_indices, num_segments=self._batch * self._length)
ret = tf.reshape(ret, [self._batch, self._length, depth])
return ret
|
python
|
def combine(self, x):
"""Return the output from the experts.
When one example goes to multiple experts, the outputs are summed.
Args:
x: a Tensor with shape [batch, num_experts, expert_capacity, depth]
Returns:
      a `Tensor` with shape `[batch, length, depth]`
"""
depth = tf.shape(x)[-1]
x *= tf.expand_dims(self._nonpadding, -1)
ret = tf.unsorted_segment_sum(
x, self._flat_indices, num_segments=self._batch * self._length)
ret = tf.reshape(ret, [self._batch, self._length, depth])
return ret
|
[
"def",
"combine",
"(",
"self",
",",
"x",
")",
":",
"depth",
"=",
"tf",
".",
"shape",
"(",
"x",
")",
"[",
"-",
"1",
"]",
"x",
"*=",
"tf",
".",
"expand_dims",
"(",
"self",
".",
"_nonpadding",
",",
"-",
"1",
")",
"ret",
"=",
"tf",
".",
"unsorted_segment_sum",
"(",
"x",
",",
"self",
".",
"_flat_indices",
",",
"num_segments",
"=",
"self",
".",
"_batch",
"*",
"self",
".",
"_length",
")",
"ret",
"=",
"tf",
".",
"reshape",
"(",
"ret",
",",
"[",
"self",
".",
"_batch",
",",
"self",
".",
"_length",
",",
"depth",
"]",
")",
"return",
"ret"
] |
Return the output from the experts.
When one example goes to multiple experts, the outputs are summed.
Args:
x: a Tensor with shape [batch, num_experts, expert_capacity, depth]
Returns:
  a `Tensor` with shape `[batch, length, depth]`
|
[
"Return",
"the",
"output",
"from",
"the",
"experts",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L1172-L1188
|
train
|
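The summation in combine is done by tf.unsorted_segment_sum; a small sketch of that op in isolation (values illustrative):

import tensorflow as tf

x = tf.constant([[1.0], [2.0], [3.0], [4.0]])
flat_indices = tf.constant([0, 2, 2, 1])
out = tf.unsorted_segment_sum(x, flat_indices, num_segments=3)
# out == [[1.0], [4.0], [5.0]]: rows mapped to the same flat position
# (here rows 1 and 2, both in segment 2) are summed together.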
tensorflow/tensor2tensor
|
tensor2tensor/rl/evaluator.py
|
make_env
|
def make_env(env_type, real_env, sim_env_kwargs):
"""Factory function for envs."""
return {
"real": lambda: real_env.new_like( # pylint: disable=g-long-lambda
batch_size=sim_env_kwargs["batch_size"],
store_rollouts=False,
),
"simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames( # pylint: disable=g-long-lambda
**sim_env_kwargs
),
}[env_type]()
|
python
|
def make_env(env_type, real_env, sim_env_kwargs):
"""Factory function for envs."""
return {
"real": lambda: real_env.new_like( # pylint: disable=g-long-lambda
batch_size=sim_env_kwargs["batch_size"],
store_rollouts=False,
),
"simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames( # pylint: disable=g-long-lambda
**sim_env_kwargs
),
}[env_type]()
|
[
"def",
"make_env",
"(",
"env_type",
",",
"real_env",
",",
"sim_env_kwargs",
")",
":",
"return",
"{",
"\"real\"",
":",
"lambda",
":",
"real_env",
".",
"new_like",
"(",
"# pylint: disable=g-long-lambda",
"batch_size",
"=",
"sim_env_kwargs",
"[",
"\"batch_size\"",
"]",
",",
"store_rollouts",
"=",
"False",
",",
")",
",",
"\"simulated\"",
":",
"lambda",
":",
"rl_utils",
".",
"SimulatedBatchGymEnvWithFixedInitialFrames",
"(",
"# pylint: disable=g-long-lambda",
"*",
"*",
"sim_env_kwargs",
")",
",",
"}",
"[",
"env_type",
"]",
"(",
")"
] |
Factory function for envs.
|
[
"Factory",
"function",
"for",
"envs",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L240-L250
|
train
|
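make_env uses the dict-of-lambdas idiom: both branches are written out, but only the lambda selected by `env_type` is called, so the other environment is never constructed. A minimal standalone sketch of the idiom (names and values illustrative):

def make_thing(kind):
  return {
      "small": lambda: list(range(10)),
      "large": lambda: list(range(10**6)),  # never built unless selected
  }[kind]()

assert len(make_thing("small")) == 10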
tensorflow/tensor2tensor
|
tensor2tensor/rl/evaluator.py
|
make_agent
|
def make_agent(
agent_type, env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs_fn=None, frame_stack_size=None, rollout_agent_type=None,
batch_size=None, inner_batch_size=None, env_type=None, **planner_kwargs
):
"""Factory function for Agents."""
if batch_size is None:
batch_size = env.batch_size
return {
"random": lambda: rl_utils.RandomAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space
),
"policy": lambda: rl_utils.PolicyAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space,
policy_hparams, policy_dir, sampling_temp
),
"planner": lambda: rl_utils.PlannerAgent( # pylint: disable=g-long-lambda
batch_size, make_agent(
rollout_agent_type, env, policy_hparams, policy_dir,
sampling_temp, batch_size=inner_batch_size
), make_env(env_type, env.env, sim_env_kwargs_fn()),
lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size),
discount_factor=policy_hparams.gae_gamma, **planner_kwargs
),
}[agent_type]()
|
python
|
def make_agent(
agent_type, env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs_fn=None, frame_stack_size=None, rollout_agent_type=None,
batch_size=None, inner_batch_size=None, env_type=None, **planner_kwargs
):
"""Factory function for Agents."""
if batch_size is None:
batch_size = env.batch_size
return {
"random": lambda: rl_utils.RandomAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space
),
"policy": lambda: rl_utils.PolicyAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space,
policy_hparams, policy_dir, sampling_temp
),
"planner": lambda: rl_utils.PlannerAgent( # pylint: disable=g-long-lambda
batch_size, make_agent(
rollout_agent_type, env, policy_hparams, policy_dir,
sampling_temp, batch_size=inner_batch_size
), make_env(env_type, env.env, sim_env_kwargs_fn()),
lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size),
discount_factor=policy_hparams.gae_gamma, **planner_kwargs
),
}[agent_type]()
|
[
"def",
"make_agent",
"(",
"agent_type",
",",
"env",
",",
"policy_hparams",
",",
"policy_dir",
",",
"sampling_temp",
",",
"sim_env_kwargs_fn",
"=",
"None",
",",
"frame_stack_size",
"=",
"None",
",",
"rollout_agent_type",
"=",
"None",
",",
"batch_size",
"=",
"None",
",",
"inner_batch_size",
"=",
"None",
",",
"env_type",
"=",
"None",
",",
"*",
"*",
"planner_kwargs",
")",
":",
"if",
"batch_size",
"is",
"None",
":",
"batch_size",
"=",
"env",
".",
"batch_size",
"return",
"{",
"\"random\"",
":",
"lambda",
":",
"rl_utils",
".",
"RandomAgent",
"(",
"# pylint: disable=g-long-lambda",
"batch_size",
",",
"env",
".",
"observation_space",
",",
"env",
".",
"action_space",
")",
",",
"\"policy\"",
":",
"lambda",
":",
"rl_utils",
".",
"PolicyAgent",
"(",
"# pylint: disable=g-long-lambda",
"batch_size",
",",
"env",
".",
"observation_space",
",",
"env",
".",
"action_space",
",",
"policy_hparams",
",",
"policy_dir",
",",
"sampling_temp",
")",
",",
"\"planner\"",
":",
"lambda",
":",
"rl_utils",
".",
"PlannerAgent",
"(",
"# pylint: disable=g-long-lambda",
"batch_size",
",",
"make_agent",
"(",
"rollout_agent_type",
",",
"env",
",",
"policy_hparams",
",",
"policy_dir",
",",
"sampling_temp",
",",
"batch_size",
"=",
"inner_batch_size",
")",
",",
"make_env",
"(",
"env_type",
",",
"env",
".",
"env",
",",
"sim_env_kwargs_fn",
"(",
")",
")",
",",
"lambda",
"env",
":",
"rl_utils",
".",
"BatchStackWrapper",
"(",
"env",
",",
"frame_stack_size",
")",
",",
"discount_factor",
"=",
"policy_hparams",
".",
"gae_gamma",
",",
"*",
"*",
"planner_kwargs",
")",
",",
"}",
"[",
"agent_type",
"]",
"(",
")"
] |
Factory function for Agents.
|
[
"Factory",
"function",
"for",
"Agents",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L253-L277
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/evaluator.py
|
collect_frames_for_random_starts
|
def collect_frames_for_random_starts(
storage_env, stacked_env, agent, frame_stack_size, random_starts_step_limit,
log_every_steps=None
):
"""Collects frames from real env for random starts of simulated env."""
del frame_stack_size
storage_env.start_new_epoch(0)
tf.logging.info(
"Collecting %d frames for random starts.", random_starts_step_limit
)
rl_utils.run_rollouts(
stacked_env, agent, stacked_env.reset(),
step_limit=random_starts_step_limit,
many_rollouts_from_each_env=True,
log_every_steps=log_every_steps,
)
# Save unfinished rollouts to history.
stacked_env.reset()
|
python
|
def collect_frames_for_random_starts(
storage_env, stacked_env, agent, frame_stack_size, random_starts_step_limit,
log_every_steps=None
):
"""Collects frames from real env for random starts of simulated env."""
del frame_stack_size
storage_env.start_new_epoch(0)
tf.logging.info(
"Collecting %d frames for random starts.", random_starts_step_limit
)
rl_utils.run_rollouts(
stacked_env, agent, stacked_env.reset(),
step_limit=random_starts_step_limit,
many_rollouts_from_each_env=True,
log_every_steps=log_every_steps,
)
# Save unfinished rollouts to history.
stacked_env.reset()
|
[
"def",
"collect_frames_for_random_starts",
"(",
"storage_env",
",",
"stacked_env",
",",
"agent",
",",
"frame_stack_size",
",",
"random_starts_step_limit",
",",
"log_every_steps",
"=",
"None",
")",
":",
"del",
"frame_stack_size",
"storage_env",
".",
"start_new_epoch",
"(",
"0",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Collecting %d frames for random starts.\"",
",",
"random_starts_step_limit",
")",
"rl_utils",
".",
"run_rollouts",
"(",
"stacked_env",
",",
"agent",
",",
"stacked_env",
".",
"reset",
"(",
")",
",",
"step_limit",
"=",
"random_starts_step_limit",
",",
"many_rollouts_from_each_env",
"=",
"True",
",",
"log_every_steps",
"=",
"log_every_steps",
",",
")",
"# Save unfinished rollouts to history.",
"stacked_env",
".",
"reset",
"(",
")"
] |
Collects frames from real env for random starts of simulated env.
|
[
"Collects",
"frames",
"from",
"real",
"env",
"for",
"random",
"starts",
"of",
"simulated",
"env",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L280-L297
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/evaluator.py
|
make_agent_from_hparams
|
def make_agent_from_hparams(
agent_type, base_env, stacked_env, loop_hparams, policy_hparams,
planner_hparams, model_dir, policy_dir, sampling_temp, video_writers=()
):
"""Creates an Agent from hparams."""
def sim_env_kwargs_fn():
return rl.make_simulated_env_kwargs(
base_env, loop_hparams, batch_size=planner_hparams.batch_size,
model_dir=model_dir
)
planner_kwargs = planner_hparams.values()
planner_kwargs.pop("batch_size")
planner_kwargs.pop("rollout_agent_type")
planner_kwargs.pop("env_type")
return make_agent(
agent_type, stacked_env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs_fn, loop_hparams.frame_stack_size,
planner_hparams.rollout_agent_type,
inner_batch_size=planner_hparams.batch_size,
env_type=planner_hparams.env_type,
video_writers=video_writers, **planner_kwargs
)
|
python
|
def make_agent_from_hparams(
agent_type, base_env, stacked_env, loop_hparams, policy_hparams,
planner_hparams, model_dir, policy_dir, sampling_temp, video_writers=()
):
"""Creates an Agent from hparams."""
def sim_env_kwargs_fn():
return rl.make_simulated_env_kwargs(
base_env, loop_hparams, batch_size=planner_hparams.batch_size,
model_dir=model_dir
)
planner_kwargs = planner_hparams.values()
planner_kwargs.pop("batch_size")
planner_kwargs.pop("rollout_agent_type")
planner_kwargs.pop("env_type")
return make_agent(
agent_type, stacked_env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs_fn, loop_hparams.frame_stack_size,
planner_hparams.rollout_agent_type,
inner_batch_size=planner_hparams.batch_size,
env_type=planner_hparams.env_type,
video_writers=video_writers, **planner_kwargs
)
|
[
"def",
"make_agent_from_hparams",
"(",
"agent_type",
",",
"base_env",
",",
"stacked_env",
",",
"loop_hparams",
",",
"policy_hparams",
",",
"planner_hparams",
",",
"model_dir",
",",
"policy_dir",
",",
"sampling_temp",
",",
"video_writers",
"=",
"(",
")",
")",
":",
"def",
"sim_env_kwargs_fn",
"(",
")",
":",
"return",
"rl",
".",
"make_simulated_env_kwargs",
"(",
"base_env",
",",
"loop_hparams",
",",
"batch_size",
"=",
"planner_hparams",
".",
"batch_size",
",",
"model_dir",
"=",
"model_dir",
")",
"planner_kwargs",
"=",
"planner_hparams",
".",
"values",
"(",
")",
"planner_kwargs",
".",
"pop",
"(",
"\"batch_size\"",
")",
"planner_kwargs",
".",
"pop",
"(",
"\"rollout_agent_type\"",
")",
"planner_kwargs",
".",
"pop",
"(",
"\"env_type\"",
")",
"return",
"make_agent",
"(",
"agent_type",
",",
"stacked_env",
",",
"policy_hparams",
",",
"policy_dir",
",",
"sampling_temp",
",",
"sim_env_kwargs_fn",
",",
"loop_hparams",
".",
"frame_stack_size",
",",
"planner_hparams",
".",
"rollout_agent_type",
",",
"inner_batch_size",
"=",
"planner_hparams",
".",
"batch_size",
",",
"env_type",
"=",
"planner_hparams",
".",
"env_type",
",",
"video_writers",
"=",
"video_writers",
",",
"*",
"*",
"planner_kwargs",
")"
] |
Creates an Agent from hparams.
|
[
"Creates",
"an",
"Agent",
"from",
"hparams",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L300-L321
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/evaluator.py
|
make_eval_fn_with_agent
|
def make_eval_fn_with_agent(
agent_type, eval_mode, planner_hparams, model_dir, log_every_steps=None,
video_writers=(), random_starts_step_limit=None
):
"""Returns an out-of-graph eval_fn using the Agent API."""
def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp):
"""Eval function."""
base_env = env
env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size)
agent = make_agent_from_hparams(
agent_type, base_env, env, loop_hparams, policy_hparams,
planner_hparams, model_dir, policy_dir, sampling_temp, video_writers
)
if eval_mode == "agent_simulated":
real_env = base_env.new_like(batch_size=1)
stacked_env = rl_utils.BatchStackWrapper(
real_env, loop_hparams.frame_stack_size
)
collect_frames_for_random_starts(
real_env, stacked_env, agent, loop_hparams.frame_stack_size,
random_starts_step_limit, log_every_steps
)
initial_frame_chooser = rl_utils.make_initial_frame_chooser(
real_env, loop_hparams.frame_stack_size,
simulation_random_starts=True,
simulation_flip_first_random_for_beginning=False,
split=None,
)
env_fn = rl.make_simulated_env_fn_from_hparams(
real_env, loop_hparams, batch_size=loop_hparams.eval_batch_size,
initial_frame_chooser=initial_frame_chooser, model_dir=model_dir
)
sim_env = env_fn(in_graph=False)
env = rl_utils.BatchStackWrapper(sim_env, loop_hparams.frame_stack_size)
kwargs = {}
if not agent.records_own_videos:
kwargs["video_writers"] = video_writers
step_limit = base_env.rl_env_max_episode_steps
if step_limit == -1:
step_limit = None
rl_utils.run_rollouts(
env, agent, env.reset(), log_every_steps=log_every_steps,
step_limit=step_limit, **kwargs
)
if eval_mode == "agent_real":
assert len(base_env.current_epoch_rollouts()) == env.batch_size
return eval_fn
|
python
|
def make_eval_fn_with_agent(
agent_type, eval_mode, planner_hparams, model_dir, log_every_steps=None,
video_writers=(), random_starts_step_limit=None
):
"""Returns an out-of-graph eval_fn using the Agent API."""
def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp):
"""Eval function."""
base_env = env
env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size)
agent = make_agent_from_hparams(
agent_type, base_env, env, loop_hparams, policy_hparams,
planner_hparams, model_dir, policy_dir, sampling_temp, video_writers
)
if eval_mode == "agent_simulated":
real_env = base_env.new_like(batch_size=1)
stacked_env = rl_utils.BatchStackWrapper(
real_env, loop_hparams.frame_stack_size
)
collect_frames_for_random_starts(
real_env, stacked_env, agent, loop_hparams.frame_stack_size,
random_starts_step_limit, log_every_steps
)
initial_frame_chooser = rl_utils.make_initial_frame_chooser(
real_env, loop_hparams.frame_stack_size,
simulation_random_starts=True,
simulation_flip_first_random_for_beginning=False,
split=None,
)
env_fn = rl.make_simulated_env_fn_from_hparams(
real_env, loop_hparams, batch_size=loop_hparams.eval_batch_size,
initial_frame_chooser=initial_frame_chooser, model_dir=model_dir
)
sim_env = env_fn(in_graph=False)
env = rl_utils.BatchStackWrapper(sim_env, loop_hparams.frame_stack_size)
kwargs = {}
if not agent.records_own_videos:
kwargs["video_writers"] = video_writers
step_limit = base_env.rl_env_max_episode_steps
if step_limit == -1:
step_limit = None
rl_utils.run_rollouts(
env, agent, env.reset(), log_every_steps=log_every_steps,
step_limit=step_limit, **kwargs
)
if eval_mode == "agent_real":
assert len(base_env.current_epoch_rollouts()) == env.batch_size
return eval_fn
|
[
"def",
"make_eval_fn_with_agent",
"(",
"agent_type",
",",
"eval_mode",
",",
"planner_hparams",
",",
"model_dir",
",",
"log_every_steps",
"=",
"None",
",",
"video_writers",
"=",
"(",
")",
",",
"random_starts_step_limit",
"=",
"None",
")",
":",
"def",
"eval_fn",
"(",
"env",
",",
"loop_hparams",
",",
"policy_hparams",
",",
"policy_dir",
",",
"sampling_temp",
")",
":",
"\"\"\"Eval function.\"\"\"",
"base_env",
"=",
"env",
"env",
"=",
"rl_utils",
".",
"BatchStackWrapper",
"(",
"env",
",",
"loop_hparams",
".",
"frame_stack_size",
")",
"agent",
"=",
"make_agent_from_hparams",
"(",
"agent_type",
",",
"base_env",
",",
"env",
",",
"loop_hparams",
",",
"policy_hparams",
",",
"planner_hparams",
",",
"model_dir",
",",
"policy_dir",
",",
"sampling_temp",
",",
"video_writers",
")",
"if",
"eval_mode",
"==",
"\"agent_simulated\"",
":",
"real_env",
"=",
"base_env",
".",
"new_like",
"(",
"batch_size",
"=",
"1",
")",
"stacked_env",
"=",
"rl_utils",
".",
"BatchStackWrapper",
"(",
"real_env",
",",
"loop_hparams",
".",
"frame_stack_size",
")",
"collect_frames_for_random_starts",
"(",
"real_env",
",",
"stacked_env",
",",
"agent",
",",
"loop_hparams",
".",
"frame_stack_size",
",",
"random_starts_step_limit",
",",
"log_every_steps",
")",
"initial_frame_chooser",
"=",
"rl_utils",
".",
"make_initial_frame_chooser",
"(",
"real_env",
",",
"loop_hparams",
".",
"frame_stack_size",
",",
"simulation_random_starts",
"=",
"True",
",",
"simulation_flip_first_random_for_beginning",
"=",
"False",
",",
"split",
"=",
"None",
",",
")",
"env_fn",
"=",
"rl",
".",
"make_simulated_env_fn_from_hparams",
"(",
"real_env",
",",
"loop_hparams",
",",
"batch_size",
"=",
"loop_hparams",
".",
"eval_batch_size",
",",
"initial_frame_chooser",
"=",
"initial_frame_chooser",
",",
"model_dir",
"=",
"model_dir",
")",
"sim_env",
"=",
"env_fn",
"(",
"in_graph",
"=",
"False",
")",
"env",
"=",
"rl_utils",
".",
"BatchStackWrapper",
"(",
"sim_env",
",",
"loop_hparams",
".",
"frame_stack_size",
")",
"kwargs",
"=",
"{",
"}",
"if",
"not",
"agent",
".",
"records_own_videos",
":",
"kwargs",
"[",
"\"video_writers\"",
"]",
"=",
"video_writers",
"step_limit",
"=",
"base_env",
".",
"rl_env_max_episode_steps",
"if",
"step_limit",
"==",
"-",
"1",
":",
"step_limit",
"=",
"None",
"rl_utils",
".",
"run_rollouts",
"(",
"env",
",",
"agent",
",",
"env",
".",
"reset",
"(",
")",
",",
"log_every_steps",
"=",
"log_every_steps",
",",
"step_limit",
"=",
"step_limit",
",",
"*",
"*",
"kwargs",
")",
"if",
"eval_mode",
"==",
"\"agent_real\"",
":",
"assert",
"len",
"(",
"base_env",
".",
"current_epoch_rollouts",
"(",
")",
")",
"==",
"env",
".",
"batch_size",
"return",
"eval_fn"
] |
Returns an out-of-graph eval_fn using the Agent API.
|
[
"Returns",
"an",
"out",
"-",
"of",
"-",
"graph",
"eval_fn",
"using",
"the",
"Agent",
"API",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L324-L372
|
train
|
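make_eval_fn_with_agent is a closure factory: the returned eval_fn captures agent_type, eval_mode, planner_hparams and the other arguments at creation time, while exposing only the (env, loop_hparams, policy_hparams, policy_dir, sampling_temp) interface its caller expects. A minimal sketch of the same idiom (illustrative names):

def make_scaler(scale):
  def scaler(x):
    return x * scale  # `scale` is bound when the factory is called
  return scaler

double = make_scaler(2)
assert double(21) == 42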
tensorflow/tensor2tensor
|
tensor2tensor/rl/evaluator.py
|
evaluate_world_model
|
def evaluate_world_model(
agent_type, loop_hparams, planner_hparams, model_dir, policy_dir,
random_starts_step_limit, debug_video_path, log_every_steps
):
"""Evaluates the world model."""
if debug_video_path:
debug_video_path = os.path.join(debug_video_path, "0.avi")
storage_env = rl_utils.setup_env(loop_hparams, batch_size=1, max_num_noops=0)
stacked_env = rl_utils.BatchStackWrapper(
storage_env, loop_hparams.frame_stack_size
)
policy_hparams = trainer_lib.create_hparams(loop_hparams.base_algo_params)
agent = make_agent_from_hparams(
agent_type, storage_env, stacked_env, loop_hparams, policy_hparams,
planner_hparams, model_dir, policy_dir,
# TODO(koz4k): Loop over eval_sampling_temps?
sampling_temp=loop_hparams.eval_sampling_temps[0],
)
collect_frames_for_random_starts(
storage_env, stacked_env, agent, loop_hparams.frame_stack_size,
random_starts_step_limit, log_every_steps
)
return rl_utils.evaluate_world_model(
storage_env, loop_hparams, model_dir, debug_video_path, split=None
)
|
python
|
def evaluate_world_model(
agent_type, loop_hparams, planner_hparams, model_dir, policy_dir,
random_starts_step_limit, debug_video_path, log_every_steps
):
"""Evaluates the world model."""
if debug_video_path:
debug_video_path = os.path.join(debug_video_path, "0.avi")
storage_env = rl_utils.setup_env(loop_hparams, batch_size=1, max_num_noops=0)
stacked_env = rl_utils.BatchStackWrapper(
storage_env, loop_hparams.frame_stack_size
)
policy_hparams = trainer_lib.create_hparams(loop_hparams.base_algo_params)
agent = make_agent_from_hparams(
agent_type, storage_env, stacked_env, loop_hparams, policy_hparams,
planner_hparams, model_dir, policy_dir,
# TODO(koz4k): Loop over eval_sampling_temps?
sampling_temp=loop_hparams.eval_sampling_temps[0],
)
collect_frames_for_random_starts(
storage_env, stacked_env, agent, loop_hparams.frame_stack_size,
random_starts_step_limit, log_every_steps
)
return rl_utils.evaluate_world_model(
storage_env, loop_hparams, model_dir, debug_video_path, split=None
)
|
[
"def",
"evaluate_world_model",
"(",
"agent_type",
",",
"loop_hparams",
",",
"planner_hparams",
",",
"model_dir",
",",
"policy_dir",
",",
"random_starts_step_limit",
",",
"debug_video_path",
",",
"log_every_steps",
")",
":",
"if",
"debug_video_path",
":",
"debug_video_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"debug_video_path",
",",
"\"0.avi\"",
")",
"storage_env",
"=",
"rl_utils",
".",
"setup_env",
"(",
"loop_hparams",
",",
"batch_size",
"=",
"1",
",",
"max_num_noops",
"=",
"0",
")",
"stacked_env",
"=",
"rl_utils",
".",
"BatchStackWrapper",
"(",
"storage_env",
",",
"loop_hparams",
".",
"frame_stack_size",
")",
"policy_hparams",
"=",
"trainer_lib",
".",
"create_hparams",
"(",
"loop_hparams",
".",
"base_algo_params",
")",
"agent",
"=",
"make_agent_from_hparams",
"(",
"agent_type",
",",
"storage_env",
",",
"stacked_env",
",",
"loop_hparams",
",",
"policy_hparams",
",",
"planner_hparams",
",",
"model_dir",
",",
"policy_dir",
",",
"# TODO(koz4k): Loop over eval_sampling_temps?",
"sampling_temp",
"=",
"loop_hparams",
".",
"eval_sampling_temps",
"[",
"0",
"]",
",",
")",
"collect_frames_for_random_starts",
"(",
"storage_env",
",",
"stacked_env",
",",
"agent",
",",
"loop_hparams",
".",
"frame_stack_size",
",",
"random_starts_step_limit",
",",
"log_every_steps",
")",
"return",
"rl_utils",
".",
"evaluate_world_model",
"(",
"storage_env",
",",
"loop_hparams",
",",
"model_dir",
",",
"debug_video_path",
",",
"split",
"=",
"None",
")"
] |
Evaluates the world model.
|
[
"Evaluates",
"the",
"world",
"model",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L375-L400
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/evaluator.py
|
evaluate
|
def evaluate(
loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir,
agent_type, eval_mode, eval_with_learner, log_every_steps, debug_video_path,
num_debug_videos=1, random_starts_step_limit=None,
report_fn=None, report_metric=None
):
"""Evaluate."""
if eval_with_learner:
assert agent_type == "policy"
if report_fn:
assert report_metric is not None
eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir)
video_writers = ()
kwargs = {}
if eval_mode in ["agent_real", "agent_simulated"]:
if not eval_with_learner:
if debug_video_path:
tf.gfile.MakeDirs(debug_video_path)
video_writers = [
common_video.WholeVideoWriter( # pylint: disable=g-complex-comprehension
fps=10,
output_path=os.path.join(debug_video_path, "{}.avi".format(i)),
file_format="avi",
)
for i in range(num_debug_videos)
]
kwargs["eval_fn"] = make_eval_fn_with_agent(
agent_type, eval_mode, planner_hparams, model_dir,
log_every_steps=log_every_steps,
video_writers=video_writers,
random_starts_step_limit=random_starts_step_limit
)
eval_metrics = rl_utils.evaluate_all_configs(
loop_hparams, policy_dir, **kwargs
)
else:
eval_metrics = evaluate_world_model(
agent_type, loop_hparams, planner_hparams, model_dir, policy_dir,
random_starts_step_limit, debug_video_path, log_every_steps
)
rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0)
for video_writer in video_writers:
video_writer.finish_to_disk()
# Report metrics
if report_fn:
if report_metric == "mean_reward":
metric_name = rl_utils.get_metric_name(
sampling_temp=loop_hparams.eval_sampling_temps[0],
max_num_noops=loop_hparams.eval_max_num_noops,
clipped=False
)
report_fn(eval_metrics[metric_name], 0)
else:
report_fn(eval_metrics[report_metric], 0)
return eval_metrics
|
python
|
def evaluate(
loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir,
agent_type, eval_mode, eval_with_learner, log_every_steps, debug_video_path,
num_debug_videos=1, random_starts_step_limit=None,
report_fn=None, report_metric=None
):
"""Evaluate."""
if eval_with_learner:
assert agent_type == "policy"
if report_fn:
assert report_metric is not None
eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir)
video_writers = ()
kwargs = {}
if eval_mode in ["agent_real", "agent_simulated"]:
if not eval_with_learner:
if debug_video_path:
tf.gfile.MakeDirs(debug_video_path)
video_writers = [
common_video.WholeVideoWriter( # pylint: disable=g-complex-comprehension
fps=10,
output_path=os.path.join(debug_video_path, "{}.avi".format(i)),
file_format="avi",
)
for i in range(num_debug_videos)
]
kwargs["eval_fn"] = make_eval_fn_with_agent(
agent_type, eval_mode, planner_hparams, model_dir,
log_every_steps=log_every_steps,
video_writers=video_writers,
random_starts_step_limit=random_starts_step_limit
)
eval_metrics = rl_utils.evaluate_all_configs(
loop_hparams, policy_dir, **kwargs
)
else:
eval_metrics = evaluate_world_model(
agent_type, loop_hparams, planner_hparams, model_dir, policy_dir,
random_starts_step_limit, debug_video_path, log_every_steps
)
rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0)
for video_writer in video_writers:
video_writer.finish_to_disk()
# Report metrics
if report_fn:
if report_metric == "mean_reward":
metric_name = rl_utils.get_metric_name(
sampling_temp=loop_hparams.eval_sampling_temps[0],
max_num_noops=loop_hparams.eval_max_num_noops,
clipped=False
)
report_fn(eval_metrics[metric_name], 0)
else:
report_fn(eval_metrics[report_metric], 0)
return eval_metrics
|
[
"def",
"evaluate",
"(",
"loop_hparams",
",",
"planner_hparams",
",",
"policy_dir",
",",
"model_dir",
",",
"eval_metrics_dir",
",",
"agent_type",
",",
"eval_mode",
",",
"eval_with_learner",
",",
"log_every_steps",
",",
"debug_video_path",
",",
"num_debug_videos",
"=",
"1",
",",
"random_starts_step_limit",
"=",
"None",
",",
"report_fn",
"=",
"None",
",",
"report_metric",
"=",
"None",
")",
":",
"if",
"eval_with_learner",
":",
"assert",
"agent_type",
"==",
"\"policy\"",
"if",
"report_fn",
":",
"assert",
"report_metric",
"is",
"not",
"None",
"eval_metrics_writer",
"=",
"tf",
".",
"summary",
".",
"FileWriter",
"(",
"eval_metrics_dir",
")",
"video_writers",
"=",
"(",
")",
"kwargs",
"=",
"{",
"}",
"if",
"eval_mode",
"in",
"[",
"\"agent_real\"",
",",
"\"agent_simulated\"",
"]",
":",
"if",
"not",
"eval_with_learner",
":",
"if",
"debug_video_path",
":",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"debug_video_path",
")",
"video_writers",
"=",
"[",
"common_video",
".",
"WholeVideoWriter",
"(",
"# pylint: disable=g-complex-comprehension",
"fps",
"=",
"10",
",",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"debug_video_path",
",",
"\"{}.avi\"",
".",
"format",
"(",
"i",
")",
")",
",",
"file_format",
"=",
"\"avi\"",
",",
")",
"for",
"i",
"in",
"range",
"(",
"num_debug_videos",
")",
"]",
"kwargs",
"[",
"\"eval_fn\"",
"]",
"=",
"make_eval_fn_with_agent",
"(",
"agent_type",
",",
"eval_mode",
",",
"planner_hparams",
",",
"model_dir",
",",
"log_every_steps",
"=",
"log_every_steps",
",",
"video_writers",
"=",
"video_writers",
",",
"random_starts_step_limit",
"=",
"random_starts_step_limit",
")",
"eval_metrics",
"=",
"rl_utils",
".",
"evaluate_all_configs",
"(",
"loop_hparams",
",",
"policy_dir",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"eval_metrics",
"=",
"evaluate_world_model",
"(",
"agent_type",
",",
"loop_hparams",
",",
"planner_hparams",
",",
"model_dir",
",",
"policy_dir",
",",
"random_starts_step_limit",
",",
"debug_video_path",
",",
"log_every_steps",
")",
"rl_utils",
".",
"summarize_metrics",
"(",
"eval_metrics_writer",
",",
"eval_metrics",
",",
"0",
")",
"for",
"video_writer",
"in",
"video_writers",
":",
"video_writer",
".",
"finish_to_disk",
"(",
")",
"# Report metrics",
"if",
"report_fn",
":",
"if",
"report_metric",
"==",
"\"mean_reward\"",
":",
"metric_name",
"=",
"rl_utils",
".",
"get_metric_name",
"(",
"sampling_temp",
"=",
"loop_hparams",
".",
"eval_sampling_temps",
"[",
"0",
"]",
",",
"max_num_noops",
"=",
"loop_hparams",
".",
"eval_max_num_noops",
",",
"clipped",
"=",
"False",
")",
"report_fn",
"(",
"eval_metrics",
"[",
"metric_name",
"]",
",",
"0",
")",
"else",
":",
"report_fn",
"(",
"eval_metrics",
"[",
"report_metric",
"]",
",",
"0",
")",
"return",
"eval_metrics"
] |
Evaluate.
|
[
"Evaluate",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L403-L461
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/evaluator.py
|
get_game_for_worker
|
def get_game_for_worker(map_name, directory_id):
"""Get game for the given worker (directory) id."""
if map_name == "v100unfriendly":
games = ["chopper_command", "boxing", "asterix", "seaquest"]
worker_per_game = 5
elif map_name == "human_nice":
games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE
worker_per_game = 5
else:
raise ValueError("Unknown worker to game map name: %s" % map_name)
games.sort()
game_id = (directory_id - 1) // worker_per_game
tf.logging.info("Getting game %d from %s." % (game_id, games))
return games[game_id]
|
python
|
def get_game_for_worker(map_name, directory_id):
"""Get game for the given worker (directory) id."""
if map_name == "v100unfriendly":
games = ["chopper_command", "boxing", "asterix", "seaquest"]
worker_per_game = 5
elif map_name == "human_nice":
games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE
worker_per_game = 5
else:
raise ValueError("Unknown worker to game map name: %s" % map_name)
games.sort()
game_id = (directory_id - 1) // worker_per_game
tf.logging.info("Getting game %d from %s." % (game_id, games))
return games[game_id]
|
[
"def",
"get_game_for_worker",
"(",
"map_name",
",",
"directory_id",
")",
":",
"if",
"map_name",
"==",
"\"v100unfriendly\"",
":",
"games",
"=",
"[",
"\"chopper_command\"",
",",
"\"boxing\"",
",",
"\"asterix\"",
",",
"\"seaquest\"",
"]",
"worker_per_game",
"=",
"5",
"elif",
"map_name",
"==",
"\"human_nice\"",
":",
"games",
"=",
"gym_env",
".",
"ATARI_GAMES_WITH_HUMAN_SCORE_NICE",
"worker_per_game",
"=",
"5",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown worker to game map name: %s\"",
"%",
"map_name",
")",
"games",
".",
"sort",
"(",
")",
"game_id",
"=",
"(",
"directory_id",
"-",
"1",
")",
"//",
"worker_per_game",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Getting game %d from %s.\"",
"%",
"(",
"game_id",
",",
"games",
")",
")",
"return",
"games",
"[",
"game_id",
"]"
] |
Get game for the given worker (directory) id.
|
[
"Get",
"game",
"for",
"the",
"given",
"worker",
"(",
"directory",
")",
"id",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L464-L477
|
train
|
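With worker_per_game = 5, consecutive blocks of five directory ids map to the same game: ids 1-5 give game index 0, ids 6-10 give index 1, and so on. A quick check of the arithmetic used above:

worker_per_game = 5
assert (1 - 1) // worker_per_game == 0
assert (5 - 1) // worker_per_game == 0
assert (6 - 1) // worker_per_game == 1
assert (10 - 1) // worker_per_game == 1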
tensorflow/tensor2tensor
|
tensor2tensor/envs/tic_tac_toe_env.py
|
get_open_spaces
|
def get_open_spaces(board):
"""Given a representation of the board, returns a list of open spaces."""
open_spaces = []
for i in range(3):
for j in range(3):
if board[i][j] == 0:
open_spaces.append(encode_pos(i, j))
return open_spaces
|
python
|
def get_open_spaces(board):
"""Given a representation of the board, returns a list of open spaces."""
open_spaces = []
for i in range(3):
for j in range(3):
if board[i][j] == 0:
open_spaces.append(encode_pos(i, j))
return open_spaces
|
[
"def",
"get_open_spaces",
"(",
"board",
")",
":",
"open_spaces",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"for",
"j",
"in",
"range",
"(",
"3",
")",
":",
"if",
"board",
"[",
"i",
"]",
"[",
"j",
"]",
"==",
"0",
":",
"open_spaces",
".",
"append",
"(",
"encode_pos",
"(",
"i",
",",
"j",
")",
")",
"return",
"open_spaces"
] |
Given a representation of the board, returns a list of open spaces.
|
[
"Given",
"a",
"representation",
"of",
"the",
"board",
"returns",
"a",
"list",
"of",
"open",
"spaces",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/tic_tac_toe_env.py#L46-L53
|
train
|
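A quick demonstration of get_open_spaces on a nearly empty board, assuming the module imports as below (encode_pos packs a (row, column) pair into a single action id, so only the count is asserted here):

import numpy as np
from tensor2tensor.envs import tic_tac_toe_env

board = np.zeros((3, 3), dtype=np.int64)
board[1][1] = 1  # centre taken
open_spaces = tic_tac_toe_env.get_open_spaces(board)
assert len(open_spaces) == 8  # every cell except the centre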
tensorflow/tensor2tensor
|
tensor2tensor/envs/tic_tac_toe_env.py
|
get_reward_and_done
|
def get_reward_and_done(board):
"""Given a representation of the board, returns reward and done."""
# Returns (reward, done) where:
# reward: -1 means lost, +1 means win, 0 means draw or continuing.
# done: True if the game is over, i.e. someone won or it is a draw.
# Sum all rows ...
all_sums = [np.sum(board[i, :]) for i in range(3)]
# ... all columns
all_sums.extend([np.sum(board[:, i]) for i in range(3)])
# and both diagonals.
all_sums.append(np.sum([board[i, i] for i in range(3)]))
all_sums.append(np.sum([board[i, 2 - i] for i in range(3)]))
if -3 in all_sums:
return -1, True
if 3 in all_sums:
return 1, True
done = True
if get_open_spaces(board):
done = False
return 0, done
|
python
|
def get_reward_and_done(board):
"""Given a representation of the board, returns reward and done."""
# Returns (reward, done) where:
# reward: -1 means lost, +1 means win, 0 means draw or continuing.
# done: True if the game is over, i.e. someone won or it is a draw.
# Sum all rows ...
all_sums = [np.sum(board[i, :]) for i in range(3)]
# ... all columns
all_sums.extend([np.sum(board[:, i]) for i in range(3)])
# and both diagonals.
all_sums.append(np.sum([board[i, i] for i in range(3)]))
all_sums.append(np.sum([board[i, 2 - i] for i in range(3)]))
if -3 in all_sums:
return -1, True
if 3 in all_sums:
return 1, True
done = True
if get_open_spaces(board):
done = False
return 0, done
|
[
"def",
"get_reward_and_done",
"(",
"board",
")",
":",
"# Returns (reward, done) where:",
"# reward: -1 means lost, +1 means win, 0 means draw or continuing.",
"# done: True if the game is over, i.e. someone won or it is a draw.",
"# Sum all rows ...",
"all_sums",
"=",
"[",
"np",
".",
"sum",
"(",
"board",
"[",
"i",
",",
":",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
"]",
"# ... all columns",
"all_sums",
".",
"extend",
"(",
"[",
"np",
".",
"sum",
"(",
"board",
"[",
":",
",",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
"]",
")",
"# and both diagonals.",
"all_sums",
".",
"append",
"(",
"np",
".",
"sum",
"(",
"[",
"board",
"[",
"i",
",",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
"]",
")",
")",
"all_sums",
".",
"append",
"(",
"np",
".",
"sum",
"(",
"[",
"board",
"[",
"i",
",",
"2",
"-",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
"]",
")",
")",
"if",
"-",
"3",
"in",
"all_sums",
":",
"return",
"-",
"1",
",",
"True",
"if",
"3",
"in",
"all_sums",
":",
"return",
"1",
",",
"True",
"done",
"=",
"True",
"if",
"get_open_spaces",
"(",
"board",
")",
":",
"done",
"=",
"False",
"return",
"0",
",",
"done"
] |
Given a representation of the board, returns reward and done.
|
[
"Given",
"a",
"representation",
"of",
"the",
"board",
"returns",
"reward",
"and",
"done",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/tic_tac_toe_env.py#L56-L80
|
train
|
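A demonstration of the scoring rules on a finished game, under the same assumed import; player +1 has completed the top row:

import numpy as np
from tensor2tensor.envs import tic_tac_toe_env

board = np.array([[1, 1, 1],
                  [-1, -1, 0],
                  [0, 0, 0]])
reward, done = tic_tac_toe_env.get_reward_and_done(board)
assert (reward, done) == (1, True)  # the top row sums to +3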
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
decode_hparams
|
def decode_hparams(overrides=""):
"""Hyperparameters for decoding."""
hp = hparam.HParams(
save_images=False,
log_results=True,
extra_length=100,
min_length_ratio=0.0,
batch_size=0,
beam_size=4,
alpha=0.6,
eos_penalty=0.0,
block_size=0,
guess_and_check_top_k=0,
guess_and_check_epsilon=-1,
insertion_parallel=False,
return_beams=False,
write_beam_scores=False,
max_input_size=-1,
identity_output=False,
num_samples=-1, # Number of examples to decode.
delimiter="\n",
decode_to_file="", # str. Prefix for filename to write decodings to.
decode_reference="", # str. Filename to read references from.
decode_in_memory=False,
      # How long decode should wait for the next checkpoint
decode_timeout_mins=240,
summaries_log_dir="decode", # Directory to write hook summaries.
shards=1, # How many shards of data to decode (treating 1 as None).
shard_id=0, # Which shard are we decoding if more than 1 above.
shards_start_offset=0, # Number of the first shard to decode.
shard_google_format=False, # If True use Google shard naming format.
num_decodes=1, # Number of times to go over the dataset.
force_decode_length=False,
display_decoded_images=False,
# Multi-problem decoding task id.
multiproblem_task_id=-1,
# Used for video decoding.
frames_per_second=10,
skip_eos_postprocess=False,
# Creates a blue/red border covering border_percent of the frame.
border_percent=2,
# Maximum number of videos displayed.
# number of videos displayed = max_display_outputs * max_display_decodes
max_display_outputs=10,
max_display_decodes=5,
# Used in computation of VGG feature based video metrics.
# Set this to be the path to a trained VGG ckpt to output
# useful metrics.
vgg_ckpt_path="",
# Used for MLPerf compliance logging.
mlperf_decode_step=0.0,
mlperf_threshold=25.0,
mlperf_success=False)
hp.parse(overrides)
return hp
|
python
|
def decode_hparams(overrides=""):
"""Hyperparameters for decoding."""
hp = hparam.HParams(
save_images=False,
log_results=True,
extra_length=100,
min_length_ratio=0.0,
batch_size=0,
beam_size=4,
alpha=0.6,
eos_penalty=0.0,
block_size=0,
guess_and_check_top_k=0,
guess_and_check_epsilon=-1,
insertion_parallel=False,
return_beams=False,
write_beam_scores=False,
max_input_size=-1,
identity_output=False,
num_samples=-1, # Number of examples to decode.
delimiter="\n",
decode_to_file="", # str. Prefix for filename to write decodings to.
decode_reference="", # str. Filename to read references from.
decode_in_memory=False,
      # How long decode should wait for the next checkpoint
decode_timeout_mins=240,
summaries_log_dir="decode", # Directory to write hook summaries.
shards=1, # How many shards of data to decode (treating 1 as None).
shard_id=0, # Which shard are we decoding if more than 1 above.
shards_start_offset=0, # Number of the first shard to decode.
shard_google_format=False, # If True use Google shard naming format.
num_decodes=1, # Number of times to go over the dataset.
force_decode_length=False,
display_decoded_images=False,
# Multi-problem decoding task id.
multiproblem_task_id=-1,
# Used for video decoding.
frames_per_second=10,
skip_eos_postprocess=False,
# Creates a blue/red border covering border_percent of the frame.
border_percent=2,
# Maximum number of videos displayed.
# number of videos displayed = max_display_outputs * max_display_decodes
max_display_outputs=10,
max_display_decodes=5,
# Used in computation of VGG feature based video metrics.
# Set this to be the path to a trained VGG ckpt to output
# useful metrics.
vgg_ckpt_path="",
# Used for MLPerf compliance logging.
mlperf_decode_step=0.0,
mlperf_threshold=25.0,
mlperf_success=False)
hp.parse(overrides)
return hp
|
[
"def",
"decode_hparams",
"(",
"overrides",
"=",
"\"\"",
")",
":",
"hp",
"=",
"hparam",
".",
"HParams",
"(",
"save_images",
"=",
"False",
",",
"log_results",
"=",
"True",
",",
"extra_length",
"=",
"100",
",",
"min_length_ratio",
"=",
"0.0",
",",
"batch_size",
"=",
"0",
",",
"beam_size",
"=",
"4",
",",
"alpha",
"=",
"0.6",
",",
"eos_penalty",
"=",
"0.0",
",",
"block_size",
"=",
"0",
",",
"guess_and_check_top_k",
"=",
"0",
",",
"guess_and_check_epsilon",
"=",
"-",
"1",
",",
"insertion_parallel",
"=",
"False",
",",
"return_beams",
"=",
"False",
",",
"write_beam_scores",
"=",
"False",
",",
"max_input_size",
"=",
"-",
"1",
",",
"identity_output",
"=",
"False",
",",
"num_samples",
"=",
"-",
"1",
",",
"# Number of examples to decode.",
"delimiter",
"=",
"\"\\n\"",
",",
"decode_to_file",
"=",
"\"\"",
",",
"# str. Prefix for filename to write decodings to.",
"decode_reference",
"=",
"\"\"",
",",
"# str. Filename to read references from.",
"decode_in_memory",
"=",
"False",
",",
"# How much decode should wait for the next checkpoint",
"decode_timeout_mins",
"=",
"240",
",",
"summaries_log_dir",
"=",
"\"decode\"",
",",
"# Directory to write hook summaries.",
"shards",
"=",
"1",
",",
"# How many shards of data to decode (treating 1 as None).",
"shard_id",
"=",
"0",
",",
"# Which shard are we decoding if more than 1 above.",
"shards_start_offset",
"=",
"0",
",",
"# Number of the first shard to decode.",
"shard_google_format",
"=",
"False",
",",
"# If True use Google shard naming format.",
"num_decodes",
"=",
"1",
",",
"# Number of times to go over the dataset.",
"force_decode_length",
"=",
"False",
",",
"display_decoded_images",
"=",
"False",
",",
"# Multi-problem decoding task id.",
"multiproblem_task_id",
"=",
"-",
"1",
",",
"# Used for video decoding.",
"frames_per_second",
"=",
"10",
",",
"skip_eos_postprocess",
"=",
"False",
",",
"# Creates a blue/red border covering border_percent of the frame.",
"border_percent",
"=",
"2",
",",
"# Maximum number of videos displayed.",
"# number of videos displayed = max_display_outputs * max_display_decodes",
"max_display_outputs",
"=",
"10",
",",
"max_display_decodes",
"=",
"5",
",",
"# Used in computation of VGG feature based video metrics.",
"# Set this to be the path to a trained VGG ckpt to output",
"# useful metrics.",
"vgg_ckpt_path",
"=",
"\"\"",
",",
"# Used for MLPerf compliance logging.",
"mlperf_decode_step",
"=",
"0.0",
",",
"mlperf_threshold",
"=",
"25.0",
",",
"mlperf_success",
"=",
"False",
")",
"hp",
".",
"parse",
"(",
"overrides",
")",
"return",
"hp"
] |
Hyperparameters for decoding.
|
[
"Hyperparameters",
"for",
"decoding",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L47-L101
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
log_decode_results
|
def log_decode_results(inputs,
outputs,
problem_name,
prediction_idx,
inputs_vocab,
targets_vocab,
targets=None,
save_images=False,
output_dir=None,
identity_output=False,
log_results=True,
skip_eos_postprocess=False):
"""Log inference results."""
# TODO(lukaszkaiser) refactor this into feature_encoder
is_video = "video" in problem_name or "gym" in problem_name
if is_video:
def fix_and_save_video(vid, prefix):
save_path_template = os.path.join(
output_dir,
"%s_%s_%05d_{:05d}.png" % (problem_name, prefix, prediction_idx))
# this is only required for predictions
if vid.shape[-1] == 1:
vid = np.squeeze(vid, axis=-1)
save_video(vid, save_path_template)
tf.logging.info("Saving video: {}".format(prediction_idx))
fix_and_save_video(inputs, "inputs")
fix_and_save_video(outputs, "outputs")
fix_and_save_video(targets, "targets")
is_image = "image" in problem_name
is_text2class = isinstance(registry.problem(problem_name),
text_problems.Text2ClassProblem)
skip_eos_postprocess = is_image or is_text2class or skip_eos_postprocess
decoded_inputs = None
if is_image and save_images:
save_path = os.path.join(
output_dir, "%s_prediction_%d.jpg" % (problem_name, prediction_idx))
show_and_save_image(inputs / 255., save_path)
elif inputs is not None and inputs_vocab:
if identity_output:
decoded_inputs = " ".join(map(str, inputs.flatten()))
else:
decoded_inputs = inputs_vocab.decode(_save_until_eos(
inputs, skip_eos_postprocess))
if log_results and not is_video:
tf.logging.info("Inference results INPUT: %s" % decoded_inputs)
decoded_targets = None
decoded_outputs = None
if identity_output:
decoded_outputs = " ".join(map(str, outputs.flatten()))
if targets is not None:
decoded_targets = " ".join(map(str, targets.flatten()))
else:
decoded_outputs = targets_vocab.decode(_save_until_eos(
outputs, skip_eos_postprocess))
if targets is not None and log_results:
decoded_targets = targets_vocab.decode(_save_until_eos(
targets, skip_eos_postprocess))
if log_results and not is_video:
tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs)
if targets is not None and log_results and not is_video:
tf.logging.info("Inference results TARGET: %s" % decoded_targets)
return decoded_inputs, decoded_outputs, decoded_targets
|
python
|
def log_decode_results(inputs,
outputs,
problem_name,
prediction_idx,
inputs_vocab,
targets_vocab,
targets=None,
save_images=False,
output_dir=None,
identity_output=False,
log_results=True,
skip_eos_postprocess=False):
"""Log inference results."""
# TODO(lukaszkaiser) refactor this into feature_encoder
is_video = "video" in problem_name or "gym" in problem_name
if is_video:
def fix_and_save_video(vid, prefix):
save_path_template = os.path.join(
output_dir,
"%s_%s_%05d_{:05d}.png" % (problem_name, prefix, prediction_idx))
# this is only required for predictions
if vid.shape[-1] == 1:
vid = np.squeeze(vid, axis=-1)
save_video(vid, save_path_template)
tf.logging.info("Saving video: {}".format(prediction_idx))
fix_and_save_video(inputs, "inputs")
fix_and_save_video(outputs, "outputs")
fix_and_save_video(targets, "targets")
is_image = "image" in problem_name
is_text2class = isinstance(registry.problem(problem_name),
text_problems.Text2ClassProblem)
skip_eos_postprocess = is_image or is_text2class or skip_eos_postprocess
decoded_inputs = None
if is_image and save_images:
save_path = os.path.join(
output_dir, "%s_prediction_%d.jpg" % (problem_name, prediction_idx))
show_and_save_image(inputs / 255., save_path)
elif inputs is not None and inputs_vocab:
if identity_output:
decoded_inputs = " ".join(map(str, inputs.flatten()))
else:
decoded_inputs = inputs_vocab.decode(_save_until_eos(
inputs, skip_eos_postprocess))
if log_results and not is_video:
tf.logging.info("Inference results INPUT: %s" % decoded_inputs)
decoded_targets = None
decoded_outputs = None
if identity_output:
decoded_outputs = " ".join(map(str, outputs.flatten()))
if targets is not None:
decoded_targets = " ".join(map(str, targets.flatten()))
else:
decoded_outputs = targets_vocab.decode(_save_until_eos(
outputs, skip_eos_postprocess))
if targets is not None and log_results:
decoded_targets = targets_vocab.decode(_save_until_eos(
targets, skip_eos_postprocess))
if log_results and not is_video:
tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs)
if targets is not None and log_results and not is_video:
tf.logging.info("Inference results TARGET: %s" % decoded_targets)
return decoded_inputs, decoded_outputs, decoded_targets
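A self-contained sketch of the two output-decoding paths above (identity_output on vs. off), using a hypothetical stub vocabulary in place of a real tensor2tensor text encoder:

import numpy as np

class StubVocab(object):
  """Hypothetical stand-in for a targets_vocab text encoder."""

  def decode(self, ids):
    return " ".join("tok%d" % i for i in ids)

outputs = np.array([[5, 7, 9]])
# identity_output=True: raw token ids joined into a string.
print(" ".join(map(str, outputs.flatten())))   # 5 7 9
# identity_output=False: ids decoded through the vocabulary.
print(StubVocab().decode(outputs.flatten()))   # tok5 tok7 tok9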
|
[
"def",
"log_decode_results",
"(",
"inputs",
",",
"outputs",
",",
"problem_name",
",",
"prediction_idx",
",",
"inputs_vocab",
",",
"targets_vocab",
",",
"targets",
"=",
"None",
",",
"save_images",
"=",
"False",
",",
"output_dir",
"=",
"None",
",",
"identity_output",
"=",
"False",
",",
"log_results",
"=",
"True",
",",
"skip_eos_postprocess",
"=",
"False",
")",
":",
"# TODO(lukaszkaiser) refactor this into feature_encoder",
"is_video",
"=",
"\"video\"",
"in",
"problem_name",
"or",
"\"gym\"",
"in",
"problem_name",
"if",
"is_video",
":",
"def",
"fix_and_save_video",
"(",
"vid",
",",
"prefix",
")",
":",
"save_path_template",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"\"%s_%s_%05d_{:05d}.png\"",
"%",
"(",
"problem_name",
",",
"prefix",
",",
"prediction_idx",
")",
")",
"# this is only required for predictions",
"if",
"vid",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"1",
":",
"vid",
"=",
"np",
".",
"squeeze",
"(",
"vid",
",",
"axis",
"=",
"-",
"1",
")",
"save_video",
"(",
"vid",
",",
"save_path_template",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Saving video: {}\"",
".",
"format",
"(",
"prediction_idx",
")",
")",
"fix_and_save_video",
"(",
"inputs",
",",
"\"inputs\"",
")",
"fix_and_save_video",
"(",
"outputs",
",",
"\"outputs\"",
")",
"fix_and_save_video",
"(",
"targets",
",",
"\"targets\"",
")",
"is_image",
"=",
"\"image\"",
"in",
"problem_name",
"is_text2class",
"=",
"isinstance",
"(",
"registry",
".",
"problem",
"(",
"problem_name",
")",
",",
"text_problems",
".",
"Text2ClassProblem",
")",
"skip_eos_postprocess",
"=",
"is_image",
"or",
"is_text2class",
"or",
"skip_eos_postprocess",
"decoded_inputs",
"=",
"None",
"if",
"is_image",
"and",
"save_images",
":",
"save_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"\"%s_prediction_%d.jpg\"",
"%",
"(",
"problem_name",
",",
"prediction_idx",
")",
")",
"show_and_save_image",
"(",
"inputs",
"/",
"255.",
",",
"save_path",
")",
"elif",
"inputs",
"is",
"not",
"None",
"and",
"inputs_vocab",
":",
"if",
"identity_output",
":",
"decoded_inputs",
"=",
"\" \"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"inputs",
".",
"flatten",
"(",
")",
")",
")",
"else",
":",
"decoded_inputs",
"=",
"inputs_vocab",
".",
"decode",
"(",
"_save_until_eos",
"(",
"inputs",
",",
"skip_eos_postprocess",
")",
")",
"if",
"log_results",
"and",
"not",
"is_video",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Inference results INPUT: %s\"",
"%",
"decoded_inputs",
")",
"decoded_targets",
"=",
"None",
"decoded_outputs",
"=",
"None",
"if",
"identity_output",
":",
"decoded_outputs",
"=",
"\" \"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"outputs",
".",
"flatten",
"(",
")",
")",
")",
"if",
"targets",
"is",
"not",
"None",
":",
"decoded_targets",
"=",
"\" \"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"targets",
".",
"flatten",
"(",
")",
")",
")",
"else",
":",
"decoded_outputs",
"=",
"targets_vocab",
".",
"decode",
"(",
"_save_until_eos",
"(",
"outputs",
",",
"skip_eos_postprocess",
")",
")",
"if",
"targets",
"is",
"not",
"None",
"and",
"log_results",
":",
"decoded_targets",
"=",
"targets_vocab",
".",
"decode",
"(",
"_save_until_eos",
"(",
"targets",
",",
"skip_eos_postprocess",
")",
")",
"if",
"log_results",
"and",
"not",
"is_video",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Inference results OUTPUT: %s\"",
"%",
"decoded_outputs",
")",
"if",
"targets",
"is",
"not",
"None",
"and",
"log_results",
"and",
"not",
"is_video",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Inference results TARGET: %s\"",
"%",
"decoded_targets",
")",
"return",
"decoded_inputs",
",",
"decoded_outputs",
",",
"decoded_targets"
] |
Log inference results.
|
[
"Log",
"inference",
"results",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L104-L170
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
decode_from_dataset
|
def decode_from_dataset(estimator,
problem_name,
hparams,
decode_hp,
decode_to_file=None,
dataset_split=None,
checkpoint_path=None):
"""Perform decoding from dataset."""
tf.logging.info("Performing local inference from dataset for %s.",
str(problem_name))
# We assume that worker_id corresponds to shard number.
shard = decode_hp.shard_id if decode_hp.shards > 1 else None
# Setup output directory for any artifacts that may be written out.
output_dir = os.path.join(estimator.model_dir, "decode")
tf.gfile.MakeDirs(output_dir)
# If decode_hp.batch_size is specified, use a fixed batch size
if decode_hp.batch_size:
hparams.batch_size = decode_hp.batch_size
hparams.use_fixed_batch_size = True
dataset_kwargs = {
"shard": shard,
"dataset_split": dataset_split,
"max_records": decode_hp.num_samples
}
# Build the inference input function
problem = hparams.problem
infer_input_fn = problem.make_estimator_input_fn(
tf.estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs)
predictions, output_dirs = [], []
for decode_id in range(decode_hp.num_decodes):
tf.logging.info("Decoding {}".format(decode_id))
# Create decode directory if not in-memory decoding.
if not decode_hp.decode_in_memory:
output_dir = os.path.join(estimator.model_dir, "decode_%05d" % decode_id)
tf.gfile.MakeDirs(output_dir)
output_dirs.append(output_dir)
result = decode_once(estimator,
problem_name,
hparams,
infer_input_fn,
decode_hp,
decode_to_file,
output_dir,
log_results=decode_hp.log_results,
checkpoint_path=checkpoint_path)
if decode_hp.decode_in_memory:
output_dirs = [output_dir]
predictions.append(result)
if decode_hp.decode_to_file:
decode_hp.decode_to_file = _decode_filename(
decode_hp.decode_to_file, problem_name, decode_hp)
run_postdecode_hooks(DecodeHookArgs(
estimator=estimator,
problem=problem,
output_dirs=output_dirs,
hparams=hparams,
decode_hparams=decode_hp,
predictions=predictions
), dataset_split)
return predictions
|
python
|
def decode_from_dataset(estimator,
problem_name,
hparams,
decode_hp,
decode_to_file=None,
dataset_split=None,
checkpoint_path=None):
"""Perform decoding from dataset."""
tf.logging.info("Performing local inference from dataset for %s.",
str(problem_name))
# We assume that worker_id corresponds to shard number.
shard = decode_hp.shard_id if decode_hp.shards > 1 else None
# Setup output directory for any artifacts that may be written out.
output_dir = os.path.join(estimator.model_dir, "decode")
tf.gfile.MakeDirs(output_dir)
# If decode_hp.batch_size is specified, use a fixed batch size
if decode_hp.batch_size:
hparams.batch_size = decode_hp.batch_size
hparams.use_fixed_batch_size = True
dataset_kwargs = {
"shard": shard,
"dataset_split": dataset_split,
"max_records": decode_hp.num_samples
}
# Build the inference input function
problem = hparams.problem
infer_input_fn = problem.make_estimator_input_fn(
tf.estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs)
predictions, output_dirs = [], []
for decode_id in range(decode_hp.num_decodes):
tf.logging.info("Decoding {}".format(decode_id))
# Create decode directory if not in-memory decoding.
if not decode_hp.decode_in_memory:
output_dir = os.path.join(estimator.model_dir, "decode_%05d" % decode_id)
tf.gfile.MakeDirs(output_dir)
output_dirs.append(output_dir)
result = decode_once(estimator,
problem_name,
hparams,
infer_input_fn,
decode_hp,
decode_to_file,
output_dir,
log_results=decode_hp.log_results,
checkpoint_path=checkpoint_path)
if decode_hp.decode_in_memory:
output_dirs = [output_dir]
predictions.append(result)
if decode_hp.decode_to_file:
decode_hp.decode_to_file = _decode_filename(
decode_hp.decode_to_file, problem_name, decode_hp)
run_postdecode_hooks(DecodeHookArgs(
estimator=estimator,
problem=problem,
output_dirs=output_dirs,
hparams=hparams,
decode_hparams=decode_hp,
predictions=predictions
), dataset_split)
return predictions
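In isolation, the per-decode directory naming used above when not decoding in memory; the model_dir path here is illustrative, not a fixed convention:

import os

model_dir = "/tmp/t2t_model"  # illustrative path
num_decodes = 3
for decode_id in range(num_decodes):
  # One artifact directory per pass over the dataset.
  print(os.path.join(model_dir, "decode_%05d" % decode_id))
# /tmp/t2t_model/decode_00000
# /tmp/t2t_model/decode_00001
# /tmp/t2t_model/decode_00002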
|
[
"def",
"decode_from_dataset",
"(",
"estimator",
",",
"problem_name",
",",
"hparams",
",",
"decode_hp",
",",
"decode_to_file",
"=",
"None",
",",
"dataset_split",
"=",
"None",
",",
"checkpoint_path",
"=",
"None",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Performing local inference from dataset for %s.\"",
",",
"str",
"(",
"problem_name",
")",
")",
"# We assume that worker_id corresponds to shard number.",
"shard",
"=",
"decode_hp",
".",
"shard_id",
"if",
"decode_hp",
".",
"shards",
">",
"1",
"else",
"None",
"# Setup output directory for any artifacts that may be written out.",
"output_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"estimator",
".",
"model_dir",
",",
"\"decode\"",
")",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"output_dir",
")",
"# If decode_hp.batch_size is specified, use a fixed batch size",
"if",
"decode_hp",
".",
"batch_size",
":",
"hparams",
".",
"batch_size",
"=",
"decode_hp",
".",
"batch_size",
"hparams",
".",
"use_fixed_batch_size",
"=",
"True",
"dataset_kwargs",
"=",
"{",
"\"shard\"",
":",
"shard",
",",
"\"dataset_split\"",
":",
"dataset_split",
",",
"\"max_records\"",
":",
"decode_hp",
".",
"num_samples",
"}",
"# Build the inference input function",
"problem",
"=",
"hparams",
".",
"problem",
"infer_input_fn",
"=",
"problem",
".",
"make_estimator_input_fn",
"(",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"PREDICT",
",",
"hparams",
",",
"dataset_kwargs",
"=",
"dataset_kwargs",
")",
"predictions",
",",
"output_dirs",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"decode_id",
"in",
"range",
"(",
"decode_hp",
".",
"num_decodes",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Decoding {}\"",
".",
"format",
"(",
"decode_id",
")",
")",
"# Create decode directory if not in-memory decoding.",
"if",
"not",
"decode_hp",
".",
"decode_in_memory",
":",
"output_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"estimator",
".",
"model_dir",
",",
"\"decode_%05d\"",
"%",
"decode_id",
")",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"output_dir",
")",
"output_dirs",
".",
"append",
"(",
"output_dir",
")",
"result",
"=",
"decode_once",
"(",
"estimator",
",",
"problem_name",
",",
"hparams",
",",
"infer_input_fn",
",",
"decode_hp",
",",
"decode_to_file",
",",
"output_dir",
",",
"log_results",
"=",
"decode_hp",
".",
"log_results",
",",
"checkpoint_path",
"=",
"checkpoint_path",
")",
"if",
"decode_hp",
".",
"decode_in_memory",
":",
"output_dirs",
"=",
"[",
"output_dir",
"]",
"predictions",
".",
"append",
"(",
"result",
")",
"if",
"decode_hp",
".",
"decode_to_file",
":",
"decode_hp",
".",
"decode_to_file",
"=",
"_decode_filename",
"(",
"decode_hp",
".",
"decode_to_file",
",",
"problem_name",
",",
"decode_hp",
")",
"run_postdecode_hooks",
"(",
"DecodeHookArgs",
"(",
"estimator",
"=",
"estimator",
",",
"problem",
"=",
"problem",
",",
"output_dirs",
"=",
"output_dirs",
",",
"hparams",
"=",
"hparams",
",",
"decode_hparams",
"=",
"decode_hp",
",",
"predictions",
"=",
"predictions",
")",
",",
"dataset_split",
")",
"return",
"predictions"
] |
Perform decoding from dataset.
|
[
"Perform",
"decoding",
"from",
"dataset",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L173-L242
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
decode_once
|
def decode_once(estimator,
problem_name,
hparams,
infer_input_fn,
decode_hp,
decode_to_file,
output_dir,
log_results=True,
checkpoint_path=None):
"""Decodes once.
Args:
estimator: tf.estimator.Estimator instance. Used to generate encoded
predictions.
problem_name: str. Name of problem.
hparams: HParams instance. HParams for model training.
infer_input_fn: zero-arg function. Input function for estimator.
decode_hp: HParams instance. See decode_hparams() above.
    decode_to_file: str. Prefix for filenames. Used to generate filenames to
which decoded predictions are written.
output_dir: str. Output directory. Only used for writing images.
log_results: bool. If False, return encoded predictions without any
further processing.
checkpoint_path: str. Path to load model checkpoint from. If unspecified,
Estimator's default is used.
Returns:
If decode_hp.decode_in_memory is True:
List of dicts, one per example. Values are either numpy arrays or decoded
strings.
If decode_hp.decode_in_memory is False:
An empty list.
"""
# Get the predictions as an iterable
predictions = estimator.predict(infer_input_fn,
checkpoint_path=checkpoint_path)
if not log_results:
return list(predictions)
# Prepare output file writers if decode_to_file passed
decode_to_file = decode_to_file or decode_hp.decode_to_file
if decode_to_file:
output_filepath = _decode_filename(decode_to_file, problem_name, decode_hp)
parts = output_filepath.split(".")
parts[-1] = "targets"
target_filepath = ".".join(parts)
parts[-1] = "inputs"
input_filepath = ".".join(parts)
output_file = tf.gfile.Open(output_filepath, "w")
target_file = tf.gfile.Open(target_filepath, "w")
input_file = tf.gfile.Open(input_filepath, "w")
problem_hparams = hparams.problem_hparams
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
has_input = "inputs" in problem_hparams.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = problem_hparams.vocabulary[inputs_vocab_key]
targets_vocab = problem_hparams.vocabulary["targets"]
num_eval_samples = 0
# all_outputs[i][j] = (input: str, output: str, target: str). Input,
# decoded output, and target strings for example i, beam rank j.
all_outputs = []
for num_predictions, prediction in enumerate(predictions):
num_eval_samples += 1
num_predictions += 1
inputs = prediction.get("inputs")
targets = prediction.get("targets")
outputs = prediction.get("outputs")
# Log predictions
decoded_outputs = [] # [(str, str, str)]. See all_outputs above.
if decode_hp.decode_in_memory:
all_outputs.append(decoded_outputs)
decoded_scores = []
if decode_hp.return_beams:
output_beams = np.split(outputs, decode_hp.beam_size, axis=0)
scores = None
if "scores" in prediction:
scores = np.split(prediction["scores"], decode_hp.beam_size, axis=0)
for i, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % i)
score = scores and scores[i]
decoded = log_decode_results(
inputs,
beam,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
output_dir=output_dir,
identity_output=decode_hp.identity_output,
targets=targets,
log_results=log_results)
decoded_outputs.append(decoded)
if decode_hp.write_beam_scores:
decoded_scores.append(score)
else:
decoded = log_decode_results(
inputs,
outputs,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
output_dir=output_dir,
identity_output=decode_hp.identity_output,
targets=targets,
log_results=log_results,
skip_eos_postprocess=decode_hp.skip_eos_postprocess)
decoded_outputs.append(decoded)
# Write out predictions if decode_to_file passed
if decode_to_file:
for i, (d_input, d_output, d_target) in enumerate(decoded_outputs):
# Skip if all padding
if d_input and re.match("^({})+$".format(text_encoder.PAD), d_input):
continue
beam_score_str = ""
if decode_hp.write_beam_scores:
beam_score_str = "\t%.2f" % decoded_scores[i]
output_file.write(str(d_output) + beam_score_str + decode_hp.delimiter)
target_file.write(str(d_target) + decode_hp.delimiter)
input_file.write(str(d_input) + decode_hp.delimiter)
if (decode_hp.num_samples >= 0 and
num_predictions >= decode_hp.num_samples):
break
mlperf_log.transformer_print(key=mlperf_log.EVAL_SIZE,
value=num_eval_samples,
hparams=hparams)
if decode_to_file:
output_file.close()
target_file.close()
input_file.close()
return all_outputs
|
python
|
def decode_once(estimator,
problem_name,
hparams,
infer_input_fn,
decode_hp,
decode_to_file,
output_dir,
log_results=True,
checkpoint_path=None):
"""Decodes once.
Args:
estimator: tf.estimator.Estimator instance. Used to generate encoded
predictions.
problem_name: str. Name of problem.
hparams: HParams instance. HParams for model training.
infer_input_fn: zero-arg function. Input function for estimator.
decode_hp: HParams instance. See decode_hparams() above.
    decode_to_file: str. Prefix for filenames. Used to generate filenames to
which decoded predictions are written.
output_dir: str. Output directory. Only used for writing images.
log_results: bool. If False, return encoded predictions without any
further processing.
checkpoint_path: str. Path to load model checkpoint from. If unspecified,
Estimator's default is used.
Returns:
If decode_hp.decode_in_memory is True:
List of dicts, one per example. Values are either numpy arrays or decoded
strings.
If decode_hp.decode_in_memory is False:
An empty list.
"""
# Get the predictions as an iterable
predictions = estimator.predict(infer_input_fn,
checkpoint_path=checkpoint_path)
if not log_results:
return list(predictions)
# Prepare output file writers if decode_to_file passed
decode_to_file = decode_to_file or decode_hp.decode_to_file
if decode_to_file:
output_filepath = _decode_filename(decode_to_file, problem_name, decode_hp)
parts = output_filepath.split(".")
parts[-1] = "targets"
target_filepath = ".".join(parts)
parts[-1] = "inputs"
input_filepath = ".".join(parts)
output_file = tf.gfile.Open(output_filepath, "w")
target_file = tf.gfile.Open(target_filepath, "w")
input_file = tf.gfile.Open(input_filepath, "w")
problem_hparams = hparams.problem_hparams
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
has_input = "inputs" in problem_hparams.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = problem_hparams.vocabulary[inputs_vocab_key]
targets_vocab = problem_hparams.vocabulary["targets"]
num_eval_samples = 0
# all_outputs[i][j] = (input: str, output: str, target: str). Input,
# decoded output, and target strings for example i, beam rank j.
all_outputs = []
for num_predictions, prediction in enumerate(predictions):
num_eval_samples += 1
num_predictions += 1
inputs = prediction.get("inputs")
targets = prediction.get("targets")
outputs = prediction.get("outputs")
# Log predictions
decoded_outputs = [] # [(str, str, str)]. See all_outputs above.
if decode_hp.decode_in_memory:
all_outputs.append(decoded_outputs)
decoded_scores = []
if decode_hp.return_beams:
output_beams = np.split(outputs, decode_hp.beam_size, axis=0)
scores = None
if "scores" in prediction:
scores = np.split(prediction["scores"], decode_hp.beam_size, axis=0)
for i, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % i)
score = scores and scores[i]
decoded = log_decode_results(
inputs,
beam,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
output_dir=output_dir,
identity_output=decode_hp.identity_output,
targets=targets,
log_results=log_results)
decoded_outputs.append(decoded)
if decode_hp.write_beam_scores:
decoded_scores.append(score)
else:
decoded = log_decode_results(
inputs,
outputs,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
output_dir=output_dir,
identity_output=decode_hp.identity_output,
targets=targets,
log_results=log_results,
skip_eos_postprocess=decode_hp.skip_eos_postprocess)
decoded_outputs.append(decoded)
# Write out predictions if decode_to_file passed
if decode_to_file:
for i, (d_input, d_output, d_target) in enumerate(decoded_outputs):
# Skip if all padding
if d_input and re.match("^({})+$".format(text_encoder.PAD), d_input):
continue
beam_score_str = ""
if decode_hp.write_beam_scores:
beam_score_str = "\t%.2f" % decoded_scores[i]
output_file.write(str(d_output) + beam_score_str + decode_hp.delimiter)
target_file.write(str(d_target) + decode_hp.delimiter)
input_file.write(str(d_input) + decode_hp.delimiter)
if (decode_hp.num_samples >= 0 and
num_predictions >= decode_hp.num_samples):
break
mlperf_log.transformer_print(key=mlperf_log.EVAL_SIZE,
value=num_eval_samples,
hparams=hparams)
if decode_to_file:
output_file.close()
target_file.close()
input_file.close()
return all_outputs
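The companion .targets/.inputs filepaths above are derived by rewriting the last dot-separated component of the output filepath; a runnable sketch with an illustrative filename:

output_filepath = "wmt.transformer.base.translate.beam4.alpha0.6.decodes"
parts = output_filepath.split(".")
parts[-1] = "targets"
print(".".join(parts))  # wmt.transformer.base.translate.beam4.alpha0.6.targets
parts[-1] = "inputs"
print(".".join(parts))  # wmt.transformer.base.translate.beam4.alpha0.6.inputs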
|
[
"def",
"decode_once",
"(",
"estimator",
",",
"problem_name",
",",
"hparams",
",",
"infer_input_fn",
",",
"decode_hp",
",",
"decode_to_file",
",",
"output_dir",
",",
"log_results",
"=",
"True",
",",
"checkpoint_path",
"=",
"None",
")",
":",
"# Get the predictions as an iterable",
"predictions",
"=",
"estimator",
".",
"predict",
"(",
"infer_input_fn",
",",
"checkpoint_path",
"=",
"checkpoint_path",
")",
"if",
"not",
"log_results",
":",
"return",
"list",
"(",
"predictions",
")",
"# Prepare output file writers if decode_to_file passed",
"decode_to_file",
"=",
"decode_to_file",
"or",
"decode_hp",
".",
"decode_to_file",
"if",
"decode_to_file",
":",
"output_filepath",
"=",
"_decode_filename",
"(",
"decode_to_file",
",",
"problem_name",
",",
"decode_hp",
")",
"parts",
"=",
"output_filepath",
".",
"split",
"(",
"\".\"",
")",
"parts",
"[",
"-",
"1",
"]",
"=",
"\"targets\"",
"target_filepath",
"=",
"\".\"",
".",
"join",
"(",
"parts",
")",
"parts",
"[",
"-",
"1",
"]",
"=",
"\"inputs\"",
"input_filepath",
"=",
"\".\"",
".",
"join",
"(",
"parts",
")",
"output_file",
"=",
"tf",
".",
"gfile",
".",
"Open",
"(",
"output_filepath",
",",
"\"w\"",
")",
"target_file",
"=",
"tf",
".",
"gfile",
".",
"Open",
"(",
"target_filepath",
",",
"\"w\"",
")",
"input_file",
"=",
"tf",
".",
"gfile",
".",
"Open",
"(",
"input_filepath",
",",
"\"w\"",
")",
"problem_hparams",
"=",
"hparams",
".",
"problem_hparams",
"# Inputs vocabulary is set to targets if there are no inputs in the problem,",
"# e.g., for language models where the inputs are just a prefix of targets.",
"has_input",
"=",
"\"inputs\"",
"in",
"problem_hparams",
".",
"vocabulary",
"inputs_vocab_key",
"=",
"\"inputs\"",
"if",
"has_input",
"else",
"\"targets\"",
"inputs_vocab",
"=",
"problem_hparams",
".",
"vocabulary",
"[",
"inputs_vocab_key",
"]",
"targets_vocab",
"=",
"problem_hparams",
".",
"vocabulary",
"[",
"\"targets\"",
"]",
"num_eval_samples",
"=",
"0",
"# all_outputs[i][j] = (input: str, output: str, target: str). Input,",
"# decoded output, and target strings for example i, beam rank j.",
"all_outputs",
"=",
"[",
"]",
"for",
"num_predictions",
",",
"prediction",
"in",
"enumerate",
"(",
"predictions",
")",
":",
"num_eval_samples",
"+=",
"1",
"num_predictions",
"+=",
"1",
"inputs",
"=",
"prediction",
".",
"get",
"(",
"\"inputs\"",
")",
"targets",
"=",
"prediction",
".",
"get",
"(",
"\"targets\"",
")",
"outputs",
"=",
"prediction",
".",
"get",
"(",
"\"outputs\"",
")",
"# Log predictions",
"decoded_outputs",
"=",
"[",
"]",
"# [(str, str, str)]. See all_outputs above.",
"if",
"decode_hp",
".",
"decode_in_memory",
":",
"all_outputs",
".",
"append",
"(",
"decoded_outputs",
")",
"decoded_scores",
"=",
"[",
"]",
"if",
"decode_hp",
".",
"return_beams",
":",
"output_beams",
"=",
"np",
".",
"split",
"(",
"outputs",
",",
"decode_hp",
".",
"beam_size",
",",
"axis",
"=",
"0",
")",
"scores",
"=",
"None",
"if",
"\"scores\"",
"in",
"prediction",
":",
"scores",
"=",
"np",
".",
"split",
"(",
"prediction",
"[",
"\"scores\"",
"]",
",",
"decode_hp",
".",
"beam_size",
",",
"axis",
"=",
"0",
")",
"for",
"i",
",",
"beam",
"in",
"enumerate",
"(",
"output_beams",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"BEAM %d:\"",
"%",
"i",
")",
"score",
"=",
"scores",
"and",
"scores",
"[",
"i",
"]",
"decoded",
"=",
"log_decode_results",
"(",
"inputs",
",",
"beam",
",",
"problem_name",
",",
"num_predictions",
",",
"inputs_vocab",
",",
"targets_vocab",
",",
"save_images",
"=",
"decode_hp",
".",
"save_images",
",",
"output_dir",
"=",
"output_dir",
",",
"identity_output",
"=",
"decode_hp",
".",
"identity_output",
",",
"targets",
"=",
"targets",
",",
"log_results",
"=",
"log_results",
")",
"decoded_outputs",
".",
"append",
"(",
"decoded",
")",
"if",
"decode_hp",
".",
"write_beam_scores",
":",
"decoded_scores",
".",
"append",
"(",
"score",
")",
"else",
":",
"decoded",
"=",
"log_decode_results",
"(",
"inputs",
",",
"outputs",
",",
"problem_name",
",",
"num_predictions",
",",
"inputs_vocab",
",",
"targets_vocab",
",",
"save_images",
"=",
"decode_hp",
".",
"save_images",
",",
"output_dir",
"=",
"output_dir",
",",
"identity_output",
"=",
"decode_hp",
".",
"identity_output",
",",
"targets",
"=",
"targets",
",",
"log_results",
"=",
"log_results",
",",
"skip_eos_postprocess",
"=",
"decode_hp",
".",
"skip_eos_postprocess",
")",
"decoded_outputs",
".",
"append",
"(",
"decoded",
")",
"# Write out predictions if decode_to_file passed",
"if",
"decode_to_file",
":",
"for",
"i",
",",
"(",
"d_input",
",",
"d_output",
",",
"d_target",
")",
"in",
"enumerate",
"(",
"decoded_outputs",
")",
":",
"# Skip if all padding",
"if",
"d_input",
"and",
"re",
".",
"match",
"(",
"\"^({})+$\"",
".",
"format",
"(",
"text_encoder",
".",
"PAD",
")",
",",
"d_input",
")",
":",
"continue",
"beam_score_str",
"=",
"\"\"",
"if",
"decode_hp",
".",
"write_beam_scores",
":",
"beam_score_str",
"=",
"\"\\t%.2f\"",
"%",
"decoded_scores",
"[",
"i",
"]",
"output_file",
".",
"write",
"(",
"str",
"(",
"d_output",
")",
"+",
"beam_score_str",
"+",
"decode_hp",
".",
"delimiter",
")",
"target_file",
".",
"write",
"(",
"str",
"(",
"d_target",
")",
"+",
"decode_hp",
".",
"delimiter",
")",
"input_file",
".",
"write",
"(",
"str",
"(",
"d_input",
")",
"+",
"decode_hp",
".",
"delimiter",
")",
"if",
"(",
"decode_hp",
".",
"num_samples",
">=",
"0",
"and",
"num_predictions",
">=",
"decode_hp",
".",
"num_samples",
")",
":",
"break",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"EVAL_SIZE",
",",
"value",
"=",
"num_eval_samples",
",",
"hparams",
"=",
"hparams",
")",
"if",
"decode_to_file",
":",
"output_file",
".",
"close",
"(",
")",
"target_file",
".",
"close",
"(",
")",
"input_file",
".",
"close",
"(",
")",
"return",
"all_outputs"
] |
Decodes once.
Args:
estimator: tf.estimator.Estimator instance. Used to generate encoded
predictions.
problem_name: str. Name of problem.
hparams: HParams instance. HParams for model training.
infer_input_fn: zero-arg function. Input function for estimator.
decode_hp: HParams instance. See decode_hparams() above.
    decode_to_file: str. Prefix for filenames. Used to generate filenames to
which decoded predictions are written.
output_dir: str. Output directory. Only used for writing images.
log_results: bool. If False, return encoded predictions without any
further processing.
checkpoint_path: str. Path to load model checkpoint from. If unspecified,
Estimator's default is used.
Returns:
If decode_hp.decode_in_memory is True:
List of dicts, one per example. Values are either numpy arrays or decoded
strings.
If decode_hp.decode_in_memory is False:
An empty list.
|
[
"Decodes",
"once",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L245-L391
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
decode_from_file
|
def decode_from_file(estimator,
filename,
hparams,
decode_hp,
decode_to_file=None,
checkpoint_path=None):
"""Compute predictions on entries in filename and write them out."""
if not decode_hp.batch_size:
decode_hp.batch_size = 32
tf.logging.info(
"decode_hp.batch_size not specified; default=%d" % decode_hp.batch_size)
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
p_hp = hparams.problem_hparams
has_input = "inputs" in p_hp.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = p_hp.vocabulary[inputs_vocab_key]
targets_vocab = p_hp.vocabulary["targets"]
problem_name = FLAGS.problem
filename = _add_shard_to_filename(filename, decode_hp)
tf.logging.info("Performing decoding from file (%s)." % filename)
if has_input:
sorted_inputs, sorted_keys = _get_sorted_inputs(
filename, decode_hp.delimiter)
else:
sorted_inputs = _get_language_modeling_inputs(
filename, decode_hp.delimiter, repeat=decode_hp.num_decodes)
sorted_keys = range(len(sorted_inputs))
num_sentences = len(sorted_inputs)
num_decode_batches = (num_sentences - 1) // decode_hp.batch_size + 1
if estimator.config.use_tpu:
length = getattr(hparams, "length", 0) or hparams.max_length
batch_ids = []
for line in sorted_inputs:
if has_input:
ids = inputs_vocab.encode(line.strip()) + [1]
else:
ids = targets_vocab.encode(line)
if len(ids) < length:
ids.extend([0] * (length - len(ids)))
else:
ids = ids[:length]
batch_ids.append(ids)
np_ids = np.array(batch_ids, dtype=np.int32)
def input_fn(params):
batch_size = params["batch_size"]
dataset = tf.data.Dataset.from_tensor_slices({"inputs": np_ids})
dataset = dataset.map(
lambda ex: {"inputs": tf.reshape(ex["inputs"], (length, 1, 1))})
dataset = dataset.batch(batch_size)
return dataset
else:
def input_fn():
input_gen = _decode_batch_input_fn(
num_decode_batches, sorted_inputs,
inputs_vocab, decode_hp.batch_size,
decode_hp.max_input_size,
task_id=decode_hp.multiproblem_task_id, has_input=has_input)
gen_fn = make_input_fn_from_generator(input_gen)
example = gen_fn()
return _decode_input_tensor_to_features_dict(example, hparams)
decodes = []
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
start_time = time.time()
total_time_per_step = 0
total_cnt = 0
def timer(gen):
while True:
try:
start_time = time.time()
item = next(gen)
elapsed_time = time.time() - start_time
yield elapsed_time, item
except StopIteration:
break
for elapsed_time, result in timer(result_iter):
if decode_hp.return_beams:
beam_decodes = []
beam_scores = []
output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
scores = None
if "scores" in result:
if np.isscalar(result["scores"]):
result["scores"] = result["scores"].reshape(1)
scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
for k, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % k)
score = scores and scores[k]
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
beam,
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results,
skip_eos_postprocess=decode_hp.skip_eos_postprocess)
beam_decodes.append(decoded_outputs)
if decode_hp.write_beam_scores:
beam_scores.append(score)
if decode_hp.write_beam_scores:
decodes.append("\t".join([
"\t".join([d, "%.2f" % s])
for d, s in zip(beam_decodes, beam_scores)
]))
else:
decodes.append("\t".join(beam_decodes))
else:
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
result["outputs"],
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results,
skip_eos_postprocess=decode_hp.skip_eos_postprocess)
decodes.append(decoded_outputs)
total_time_per_step += elapsed_time
total_cnt += result["outputs"].shape[-1]
duration = time.time() - start_time
tf.logging.info("Elapsed Time: %5.5f" % duration)
tf.logging.info("Averaged Single Token Generation Time: %5.7f "
"(time %5.7f count %d)" %
(total_time_per_step / total_cnt,
total_time_per_step, total_cnt))
if decode_hp.batch_size == 1:
tf.logging.info("Inference time %.4f seconds "
"(Latency = %.4f ms/setences)" %
(duration, 1000.0*duration/num_sentences))
else:
tf.logging.info("Inference time %.4f seconds "
"(Throughput = %.4f sentences/second)" %
(duration, num_sentences/duration))
# If decode_to_file was provided use it as the output filename without change
# (except for adding shard_id if using more shards for decoding).
# Otherwise, use the input filename plus model, hp, problem, beam, alpha.
decode_filename = decode_to_file if decode_to_file else filename
if not decode_to_file:
decode_filename = _decode_filename(decode_filename, problem_name, decode_hp)
else:
decode_filename = _add_shard_to_filename(decode_filename, decode_hp)
tf.logging.info("Writing decodes into %s" % decode_filename)
outfile = tf.gfile.Open(decode_filename, "w")
for index in range(len(sorted_inputs)):
outfile.write("%s%s" % (decodes[sorted_keys[index]], decode_hp.delimiter))
outfile.flush()
outfile.close()
output_dir = os.path.join(estimator.model_dir, "decode")
tf.gfile.MakeDirs(output_dir)
run_postdecode_hooks(DecodeHookArgs(
estimator=estimator,
problem=hparams.problem,
output_dirs=[output_dir],
hparams=hparams,
decode_hparams=decode_hp,
predictions=list(result_iter)
), None)
|
python
|
def decode_from_file(estimator,
filename,
hparams,
decode_hp,
decode_to_file=None,
checkpoint_path=None):
"""Compute predictions on entries in filename and write them out."""
if not decode_hp.batch_size:
decode_hp.batch_size = 32
tf.logging.info(
"decode_hp.batch_size not specified; default=%d" % decode_hp.batch_size)
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
p_hp = hparams.problem_hparams
has_input = "inputs" in p_hp.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = p_hp.vocabulary[inputs_vocab_key]
targets_vocab = p_hp.vocabulary["targets"]
problem_name = FLAGS.problem
filename = _add_shard_to_filename(filename, decode_hp)
tf.logging.info("Performing decoding from file (%s)." % filename)
if has_input:
sorted_inputs, sorted_keys = _get_sorted_inputs(
filename, decode_hp.delimiter)
else:
sorted_inputs = _get_language_modeling_inputs(
filename, decode_hp.delimiter, repeat=decode_hp.num_decodes)
sorted_keys = range(len(sorted_inputs))
num_sentences = len(sorted_inputs)
num_decode_batches = (num_sentences - 1) // decode_hp.batch_size + 1
if estimator.config.use_tpu:
length = getattr(hparams, "length", 0) or hparams.max_length
batch_ids = []
for line in sorted_inputs:
if has_input:
ids = inputs_vocab.encode(line.strip()) + [1]
else:
ids = targets_vocab.encode(line)
if len(ids) < length:
ids.extend([0] * (length - len(ids)))
else:
ids = ids[:length]
batch_ids.append(ids)
np_ids = np.array(batch_ids, dtype=np.int32)
def input_fn(params):
batch_size = params["batch_size"]
dataset = tf.data.Dataset.from_tensor_slices({"inputs": np_ids})
dataset = dataset.map(
lambda ex: {"inputs": tf.reshape(ex["inputs"], (length, 1, 1))})
dataset = dataset.batch(batch_size)
return dataset
else:
def input_fn():
input_gen = _decode_batch_input_fn(
num_decode_batches, sorted_inputs,
inputs_vocab, decode_hp.batch_size,
decode_hp.max_input_size,
task_id=decode_hp.multiproblem_task_id, has_input=has_input)
gen_fn = make_input_fn_from_generator(input_gen)
example = gen_fn()
return _decode_input_tensor_to_features_dict(example, hparams)
decodes = []
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
start_time = time.time()
total_time_per_step = 0
total_cnt = 0
def timer(gen):
while True:
try:
start_time = time.time()
item = next(gen)
elapsed_time = time.time() - start_time
yield elapsed_time, item
except StopIteration:
break
for elapsed_time, result in timer(result_iter):
if decode_hp.return_beams:
beam_decodes = []
beam_scores = []
output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
scores = None
if "scores" in result:
if np.isscalar(result["scores"]):
result["scores"] = result["scores"].reshape(1)
scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
for k, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % k)
score = scores and scores[k]
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
beam,
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results,
skip_eos_postprocess=decode_hp.skip_eos_postprocess)
beam_decodes.append(decoded_outputs)
if decode_hp.write_beam_scores:
beam_scores.append(score)
if decode_hp.write_beam_scores:
decodes.append("\t".join([
"\t".join([d, "%.2f" % s])
for d, s in zip(beam_decodes, beam_scores)
]))
else:
decodes.append("\t".join(beam_decodes))
else:
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
result["outputs"],
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results,
skip_eos_postprocess=decode_hp.skip_eos_postprocess)
decodes.append(decoded_outputs)
total_time_per_step += elapsed_time
total_cnt += result["outputs"].shape[-1]
duration = time.time() - start_time
tf.logging.info("Elapsed Time: %5.5f" % duration)
tf.logging.info("Averaged Single Token Generation Time: %5.7f "
"(time %5.7f count %d)" %
(total_time_per_step / total_cnt,
total_time_per_step, total_cnt))
if decode_hp.batch_size == 1:
tf.logging.info("Inference time %.4f seconds "
"(Latency = %.4f ms/setences)" %
(duration, 1000.0*duration/num_sentences))
else:
tf.logging.info("Inference time %.4f seconds "
"(Throughput = %.4f sentences/second)" %
(duration, num_sentences/duration))
# If decode_to_file was provided use it as the output filename without change
# (except for adding shard_id if using more shards for decoding).
# Otherwise, use the input filename plus model, hp, problem, beam, alpha.
decode_filename = decode_to_file if decode_to_file else filename
if not decode_to_file:
decode_filename = _decode_filename(decode_filename, problem_name, decode_hp)
else:
decode_filename = _add_shard_to_filename(decode_filename, decode_hp)
tf.logging.info("Writing decodes into %s" % decode_filename)
outfile = tf.gfile.Open(decode_filename, "w")
for index in range(len(sorted_inputs)):
outfile.write("%s%s" % (decodes[sorted_keys[index]], decode_hp.delimiter))
outfile.flush()
outfile.close()
output_dir = os.path.join(estimator.model_dir, "decode")
tf.gfile.MakeDirs(output_dir)
run_postdecode_hooks(DecodeHookArgs(
estimator=estimator,
problem=hparams.problem,
output_dirs=[output_dir],
hparams=hparams,
decode_hparams=decode_hp,
predictions=list(result_iter)
), None)
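The fixed-length padding/truncation applied to each encoded line on the TPU path above, in isolation; length and the id lists are illustrative, with 0 as the PAD id and 1 the appended EOS id:

length = 6
for ids in ([3, 4, 1], [9, 8, 7, 6, 5, 4, 3]):
  ids = list(ids)
  if len(ids) < length:
    ids.extend([0] * (length - len(ids)))  # right-pad with PAD (0)
  else:
    ids = ids[:length]                     # truncate to the fixed length
  print(ids)
# [3, 4, 1, 0, 0, 0]
# [9, 8, 7, 6, 5, 4]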
|
[
"def",
"decode_from_file",
"(",
"estimator",
",",
"filename",
",",
"hparams",
",",
"decode_hp",
",",
"decode_to_file",
"=",
"None",
",",
"checkpoint_path",
"=",
"None",
")",
":",
"if",
"not",
"decode_hp",
".",
"batch_size",
":",
"decode_hp",
".",
"batch_size",
"=",
"32",
"tf",
".",
"logging",
".",
"info",
"(",
"\"decode_hp.batch_size not specified; default=%d\"",
"%",
"decode_hp",
".",
"batch_size",
")",
"# Inputs vocabulary is set to targets if there are no inputs in the problem,",
"# e.g., for language models where the inputs are just a prefix of targets.",
"p_hp",
"=",
"hparams",
".",
"problem_hparams",
"has_input",
"=",
"\"inputs\"",
"in",
"p_hp",
".",
"vocabulary",
"inputs_vocab_key",
"=",
"\"inputs\"",
"if",
"has_input",
"else",
"\"targets\"",
"inputs_vocab",
"=",
"p_hp",
".",
"vocabulary",
"[",
"inputs_vocab_key",
"]",
"targets_vocab",
"=",
"p_hp",
".",
"vocabulary",
"[",
"\"targets\"",
"]",
"problem_name",
"=",
"FLAGS",
".",
"problem",
"filename",
"=",
"_add_shard_to_filename",
"(",
"filename",
",",
"decode_hp",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Performing decoding from file (%s).\"",
"%",
"filename",
")",
"if",
"has_input",
":",
"sorted_inputs",
",",
"sorted_keys",
"=",
"_get_sorted_inputs",
"(",
"filename",
",",
"decode_hp",
".",
"delimiter",
")",
"else",
":",
"sorted_inputs",
"=",
"_get_language_modeling_inputs",
"(",
"filename",
",",
"decode_hp",
".",
"delimiter",
",",
"repeat",
"=",
"decode_hp",
".",
"num_decodes",
")",
"sorted_keys",
"=",
"range",
"(",
"len",
"(",
"sorted_inputs",
")",
")",
"num_sentences",
"=",
"len",
"(",
"sorted_inputs",
")",
"num_decode_batches",
"=",
"(",
"num_sentences",
"-",
"1",
")",
"//",
"decode_hp",
".",
"batch_size",
"+",
"1",
"if",
"estimator",
".",
"config",
".",
"use_tpu",
":",
"length",
"=",
"getattr",
"(",
"hparams",
",",
"\"length\"",
",",
"0",
")",
"or",
"hparams",
".",
"max_length",
"batch_ids",
"=",
"[",
"]",
"for",
"line",
"in",
"sorted_inputs",
":",
"if",
"has_input",
":",
"ids",
"=",
"inputs_vocab",
".",
"encode",
"(",
"line",
".",
"strip",
"(",
")",
")",
"+",
"[",
"1",
"]",
"else",
":",
"ids",
"=",
"targets_vocab",
".",
"encode",
"(",
"line",
")",
"if",
"len",
"(",
"ids",
")",
"<",
"length",
":",
"ids",
".",
"extend",
"(",
"[",
"0",
"]",
"*",
"(",
"length",
"-",
"len",
"(",
"ids",
")",
")",
")",
"else",
":",
"ids",
"=",
"ids",
"[",
":",
"length",
"]",
"batch_ids",
".",
"append",
"(",
"ids",
")",
"np_ids",
"=",
"np",
".",
"array",
"(",
"batch_ids",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"def",
"input_fn",
"(",
"params",
")",
":",
"batch_size",
"=",
"params",
"[",
"\"batch_size\"",
"]",
"dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"{",
"\"inputs\"",
":",
"np_ids",
"}",
")",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"lambda",
"ex",
":",
"{",
"\"inputs\"",
":",
"tf",
".",
"reshape",
"(",
"ex",
"[",
"\"inputs\"",
"]",
",",
"(",
"length",
",",
"1",
",",
"1",
")",
")",
"}",
")",
"dataset",
"=",
"dataset",
".",
"batch",
"(",
"batch_size",
")",
"return",
"dataset",
"else",
":",
"def",
"input_fn",
"(",
")",
":",
"input_gen",
"=",
"_decode_batch_input_fn",
"(",
"num_decode_batches",
",",
"sorted_inputs",
",",
"inputs_vocab",
",",
"decode_hp",
".",
"batch_size",
",",
"decode_hp",
".",
"max_input_size",
",",
"task_id",
"=",
"decode_hp",
".",
"multiproblem_task_id",
",",
"has_input",
"=",
"has_input",
")",
"gen_fn",
"=",
"make_input_fn_from_generator",
"(",
"input_gen",
")",
"example",
"=",
"gen_fn",
"(",
")",
"return",
"_decode_input_tensor_to_features_dict",
"(",
"example",
",",
"hparams",
")",
"decodes",
"=",
"[",
"]",
"result_iter",
"=",
"estimator",
".",
"predict",
"(",
"input_fn",
",",
"checkpoint_path",
"=",
"checkpoint_path",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"total_time_per_step",
"=",
"0",
"total_cnt",
"=",
"0",
"def",
"timer",
"(",
"gen",
")",
":",
"while",
"True",
":",
"try",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"item",
"=",
"next",
"(",
"gen",
")",
"elapsed_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"yield",
"elapsed_time",
",",
"item",
"except",
"StopIteration",
":",
"break",
"for",
"elapsed_time",
",",
"result",
"in",
"timer",
"(",
"result_iter",
")",
":",
"if",
"decode_hp",
".",
"return_beams",
":",
"beam_decodes",
"=",
"[",
"]",
"beam_scores",
"=",
"[",
"]",
"output_beams",
"=",
"np",
".",
"split",
"(",
"result",
"[",
"\"outputs\"",
"]",
",",
"decode_hp",
".",
"beam_size",
",",
"axis",
"=",
"0",
")",
"scores",
"=",
"None",
"if",
"\"scores\"",
"in",
"result",
":",
"if",
"np",
".",
"isscalar",
"(",
"result",
"[",
"\"scores\"",
"]",
")",
":",
"result",
"[",
"\"scores\"",
"]",
"=",
"result",
"[",
"\"scores\"",
"]",
".",
"reshape",
"(",
"1",
")",
"scores",
"=",
"np",
".",
"split",
"(",
"result",
"[",
"\"scores\"",
"]",
",",
"decode_hp",
".",
"beam_size",
",",
"axis",
"=",
"0",
")",
"for",
"k",
",",
"beam",
"in",
"enumerate",
"(",
"output_beams",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"BEAM %d:\"",
"%",
"k",
")",
"score",
"=",
"scores",
"and",
"scores",
"[",
"k",
"]",
"_",
",",
"decoded_outputs",
",",
"_",
"=",
"log_decode_results",
"(",
"result",
"[",
"\"inputs\"",
"]",
",",
"beam",
",",
"problem_name",
",",
"None",
",",
"inputs_vocab",
",",
"targets_vocab",
",",
"log_results",
"=",
"decode_hp",
".",
"log_results",
",",
"skip_eos_postprocess",
"=",
"decode_hp",
".",
"skip_eos_postprocess",
")",
"beam_decodes",
".",
"append",
"(",
"decoded_outputs",
")",
"if",
"decode_hp",
".",
"write_beam_scores",
":",
"beam_scores",
".",
"append",
"(",
"score",
")",
"if",
"decode_hp",
".",
"write_beam_scores",
":",
"decodes",
".",
"append",
"(",
"\"\\t\"",
".",
"join",
"(",
"[",
"\"\\t\"",
".",
"join",
"(",
"[",
"d",
",",
"\"%.2f\"",
"%",
"s",
"]",
")",
"for",
"d",
",",
"s",
"in",
"zip",
"(",
"beam_decodes",
",",
"beam_scores",
")",
"]",
")",
")",
"else",
":",
"decodes",
".",
"append",
"(",
"\"\\t\"",
".",
"join",
"(",
"beam_decodes",
")",
")",
"else",
":",
"_",
",",
"decoded_outputs",
",",
"_",
"=",
"log_decode_results",
"(",
"result",
"[",
"\"inputs\"",
"]",
",",
"result",
"[",
"\"outputs\"",
"]",
",",
"problem_name",
",",
"None",
",",
"inputs_vocab",
",",
"targets_vocab",
",",
"log_results",
"=",
"decode_hp",
".",
"log_results",
",",
"skip_eos_postprocess",
"=",
"decode_hp",
".",
"skip_eos_postprocess",
")",
"decodes",
".",
"append",
"(",
"decoded_outputs",
")",
"total_time_per_step",
"+=",
"elapsed_time",
"total_cnt",
"+=",
"result",
"[",
"\"outputs\"",
"]",
".",
"shape",
"[",
"-",
"1",
"]",
"duration",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Elapsed Time: %5.5f\"",
"%",
"duration",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Averaged Single Token Generation Time: %5.7f \"",
"\"(time %5.7f count %d)\"",
"%",
"(",
"total_time_per_step",
"/",
"total_cnt",
",",
"total_time_per_step",
",",
"total_cnt",
")",
")",
"if",
"decode_hp",
".",
"batch_size",
"==",
"1",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Inference time %.4f seconds \"",
"\"(Latency = %.4f ms/setences)\"",
"%",
"(",
"duration",
",",
"1000.0",
"*",
"duration",
"/",
"num_sentences",
")",
")",
"else",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Inference time %.4f seconds \"",
"\"(Throughput = %.4f sentences/second)\"",
"%",
"(",
"duration",
",",
"num_sentences",
"/",
"duration",
")",
")",
"# If decode_to_file was provided use it as the output filename without change",
"# (except for adding shard_id if using more shards for decoding).",
"# Otherwise, use the input filename plus model, hp, problem, beam, alpha.",
"decode_filename",
"=",
"decode_to_file",
"if",
"decode_to_file",
"else",
"filename",
"if",
"not",
"decode_to_file",
":",
"decode_filename",
"=",
"_decode_filename",
"(",
"decode_filename",
",",
"problem_name",
",",
"decode_hp",
")",
"else",
":",
"decode_filename",
"=",
"_add_shard_to_filename",
"(",
"decode_filename",
",",
"decode_hp",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Writing decodes into %s\"",
"%",
"decode_filename",
")",
"outfile",
"=",
"tf",
".",
"gfile",
".",
"Open",
"(",
"decode_filename",
",",
"\"w\"",
")",
"for",
"index",
"in",
"range",
"(",
"len",
"(",
"sorted_inputs",
")",
")",
":",
"outfile",
".",
"write",
"(",
"\"%s%s\"",
"%",
"(",
"decodes",
"[",
"sorted_keys",
"[",
"index",
"]",
"]",
",",
"decode_hp",
".",
"delimiter",
")",
")",
"outfile",
".",
"flush",
"(",
")",
"outfile",
".",
"close",
"(",
")",
"output_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"estimator",
".",
"model_dir",
",",
"\"decode\"",
")",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"output_dir",
")",
"run_postdecode_hooks",
"(",
"DecodeHookArgs",
"(",
"estimator",
"=",
"estimator",
",",
"problem",
"=",
"hparams",
".",
"problem",
",",
"output_dirs",
"=",
"[",
"output_dir",
"]",
",",
"hparams",
"=",
"hparams",
",",
"decode_hparams",
"=",
"decode_hp",
",",
"predictions",
"=",
"list",
"(",
"result_iter",
")",
")",
",",
"None",
")"
] |
Compute predictions on entries in filename and write them out.
|
[
"Compute",
"predictions",
"on",
"entries",
"in",
"filename",
"and",
"write",
"them",
"out",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L394-L559
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
_decode_filename
|
def _decode_filename(base_filename, problem_name, decode_hp):
"""Generates decode filename.
Args:
base_filename: A string, base of the decode filename.
problem_name: A string, name of the problem.
decode_hp: HParams for decoding.
Returns:
A string, produced decode filename.
"""
if decode_hp.shards > 1:
base_filename = _add_shard_to_filename(base_filename, decode_hp)
if ("beam{beam}.alpha{alpha}.decodes".format(
beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha))
in base_filename):
return base_filename
else:
return (
"{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format(
base=base_filename,
model=FLAGS.model,
hp=FLAGS.hparams_set,
problem=problem_name,
beam=str(decode_hp.beam_size),
alpha=str(decode_hp.alpha)))
|
python
|
def _decode_filename(base_filename, problem_name, decode_hp):
"""Generates decode filename.
Args:
base_filename: A string, base of the decode filename.
problem_name: A string, name of the problem.
decode_hp: HParams for decoding.
Returns:
A string, produced decode filename.
"""
if decode_hp.shards > 1:
base_filename = _add_shard_to_filename(base_filename, decode_hp)
if ("beam{beam}.alpha{alpha}.decodes".format(
beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha))
in base_filename):
return base_filename
else:
return (
"{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format(
base=base_filename,
model=FLAGS.model,
hp=FLAGS.hparams_set,
problem=problem_name,
beam=str(decode_hp.beam_size),
alpha=str(decode_hp.alpha)))
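The suffix pattern produced above when the beam/alpha marker is not already present in the base filename; plain strings stand in for the FLAGS values here:

base, model, hp_set = "newstest2014.en", "transformer", "transformer_base"
print("{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format(
    base=base, model=model, hp=hp_set, problem="translate_ende_wmt32k",
    beam="4", alpha="0.6"))
# newstest2014.en.transformer.transformer_base.translate_ende_wmt32k.beam4.alpha0.6.decodes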
|
[
"def",
"_decode_filename",
"(",
"base_filename",
",",
"problem_name",
",",
"decode_hp",
")",
":",
"if",
"decode_hp",
".",
"shards",
">",
"1",
":",
"base_filename",
"=",
"_add_shard_to_filename",
"(",
"base_filename",
",",
"decode_hp",
")",
"if",
"(",
"\"beam{beam}.alpha{alpha}.decodes\"",
".",
"format",
"(",
"beam",
"=",
"str",
"(",
"decode_hp",
".",
"beam_size",
")",
",",
"alpha",
"=",
"str",
"(",
"decode_hp",
".",
"alpha",
")",
")",
"in",
"base_filename",
")",
":",
"return",
"base_filename",
"else",
":",
"return",
"(",
"\"{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes\"",
".",
"format",
"(",
"base",
"=",
"base_filename",
",",
"model",
"=",
"FLAGS",
".",
"model",
",",
"hp",
"=",
"FLAGS",
".",
"hparams_set",
",",
"problem",
"=",
"problem_name",
",",
"beam",
"=",
"str",
"(",
"decode_hp",
".",
"beam_size",
")",
",",
"alpha",
"=",
"str",
"(",
"decode_hp",
".",
"alpha",
")",
")",
")"
] |
Generates decode filename.
Args:
base_filename: A string, base of the decode filename.
problem_name: A string, name of the problem.
decode_hp: HParams for decoding.
Returns:
A string, produced decode filename.
|
[
"Generates",
"decode",
"filename",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L573-L598
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
make_input_fn_from_generator
|
def make_input_fn_from_generator(gen):
"""Use py_func to yield elements from the given generator."""
first_ex = six.next(gen)
flattened = tf.contrib.framework.nest.flatten(first_ex)
types = [t.dtype for t in flattened]
shapes = [[None] * len(t.shape) for t in flattened]
first_ex_list = [first_ex]
def py_func():
if first_ex_list:
example = first_ex_list.pop()
else:
example = six.next(gen)
return tf.contrib.framework.nest.flatten(example)
def input_fn():
flat_example = tf.py_func(py_func, [], types)
_ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
return example
return input_fn
|
python
|
def make_input_fn_from_generator(gen):
"""Use py_func to yield elements from the given generator."""
first_ex = six.next(gen)
flattened = tf.contrib.framework.nest.flatten(first_ex)
types = [t.dtype for t in flattened]
shapes = [[None] * len(t.shape) for t in flattened]
first_ex_list = [first_ex]
def py_func():
if first_ex_list:
example = first_ex_list.pop()
else:
example = six.next(gen)
return tf.contrib.framework.nest.flatten(example)
def input_fn():
flat_example = tf.py_func(py_func, [], types)
_ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
return example
return input_fn
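The peek-and-replay trick above in pure Python: the first element is drawn eagerly so dtypes and shapes can be inferred, then handed back on the first py_func call so no example is lost. A minimal sketch without the TensorFlow wrapping:

def gen():
  for i in range(3):
    yield {"inputs": [i]}

g = gen()
first_ex = next(g)            # peeked to learn structure and dtypes
first_ex_list = [first_ex]

def py_func():
  if first_ex_list:
    return first_ex_list.pop()  # replay the peeked example exactly once
  return next(g)

print([py_func() for _ in range(3)])
# [{'inputs': [0]}, {'inputs': [1]}, {'inputs': [2]}]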
|
[
"def",
"make_input_fn_from_generator",
"(",
"gen",
")",
":",
"first_ex",
"=",
"six",
".",
"next",
"(",
"gen",
")",
"flattened",
"=",
"tf",
".",
"contrib",
".",
"framework",
".",
"nest",
".",
"flatten",
"(",
"first_ex",
")",
"types",
"=",
"[",
"t",
".",
"dtype",
"for",
"t",
"in",
"flattened",
"]",
"shapes",
"=",
"[",
"[",
"None",
"]",
"*",
"len",
"(",
"t",
".",
"shape",
")",
"for",
"t",
"in",
"flattened",
"]",
"first_ex_list",
"=",
"[",
"first_ex",
"]",
"def",
"py_func",
"(",
")",
":",
"if",
"first_ex_list",
":",
"example",
"=",
"first_ex_list",
".",
"pop",
"(",
")",
"else",
":",
"example",
"=",
"six",
".",
"next",
"(",
"gen",
")",
"return",
"tf",
".",
"contrib",
".",
"framework",
".",
"nest",
".",
"flatten",
"(",
"example",
")",
"def",
"input_fn",
"(",
")",
":",
"flat_example",
"=",
"tf",
".",
"py_func",
"(",
"py_func",
",",
"[",
"]",
",",
"types",
")",
"_",
"=",
"[",
"t",
".",
"set_shape",
"(",
"shape",
")",
"for",
"t",
",",
"shape",
"in",
"zip",
"(",
"flat_example",
",",
"shapes",
")",
"]",
"example",
"=",
"tf",
".",
"contrib",
".",
"framework",
".",
"nest",
".",
"pack_sequence_as",
"(",
"first_ex",
",",
"flat_example",
")",
"return",
"example",
"return",
"input_fn"
] |
Use py_func to yield elements from the given generator.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L601-L622
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
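The core trick here is peeking at the first generated example to learn dtypes and shapes, then replaying it so nothing is lost. A TF-free sketch of that peek-and-replay idiom, using only the standard library:

import itertools

def peek(gen):
    # Pull the first example out to inspect its structure, then chain it
    # back in front so the consumer still sees the full stream.
    first = next(gen)
    return first, itertools.chain([first], gen)

first, gen = peek(iter([{"inputs": [1, 2]}, {"inputs": [3]}]))
print(first)       # {'inputs': [1, 2]} - inspected for types/shapes
print(list(gen))   # both examples still come out, in order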
decode_interactively
|
def decode_interactively(estimator, hparams, decode_hp, checkpoint_path=None):
"""Interactive decoding."""
is_image = "image" in hparams.problem.name
is_text2class = isinstance(hparams.problem,
text_problems.Text2ClassProblem)
skip_eos_postprocess = (
is_image or is_text2class or decode_hp.skip_eos_postprocess)
def input_fn():
gen_fn = make_input_fn_from_generator(
_interactive_input_fn(hparams, decode_hp))
example = gen_fn()
example = _interactive_input_tensor_to_features_dict(example, hparams)
return example
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
for result in result_iter:
targets_vocab = hparams.problem_hparams.vocabulary["targets"]
if decode_hp.return_beams:
beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
scores = None
if "scores" in result:
if np.isscalar(result["scores"]):
result["scores"] = result["scores"].reshape(1)
scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
for k, beam in enumerate(beams):
tf.logging.info("BEAM %d:" % k)
beam_string = targets_vocab.decode(_save_until_eos(
beam, skip_eos_postprocess))
if scores is not None:
tf.logging.info("\"%s\"\tScore:%f" % (beam_string, scores[k]))
else:
tf.logging.info("\"%s\"" % beam_string)
else:
if decode_hp.identity_output:
tf.logging.info(" ".join(map(str, result["outputs"].flatten())))
else:
tf.logging.info(
targets_vocab.decode(_save_until_eos(
result["outputs"], skip_eos_postprocess)))
|
python
|
Interactive decoding.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L625-L666
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
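When return_beams is set, the decoder concatenates all beams along axis 0 and the loop above separates them with np.split. A small numpy illustration with assumed shapes:

import numpy as np

beam_size = 2
# Hypothetical "outputs": two beams of length 3, concatenated on axis 0.
outputs = np.array([5, 7, 1, 5, 8, 1])
for k, beam in enumerate(np.split(outputs, beam_size, axis=0)):
    print("BEAM %d:" % k, beam)   # BEAM 0: [5 7 1] / BEAM 1: [5 8 1]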
_decode_batch_input_fn
|
def _decode_batch_input_fn(num_decode_batches, sorted_inputs, vocabulary,
batch_size, max_input_size,
task_id=-1, has_input=True):
"""Generator to produce batches of inputs."""
tf.logging.info(" batch %d" % num_decode_batches)
for b in range(num_decode_batches):
tf.logging.info("Decoding batch %d" % b)
batch_length = 0
batch_inputs = []
for inputs in sorted_inputs[b * batch_size:(b + 1) * batch_size]:
input_ids = vocabulary.encode(inputs)
if max_input_size > 0:
# Subtract 1 for the EOS_ID.
input_ids = input_ids[:max_input_size - 1]
if has_input or task_id > -1: # Do not append EOS for pure LM tasks.
final_id = text_encoder.EOS_ID if task_id < 0 else task_id
input_ids.append(final_id)
batch_inputs.append(input_ids)
if len(input_ids) > batch_length:
batch_length = len(input_ids)
final_batch_inputs = []
for input_ids in batch_inputs:
assert len(input_ids) <= batch_length
x = input_ids + [0] * (batch_length - len(input_ids))
final_batch_inputs.append(x)
yield {
"inputs": np.array(final_batch_inputs).astype(np.int32),
}
|
python
|
Generator to produce batches of inputs.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L669-L697
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
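The padding step at the end of the generator, shown standalone on toy token ids (EOS_ID assumed to be 1, matching text_encoder):

import numpy as np

EOS_ID = 1  # assumed value of text_encoder.EOS_ID
batch_inputs = [[4, 9, 3, EOS_ID], [7, EOS_ID]]   # toy token ids
batch_length = max(len(ids) for ids in batch_inputs)
# Right-pad every sequence with zeros to the longest one in the batch.
final_batch_inputs = [ids + [0] * (batch_length - len(ids))
                      for ids in batch_inputs]
print(np.array(final_batch_inputs).astype(np.int32))
# [[4 9 3 1]
#  [7 1 0 0]]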
_interactive_input_fn
|
def _interactive_input_fn(hparams, decode_hp):
"""Generator that reads from the terminal and yields "interactive inputs".
Due to temporary limitations in tf.learn, if we don't want to reload the
whole graph, then we are stuck encoding all of the input as one fixed-size
numpy array.
We yield int32 arrays with shape [const_array_size]. The format is:
[num_samples, decode_length, len(input ids), <input ids>, <padding>]
Args:
hparams: model hparams
decode_hp: decode hparams
Yields:
numpy arrays
Raises:
Exception: when `input_type` is invalid.
"""
num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1
decode_length = decode_hp.extra_length
input_type = "text"
p_hparams = hparams.problem_hparams
has_input = "inputs" in p_hparams.modality
vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
# This should be longer than the longest input.
const_array_size = 10000
# Import readline if available for command line editing and recall.
try:
import readline # pylint: disable=g-import-not-at-top,unused-variable
except ImportError:
pass
while True:
prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n"
" it=<input_type> ('text' or 'image' or 'label', default: "
"text)\n"
" ns=<num_samples> (changes number of samples, default: 1)\n"
" dl=<decode_length> (changes decode length, default: 100)\n"
" <%s> (decode)\n"
" q (quit)\n"
">" % (num_samples, decode_length,
"source_string" if has_input else "target_prefix"))
input_string = input(prompt)
if input_string == "q":
return
elif input_string[:3] == "ns=":
num_samples = int(input_string[3:])
elif input_string[:3] == "dl=":
decode_length = int(input_string[3:])
elif input_string[:3] == "it=":
input_type = input_string[3:]
else:
if input_type == "text":
input_ids = vocabulary.encode(input_string)
if has_input:
input_ids.append(text_encoder.EOS_ID)
x = [num_samples, decode_length, len(input_ids)] + input_ids
assert len(x) < const_array_size
x += [0] * (const_array_size - len(x))
features = {
"inputs": np.array(x).astype(np.int32),
}
elif input_type == "image":
input_path = input_string
img = vocabulary.encode(input_path)
features = {
"inputs": img.astype(np.int32),
}
elif input_type == "label":
input_ids = [int(input_string)]
x = [num_samples, decode_length, len(input_ids)] + input_ids
features = {
"inputs": np.array(x).astype(np.int32),
}
else:
raise Exception("Unsupported input type.")
for k, v in six.iteritems(
problem_lib.problem_hparams_to_features(p_hparams)):
features[k] = np.array(v).astype(np.int32)
yield features
|
python
|
Generator that reads from the terminal and yields "interactive inputs".

Due to temporary limitations in tf.learn, if we don't want to reload the
whole graph, then we are stuck encoding all of the input as one fixed-size
numpy array.

We yield int32 arrays with shape [const_array_size]. The format is:
[num_samples, decode_length, len(input ids), <input ids>, <padding>]

Args:
  hparams: model hparams
  decode_hp: decode hparams

Yields:
  numpy arrays

Raises:
  Exception: when `input_type` is invalid.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L700-L779
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
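The fixed-size packing format described in the docstring, built in numpy; const_array_size is shrunk to 16 here so the output is readable (the real function uses 10000):

import numpy as np

num_samples, decode_length = 1, 100
input_ids = [17, 42, 1]   # hypothetical token ids ending in EOS
const_array_size = 16     # 10000 in the real function
# [num_samples, decode_length, len(input_ids), <input_ids>, <padding>]
x = [num_samples, decode_length, len(input_ids)] + input_ids
x += [0] * (const_array_size - len(x))
print(np.array(x, dtype=np.int32))
# [  1 100   3  17  42   1   0   0   0   0   0   0   0   0   0   0]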
save_video
|
def save_video(video, save_path_template):
"""Save frames of the videos into files."""
try:
from PIL import Image # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires PIL library to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
for i, frame in enumerate(video):
save_path = save_path_template.format(i)
with tf.gfile.Open(save_path, "wb") as sp:
Image.fromarray(np.uint8(frame)).save(sp)
|
python
|
Save frames of the videos into files.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L782-L795
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
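A minimal standalone version of the frame-writing loop, with plain open() standing in for tf.gfile.Open and a made-up path template:

import numpy as np
from PIL import Image

video = np.zeros((2, 8, 8, 3), dtype=np.uint8)   # two tiny black frames
save_path_template = "/tmp/frame_{:05d}.png"     # hypothetical template
for i, frame in enumerate(video):
    with open(save_path_template.format(i), "wb") as sp:
        # Pass format explicitly since we hand PIL a file object.
        Image.fromarray(np.uint8(frame)).save(sp, format="PNG")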
show_and_save_image
|
def show_and_save_image(img, save_path):
"""Shows an image using matplotlib and saves it."""
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires matplotlib to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
plt.imshow(img)
with tf.gfile.Open(save_path, "wb") as sp:
plt.savefig(sp)
|
python
|
Shows an image using matplotlib and saves it.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L798-L809
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
_get_language_modeling_inputs
|
def _get_language_modeling_inputs(filename,
delimiter="\n",
repeat=1,
append_space_to_final_punctionation=True):
"""Read a file of partial texts to continue.
The purpose of append_space_to_final_punctionation is that SubwordTokenizer
groups punctuation and the ensuing space in the same token. Adding a space
causes the token to be completed.
Args:
filename: a string
delimiter: a string
repeat: an integer - we repeat the entire file that many times.
append_space_to_final_punctionation: a boolean
Returns:
a list of strings
"""
with tf.gfile.Open(filename) as f:
text = f.read()
inputs = text.split(delimiter)
if not inputs[-1]:
inputs.pop()
inputs *= repeat
if append_space_to_final_punctionation:
inputs = [
s + " " if s and s[-1] in string.punctuation else s for s in inputs]
return inputs
|
python
|
Read a file of partial texts to continue.

The purpose of append_space_to_final_punctionation is that SubwordTokenizer
groups punctuation and the ensuing space in the same token. Adding a space
causes the token to be completed.

Args:
  filename: a string
  delimiter: a string
  repeat: an integer - we repeat the entire file that many times.
  append_space_to_final_punctionation: a boolean

Returns:
  a list of strings
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L812-L840
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
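The punctuation handling in isolation: a trailing space is appended only when a non-empty prefix ends in punctuation, so the subword tokenizer can close the punctuation token.

import string

inputs = ["The end.", "To be continued", ""]
inputs = [s + " " if s and s[-1] in string.punctuation else s
          for s in inputs]
print(inputs)   # ['The end. ', 'To be continued', '']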
_get_sorted_inputs
|
def _get_sorted_inputs(filename, delimiter="\n"):
"""Returning inputs sorted according to decreasing length.
This causes inputs of similar lengths to be processed in the same batch,
facilitating early stopping for short sequences.
Longer sequences are sorted first so that if you're going to get OOMs,
you'll see it in the first batch.
Args:
filename: path to file with inputs, 1 per line.
delimiter: str, delimits records in the file.
Returns:
a sorted list of inputs
"""
tf.logging.info("Getting sorted inputs")
with tf.gfile.Open(filename) as f:
text = f.read()
records = text.split(delimiter)
inputs = [record.strip() for record in records]
# Strip the last empty line.
if not inputs[-1]:
inputs.pop()
input_lens = [(i, -len(line.split())) for i, line in enumerate(inputs)]
sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1))
# We'll need the keys to rearrange the inputs back into their original order
sorted_keys = {}
sorted_inputs = []
for i, (index, _) in enumerate(sorted_input_lens):
sorted_inputs.append(inputs[index])
sorted_keys[index] = i
return sorted_inputs, sorted_keys
|
python
|
Returning inputs sorted according to decreasing length.

This causes inputs of similar lengths to be processed in the same batch,
facilitating early stopping for short sequences. Longer sequences are
sorted first so that if you're going to get OOMs, you'll see it in the
first batch.

Args:
  filename: path to file with inputs, 1 per line.
  delimiter: str, delimits records in the file.

Returns:
  a sorted list of inputs
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L843-L876
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
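The sort-and-restore bookkeeping in isolation: sorted_keys maps each original position to its slot in the sorted list, so decodes can later be put back in input order.

inputs = ["a b", "a b c d", "a"]
input_lens = [(i, -len(line.split())) for i, line in enumerate(inputs)]
sorted_input_lens = sorted(input_lens, key=lambda pair: pair[1])

sorted_keys = {}
sorted_inputs = []
for i, (index, _) in enumerate(sorted_input_lens):
    sorted_inputs.append(inputs[index])
    sorted_keys[index] = i

print(sorted_inputs)  # ['a b c d', 'a b', 'a'] - longest first
# Undo the permutation, recovering the original input order.
print([sorted_inputs[sorted_keys[i]] for i in range(len(inputs))])
# ['a b', 'a b c d', 'a']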
_save_until_eos
|
def _save_until_eos(ids, skip=False):
"""Strips everything after the first <EOS> token, which is normally 1."""
ids = ids.flatten()
if skip:
return ids
try:
index = list(ids).index(text_encoder.EOS_ID)
return ids[0:index]
except ValueError:
# No EOS_ID: return the array as-is.
return ids
|
python
|
Strips everything after the first <EOS> token, which is normally 1.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L879-L889
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
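The same truncation logic as a standalone function, assuming EOS_ID is 1 as the docstring notes:

import numpy as np

EOS_ID = 1  # assumption: mirrors text_encoder.EOS_ID

def save_until_eos(ids, skip=False):
    ids = ids.flatten()
    if skip:
        return ids
    try:
        return ids[0:list(ids).index(EOS_ID)]
    except ValueError:   # no EOS found: keep everything
        return ids

print(save_until_eos(np.array([[5, 9, 1, 7, 7]])))  # [5 9] - cut at EOS
print(save_until_eos(np.array([5, 9, 3])))          # [5 9 3] - unchanged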
_interactive_input_tensor_to_features_dict
|
def _interactive_input_tensor_to_features_dict(feature_map, hparams):
"""Convert the interactive input format (see above) to a dictionary.
Args:
feature_map: dict with inputs.
hparams: model hyperparameters
Returns:
a features dictionary, as expected by the decoder.
"""
inputs = tf.convert_to_tensor(feature_map["inputs"])
input_is_image = False if len(inputs.get_shape()) < 3 else True
x = inputs
if input_is_image:
x = tf.image.resize_images(x, [299, 299])
x = tf.reshape(x, [1, 299, 299, -1])
x = tf.to_int32(x)
else:
# Remove the batch dimension.
num_samples = x[0]
length = x[2]
x = tf.slice(x, [3], tf.to_int32([length]))
x = tf.reshape(x, [1, -1, 1, 1])
# Transform into a batch of size num_samples to get that many random
# decodes.
x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))
p_hparams = hparams.problem_hparams
input_space_id = tf.constant(p_hparams.input_space_id)
target_space_id = tf.constant(p_hparams.target_space_id)
features = {}
features["input_space_id"] = input_space_id
features["target_space_id"] = target_space_id
features["decode_length"] = (
IMAGE_DECODE_LENGTH if input_is_image else inputs[1])
features["inputs"] = x
return features
|
python
|
Convert the interactive input format (see above) to a dictionary.

Args:
  feature_map: dict with inputs.
  hparams: model hyperparameters

Returns:
  a features dictionary, as expected by the decoder.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L892-L930
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
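The non-image branch, mirrored in numpy: slice out the real ids using the stored length, then tile them into num_samples copies to get that many random decodes.

import numpy as np

# Packed as [num_samples, decode_length, length, ids..., padding].
packed = np.array([2, 100, 3, 17, 42, 1, 0, 0], dtype=np.int32)
num_samples, length = packed[0], packed[2]
ids = packed[3:3 + length].reshape(1, -1, 1, 1)
batch = np.tile(ids, (num_samples, 1, 1, 1))
print(batch.shape)   # (2, 3, 1, 1): two samples of the same 3-token input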
_decode_input_tensor_to_features_dict
|
def _decode_input_tensor_to_features_dict(feature_map, hparams):
"""Convert the interactive input format (see above) to a dictionary.
Args:
feature_map: dict with inputs.
hparams: model hyperparameters
Returns:
a features dictionary, as expected by the decoder.
"""
inputs = tf.convert_to_tensor(feature_map["inputs"])
input_is_image = False
x = inputs
p_hparams = hparams.problem_hparams
# Add a third empty dimension
x = tf.expand_dims(x, axis=[2])
x = tf.to_int32(x)
input_space_id = tf.constant(p_hparams.input_space_id)
target_space_id = tf.constant(p_hparams.target_space_id)
features = {}
features["input_space_id"] = input_space_id
features["target_space_id"] = target_space_id
features["decode_length"] = (
IMAGE_DECODE_LENGTH if input_is_image else tf.shape(x)[1] + 50)
features["inputs"] = x
return features
|
python
|
Convert the interactive input format (see above) to a dictionary.

Args:
  feature_map: dict with inputs.
  hparams: model hyperparameters

Returns:
  a features dictionary, as expected by the decoder.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L933-L960
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/utils/decoding.py
|
run_postdecode_hooks
|
def run_postdecode_hooks(decode_hook_args, dataset_split):
"""Run hooks after decodes have run."""
hooks = decode_hook_args.problem.decode_hooks
if not hooks:
return
global_step = latest_checkpoint_step(decode_hook_args.estimator.model_dir)
if global_step is None:
tf.logging.info(
"Skipping decode hooks because no checkpoint yet available.")
return
tf.logging.info("Running decode hooks.")
parent_dir = os.path.join(decode_hook_args.output_dirs[0], os.pardir)
child_dir = decode_hook_args.decode_hparams.summaries_log_dir
if dataset_split is not None:
child_dir += "_{}".format(dataset_split)
final_dir = os.path.join(parent_dir, child_dir)
summary_writer = tf.summary.FileWriter(final_dir)
for hook in hooks:
# Isolate each hook in case it creates TF ops
with tf.Graph().as_default():
summaries = hook(decode_hook_args)
if summaries:
summary = tf.Summary(value=list(summaries))
summary_writer.add_summary(summary, global_step)
summary_writer.close()
tf.logging.info("Decode hooks done.")
|
python
|
Run hooks after decodes have run.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L982-L1008
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/style_transfer.py
|
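A hooks loop reduced to its essentials; the hook, log directory, and global step below are all invented for illustration (TF1-style summary APIs, matching the function above):

import tensorflow as tf

def bleu_hook(decode_hook_args):
    # Hypothetical hook: report one scalar summary value.
    return [tf.Summary.Value(tag="decode/bleu", simple_value=0.31)]

summary_writer = tf.summary.FileWriter("/tmp/decode_summaries")
global_step = 1000   # latest_checkpoint_step(...) in the real code
for hook in [bleu_hook]:
    with tf.Graph().as_default():   # isolate any ops the hook creates
        summaries = hook(None)      # decode_hook_args omitted in this sketch
    if summaries:
        summary_writer.add_summary(tf.Summary(value=list(summaries)),
                                   global_step)
summary_writer.close()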
StyleTransferProblemShakespeare.dataset_splits
|
def dataset_splits(self):
"""Splits of data to produce and number of output shards for each."""
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": _TRAIN_SHARDS,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": _DEV_SHARDS,
}]
|
python
|
Splits of data to produce and number of output shards for each.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/style_transfer.py#L87-L95
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
local_attention1d_spatial_decoder
|
def local_attention1d_spatial_decoder(x, kv_dim, heads_dim,
feedforward_dim, hparams):
"""Image Transformer decoder with local1D spatial layers."""
batch_dim, length_dim, model_dim = x.shape.dims
blocks_w_dim = mtf.Dimension("blocksw", hparams.block_length)
num_w_blocks_dim = mtf.Dimension("num_wblocks",
length_dim.size // blocks_w_dim.size)
x = mtf.reshape(
x, mtf.Shape([batch_dim, num_w_blocks_dim, blocks_w_dim, model_dim]))
# [ self attention - ffn - residual + dropout] x n
for layer in range(hparams.num_decoder_layers):
layer_name = "decoder_layer_%d" % layer
with tf.variable_scope(layer_name):
# Self attention layer
x += layer_prepostprocess_dropout(
mtf.layers.local_self_attention_spatial_blocks(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"),
kv_dim,
heads_dim,
memory_w_dim=blocks_w_dim,
mask_right=True,
name="self_att"), hparams)
# ffn layer
x += layer_prepostprocess_dropout(
mtf.layers.dense_relu_dense(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
feedforward_dim,
hparams.dropout,
dropout_broadcast_dims=[length_dim]), hparams)
output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm")
return output
|
python
|
Image Transformer decoder with local1D spatial layers.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L252-L283
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
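The opening reshape, mirrored in numpy: the length axis is split into (num_blocks, block_length) so self-attention runs within fixed-size windows.

import numpy as np

batch, length, model_dim, block_length = 2, 8, 4, 4
x = np.zeros((batch, length, model_dim))
num_blocks = length // block_length
print(x.reshape(batch, num_blocks, block_length, model_dim).shape)
# (2, 2, 4, 4)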
local_attention2d_spatial_decoder
|
def local_attention2d_spatial_decoder(x, kv_dim, heads_dim,
feedforward_dim, hparams):
"""Image Transformer decoder with local2D spatial layers."""
batch_dim, length_dim, model_dim = x.shape.dims
blocks_h_dim = mtf.Dimension("blocksh", hparams.block_height)
blocks_w_dim = mtf.Dimension("blocksw", hparams.block_width)
num_h_blocks_dim = mtf.Dimension("num_h_blocks",
hparams.img_len // hparams.block_height)
num_w_blocks_dim = mtf.Dimension(
"num_w_blocks",
hparams.img_len * hparams.num_channels // hparams.block_width)
x = mtf.transpose(
mtf.reshape(
x,
mtf.Shape([
batch_dim, num_h_blocks_dim, blocks_h_dim,
num_w_blocks_dim, blocks_w_dim, model_dim
])),
mtf.Shape([
batch_dim, num_h_blocks_dim, num_w_blocks_dim,
blocks_h_dim, blocks_w_dim, model_dim
]))
# Image Transformer Decoder
# [ self attention - ffn - residual + dropout] x n
for layer in range(hparams.num_decoder_layers):
layer_name = "decoder_layer_%d" % layer
with tf.variable_scope(layer_name):
# Self attention layer
x += layer_prepostprocess_dropout(
mtf.layers.local_2d_self_attention_spatial_blocks(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"),
kv_dim,
heads_dim,
memory_h_dim=num_h_blocks_dim,
memory_w_dim=num_w_blocks_dim,
name="self_att"), hparams)
# ffn layer
x += layer_prepostprocess_dropout(
mtf.layers.dense_relu_dense(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
feedforward_dim,
hparams.dropout,
dropout_broadcast_dims=[length_dim]), hparams)
output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm")
return output
|
python
|
Image Transformer decoder with local2D spatial layers.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L286-L331
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
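The 2D blocking, mirrored in numpy: the reshape carves the flattened image into a grid of (block_height x block_width) tiles, and the transpose groups the two grid axes ahead of the within-block axes, matching the mtf.transpose above. Sizes here are toy values.

import numpy as np

batch, img_len, channels, model_dim = 1, 4, 3, 8
block_h, block_w = 2, 6
num_h = img_len // block_h                 # 2 block rows
num_w = img_len * channels // block_w      # 2 block columns
x = np.zeros((batch, img_len * img_len * channels, model_dim))
x = x.reshape(batch, num_h, block_h, num_w, block_w, model_dim)
x = x.transpose(0, 1, 3, 2, 4, 5)
print(x.shape)   # (1, 2, 2, 2, 6, 8)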
local_attention1d_masked_decoder
|
def local_attention1d_masked_decoder(x, kv_dim, heads_dim,
feedforward_dim, hparams):
"""Image Transformer decoder with local1D masked layers."""
print(x)
_, length_dim, model_dim = x.shape.dims
for layer in range(hparams.num_decoder_layers):
layer_name = "decoder_layer_%d" % layer
with tf.variable_scope(layer_name):
# Self attention layer
length_per_split = mtf.tensor_dim_to_size_per_split(
hparams.layout, hparams.mesh_shape, length_dim)
x += layer_prepostprocess_dropout(
mtf.layers.masked_local_attention_1d(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"),
kv_dim,
heads_dim,
window_size=hparams.block_length,
length_per_split=length_per_split,
name="self_att"), hparams)
# ffn layer
x += layer_prepostprocess_dropout(
mtf.layers.dense_relu_dense(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
feedforward_dim,
hparams.dropout,
dropout_broadcast_dims=[length_dim]), hparams)
output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm")
return output
|
python
|
def local_attention1d_masked_decoder(x, kv_dim, heads_dim,
feedforward_dim, hparams):
"""Image Transformer decoder with local1D masked layers."""
_, length_dim, model_dim = x.shape.dims
for layer in range(hparams.num_decoder_layers):
layer_name = "decoder_layer_%d" % layer
with tf.variable_scope(layer_name):
# Self attention layer
length_per_split = mtf.tensor_dim_to_size_per_split(
hparams.layout, hparams.mesh_shape, length_dim)
x += layer_prepostprocess_dropout(
mtf.layers.masked_local_attention_1d(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"),
kv_dim,
heads_dim,
window_size=hparams.block_length,
length_per_split=length_per_split,
name="self_att"), hparams)
# ffn layer
x += layer_prepostprocess_dropout(
mtf.layers.dense_relu_dense(
mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
feedforward_dim,
hparams.dropout,
dropout_broadcast_dims=[length_dim]), hparams)
output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm")
return output
|
[
"def",
"local_attention1d_masked_decoder",
"(",
"x",
",",
"kv_dim",
",",
"heads_dim",
",",
"feedforward_dim",
",",
"hparams",
")",
":",
"print",
"(",
"x",
")",
"_",
",",
"length_dim",
",",
"model_dim",
"=",
"x",
".",
"shape",
".",
"dims",
"for",
"layer",
"in",
"range",
"(",
"hparams",
".",
"num_decoder_layers",
")",
":",
"layer_name",
"=",
"\"decoder_layer_%d\"",
"%",
"layer",
"with",
"tf",
".",
"variable_scope",
"(",
"layer_name",
")",
":",
"# Self attention layer",
"length_per_split",
"=",
"mtf",
".",
"tensor_dim_to_size_per_split",
"(",
"hparams",
".",
"layout",
",",
"hparams",
".",
"mesh_shape",
",",
"length_dim",
")",
"x",
"+=",
"layer_prepostprocess_dropout",
"(",
"mtf",
".",
"layers",
".",
"masked_local_attention_1d",
"(",
"mtf",
".",
"layers",
".",
"layer_norm",
"(",
"x",
",",
"model_dim",
",",
"name",
"=",
"\"layer_norm_att\"",
")",
",",
"kv_dim",
",",
"heads_dim",
",",
"window_size",
"=",
"hparams",
".",
"block_length",
",",
"length_per_split",
"=",
"length_per_split",
",",
"name",
"=",
"\"self_att\"",
")",
",",
"hparams",
")",
"# ffn layer",
"x",
"+=",
"layer_prepostprocess_dropout",
"(",
"mtf",
".",
"layers",
".",
"dense_relu_dense",
"(",
"mtf",
".",
"layers",
".",
"layer_norm",
"(",
"x",
",",
"model_dim",
",",
"name",
"=",
"\"layer_norm_ffn\"",
")",
",",
"feedforward_dim",
",",
"hparams",
".",
"dropout",
",",
"dropout_broadcast_dims",
"=",
"[",
"length_dim",
"]",
")",
",",
"hparams",
")",
"output",
"=",
"mtf",
".",
"layers",
".",
"layer_norm",
"(",
"x",
",",
"model_dim",
",",
"name",
"=",
"\"final_layer_norm\"",
")",
"return",
"output"
] |
Image Transformer decoder with local1D masked layers.
|
[
"Image",
"Transformer",
"decoder",
"with",
"local1D",
"masked",
"layers",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L334-L362
|
train
|
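Both decoder bodies above repeat one structural motif: a pre-norm residual block, x += dropout(sublayer(layer_norm(x))), once for self-attention and once for the feed-forward layer. A framework-free sketch of that skeleton (the sublayer here is a hypothetical stand-in, not the mtf attention code):

import numpy as np

def layer_norm(x, eps=1e-6):
    # Normalize the last axis to zero mean and unit variance.
    mean = x.mean(-1, keepdims=True)
    var = x.var(-1, keepdims=True)
    return (x - mean) / np.sqrt(var + eps)

def prenorm_residual(x, sublayer, keep_prob=0.9, train=True):
    y = sublayer(layer_norm(x))
    if train:  # inverted dropout, the role layer_prepostprocess_dropout plays
        mask = (np.random.rand(*y.shape) < keep_prob) / keep_prob
        y = y * mask
    return x + y

x = np.random.randn(2, 16, 8)  # [batch, length, model_dim]
w = np.random.randn(8, 8)
x = prenorm_residual(x, lambda h: np.maximum(h @ w, 0.0))  # toy "ffn" sublayer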
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_base
|
def mtf_image_transformer_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.no_data_parallelism = True
hparams.use_fixed_batch_size = True
hparams.batch_size = 1
hparams.max_length = 3072
hparams.hidden_size = 256
hparams.label_smoothing = 0.0
# 8-way model-parallelism
hparams.add_hparam("mesh_shape", "batch:8")
hparams.add_hparam("layout", "batch:batch")
hparams.add_hparam("mtf_mode", True)
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("filter_size", 1024)
hparams.add_hparam("num_encoder_layers", 0)
hparams.add_hparam("num_decoder_layers", 6)
hparams.add_hparam("attention_key_size", 256)
hparams.add_hparam("attention_value_size", 256)
# Share weights between input and target embeddings
hparams.shared_embedding = True
# mixture of experts hparams
hparams.add_hparam("ffn_layer", "dense_relu_dense")
hparams.add_hparam("moe_overhead_train", 1.0)
hparams.add_hparam("moe_overhead_eval", 2.0)
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-3
hparams.shared_embedding_and_softmax_weights = True
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.add_hparam("d_kv", 64)
hparams.add_hparam("d_ff", 2048)
# Image related hparams
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
hparams.add_hparam("unconditional", True)
# Local Attention related params
hparams.add_hparam("block_length", 128)
hparams.add_hparam("block_height", 16)
hparams.add_hparam("block_width", 16)
hparams.add_hparam("attention_type", "local1d")
return hparams
|
python
|
def mtf_image_transformer_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.no_data_parallelism = True
hparams.use_fixed_batch_size = True
hparams.batch_size = 1
hparams.max_length = 3072
hparams.hidden_size = 256
hparams.label_smoothing = 0.0
# 8-way model-parallelism
hparams.add_hparam("mesh_shape", "batch:8")
hparams.add_hparam("layout", "batch:batch")
hparams.add_hparam("mtf_mode", True)
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("filter_size", 1024)
hparams.add_hparam("num_encoder_layers", 0)
hparams.add_hparam("num_decoder_layers", 6)
hparams.add_hparam("attention_key_size", 256)
hparams.add_hparam("attention_value_size", 256)
# Share weights between input and target embeddings
hparams.shared_embedding = True
# mixture of experts hparams
hparams.add_hparam("ffn_layer", "dense_relu_dense")
hparams.add_hparam("moe_overhead_train", 1.0)
hparams.add_hparam("moe_overhead_eval", 2.0)
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-3
hparams.shared_embedding_and_softmax_weights = True
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.add_hparam("d_kv", 64)
hparams.add_hparam("d_ff", 2048)
# Image related hparams
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
hparams.add_hparam("unconditional", True)
# Local Attention related params
hparams.add_hparam("block_length", 128)
hparams.add_hparam("block_height", 16)
hparams.add_hparam("block_width", 16)
hparams.add_hparam("attention_type", "local1d")
return hparams
|
[
"def",
"mtf_image_transformer_base",
"(",
")",
":",
"hparams",
"=",
"common_hparams",
".",
"basic_params1",
"(",
")",
"hparams",
".",
"no_data_parallelism",
"=",
"True",
"hparams",
".",
"use_fixed_batch_size",
"=",
"True",
"hparams",
".",
"batch_size",
"=",
"1",
"hparams",
".",
"max_length",
"=",
"3072",
"hparams",
".",
"hidden_size",
"=",
"256",
"hparams",
".",
"label_smoothing",
"=",
"0.0",
"# 8-way model-parallelism",
"hparams",
".",
"add_hparam",
"(",
"\"mesh_shape\"",
",",
"\"batch:8\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"layout\"",
",",
"\"batch:batch\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"mtf_mode\"",
",",
"True",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_heads\"",
",",
"8",
")",
"hparams",
".",
"add_hparam",
"(",
"\"filter_size\"",
",",
"1024",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_encoder_layers\"",
",",
"0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_decoder_layers\"",
",",
"6",
")",
"hparams",
".",
"add_hparam",
"(",
"\"attention_key_size\"",
",",
"256",
")",
"hparams",
".",
"add_hparam",
"(",
"\"attention_value_size\"",
",",
"256",
")",
"# Share weights between input and target embeddings",
"hparams",
".",
"shared_embedding",
"=",
"True",
"# mixture of experts hparams",
"hparams",
".",
"add_hparam",
"(",
"\"ffn_layer\"",
",",
"\"dense_relu_dense\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"moe_overhead_train\"",
",",
"1.0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"moe_overhead_eval\"",
",",
"2.0",
")",
"hparams",
".",
"moe_num_experts",
"=",
"16",
"hparams",
".",
"moe_loss_coef",
"=",
"1e-3",
"hparams",
".",
"shared_embedding_and_softmax_weights",
"=",
"True",
"hparams",
".",
"optimizer",
"=",
"\"Adafactor\"",
"hparams",
".",
"learning_rate_schedule",
"=",
"\"rsqrt_decay\"",
"hparams",
".",
"learning_rate_warmup_steps",
"=",
"10000",
"hparams",
".",
"add_hparam",
"(",
"\"d_kv\"",
",",
"64",
")",
"hparams",
".",
"add_hparam",
"(",
"\"d_ff\"",
",",
"2048",
")",
"# Image related hparams",
"hparams",
".",
"add_hparam",
"(",
"\"img_len\"",
",",
"32",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_channels\"",
",",
"3",
")",
"hparams",
".",
"add_hparam",
"(",
"\"unconditional\"",
",",
"True",
")",
"# Local Attention related params",
"hparams",
".",
"add_hparam",
"(",
"\"block_length\"",
",",
"128",
")",
"hparams",
".",
"add_hparam",
"(",
"\"block_height\"",
",",
"16",
")",
"hparams",
".",
"add_hparam",
"(",
"\"block_width\"",
",",
"16",
")",
"hparams",
".",
"add_hparam",
"(",
"\"attention_type\"",
",",
"\"local1d\"",
")",
"return",
"hparams"
] |
Set of hyperparameters.
|
[
"Set",
"of",
"hyperparameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L366-L412
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_tiny
|
def mtf_image_transformer_tiny():
"""Catch bugs locally..."""
hparams = mtf_image_transformer_base()
hparams.hidden_size = 128
hparams.d_ff = 256
hparams.batch_size = 4
hparams.num_encoder_layers = 1
hparams.num_decoder_layers = 4
hparams.num_heads = 4
hparams.attention_key_size = 128
hparams.attention_value_size = 128
hparams.block_length = 32
# data parallelism and model-parallelism
hparams.mesh_shape = "batch:2"
hparams.layout = "batch:batch"
return hparams
|
python
|
def mtf_image_transformer_tiny():
"""Catch bugs locally..."""
hparams = mtf_image_transformer_base()
hparams.hidden_size = 128
hparams.d_ff = 256
hparams.batch_size = 4
hparams.num_encoder_layers = 1
hparams.num_decoder_layers = 4
hparams.num_heads = 4
hparams.attention_key_size = 128
hparams.attention_value_size = 128
hparams.block_length = 32
# data parallelism and model-parallelism
hparams.mesh_shape = "batch:2"
hparams.layout = "batch:batch"
return hparams
|
[
"def",
"mtf_image_transformer_tiny",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_base",
"(",
")",
"hparams",
".",
"hidden_size",
"=",
"128",
"hparams",
".",
"d_ff",
"=",
"256",
"hparams",
".",
"batch_size",
"=",
"4",
"hparams",
".",
"num_encoder_layers",
"=",
"1",
"hparams",
".",
"num_decoder_layers",
"=",
"4",
"hparams",
".",
"num_heads",
"=",
"4",
"hparams",
".",
"attention_key_size",
"=",
"128",
"hparams",
".",
"attention_value_size",
"=",
"128",
"hparams",
".",
"block_length",
"=",
"32",
"# data parallelism and model-parallelism",
"hparams",
".",
"mesh_shape",
"=",
"\"batch:2\"",
"hparams",
".",
"layout",
"=",
"\"batch:batch\"",
"return",
"hparams"
] |
Catch bugs locally...
|
[
"Catch",
"bugs",
"locally",
"..."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L416-L431
|
train
|
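The "mesh_shape" and "layout" hparams above are semicolon-separated name:value strings. In mesh_shape, each pair names a mesh dimension and its size ("batch:8" is eight cores along a mesh axis called "batch"); in layout, each pair maps a tensor dimension to the mesh dimension it is split over ("batch:batch" shards the batch dimension of tensors across the "batch" mesh axis). A small parsing sketch, independent of the mesh-tensorflow library and only illustrating the string format:

def parse_pairs(spec):
    """Parse 'a:1;b:2' into (name, value) string pairs."""
    if not spec:
        return []
    return [tuple(item.split(":", 1)) for item in spec.split(";")]

print(parse_pairs("batch:8"))                 # [('batch', '8')]
print(parse_pairs("model:4;batch:8"))         # [('model', '4'), ('batch', '8')]
print(parse_pairs("batch:batch;d_ff:model"))  # tensor dim -> mesh dim pairs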
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_single
|
def mtf_image_transformer_single():
"""Small single parameters."""
hparams = mtf_image_transformer_tiny()
hparams.mesh_shape = ""
hparams.layout = ""
hparams.hidden_size = 32
hparams.filter_size = 32
hparams.batch_size = 1
hparams.num_encoder_layers = 1
hparams.num_decoder_layers = 1
hparams.num_heads = 2
hparams.attention_key_size = 32
hparams.attention_value_size = 32
hparams.block_length = 16
return hparams
|
python
|
def mtf_image_transformer_single():
"""Small single parameters."""
hparams = mtf_image_transformer_tiny()
hparams.mesh_shape = ""
hparams.layout = ""
hparams.hidden_size = 32
hparams.filter_size = 32
hparams.batch_size = 1
hparams.num_encoder_layers = 1
hparams.num_decoder_layers = 1
hparams.num_heads = 2
hparams.attention_key_size = 32
hparams.attention_value_size = 32
hparams.block_length = 16
return hparams
|
[
"def",
"mtf_image_transformer_single",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_tiny",
"(",
")",
"hparams",
".",
"mesh_shape",
"=",
"\"\"",
"hparams",
".",
"layout",
"=",
"\"\"",
"hparams",
".",
"hidden_size",
"=",
"32",
"hparams",
".",
"filter_size",
"=",
"32",
"hparams",
".",
"batch_size",
"=",
"1",
"hparams",
".",
"num_encoder_layers",
"=",
"1",
"hparams",
".",
"num_decoder_layers",
"=",
"1",
"hparams",
".",
"num_heads",
"=",
"2",
"hparams",
".",
"attention_key_size",
"=",
"32",
"hparams",
".",
"attention_value_size",
"=",
"32",
"hparams",
".",
"block_length",
"=",
"16",
"return",
"hparams"
] |
Small single parameters.
|
[
"Small",
"single",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L435-L449
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_base_single
|
def mtf_image_transformer_base_single():
"""Small single parameters."""
hparams = mtf_image_transformer_base()
hparams.num_decoder_layers = 6
hparams.filter_size = 256
hparams.block_length = 128
hparams.mesh_shape = ""
hparams.layout = ""
return hparams
|
python
|
def mtf_image_transformer_base_single():
"""Small single parameters."""
hparams = mtf_image_transformer_base()
hparams.num_decoder_layers = 6
hparams.filter_size = 256
hparams.block_length = 128
hparams.mesh_shape = ""
hparams.layout = ""
return hparams
|
[
"def",
"mtf_image_transformer_base_single",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_base",
"(",
")",
"hparams",
".",
"num_decoder_layers",
"=",
"6",
"hparams",
".",
"filter_size",
"=",
"256",
"hparams",
".",
"block_length",
"=",
"128",
"hparams",
".",
"mesh_shape",
"=",
"\"\"",
"hparams",
".",
"layout",
"=",
"\"\"",
"return",
"hparams"
] |
Small single parameters.
|
[
"Small",
"single",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L453-L461
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_tiny_spatial1d
|
def mtf_image_transformer_tiny_spatial1d():
"""Small single parameters."""
hparams = mtf_image_transformer_tiny()
hparams.num_decoder_layers = 6
hparams.filter_size = 128
hparams.block_height = 8
hparams.block_width = 8
hparams.attention_type = "local1d_spatial"
hparams.mesh_shape = ""
hparams.layout = ""
return hparams
|
python
|
def mtf_image_transformer_tiny_spatial1d():
"""Small single parameters."""
hparams = mtf_image_transformer_tiny()
hparams.num_decoder_layers = 6
hparams.filter_size = 128
hparams.block_height = 8
hparams.block_width = 8
hparams.attention_type = "local1d_spatial"
hparams.mesh_shape = ""
hparams.layout = ""
return hparams
|
[
"def",
"mtf_image_transformer_tiny_spatial1d",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_tiny",
"(",
")",
"hparams",
".",
"num_decoder_layers",
"=",
"6",
"hparams",
".",
"filter_size",
"=",
"128",
"hparams",
".",
"block_height",
"=",
"8",
"hparams",
".",
"block_width",
"=",
"8",
"hparams",
".",
"attention_type",
"=",
"\"local1d_spatial\"",
"hparams",
".",
"mesh_shape",
"=",
"\"\"",
"hparams",
".",
"layout",
"=",
"\"\"",
"return",
"hparams"
] |
Small single parameters.
|
[
"Small",
"single",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L465-L475
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_base_cifar
|
def mtf_image_transformer_base_cifar():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base()
hparams.mesh_shape = "batch:8"
hparams.layout = "batch:batch"
hparams.learning_rate_decay_steps = 13600 # one epoch
hparams.batch_size = 32
hparams.num_heads = 4
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.d_ff = 2048
hparams.learning_rate = 0.5
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
|
python
|
def mtf_image_transformer_base_cifar():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base()
hparams.mesh_shape = "batch:8"
hparams.layout = "batch:batch"
hparams.learning_rate_decay_steps = 13600 # one epoch
hparams.batch_size = 32
hparams.num_heads = 4
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.d_ff = 2048
hparams.learning_rate = 0.5
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
|
[
"def",
"mtf_image_transformer_base_cifar",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_base",
"(",
")",
"hparams",
".",
"mesh_shape",
"=",
"\"batch:8\"",
"hparams",
".",
"layout",
"=",
"\"batch:batch\"",
"hparams",
".",
"learning_rate_decay_steps",
"=",
"13600",
"# one epoch",
"hparams",
".",
"batch_size",
"=",
"32",
"hparams",
".",
"num_heads",
"=",
"4",
"hparams",
".",
"num_decoder_layers",
"=",
"12",
"hparams",
".",
"block_length",
"=",
"256",
"hparams",
".",
"hidden_size",
"=",
"512",
"hparams",
".",
"d_ff",
"=",
"2048",
"hparams",
".",
"learning_rate",
"=",
"0.5",
"hparams",
".",
"layer_preprocess_sequence",
"=",
"\"none\"",
"hparams",
".",
"layer_postprocess_sequence",
"=",
"\"dan\"",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.3",
"hparams",
".",
"unconditional",
"=",
"True",
"return",
"hparams"
] |
Data parallel CIFAR parameters.
|
[
"Data",
"parallel",
"CIFAR",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L493-L510
|
train
|
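The "layer_preprocess_sequence"/"layer_postprocess_sequence" strings follow tensor2tensor's one-letter op codes, applied left to right: "d" = dropout, "a" = add the residual, "n" = layer norm, with "none" meaning no ops at all. So preprocess "none" plus postprocess "dan" is the classic post-norm residual block. A sketch of that convention (simplified from, not identical to, the library's layer_prepostprocess):

def apply_sequence(x, previous_value, sequence, dropout_fn, norm_fn):
    # 'd' = dropout, 'a' = add residual, 'n' = layer norm; 'none' = identity.
    if sequence == "none":
        return x
    for op in sequence:
        if op == "d":
            x = dropout_fn(x)
        elif op == "a":
            x = x + previous_value
        elif op == "n":
            x = norm_fn(x)
    return x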
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_cifar_4x
|
def mtf_image_transformer_cifar_4x():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base_cifar()
hparams.mesh_shape = "batch:32"
hparams.layout = "batch:batch"
hparams.batch_size = 128
return hparams
|
python
|
def mtf_image_transformer_cifar_4x():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base_cifar()
hparams.mesh_shape = "batch:32"
hparams.layout = "batch:batch"
hparams.batch_size = 128
return hparams
|
[
"def",
"mtf_image_transformer_cifar_4x",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_base_cifar",
"(",
")",
"hparams",
".",
"mesh_shape",
"=",
"\"batch:32\"",
"hparams",
".",
"layout",
"=",
"\"batch:batch\"",
"hparams",
".",
"batch_size",
"=",
"128",
"return",
"hparams"
] |
Data parallel CIFAR parameters.
|
[
"Data",
"parallel",
"CIFAR",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L514-L520
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_cifar_mp_4x
|
def mtf_image_transformer_cifar_mp_4x():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base_cifar()
hparams.mesh_shape = "model:4;batch:8"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 32
hparams.num_heads = 8
hparams.d_ff = 8192
return hparams
|
python
|
def mtf_image_transformer_cifar_mp_4x():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base_cifar()
hparams.mesh_shape = "model:4;batch:8"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 32
hparams.num_heads = 8
hparams.d_ff = 8192
return hparams
|
[
"def",
"mtf_image_transformer_cifar_mp_4x",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_base_cifar",
"(",
")",
"hparams",
".",
"mesh_shape",
"=",
"\"model:4;batch:8\"",
"hparams",
".",
"layout",
"=",
"\"batch:batch;d_ff:model;heads:model\"",
"hparams",
".",
"batch_size",
"=",
"32",
"hparams",
".",
"num_heads",
"=",
"8",
"hparams",
".",
"d_ff",
"=",
"8192",
"return",
"hparams"
] |
Model parallel CIFAR parameters.
|
[
"Data",
"parallel",
"CIFAR",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L524-L532
|
train
|
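In the model-parallel config above, the layout entries "d_ff:model" and "heads:model" split the feed-forward hidden dimension and the attention heads across the mesh's "model" axis, while "batch:batch" shards examples across the "batch" axis. A back-of-the-envelope sketch of the per-core shard sizes implied by mesh_shape "model:4;batch:8":

mesh = {"model": 4, "batch": 8}
dims = {"d_ff": 8192, "heads": 8, "batch": 32}
layout = {"d_ff": "model", "heads": "model", "batch": "batch"}

for dim, size in dims.items():
    shard = size // mesh[layout[dim]]
    print("%s: %d total -> %d per core along '%s'"
          % (dim, size, shard, layout[dim]))
# d_ff: 8192 -> 2048, heads: 8 -> 2, batch: 32 -> 4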
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_base_imagenet
|
def mtf_image_transformer_base_imagenet():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base_cifar()
hparams.mesh_shape = "batch:32"
hparams.layout = "batch:batch"
hparams.batch_size = 128
hparams.d_ff = 2048
hparams.hidden_size = 512
hparams.num_decoder_layers = 12
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 31250
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
hparams.unconditional = True
return hparams
|
python
|
def mtf_image_transformer_base_imagenet():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base_cifar()
hparams.mesh_shape = "batch:32"
hparams.layout = "batch:batch"
hparams.batch_size = 128
hparams.d_ff = 2048
hparams.hidden_size = 512
hparams.num_decoder_layers = 12
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 31250
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
hparams.unconditional = True
return hparams
|
[
"def",
"mtf_image_transformer_base_imagenet",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_base_cifar",
"(",
")",
"hparams",
".",
"mesh_shape",
"=",
"\"batch:32\"",
"hparams",
".",
"layout",
"=",
"\"batch:batch\"",
"hparams",
".",
"batch_size",
"=",
"128",
"hparams",
".",
"d_ff",
"=",
"2048",
"hparams",
".",
"hidden_size",
"=",
"512",
"hparams",
".",
"num_decoder_layers",
"=",
"12",
"hparams",
".",
"learning_rate",
"=",
"0.5",
"hparams",
".",
"learning_rate_warmup_steps",
"=",
"31250",
"hparams",
".",
"layer_preprocess_sequence",
"=",
"\"none\"",
"hparams",
".",
"layer_postprocess_sequence",
"=",
"\"dan\"",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.1",
"hparams",
".",
"unconditional",
"=",
"True",
"return",
"hparams"
] |
Data parallel ImageNet parameters.
|
[
"Data",
"parallel",
"CIFAR",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L536-L551
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_base_imagenet_mp
|
def mtf_image_transformer_base_imagenet_mp():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet()
hparams.mesh_shape = "model:4;batch:8"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 32
hparams.num_heads = 8
hparams.d_ff = 8192
hparams.learning_rate_warmup_steps = 31250
hparams.unconditional = True
return hparams
|
python
|
def mtf_image_transformer_base_imagenet_mp():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet()
hparams.mesh_shape = "model:4;batch:8"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 32
hparams.num_heads = 8
hparams.d_ff = 8192
hparams.learning_rate_warmup_steps = 31250
hparams.unconditional = True
return hparams
|
[
"def",
"mtf_image_transformer_base_imagenet_mp",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_base_imagenet",
"(",
")",
"hparams",
".",
"mesh_shape",
"=",
"\"model:4;batch:8\"",
"hparams",
".",
"layout",
"=",
"\"batch:batch;d_ff:model;heads:model\"",
"hparams",
".",
"batch_size",
"=",
"32",
"hparams",
".",
"num_heads",
"=",
"8",
"hparams",
".",
"d_ff",
"=",
"8192",
"hparams",
".",
"learning_rate_warmup_steps",
"=",
"31250",
"hparams",
".",
"unconditional",
"=",
"True",
"return",
"hparams"
] |
Model parallel ImageNet parameters.
|
[
"Model",
"parallel",
"ImageNet",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L555-L565
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_base_imagenet_mp128
|
def mtf_image_transformer_base_imagenet_mp128():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet()
hparams.mesh_shape = "model:8;batch:4"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 8
hparams.img_len = 128
hparams.block_length = 128
hparams.num_heads = 8
hparams.num_decoder_layers = 4
hparams.d_ff = 4096
hparams.learning_rate_warmup_steps = 31250
hparams.unconditional = True
hparams.max_length = 256*256*3
return hparams
|
python
|
def mtf_image_transformer_base_imagenet_mp128():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet()
hparams.mesh_shape = "model:8;batch:4"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 8
hparams.img_len = 128
hparams.block_length = 128
hparams.num_heads = 8
hparams.num_decoder_layers = 4
hparams.d_ff = 4096
hparams.learning_rate_warmup_steps = 31250
hparams.unconditional = True
hparams.max_length = 256*256*3
return hparams
|
[
"def",
"mtf_image_transformer_base_imagenet_mp128",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_base_imagenet",
"(",
")",
"hparams",
".",
"mesh_shape",
"=",
"\"model:8;batch:4\"",
"hparams",
".",
"layout",
"=",
"\"batch:batch;d_ff:model;heads:model\"",
"hparams",
".",
"batch_size",
"=",
"8",
"hparams",
".",
"img_len",
"=",
"128",
"hparams",
".",
"block_length",
"=",
"128",
"hparams",
".",
"num_heads",
"=",
"8",
"hparams",
".",
"num_decoder_layers",
"=",
"4",
"hparams",
".",
"d_ff",
"=",
"4096",
"hparams",
".",
"learning_rate_warmup_steps",
"=",
"31250",
"hparams",
".",
"unconditional",
"=",
"True",
"hparams",
".",
"max_length",
"=",
"256",
"*",
"256",
"*",
"3",
"return",
"hparams"
] |
Model parallel ImageNet parameters.
|
[
"Model",
"parallel",
"ImageNet",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L569-L583
|
train
|
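For these pixel-by-pixel decoders the sequence length is img_len * img_len * num_channels, which is why the 128x128 config raises max_length well past the 32x32 default. A quick sanity check of the arithmetic:

img_len, num_channels = 128, 3
seq_len = img_len * img_len * num_channels  # 49152 positions per image
max_length = 256 * 256 * 3                  # 196608, the configured cap
assert seq_len <= max_length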
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_base_imagenet_mp_sp
|
def mtf_image_transformer_base_imagenet_mp_sp():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet_mp128()
hparams.mesh_shape = "model:8;batch:4"
hparams.layout = "batch:batch;d_ff:model;num_wblocks:model"
hparams.batch_size = 8
hparams.img_len = 128
hparams.block_length = 128
hparams.attention_type = "local1d_spatial"
return hparams
|
python
|
def mtf_image_transformer_base_imagenet_mp_sp():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet_mp128()
hparams.mesh_shape = "model:8;batch:4"
hparams.layout = "batch:batch;d_ff:model;num_wblocks:model"
hparams.batch_size = 8
hparams.img_len = 128
hparams.block_length = 128
hparams.attention_type = "local1d_spatial"
return hparams
|
[
"def",
"mtf_image_transformer_base_imagenet_mp_sp",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_base_imagenet_mp128",
"(",
")",
"hparams",
".",
"mesh_shape",
"=",
"\"model:8;batch:4\"",
"hparams",
".",
"layout",
"=",
"\"batch:batch;d_ff:model;num_wblocks:model\"",
"hparams",
".",
"batch_size",
"=",
"8",
"hparams",
".",
"img_len",
"=",
"128",
"hparams",
".",
"block_length",
"=",
"128",
"hparams",
".",
"attention_type",
"=",
"\"local1d_spatial\"",
"return",
"hparams"
] |
Model parallel ImageNet parameters.
|
[
"Model",
"parallel",
"ImageNet",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L587-L596
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_image_transformer.py
|
mtf_image_transformer_base_imagenet_mp64
|
def mtf_image_transformer_base_imagenet_mp64():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet()
hparams.mesh_shape = "model:8;batch:4"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 8
hparams.img_len = 64
hparams.num_decoder_layers = 8
return hparams
|
python
|
def mtf_image_transformer_base_imagenet_mp64():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet()
hparams.mesh_shape = "model:8;batch:4"
hparams.layout = "batch:batch;d_ff:model;heads:model"
hparams.batch_size = 8
hparams.img_len = 64
hparams.num_decoder_layers = 8
return hparams
|
[
"def",
"mtf_image_transformer_base_imagenet_mp64",
"(",
")",
":",
"hparams",
"=",
"mtf_image_transformer_base_imagenet",
"(",
")",
"hparams",
".",
"mesh_shape",
"=",
"\"model:8;batch:4\"",
"hparams",
".",
"layout",
"=",
"\"batch:batch;d_ff:model;heads:model\"",
"hparams",
".",
"batch_size",
"=",
"8",
"hparams",
".",
"img_len",
"=",
"64",
"hparams",
".",
"num_decoder_layers",
"=",
"8",
"return",
"hparams"
] |
Model parallel ImageNet parameters.
|
[
"Model",
"parallel",
"ImageNet",
"parameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L600-L608
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/reversible_layers.py
|
create_degrees
|
def create_degrees(input_dim,
hidden_dims,
input_order='left-to-right',
hidden_order='left-to-right'):
"""Returns a list of degree vectors, one for each input and hidden layer.
A unit with degree d can only receive input from units with degree < d. Output
units always have the same degree as their associated input unit.
Args:
input_dim: Number of inputs.
hidden_dims: list with the number of hidden units per layer. It does not
include the output layer. Each hidden layer size must be at least the input
size (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
"""
if (isinstance(input_order, str) and
input_order not in ('random', 'left-to-right', 'right-to-left')):
raise ValueError('Input order is not valid.')
if hidden_order not in ('random', 'left-to-right'):
raise ValueError('Hidden order is not valid.')
degrees = []
if isinstance(input_order, str):
input_degrees = np.arange(1, input_dim + 1)
if input_order == 'right-to-left':
input_degrees = np.flip(input_degrees, 0)
elif input_order == 'random':
np.random.shuffle(input_degrees)
else:
input_order = np.array(input_order)
if np.any(np.sort(input_order) != np.arange(1, input_dim + 1)):
raise ValueError('invalid input order')
input_degrees = input_order
degrees.append(input_degrees)
for units in hidden_dims:
if hidden_order == 'random':
min_prev_degree = min(np.min(degrees[-1]), input_dim - 1)
hidden_degrees = np.random.randint(
low=min_prev_degree, high=input_dim, size=units)
elif hidden_order == 'left-to-right':
hidden_degrees = (np.arange(units) % max(1, input_dim - 1) +
min(1, input_dim - 1))
degrees.append(hidden_degrees)
return degrees
|
python
|
def create_degrees(input_dim,
hidden_dims,
input_order='left-to-right',
hidden_order='left-to-right'):
"""Returns a list of degree vectors, one for each input and hidden layer.
A unit with degree d can only receive input from units with degree < d. Output
units always have the same degree as their associated input unit.
Args:
input_dim: Number of inputs.
hidden_dims: list with the number of hidden units per layer. It does not
include the output layer. Each hidden layer size must be at least the input
size (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
"""
if (isinstance(input_order, str) and
input_order not in ('random', 'left-to-right', 'right-to-left')):
raise ValueError('Input order is not valid.')
if hidden_order not in ('random', 'left-to-right'):
raise ValueError('Hidden order is not valid.')
degrees = []
if isinstance(input_order, str):
input_degrees = np.arange(1, input_dim + 1)
if input_order == 'right-to-left':
input_degrees = np.flip(input_degrees, 0)
elif input_order == 'random':
np.random.shuffle(input_degrees)
else:
input_order = np.array(input_order)
if np.any(np.sort(input_order) != np.arange(1, input_dim + 1)):
raise ValueError('invalid input order')
input_degrees = input_order
degrees.append(input_degrees)
for units in hidden_dims:
if hidden_order == 'random':
min_prev_degree = min(np.min(degrees[-1]), input_dim - 1)
hidden_degrees = np.random.randint(
low=min_prev_degree, high=input_dim, size=units)
elif hidden_order == 'left-to-right':
hidden_degrees = (np.arange(units) % max(1, input_dim - 1) +
min(1, input_dim - 1))
degrees.append(hidden_degrees)
return degrees
|
[
"def",
"create_degrees",
"(",
"input_dim",
",",
"hidden_dims",
",",
"input_order",
"=",
"'left-to-right'",
",",
"hidden_order",
"=",
"'left-to-right'",
")",
":",
"if",
"(",
"isinstance",
"(",
"input_order",
",",
"str",
")",
"and",
"input_order",
"not",
"in",
"(",
"'random'",
",",
"'left-to-right'",
",",
"'right-to-left'",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Input order is not valid.'",
")",
"if",
"hidden_order",
"not",
"in",
"(",
"'random'",
",",
"'left-to-right'",
")",
":",
"raise",
"ValueError",
"(",
"'Hidden order is not valid.'",
")",
"degrees",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"input_order",
",",
"str",
")",
":",
"input_degrees",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"input_dim",
"+",
"1",
")",
"if",
"input_order",
"==",
"'right-to-left'",
":",
"input_degrees",
"=",
"np",
".",
"flip",
"(",
"input_degrees",
",",
"0",
")",
"elif",
"input_order",
"==",
"'random'",
":",
"np",
".",
"random",
".",
"shuffle",
"(",
"input_degrees",
")",
"else",
":",
"input_order",
"=",
"np",
".",
"array",
"(",
"input_order",
")",
"if",
"np",
".",
"all",
"(",
"np",
".",
"sort",
"(",
"input_order",
")",
"!=",
"np",
".",
"arange",
"(",
"1",
",",
"input_dim",
"+",
"1",
")",
")",
":",
"raise",
"ValueError",
"(",
"'invalid input order'",
")",
"input_degrees",
"=",
"input_order",
"degrees",
".",
"append",
"(",
"input_degrees",
")",
"for",
"units",
"in",
"hidden_dims",
":",
"if",
"hidden_order",
"==",
"'random'",
":",
"min_prev_degree",
"=",
"min",
"(",
"np",
".",
"min",
"(",
"degrees",
"[",
"-",
"1",
"]",
")",
",",
"input_dim",
"-",
"1",
")",
"hidden_degrees",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"low",
"=",
"min_prev_degree",
",",
"high",
"=",
"input_dim",
",",
"size",
"=",
"units",
")",
"elif",
"hidden_order",
"==",
"'left-to-right'",
":",
"hidden_degrees",
"=",
"(",
"np",
".",
"arange",
"(",
"units",
")",
"%",
"max",
"(",
"1",
",",
"input_dim",
"-",
"1",
")",
"+",
"min",
"(",
"1",
",",
"input_dim",
"-",
"1",
")",
")",
"degrees",
".",
"append",
"(",
"hidden_degrees",
")",
"return",
"degrees"
] |
Returns a list of degree vectors, one for each input and hidden layer.
A unit with degree d can only receive input from units with degree < d. Output
units always have the same degree as their associated input unit.
Args:
input_dim: Number of inputs.
hidden_dims: list with the number of hidden units per layer. It does not
include the output layer. Each hidden layer size must be at least the input
size (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
|
[
"Returns",
"a",
"list",
"of",
"degree",
"vectors",
"one",
"for",
"each",
"input",
"and",
"hidden",
"layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/reversible_layers.py#L218-L269
|
train
|
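To make the degree rules in create_degrees concrete: with the 'left-to-right' orders, input degrees are simply 1..input_dim, and hidden degrees cycle through 1..input_dim-1, so every degree gets roughly the same number of units and no hidden unit is ever assigned the highest degree. A tiny worked example with assumed sizes:

import numpy as np

input_dim, units = 3, 5
input_degrees = np.arange(1, input_dim + 1)             # [1 2 3]
hidden_degrees = (np.arange(units) % max(1, input_dim - 1)
                  + min(1, input_dim - 1))              # [1 2 1 2 1]
# A hidden unit of degree d may see only inputs of degree <= d, so the cap
# at input_dim - 1 guarantees nothing downstream can read the last input.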
tensorflow/tensor2tensor
|
tensor2tensor/layers/reversible_layers.py
|
create_masks
|
def create_masks(input_dim,
hidden_dims,
input_order='left-to-right',
hidden_order='left-to-right'):
"""Returns a list of binary mask matrices respecting autoregressive ordering.
Args:
input_dim: Number of inputs.
hidden_dims: list with the number of hidden units per layer. It does not
include the output layer; the output layer's size is always set to
input_dim downstream. Each hidden layer size must be at least the input
size (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
"""
degrees = create_degrees(input_dim, hidden_dims, input_order, hidden_order)
masks = []
# Create input-to-hidden and hidden-to-hidden masks.
for input_degrees, output_degrees in zip(degrees[:-1], degrees[1:]):
mask = tf.cast(input_degrees[:, np.newaxis] <= output_degrees, tf.float32)
masks.append(mask)
# Create hidden-to-output mask.
mask = tf.cast(degrees[-1][:, np.newaxis] < degrees[0], tf.float32)
masks.append(mask)
return masks
|
python
|
def create_masks(input_dim,
hidden_dims,
input_order='left-to-right',
hidden_order='left-to-right'):
"""Returns a list of binary mask matrices respecting autoregressive ordering.
Args:
input_dim: Number of inputs.
hidden_dims: list with the number of hidden units per layer. It does not
include the output layer; the output layer's size is always set to
input_dim downstream. Each hidden layer size must be at least the input
size (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
"""
degrees = create_degrees(input_dim, hidden_dims, input_order, hidden_order)
masks = []
# Create input-to-hidden and hidden-to-hidden masks.
for input_degrees, output_degrees in zip(degrees[:-1], degrees[1:]):
mask = tf.cast(input_degrees[:, np.newaxis] <= output_degrees, tf.float32)
masks.append(mask)
# Create hidden-to-output mask.
mask = tf.cast(degrees[-1][:, np.newaxis] < degrees[0], tf.float32)
masks.append(mask)
return masks
|
[
"def",
"create_masks",
"(",
"input_dim",
",",
"hidden_dims",
",",
"input_order",
"=",
"'left-to-right'",
",",
"hidden_order",
"=",
"'left-to-right'",
")",
":",
"degrees",
"=",
"create_degrees",
"(",
"input_dim",
",",
"hidden_dims",
",",
"input_order",
",",
"hidden_order",
")",
"masks",
"=",
"[",
"]",
"# Create input-to-hidden and hidden-to-hidden masks.",
"for",
"input_degrees",
",",
"output_degrees",
"in",
"zip",
"(",
"degrees",
"[",
":",
"-",
"1",
"]",
",",
"degrees",
"[",
"1",
":",
"]",
")",
":",
"mask",
"=",
"tf",
".",
"cast",
"(",
"input_degrees",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"<=",
"output_degrees",
",",
"tf",
".",
"float32",
")",
"masks",
".",
"append",
"(",
"mask",
")",
"# Create hidden-to-output mask.",
"mask",
"=",
"tf",
".",
"cast",
"(",
"degrees",
"[",
"-",
"1",
"]",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"<",
"degrees",
"[",
"0",
"]",
",",
"tf",
".",
"float32",
")",
"masks",
".",
"append",
"(",
"mask",
")",
"return",
"masks"
] |
Returns a list of binary mask matrices respecting autoregressive ordering.
Args:
input_dim: Number of inputs.
hidden_dims: list with the number of hidden units per layer. It does not
include the output layer; the output layer's size is always set to
input_dim downstream. Each hidden layer size must be at least the input
size (otherwise autoregressivity is not possible).
input_order: Order of degrees to the input units: 'random', 'left-to-right',
'right-to-left', or an array of an explicit order. For example,
'left-to-right' builds an autoregressive model
p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
hidden_order: Order of degrees to the hidden units: 'random',
'left-to-right'. If 'left-to-right', hidden units are allocated equally
(up to a remainder term) to each degree.
|
[
"Returns",
"a",
"list",
"of",
"binary",
"mask",
"matrices",
"respecting",
"autoregressive",
"ordering",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/reversible_layers.py#L272-L302
|
train
|
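The two mask shapes in create_masks compose into an autoregressive connectivity pattern: input-to-hidden uses <= on degrees, hidden-to-output uses strict <, so output i can be reached only from inputs of strictly smaller degree. A numpy check on the toy degrees from the previous sketch:

import numpy as np

in_deg = np.array([1, 2, 3])
hid_deg = np.array([1, 2, 1, 2, 1])
m1 = (in_deg[:, None] <= hid_deg).astype(float)  # input -> hidden mask
m2 = (hid_deg[:, None] < in_deg).astype(float)   # hidden -> output mask
conn = m1 @ m2  # conn[j, i] counts paths from input j to output i
# Left-to-right ordering: input j may reach output i only when j < i,
# i.e. the connectivity matrix is strictly upper triangular.
assert np.all(np.tril(conn) == 0)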
tensorflow/tensor2tensor
|
tensor2tensor/layers/reversible_layers.py
|
sinkhorn
|
def sinkhorn(inputs, n_iters=20):
"""Performs incomplete Sinkhorn normalization to inputs.
By a theorem of Sinkhorn and Knopp [1], a sufficiently well-behaved matrix
with positive entries can be turned into a doubly-stochastic matrix
(i.e. its rows and columns add up to one) via successive row and column
normalization.
- To ensure positivity, the effective input to sinkhorn has to be
  exp(inputs) (elementwise).
- However, for stability, sinkhorn works in the log-space. It is only at
  return time that entries are exponentiated.
Code is adapted from Mena et al. [2].
[1] Richard Sinkhorn and Paul Knopp. Concerning nonnegative matrices and
doubly stochastic matrices. Pacific Journal of Mathematics, 1967.
[2] Gonzalo Mena, David Belanger, Scott Linderman, Jasper Snoek.
Learning latent permutations with Gumbel-Sinkhorn networks. International
Conference on Learning Representations, 2018.
Args:
inputs: A `Tensor` with shape `[..., vocab_size, vocab_size]`.
n_iters: Number of sinkhorn iterations (in practice, as few as 20
iterations are needed to achieve decent convergence for `vocab_size` ~100).
Returns:
outputs: A `Tensor` of close-to-doubly-stochastic matrices with shape
`[-1, vocab_size, vocab_size]`.
"""
vocab_size = tf.shape(inputs)[-1]
log_alpha = tf.reshape(inputs, [-1, vocab_size, vocab_size])
for _ in range(n_iters):
log_alpha -= tf.reshape(tf.reduce_logsumexp(log_alpha, axis=2),
[-1, vocab_size, 1])
log_alpha -= tf.reshape(tf.reduce_logsumexp(log_alpha, axis=1),
[-1, 1, vocab_size])
outputs = tf.exp(log_alpha)
return outputs
|
python
|
def sinkhorn(inputs, n_iters=20):
"""Performs incomplete Sinkhorn normalization to inputs.
By a theorem of Sinkhorn and Knopp [1], a sufficiently well-behaved matrix
with positive entries can be turned into a doubly-stochastic matrix
(i.e. its rows and columns add up to one) via successive row and column
normalization.
- To ensure positivity, the effective input to sinkhorn has to be
  exp(inputs) (elementwise).
- However, for stability, sinkhorn works in the log-space. It is only at
  return time that entries are exponentiated.
Code is adapted from Mena et al. [2].
[1] Richard Sinkhorn and Paul Knopp. Concerning nonnegative matrices and
doubly stochastic matrices. Pacific Journal of Mathematics, 1967.
[2] Gonzalo Mena, David Belanger, Scott Linderman, Jasper Snoek.
Learning latent permutations with Gumbel-Sinkhorn networks. International
Conference on Learning Representations, 2018.
Args:
inputs: A `Tensor` with shape `[..., vocab_size, vocab_size]`.
n_iters: Number of sinkhorn iterations (in practice, as few as 20
iterations are needed to achieve decent convergence for `vocab_size` ~100).
Returns:
outputs: A `Tensor` of close-to-doubly-stochastic matrices with shape
`[-1, vocab_size, vocab_size]`.
"""
vocab_size = tf.shape(inputs)[-1]
log_alpha = tf.reshape(inputs, [-1, vocab_size, vocab_size])
for _ in range(n_iters):
log_alpha -= tf.reshape(tf.reduce_logsumexp(log_alpha, axis=2),
[-1, vocab_size, 1])
log_alpha -= tf.reshape(tf.reduce_logsumexp(log_alpha, axis=1),
[-1, 1, vocab_size])
outputs = tf.exp(log_alpha)
return outputs
|
[
"def",
"sinkhorn",
"(",
"inputs",
",",
"n_iters",
"=",
"20",
")",
":",
"vocab_size",
"=",
"tf",
".",
"shape",
"(",
"inputs",
")",
"[",
"-",
"1",
"]",
"log_alpha",
"=",
"tf",
".",
"reshape",
"(",
"inputs",
",",
"[",
"-",
"1",
",",
"vocab_size",
",",
"vocab_size",
"]",
")",
"for",
"_",
"in",
"range",
"(",
"n_iters",
")",
":",
"log_alpha",
"-=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"reduce_logsumexp",
"(",
"log_alpha",
",",
"axis",
"=",
"2",
")",
",",
"[",
"-",
"1",
",",
"vocab_size",
",",
"1",
"]",
")",
"log_alpha",
"-=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"reduce_logsumexp",
"(",
"log_alpha",
",",
"axis",
"=",
"1",
")",
",",
"[",
"-",
"1",
",",
"1",
",",
"vocab_size",
"]",
")",
"outputs",
"=",
"tf",
".",
"exp",
"(",
"log_alpha",
")",
"return",
"outputs"
] |
Performs incomplete Sinkhorn normalization to inputs.
By a theorem of Sinkhorn and Knopp [1], a sufficiently well-behaved matrix
with positive entries can be turned into a doubly-stochastic matrix
(i.e. its rows and columns add up to one) via successive row and column
normalization.
- To ensure positivity, the effective input to sinkhorn has to be
  exp(inputs) (elementwise).
- However, for stability, sinkhorn works in the log-space. It is only at
  return time that entries are exponentiated.
Code is adapted from Mena et al. [2].
[1] Richard Sinkhorn and Paul Knopp. Concerning nonnegative matrices and
doubly stochastic matrices. Pacific Journal of Mathematics, 1967.
[2] Gonzalo Mena, David Belanger, Scott Linderman, Jasper Snoek.
Learning latent permutations with Gumbel-Sinkhorn networks. International
Conference on Learning Representations, 2018.
Args:
inputs: A `Tensor` with shape `[..., vocab_size, vocab_size]`.
n_iters: Number of sinkhorn iterations (in practice, as few as 20
iterations are needed to achieve decent convergence for `vocab_size` ~100).
Returns:
outputs: A `Tensor` of close-to-doubly-stochastic matrices with shape
`[-1, vocab_size, vocab_size]`.
|
[
"Performs",
"incomplete",
"Sinkhorn",
"normalization",
"to",
"inputs",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/reversible_layers.py#L319-L358
|
train
|
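The loop body of sinkhorn is just alternating row and column normalization done in log-space (subtracting a logsumexp is dividing by a sum after exponentiation). A numpy re-derivation with a fixed seed, checking that the output's rows and columns sum close to one:

import numpy as np

def logsumexp(a, axis):
    m = a.max(axis=axis, keepdims=True)
    return m + np.log(np.exp(a - m).sum(axis=axis, keepdims=True))

def np_sinkhorn(logits, n_iters=20):
    log_alpha = logits.copy()
    for _ in range(n_iters):
        log_alpha -= logsumexp(log_alpha, axis=1)  # normalize rows
        log_alpha -= logsumexp(log_alpha, axis=0)  # normalize columns
    return np.exp(log_alpha)

p = np_sinkhorn(np.random.RandomState(0).randn(5, 5))
print(p.sum(axis=0))  # each column sums to 1 (exact: it was the last op)
print(p.sum(axis=1))  # each row sums to ~1 (approximate)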
tensorflow/tensor2tensor
|
tensor2tensor/layers/reversible_layers.py
|
TransformedRandomVariable
|
def TransformedRandomVariable(random_variable, # pylint: disable=invalid-name
reversible_layer,
name=None,
sample_shape=(),
value=None):
"""Random variable for f(x), where x ~ p(x) and f is reversible."""
return ed.RandomVariable(
distribution=TransformedDistribution(random_variable.distribution,
reversible_layer,
name=name),
sample_shape=sample_shape,
value=value)
|
python
|
def TransformedRandomVariable(random_variable, # pylint: disable=invalid-name
reversible_layer,
name=None,
sample_shape=(),
value=None):
"""Random variable for f(x), where x ~ p(x) and f is reversible."""
return ed.RandomVariable(
distribution=TransformedDistribution(random_variable.distribution,
reversible_layer,
name=name),
sample_shape=sample_shape,
value=value)
|
[
"def",
"TransformedRandomVariable",
"(",
"random_variable",
",",
"# pylint: disable=invalid-name",
"reversible_layer",
",",
"name",
"=",
"None",
",",
"sample_shape",
"=",
"(",
")",
",",
"value",
"=",
"None",
")",
":",
"return",
"ed",
".",
"RandomVariable",
"(",
"distribution",
"=",
"TransformedDistribution",
"(",
"random_variable",
".",
"distribution",
",",
"reversible_layer",
",",
"name",
"=",
"name",
")",
",",
"sample_shape",
"=",
"sample_shape",
",",
"value",
"=",
"value",
")"
] |
Random variable for f(x), where x ~ p(x) and f is reversible.
|
[
"Random",
"variable",
"for",
"f",
"(",
"x",
")",
"where",
"x",
"~",
"p",
"(",
"x",
")",
"and",
"f",
"is",
"reversible",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/reversible_layers.py#L447-L458
|
train
|
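The TransformedDistribution being wrapped here is the usual change-of-variables construction: if y = f(x) with x ~ p_X and f reversible, then

log p_Y(y) = log p_X(f^{-1}(y)) + log |det d f^{-1}(y) / dy|
           = log p_X(x) - log |det d f(x) / dx|,

which is why the reversible layers in this file pair an inverse pass with a log_det_jacobian term (as in ActNorm below): sampling runs f forward, while scoring a value runs f backward and adds the Jacobian correction.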
tensorflow/tensor2tensor
|
tensor2tensor/layers/reversible_layers.py
|
ActNorm.log_det_jacobian
|
def log_det_jacobian(self, inputs):
"""Returns log det | dx / dy | = num_events * sum log | scale |."""
# Number of events is the number of elements excluding the batch and
# channel dimensions.
num_events = tf.reduce_prod(tf.shape(inputs)[1:-1])
log_det_jacobian = num_events * tf.reduce_sum(self.log_scale)
return log_det_jacobian
|
python
|
def log_det_jacobian(self, inputs):
"""Returns log det | dx / dy | = num_events * sum log | scale |."""
# Number of events is the number of elements excluding the batch and
# channel dimensions.
num_events = tf.reduce_prod(tf.shape(inputs)[1:-1])
log_det_jacobian = num_events * tf.reduce_sum(self.log_scale)
return log_det_jacobian
|
[
"def",
"log_det_jacobian",
"(",
"self",
",",
"inputs",
")",
":",
"del",
"inputs",
"# unused",
"# Number of events is number of all elements excluding the batch and",
"# channel dimensions.",
"num_events",
"=",
"tf",
".",
"reduce_prod",
"(",
"tf",
".",
"shape",
"(",
"inputs",
")",
"[",
"1",
":",
"-",
"1",
"]",
")",
"log_det_jacobian",
"=",
"num_events",
"*",
"tf",
".",
"reduce_sum",
"(",
"self",
".",
"log_scale",
")",
"return",
"log_det_jacobian"
] |
Returns log det | dx / dy | = num_events * sum log | scale |.
|
[
"Returns",
"log",
"det",
"|",
"dx",
"/",
"dy",
"|",
"=",
"num_events",
"*",
"sum",
"log",
"|",
"scale",
"|",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/reversible_layers.py#L94-L101
|
train
|
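Why num_events * sum(log|scale|): an activation-norm layer scales each channel elementwise, so its Jacobian is diagonal and the log-determinant is the sum of log|scale| over every transformed element, i.e. (number of spatial positions) times (sum over channels). A numpy check with assumed toy shapes:

import numpy as np

x_shape = (1, 5, 7, 3)          # [batch, height, width, channels]
log_scale = np.random.randn(3)  # one log-scale per channel
num_events = 5 * 7              # everything except batch and channels
# Summing log|scale| over all transformed elements equals the shortcut.
full = np.broadcast_to(log_scale, x_shape[1:]).sum()
assert np.isclose(full, num_events * log_scale.sum())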
tensorflow/tensor2tensor
|
tensor2tensor/layers/vq_discrete.py
|
DiscreteBottleneck.slice_hidden
|
def slice_hidden(self, x):
"""Slice encoder hidden state into block_dim.
Args:
x: Encoder hidden state of shape [-1, hidden_size].
Returns:
Sliced states of shape [-1, num_blocks, block_dim].
"""
x_sliced = tf.reshape(
x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim])
return x_sliced
|
python
|
def slice_hidden(self, x):
"""Slice encoder hidden state into block_dim.
Args:
x: Encoder hidden state of shape [-1, hidden_size].
Returns:
Sliced states of shape [-1, num_blocks, block_dim].
"""
x_sliced = tf.reshape(
x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim])
return x_sliced
|
[
"def",
"slice_hidden",
"(",
"self",
",",
"x",
")",
":",
"x_sliced",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"shape",
"=",
"[",
"-",
"1",
",",
"self",
".",
"hparams",
".",
"num_blocks",
",",
"self",
".",
"hparams",
".",
"block_dim",
"]",
")",
"return",
"x_sliced"
] |
Slice encoder hidden state into block_dim.
Args:
x: Encoder hidden state of shape [-1, hidden_size].
Returns:
Sliced states of shape [-1, num_blocks, block_dim].
|
[
"Slice",
"encoder",
"hidden",
"state",
"into",
"block_dim",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/vq_discrete.py#L61-L72
|
train
|
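slice_hidden relies on hidden_size factoring exactly as num_blocks * block_dim; the row-major reshape then makes block b of a vector its contiguous columns [b*block_dim, (b+1)*block_dim). A small check with assumed sizes:

import numpy as np

hidden_size, num_blocks = 12, 3
block_dim = hidden_size // num_blocks          # must divide evenly: 4
x = np.random.randn(10, hidden_size)           # [-1, hidden_size]
x_sliced = x.reshape(-1, num_blocks, block_dim)
assert np.allclose(x_sliced[0, 1], x[0, block_dim:2 * block_dim])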
tensorflow/tensor2tensor
|
tensor2tensor/layers/vq_discrete.py
|
DiscreteBottleneck.nearest_neighbor
|
def nearest_neighbor(self, x, means):
"""Find the nearest element in means to elements in x.
Args:
x: Batch of encoder continuous latent states sliced/projected into
shape [-1, num_blocks, block_dim].
means: Embedding means of shape [num_blocks, block_v_size, block_dim].
Returns:
Tensor with the nearest element in means encoded in one-hot notation.
"""
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True)
scalar_prod = tf.matmul(
tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1]))
scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])
dist = x_norm_sq + tf.transpose(
means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod
if self.hparams.soft_em:
nearest_idx = tf.stack(
[
tf.multinomial(
-dist[:, i, :], num_samples=self.hparams.num_samples)
for i in range(self.hparams.num_blocks)
],
axis=1)
nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size)
nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)
else:
if self.hparams.random_top_k > 1:
_, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k)
nearest_idx = tf.gather(
top_k_idx,
tf.random_uniform(
[1],
minval=0,
maxval=self.hparams.random_top_k - 1,
dtype=tf.int32),
axis=-1)
else:
if self.hparams.use_scales:
dist /= tf.reshape(self.hparams.scales,
[1, 1, self.hparams.moe_num_experts])
nearest_idx = tf.argmax(-dist, axis=-1)
nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size)
return nearest_hot
|
python
|
def nearest_neighbor(self, x, means):
"""Find the nearest element in means to elements in x.
Args:
x: Batch of encoder continuous latent states sliced/projected into
shape [-1, num_blocks, block_dim].
means: Embedding means of shape [num_blocks, block_v_size, block_dim].
Returns:
Tensor with the nearest element in means encoded in one-hot notation.
"""
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True)
scalar_prod = tf.matmul(
tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1]))
scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])
dist = x_norm_sq + tf.transpose(
means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod
if self.hparams.soft_em:
nearest_idx = tf.stack(
[
tf.multinomial(
-dist[:, i, :], num_samples=self.hparams.num_samples)
for i in range(self.hparams.num_blocks)
],
axis=1)
nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size)
nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)
else:
if self.hparams.random_top_k > 1:
_, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k)
nearest_idx = tf.gather(
top_k_idx,
tf.random_uniform(
[1],
minval=0,
maxval=self.hparams.random_top_k - 1,
dtype=tf.int32),
axis=-1)
else:
if self.hparams.use_scales:
dist /= tf.reshape(self.hparams.scales,
[1, 1, self.hparams.moe_num_experts])
nearest_idx = tf.argmax(-dist, axis=-1)
nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size)
return nearest_hot
|
[
"def",
"nearest_neighbor",
"(",
"self",
",",
"x",
",",
"means",
")",
":",
"x_norm_sq",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"x",
")",
",",
"axis",
"=",
"-",
"1",
",",
"keep_dims",
"=",
"True",
")",
"means_norm_sq",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"means",
")",
",",
"axis",
"=",
"-",
"1",
",",
"keep_dims",
"=",
"True",
")",
"scalar_prod",
"=",
"tf",
".",
"matmul",
"(",
"tf",
".",
"transpose",
"(",
"x",
",",
"perm",
"=",
"[",
"1",
",",
"0",
",",
"2",
"]",
")",
",",
"tf",
".",
"transpose",
"(",
"means",
",",
"perm",
"=",
"[",
"0",
",",
"2",
",",
"1",
"]",
")",
")",
"scalar_prod",
"=",
"tf",
".",
"transpose",
"(",
"scalar_prod",
",",
"perm",
"=",
"[",
"1",
",",
"0",
",",
"2",
"]",
")",
"dist",
"=",
"x_norm_sq",
"+",
"tf",
".",
"transpose",
"(",
"means_norm_sq",
",",
"perm",
"=",
"[",
"2",
",",
"0",
",",
"1",
"]",
")",
"-",
"2",
"*",
"scalar_prod",
"if",
"self",
".",
"hparams",
".",
"soft_em",
":",
"nearest_idx",
"=",
"tf",
".",
"stack",
"(",
"[",
"tf",
".",
"multinomial",
"(",
"-",
"dist",
"[",
":",
",",
"i",
",",
":",
"]",
",",
"num_samples",
"=",
"self",
".",
"hparams",
".",
"num_samples",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"hparams",
".",
"num_blocks",
")",
"]",
",",
"axis",
"=",
"1",
")",
"nearest_hot",
"=",
"tf",
".",
"one_hot",
"(",
"nearest_idx",
",",
"depth",
"=",
"self",
".",
"hparams",
".",
"block_v_size",
")",
"nearest_hot",
"=",
"tf",
".",
"reduce_mean",
"(",
"nearest_hot",
",",
"axis",
"=",
"-",
"2",
")",
"else",
":",
"if",
"self",
".",
"hparams",
".",
"random_top_k",
">",
"1",
":",
"_",
",",
"top_k_idx",
"=",
"tf",
".",
"nn",
".",
"top_k",
"(",
"-",
"dist",
",",
"k",
"=",
"self",
".",
"hparams",
".",
"random_top_k",
")",
"nearest_idx",
"=",
"tf",
".",
"gather",
"(",
"top_k_idx",
",",
"tf",
".",
"random_uniform",
"(",
"[",
"1",
"]",
",",
"minval",
"=",
"0",
",",
"maxval",
"=",
"self",
".",
"hparams",
".",
"random_top_k",
"-",
"1",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"axis",
"=",
"-",
"1",
")",
"else",
":",
"if",
"self",
".",
"hparams",
".",
"use_scales",
":",
"dist",
"/=",
"tf",
".",
"reshape",
"(",
"self",
".",
"hparams",
".",
"scales",
",",
"[",
"1",
",",
"1",
",",
"self",
".",
"hparams",
".",
"moe_num_experts",
"]",
")",
"nearest_idx",
"=",
"tf",
".",
"argmax",
"(",
"-",
"dist",
",",
"axis",
"=",
"-",
"1",
")",
"nearest_hot",
"=",
"tf",
".",
"one_hot",
"(",
"nearest_idx",
",",
"self",
".",
"hparams",
".",
"block_v_size",
")",
"return",
"nearest_hot"
] |
Find the nearest element in means to elements in x.
Args:
x: Batch of encoder continuous latent states sliced/projected into
shape [-1, num_blocks, block_dim].
means: Embedding means of shape [num_blocks, block_v_size, block_dim].
Returns:
Tensor with the nearest element in means encoded in one-hot notation.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/vq_discrete.py#L74-L120
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/vq_discrete.py
|
DiscreteBottleneck.embedding_lookup
|
def embedding_lookup(self, x, means):
"""Compute nearest neighbors and loss for training the embeddings.
Args:
      x: Batch of encoder continuous latent states sliced/projected into
        shape [-1, num_blocks, block_dim].
      means: Embedding means.
    Returns:
      The nearest neighbor in one-hot form, the nearest neighbor itself,
      the codebook (embedding training) loss, and the commitment loss.
"""
x_means_hot = self.nearest_neighbor(x, means)
x_means_hot_flat = tf.reshape(
x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size])
x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)
x_means = tf.transpose(x_means, [1, 0, 2])
q_loss = tf.reduce_mean(
tf.squared_difference(tf.stop_gradient(x), x_means))
e_loss = tf.reduce_mean(
tf.squared_difference(x, tf.stop_gradient(x_means)))
return x_means_hot, x_means, q_loss, e_loss
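
The one-hot matmul in embedding_lookup is a differentiable gather from the per-block codebooks. A toy NumPy sketch of that equivalence, with shapes assumed for illustration:

import numpy as np

idx = np.array([[1, 3], [4, 0]])             # nearest code per (batch, block)
hot = np.eye(5)[idx]                         # [2, 2, 5] one-hot codes
means = np.random.randn(2, 5, 3)             # [num_blocks, block_v_size, block_dim]
x_means = np.einsum("bnv,nvd->bnd", hot, means)
# Same result as directly indexing each block's codebook:
assert np.allclose(x_means, means[np.arange(2)[None, :], idx])

In the TF code, q_loss (gradient blocked at x) trains the means toward the encoder outputs, while e_loss (gradient blocked at x_means) is the commitment term pulling the encoder toward its assigned means.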
|
python
|
Compute nearest neighbors and loss for training the embeddings.
Args:
x: Batch of encoder continuous latent states sliced/projected into
shape [-1, num_blocks, block_dim].
means: Embedding means.
Returns:
The nearest neighbor in one-hot form, the nearest neighbor itself,
the codebook (embedding training) loss, and the commitment loss.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/vq_discrete.py#L122-L145
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/vq_discrete.py
|
DiscreteBottleneck.int_to_bit
|
def int_to_bit(self, x_int, num_bits, base=2):
"""Turn x_int representing numbers into a bitwise (lower-endian) tensor.
Args:
x_int: Tensor containing integer to be converted into base
notation.
num_bits: Number of bits in the representation.
base: Base of the representation.
Returns:
Corresponding number expressed in base.
"""
x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1))
# pylint: disable=g-complex-comprehension
x_labels = [
tf.floormod(
tf.floordiv(tf.to_int32(x_l),
tf.to_int32(base)**i), tf.to_int32(base))
for i in range(num_bits)]
res = tf.concat(x_labels, axis=-1)
return tf.to_float(res)
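
A plain NumPy rendering of the same little-endian digit expansion, as a sketch rather than the library API:

import numpy as np

def int_to_bits(x_int, num_bits, base=2):
    # Digit i is (x // base**i) % base, least-significant digit first.
    return ((np.asarray(x_int)[..., None] // base ** np.arange(num_bits)) % base).astype(np.float32)

print(int_to_bits(6, 4))  # [0. 1. 1. 0.]  since 6 = 0*1 + 1*2 + 1*4 + 0*8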
|
python
|
Turn x_int representing numbers into a bitwise (little-endian) tensor.
Args:
x_int: Tensor containing integers to be converted into the given base.
num_bits: Number of bits in the representation.
base: Base of the representation.
Returns:
Corresponding numbers expressed in the given base, as a float tensor.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/vq_discrete.py#L167-L187
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/vq_discrete.py
|
DiscreteBottleneck.embed
|
def embed(self, x):
"""Embedding function that takes discrete latent and returns embedding.
Args:
x: Input to the discretization bottleneck.
Returns:
Continuous embedding to be passed on to the decoder.
"""
shape_x = common_layers.shape_list(x)
x_flat = tf.reshape(x, [-1, 1])
c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2)
shape = common_layers.shape_list(c)
new_shape = shape
new_shape.append(self.hparams.num_blocks)
new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks))
c = tf.to_int32(tf.reshape(c, shape=new_shape))
h1_shape = shape_x
h1_shape.append(self.hparams.hidden_size)
h1 = tf.zeros(dtype=tf.float32, shape=h1_shape)
c_int = self.bit_to_int(
c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2)
c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1)
c_hot_flat = tf.reshape(
c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size])
h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means)
h1 = tf.transpose(h1, perm=[1, 0, 2])
h1 = tf.reshape(h1, shape=h1_shape)
h1_shape[0] = self.hparams.batch_size
h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2")
res = tf.layers.dense(
tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin")
return res
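
Two readability notes on embed: the initial h1 = tf.zeros(...) is immediately overwritten, and h1_shape[0] = self.hparams.batch_size mutates the shape list only after tf.reshape has already consumed it, so both statements are effectively dead; new_shape = shape and h1_shape = shape_x also alias the lists returned by shape_list rather than copying them. The bit-to-block split itself is easy to verify in NumPy; the helper and hparams below are assumptions for illustration:

import numpy as np

def int_to_bits(x, num_bits, base=2):  # hypothetical helper mirroring int_to_bit
    return (np.asarray(x)[..., None] // base ** np.arange(num_bits)) % base

z_size, num_blocks = 6, 2
code = np.array([45])                                  # 0 <= code < 2**z_size
bits = int_to_bits(code, z_size)                       # [1, 6], little-endian
block_bits = bits.reshape(-1, num_blocks, z_size // num_blocks)
block_ids = (block_bits * 2 ** np.arange(z_size // num_blocks)).sum(-1)
print(block_ids)  # per-block codebook indices, here [[5 5]]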
|
python
|
Embedding function that takes discrete latent and returns embedding.
Args:
x: Input to the discretization bottleneck.
Returns:
Continuous embedding to be passed on to the decoder.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/vq_discrete.py#L189-L223
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/vq_discrete.py
|
DiscreteBottleneck.discrete_bottleneck
|
def discrete_bottleneck(self, x):
"""Discretization bottleneck for latent variables.
Args:
x: Input to the discretization bottleneck.
Returns:
      Embedding to pass to the decoder, discrete latent, loss, and the
      embedding function.
    Raises:
      ValueError: If projection_tensors is None for reshape_method project,
      or ema_count or ema_means is None if we are using ema, or unknown args.
"""
x_reshaped = self.slice_hidden(x)
x_means_hot = []
x_means = 0
loss = 0
x_means_hot, x_means, q_loss, e_loss = self.embedding_lookup(
x_reshaped, self.means)
if self.hparams.ema:
tf.logging.info("Using EMA with beta = {}".format(self.hparams.beta))
updated_ema_count = \
moving_averages.assign_moving_average(
self.ema_count,
tf.reduce_sum(
tf.reshape(
x_means_hot,
shape=[-1, self.hparams.num_blocks,
self.hparams.block_v_size]),
axis=0),
self.hparams.decay,
zero_debias=False)
dw = tf.matmul(
tf.transpose(x_means_hot, perm=[1, 2, 0]),
tf.transpose(x_reshaped, perm=[1, 0, 2]))
updated_ema_means = \
moving_averages.assign_moving_average(
self.ema_means, dw, self.hparams.decay,
zero_debias=False)
n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True)
updated_ema_count = ((updated_ema_count + self.hparams.epsilon) / (
n + 2**self.hparams.z_size * self.hparams.epsilon) * n)
updated_ema_means = updated_ema_means / tf.expand_dims(
updated_ema_count, axis=-1)
with tf.control_dependencies([e_loss]):
update_means = tf.assign(self.means, updated_ema_means)
with tf.control_dependencies([update_means]):
loss += self.hparams.beta * e_loss
else:
# Use a gradient based loss for learning the cluster centers
loss += q_loss + self.hparams.beta * e_loss
# Get the discrete latent representation
x_means_idx = tf.argmax(x_means_hot, axis=-1)
# Get the binary representation
num_bits = int(self.hparams.z_size // self.hparams.num_blocks)
x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2)
x_discrete = self.bit_to_int(
tf.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2)
# Reshape x_discrete
shape_x = common_layers.shape_list(x)
shape_discrete = shape_x[:-1]
x_discrete = tf.reshape(x_discrete, shape_discrete)
x_means = tf.reshape(x_means, shape=shape_x)
h1 = x + tf.stop_gradient(x_means - x)
h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2")
res = tf.layers.dense(
tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin")
embed_fn = partial(self.embed)
return {
"dense": res,
"discrete": x_discrete,
"loss": loss,
"embed": embed_fn
}
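
The line h1 = x + tf.stop_gradient(x_means - x) is the straight-through estimator: the forward value equals x_means, but gradients reach x as if quantization were the identity. The EMA branch also Laplace-smooths the cluster counts before renormalizing the means. A minimal TF2-eager sketch of the straight-through trick, with tf.round standing in for the nearest-mean lookup (not the original TF1 code):

import tensorflow as tf

x = tf.Variable([1.5])
with tf.GradientTape() as tape:
    x_q = tf.round(x)                      # stand-in for the quantized x_means
    h1 = x + tf.stop_gradient(x_q - x)     # forward value: h1 == x_q
    loss = tf.reduce_sum(h1 ** 2)
print(tape.gradient(loss, x))              # [4.], i.e. 2 * h1: gradient passes straight through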
|
python
|
Discretization bottleneck for latent variables.
Args:
x: Input to the discretization bottleneck.
Returns:
Embedding to pass to the decoder, discrete latent, loss, and the
embedding function.
Raises:
ValueError: If projection_tensors is None for reshape_method project,
or ema_count or ema_means is None if we are using ema, or unknown args.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/vq_discrete.py#L225-L310
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/adafactor_experiments.py
|
mimic_adam_with_adafactor
|
def mimic_adam_with_adafactor(hparams):
"""Switch from Adam to Adafactor, approximating the behavior of Adam.
Some minor things may be different, like epsilon and beta1 correction.
Args:
hparams: model hyperparameters; "adam" must appear in hparams.optimizer.
"""
assert "adam" in hparams.optimizer
hparams.optimizer = "adafactor"
hparams.optimizer_adafactor_beta1 = hparams.optimizer_adam_beta1
hparams.optimizer_adafactor_beta2 = hparams.optimizer_adam_beta2
hparams.optimizer_adafactor_multiply_by_parameter_scale = False
hparams.optimizer_adafactor_factored = False
hparams.optimizer_adafactor_clipping_threshold = None
hparams.optimizer_adafactor_decay_type = "adam"
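
A hypothetical call sequence, mainly to show that the function mutates hparams in place and returns nothing; the module import follows the path given in this record:

from tensor2tensor.models import transformer
from tensor2tensor.models.research import adafactor_experiments

hparams = transformer.transformer_base()
hparams.optimizer = "adam"  # make the asserted precondition explicit
adafactor_experiments.mimic_adam_with_adafactor(hparams)  # mutates in place, returns None
assert hparams.optimizer == "adafactor"
assert hparams.optimizer_adafactor_decay_type == "adam"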
|
python
|
Switch from Adam to Adafactor, approximating the behavior of Adam.
Some minor things may be different, like epsilon and beta1 correction.
Args:
hparams: model hyperparameters; "adam" must appear in hparams.optimizer.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/adafactor_experiments.py#L27-L42
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/adafactor_experiments.py
|
afx_adam
|
def afx_adam():
"""Old version - Adam."""
hparams = transformer.transformer_base_v2()
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.999
hparams.symbol_modality_num_shards = 1
hparams.batch_size = 2048
hparams.optimizer = "adam"
hparams.learning_rate_schedule = (
"constant*rsqrt_decay*linear_warmup*rsqrt_hidden_size")
hparams.learning_rate_constant = 2.0
return hparams
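
The schedule string multiplies its named factors. Under the usual tensor2tensor semantics, with rsqrt_decay = 1/sqrt(max(step, warmup_steps)), the resulting rate can be sketched as below; the warmup and hidden-size defaults are illustrative assumptions, not values read from transformer_base_v2:

import math

def afx_learning_rate(step, warmup_steps=8000, hidden_size=512, constant=2.0):
    # constant * rsqrt_decay * linear_warmup * rsqrt_hidden_size
    rsqrt_decay = 1.0 / math.sqrt(max(step, warmup_steps))
    linear_warmup = min(step / float(warmup_steps), 1.0)
    rsqrt_hidden_size = 1.0 / math.sqrt(hidden_size)
    return constant * rsqrt_decay * linear_warmup * rsqrt_hidden_size

print(afx_learning_rate(100), afx_learning_rate(100000))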
|
python
|
Old version - Adam.
|
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/adafactor_experiments.py#L46-L57
|
train
|