| INSTRUCTION | RESPONSE |
|---|---|
A stack of separable convolution blocks with residual connections. | def multi_conv_res(x, padding, name, layers, hparams, mask=None, source=None):
"""A stack of separable convolution blocks with residual connections."""
with tf.variable_scope(name):
padding_bias = None
if mask is not None:
padding_bias = (1.0 - mask) * -1e9 # Bias to not attend to padding.
if p... |
Experimental rank loss, thanks to kkurach@ for the code. | def rank_loss(sentence_emb, image_emb, margin=0.2):
"""Experimental rank loss, thanks to kkurach@ for the code."""
with tf.name_scope("rank_loss"):
# Normalize first as this is assumed in cosine similarity later.
sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
image_emb = tf.nn.l2_normalize(image_emb... |
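The body of `rank_loss` is cut off above. A hedged sketch of how a standard bidirectional margin ranking loss continues from this point (the completion and the name `rank_loss_sketch` are our assumptions, not the verbatim original):

```python
import tensorflow as tf

def rank_loss_sketch(sentence_emb, image_emb, margin=0.2):
  """Hedged sketch of a bidirectional margin rank loss."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Cosine similarity of every image with every sentence: [batch, batch].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))
    diagonal = tf.diag_part(scores)  # Similarities of the matching pairs.
    # Penalize non-matching pairs that come within `margin` of a match,
    # in both directions (sentence -> image and image -> sentence).
    cost_s = tf.maximum(0.0, margin - diagonal + scores)
    cost_im = tf.maximum(0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)
    # Clear the diagonal: a pair should not compete with itself.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal = tf.ones_like(cost_s) - tf.eye(batch_size)
    return tf.reduce_mean(cost_s * empty_diagonal) + tf.reduce_mean(
        cost_im * empty_diagonal)
```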
Loss encouraging inputs to be more similar to their own targets than to others. | def similarity_cost(inputs_encoded, targets_encoded):
"""Loss encouraging inputs to be more similar to their own targets than to others."""
# This is a first very simple version: handle variable-length by padding
# to same length and putting everything into batch. In need of a better way.
x, y = common_layers.pad_to_same_l... |
Middle part of slicenet, connecting encoder and decoder. | def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
"""Middle part of slicenet, connecting encoder and decoder."""
def norm_fn(x, name):
with tf.variable_scope(name, default_name="norm"):
return common_layers.apply_norm(x, hparams.norm_type, hparams.hidden_size,
... |
Input embeddings -> is_padding. | def embedding_to_padding(emb):
"""Input embeddings -> is_padding."""
emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1, keep_dims=True)
return tf.to_float(tf.equal(emb_sum, 0.0)) |
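Since `embedding_to_padding` is shown in full, a short hedged usage sketch (the tensor shapes and the bias construction are illustrative assumptions):

```python
import tensorflow as tf

# [batch=2, length=4, depth=8] embeddings where the last position is padding.
emb = tf.concat([tf.random_normal([2, 3, 8]), tf.zeros([2, 1, 8])], axis=1)
is_padding = embedding_to_padding(emb)   # 1.0 exactly where emb is all-zero.
attention_bias = is_padding * -1e9       # Large negative bias so attention
                                         # never looks at padded positions.
```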
The slicenet model, main step used for training. | def slicenet_internal(inputs, targets, target_space, hparams, run_decoder=True):
"""The slicenet model, main step used for training."""
with tf.variable_scope("slicenet"):
# Project to hidden size if necessary
if inputs.get_shape().as_list()[-1] != hparams.hidden_size:
inputs = common_layers.conv_bloc... |
Set of hyperparameters. | def slicenet_params1():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.batch_size = 1024
hparams.hidden_size = 768
hparams.dropout = 0.5
hparams.symbol_dropout = 0.2
hparams.label_smoothing = 0.1
hparams.clip_grad_norm = 2.0
hparams.num_hidden_layers = 4
hparams.kerne... |
Version with Noam's decay scheme. | def slicenet_params1_noam():
"""Version with Noam's decay scheme."""
hparams = slicenet_params1()
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 1.0
hparams.learning_rate_warmup_steps = 4000
hparams.initializer = "uniform_unit_scaling"
hparams.optimizer_adam_epsilon = 1e-9
hparams.o... |
Version for fast local runs. | def slicenet_params1_tiny():
"""Version for fast local runs."""
hparams = slicenet_params1()
hparams.attention_type = "simple"
hparams.separability = 0
hparams.hidden_size = 128
hparams.num_hidden_layers = 2
hparams.batch_size = 512
hparams.learning_rate_warmup_steps = 200
return hparams |
Small range of hyperparameters. | def slicenet_range1(ranged_hparams):
"""Small range of hyperparameters."""
rhp = ranged_hparams
rhp.set_float("clip_grad_norm", 1.0, 10.0, scale=rhp.LOG_SCALE)
rhp.set_float("learning_rate", 0.02, 1.0, scale=rhp.LOG_SCALE)
rhp.set_float("optimizer_adam_beta2", 0.995, 0.998)
rhp.set_float("weight_decay", 1.0... |
Converts a space-separated string of tokens to lists of ids.
Also store temporary vocabulary IDs for source OOV tokens. OOVs are
represented by their temporary OOV number. E.g., if the vocabulary size
is 50k and the source has 3 OOVs, then these temporary OOV numbers will
be 50000, 50001, 50002.
A... | def encode(self, s):
"""Converts a space-separated string of tokens to lists of ids.
Also store temporary vocabulary IDs for source OOV tokens. OOVs are
represented by their temporary OOV number. E.g., if the vocabulary size
is 50k and the source has 3 OOVs, then these temporary OOV numbers will
be... |
Converts a space-separated string of tokens to lists of ids.
Also store a version of extended vocabulary IDs.
For target OOVs that are in the source, encode them using the temporary
vocab IDs.
For target OOVs not in the source, encode them as <UNK>
Args:
target: target string
source_oov... | def encode_target(self, target, source_oovs):
"""Converts a space-separated string of tokens to lists of ids.
Also store a version of extended vocabulary IDs.
For target OOVs that are in the source, encode them using the temporary
vocab IDs.
For target OOVs not in the source, encode them as <UNK>
... |
Decode ids back to tokens, considering OOVs' temporary IDs.
Args:
ids: vocab ids. Could possibly include source temporary OOV ID starting
from vocab_size.
source_oov_id_to_token: a list of source OOV tokens, with the order the
same as they appear in the source.
Returns:
decoded to... | def decode_list_oov(self, ids, source_oov_id_to_token):
"""decode ids back to tokens, considering OOVs temporary IDs.
Args:
ids: vocab ids. Could possibly include source temporary OOV ID starting
from vocab_size.
source_oov_id_to_token: a list of source OOV tokens, with the order the
sa... |
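A hedged sketch of the truncated decode logic, assuming `id_to_token` maps in-vocabulary ids to tokens and `vocab_size` is the base vocabulary size (both names are ours):

```python
def decode_list_oov_sketch(ids, source_oov_id_to_token, id_to_token, vocab_size):
  """Map ids to tokens, resolving temporary OOV ids against the source."""
  tokens = []
  for cur_id in ids:
    if cur_id < vocab_size:
      tokens.append(id_to_token[cur_id])
    else:
      # Temporary OOV ids start at vocab_size, in source appearance order.
      tokens.append(source_oov_id_to_token[cur_id - vocab_size])
  return tokens
```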
Computes new shape with the smallest side equal to `smallest_side`.
Computes new shape with the smallest side equal to `smallest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current ... | def _smallest_size_at_least(height, width, smallest_side):
"""Computes new shape with the smallest side equal to `smallest_side`.
Computes new shape with the smallest side equal to `smallest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current h... |
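This helper follows a well-known pattern from TF-Slim's VGG preprocessing; a hedged sketch of the computation (exact rounding in the truncated original may differ):

```python
import tensorflow as tf

def _smallest_size_at_least_sketch(height, width, smallest_side):
  """Scale (height, width) so the smaller side equals smallest_side."""
  height = tf.to_float(height)
  width = tf.to_float(width)
  smallest_side = tf.to_float(tf.convert_to_tensor(smallest_side, tf.int32))
  # Scale by the ratio of the target size to the current smaller side.
  scale = tf.cond(tf.greater(height, width),
                  lambda: smallest_side / width,
                  lambda: smallest_side / height)
  return tf.to_int32(height * scale), tf.to_int32(width * scale)
```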
Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image. | def _aspect_preserving_resize(image, smallest_side):
"""Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing... |
Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
... | def _distort_color(image, color_ordering=0, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a disti...
Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is... | def _apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the... |
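The widely used TF-Slim implementation of this selector routes the input through `switch`/`merge` so only the sampled branch runs on the live value; the truncated body most likely matches this sketch:

```python
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops

def _apply_with_random_selector_sketch(x, func, num_cases):
  """Apply func(x, sel) with sel drawn uniformly from [0, num_cases)."""
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # switch() sends x down the branch whose predicate is true; merge() then
  # picks whichever branch actually produced a value.
  return control_flow_ops.merge(
      [func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
       for case in range(num_cases)])[0]
```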
Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image = _mean_image_subtraction(image, means)
Note that the rank of `image` must be known.
Args:
image: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each chann... | def _mean_image_subtraction(image, means):
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image = _mean_image_subtraction(image, means)
Note that the rank of `image` must be known.
Args:
image: a tensor of size [height, width, C].
means: ... |
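Again a classic TF-Slim helper; a hedged sketch of the full body:

```python
import tensorflow as tf

def _mean_image_subtraction_sketch(image, means):
  """Subtract per-channel means from a [height, width, C] image."""
  if image.get_shape().ndims != 3:
    raise ValueError("Input must be of size [height, width, C>0]")
  num_channels = image.get_shape().as_list()[-1]
  if len(means) != num_channels:
    raise ValueError("len(means) must match the number of channels")
  # Split into per-channel tensors, shift each, and re-assemble.
  channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
  for i in range(num_channels):
    channels[i] -= means[i]
  return tf.concat(axis=2, values=channels)
```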
Preprocess an image for VQA v2. | def vqa_v2_preprocess_image(
image,
height,
width,
mode,
resize_side=512,
distort=True,
image_model_fn="resnet_v1_152",
):
"""vqa v2 preprocess image."""
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
assert resize_side > 0
if resize_side:
image = _aspect_preservi... |
Prepare one shard of the model for the encoder.
Args:
inputs: a Tensor.
target_space: a Tensor.
hparams: run hyperparameters
features: optionally pass the entire features dictionary as well.
This is needed now for "packed" datasets.
Returns:
encoder_input: a Tensor, bottom of encoder sta... | def transformer_prepare_encoder(inputs, target_space, hparams, features=None):
"""Prepare one shard of the model for the encoder.
Args:
inputs: a Tensor.
target_space: a Tensor.
hparams: run hyperparameters
features: optionally pass the entire features dictionary as well.
This is needed now f... |
A stack of transformer layers.
Args:
encoder_input: a Tensor
encoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
nonpadding: optional Tensor with shape [batch_size, encoder_length]
indic... | def transformer_encoder(encoder_input,
encoder_self_attention_bias,
hparams,
name="encoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True,
... |
Feed-forward layer in the transformer.
Args:
x: a Tensor of shape [batch_size, length, hparams.hidden_size]
hparams: hyperparameters for model
pad_remover: an expert_utils.PadRemover object tracking the padding
positions. If provided, when using convolutional settings, the padding
is removed ... | def transformer_ffn_layer(x,
hparams,
pad_remover=None,
conv_padding="LEFT",
nonpadding_mask=None,
losses=None,
cache=None,
decode_loop_st... |
Transformer on languagemodel_lm1b32k_packed. 50M Params. | def lmx_base():
"""Transformer on languagemodel_lm1b32k_packed. 50M Params."""
hparams = transformer.transformer_tpu()
# sharing is counterproductive when underparameterized
hparams.shared_embedding_and_softmax_weights = False
# we judge by log-ppl, so label smoothing hurts.
hparams.label_smoothing = 0.0
... |
HParams for training languagemodel_lm1b32k_packed. 880M Params. | def lmx_h3k_f12k():
"""HParams for training languagemodel_lm1b32k_packed. 880M Params."""
hparams = lmx_base()
hparams.hidden_size = 3072
hparams.filter_size = 12288
hparams.batch_size = 2048
hparams.weight_dtype = "bfloat16"
return hparams |
HParams for training languagemodel_lm1b32k_packed. 1470M Params. | def lmx_h4k_f16k():
"""HParams for training languagemodel_lm1b32k_packed. 1470M Params."""
hparams = lmx_base()
hparams.hidden_size = 4096
hparams.filter_size = 16384
hparams.batch_size = 1024
hparams.weight_dtype = "bfloat16"
return hparams |
Language model using relative attention. | def lmx_relative():
"""Language model using relative attention."""
hparams = lmx_base()
hparams.self_attention_type = "dot_product_relative_v2"
hparams.activation_dtype = "float32"
hparams.weight_dtype = "float32"
return hparams |
Transformer with mixture of experts. 890M Params. | def lmx_moe_h1k_f4k_x32():
"""Transformer with mixture of experts. 890M Params."""
hparams = lmx_h1k_f4k()
hparams.ffn_layer = "local_moe_tpu"
hparams.moe_num_experts = 32
hparams.weight_dtype = "bfloat16"
hparams.batch_size = 8192
return hparams |
Transformer with mixture of experts. 890M Params. | def lmx_moe_h1k_f8k_x16():
"""Transformer with mixture of experts. 890M Params."""
hparams = lmx_h1k_f4k()
hparams.filter_size = 8192
hparams.ffn_layer = "local_moe_tpu"
hparams.moe_num_experts = 16
hparams.weight_dtype = "bfloat16"
hparams.batch_size = 8192
return hparams |
HParams for training languagemodel_lm1b32k_packed. 880M Params. | def lmx_h1k_f64k():
"""HParams for training languagemodel_lm1b32k_packed. 880M Params."""
hparams = lmx_base()
hparams.hidden_size = 1024
hparams.filter_size = 65536
hparams.batch_size = 2048
return hparams |
Uncertainty reward based on logits. | def compute_uncertainty_reward(logits, predictions):
"""Uncertainty reward based on logits."""
# TODO(rsepassi): Add support for L1/L2 loss models. Current code only
# works for softmax models.
vocab_size = logits.shape[-1]
assert vocab_size > 1
log_probs = common_layers.log_prob_from_logits(logits)
max_l... |
Reset the batch of environments.
Args:
indices: The batch indices of the environments to reset; defaults to all.
Returns:
Batch tensor of the new observations. | def _reset_non_empty(self, indices):
"""Reset the batch of environments.
Args:
indices: The batch indices of the environments to reset; defaults to all.
Returns:
Batch tensor of the new observations.
"""
reset_video_op = tf.cond(
self._video_condition,
lambda: tf.py_fun... |
Set the random seed from flag everywhere. | def set_random_seed():
"""Set the random seed from flag everywhere."""
tf.set_random_seed(FLAGS.random_seed)
random.seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed) |
Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS. | def generate_data_for_problem(problem):
"""Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS."""
training_gen, dev_gen, test_gen = _SUPPORTED_PROBLEM_GENERATORS[problem]
num_train_shards = FLAGS.num_shards or 10
tf.logging.info("Generating training data for %s.", problem)
train_output_files = gene... |
Generate data for `EnvProblem`s. | def generate_data_for_env_problem(problem_name):
"""Generate data for `EnvProblem`s."""
assert FLAGS.env_problem_max_env_steps > 0, ("--env_problem_max_env_steps "
"should be greater than zero")
assert FLAGS.env_problem_batch_size > 0, ("--env_problem_batch_size shou... |
Generate data for a registered problem. | def generate_data_for_registered_problem(problem_name):
"""Generate data for a registered problem."""
tf.logging.info("Generating data for %s.", problem_name)
if FLAGS.num_shards:
raise ValueError("--num_shards should not be set for registered Problem.")
problem = registry.problem(problem_name)
task_id = ... |
Traverses directory collecting input and target files.
Args:
directory: base path to extracted audio and transcripts.
Returns:
list of (media_base, media_filepath, label) tuples | def _collect_data(directory):
"""Traverses directory collecting input and target files.
Args:
directory: base path to extracted audio and transcripts.
Returns:
list of (media_base, media_filepath, label) tuples
"""
data_files = []
transcripts = [
filename for filename in os.listdir... |
Checks if the filename exists under the path. | def _file_exists(path, filename):
"""Checks if the filename exists under the path."""
return os.path.isfile(os.path.join(path, filename)) |
Checks if the filename is relative, not absolute. | def _is_relative(path, filename):
"""Checks if the filename is relative, not absolute."""
return os.path.abspath(os.path.join(path, filename)).startswith(path) |
Define ppo step. | def define_ppo_step(data_points, hparams, action_space, lr):
"""Define ppo step."""
observation, action, discounted_reward, norm_advantage, old_pdf = data_points
obs_shape = common_layers.shape_list(observation)
observation = tf.reshape(
observation, [obs_shape[0] * obs_shape[1]] + obs_shape[2:]
)
(l... |
PPO epoch. | def define_ppo_epoch(memory, hparams, action_space, batch_size):
"""PPO epoch."""
observation, reward, done, action, old_pdf, value = memory
# This is to avoid propagating gradients through simulated environment.
observation = tf.stop_gradient(observation)
action = tf.stop_gradient(action)
reward = tf.stop... |
Generalized advantage estimator.
Returns:
GAE estimator. It will be one element shorter than the input; this is
because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N]. | def calculate_generalized_advantage_estimator(
reward, value, done, gae_gamma, gae_lambda):
# pylint: disable=g-doc-args
"""Generalized advantage estimator.
Returns:
GAE estimator. It will be one element shorter than the input; this is
because to compute GAE for [0, ..., N-1] one needs V for [1, ...,... |
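A hedged sketch of the estimator itself: compute the TD residuals, then accumulate them backwards with a reverse `tf.scan` (shapes assume time-major `[T, batch]` inputs; the truncated original may differ in details):

```python
import tensorflow as tf

def gae_sketch(reward, value, done, gae_gamma, gae_lambda):
  """delta_t = r_t + gamma*V_{t+1}*(1-done) - V_t; A_t sums decayed deltas."""
  next_not_done = 1 - tf.to_float(done[1:, :])
  delta = (reward[:-1, :] + gae_gamma * value[1:, :] * next_not_done
           - value[:-1, :])
  # Reverse-time recursion A_t = delta_t + gamma*lambda*(1-done)*A_{t+1},
  # implemented as a forward scan over the reversed sequences.
  advantage = tf.reverse(
      tf.scan(lambda agg, cur: cur[0] + cur[1] * agg,
              [tf.reverse(delta, [0]),
               tf.reverse(gae_gamma * gae_lambda * next_not_done, [0])],
              tf.zeros_like(delta[0, :]),
              parallel_iterations=1),
      [0])
  return advantage  # One element shorter than the input, as documented.
```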
Returns a reading spec of a gym space.
NOTE: Only implemented currently for Box and Discrete.
Args:
gym_space: instance of gym.spaces whose spec we want.
Returns:
Reading spec for that space.
Raises:
NotImplementedError: For spaces whose reading spec we haven't implemented. | def gym_space_spec(gym_space):
"""Returns a reading spec of a gym space.
NOTE: Only implemented currently for Box and Discrete.
Args:
gym_space: instance of gym.spaces whose spec we want.
Returns:
Reading spec for that space.
Raises:
NotImplementedError: For spaces whose reading spec we haven'... |
Number of elements that can be represented by the space.
Makes the most sense for Discrete or Box type with integral dtype, ex: number
of actions in an action space.
Args:
gym_space: The gym space.
Returns:
np.int64 number of observations that can be represented by this space, or
returns None whe... | def cardinality(gym_space):
"""Number of elements that can be represented by the space.
Makes the most sense for Discrete or Box type with integral dtype, ex: number
of actions in an action space.
Args:
gym_space: The gym space.
Returns:
np.int64 number of observations that can be represented by th... |
RMSE but will argmax if last dim is not 1. | def image_rmse(predictions, labels, weights_fn=common_layers.weights_all):
"""RMSE but will argmax if last dim is not 1."""
if common_layers.shape_list(predictions)[-1] == 1:
predictions = tf.squeeze(predictions, axis=[-1])
else:
predictions = tf.argmax(predictions, axis=-1)
return padded_rmse(predictio... |
Computes mean(abs(preds-target)). | def abs_error(predictions, labels, weights_fn=None):
"""Computes mean(abs(preds-target))."""
del weights_fn # Unused
targets = tf.squeeze(labels, axis=[2, 3])
batch_abs_error = tf.abs(predictions - targets)
den = tf.ones(tf.shape(batch_abs_error), dtype=tf.float32)
return (batch_abs_error, den) |
Explained variance, also known as R^2. | def padded_variance_explained(predictions,
labels,
weights_fn=common_layers.weights_all):
"""Explained variance, also known as R^2."""
predictions, labels = common_layers.pad_with_zeros(predictions, labels)
targets = labels
weights = weights_fn(targets... |
Percentage of times that top-k predictions match labels on non-0s. | def padded_accuracy_topk(predictions,
labels,
k,
weights_fn=common_layers.weights_nonzero):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels... |
Sequence accuracy for L1/L2 losses: round down the predictions to ints. | def rounding_sequence_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Sequence accuracy for L1/L2 losses: round down the predictions to ints."""
outputs = tf.squeeze(tf.to_int32(predictions), axis=-1)
weights = weights_fn(la... |
Percentage of times that predictions match labels everywhere (non-0). | def padded_sequence_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
# If the last dimension is 1 then we're using L1/L2 loss.
if common_layers.shape_list... |
Average edit distance, ignoring padding 0s.
The score returned is the edit distance divided by the total length of
reference truth and the weight returned is the total length of the truth.
Args:
predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and
type tf.float32 representing ... | def sequence_edit_distance(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Average edit distance, ignoring padding 0s.
The score returned is the edit distance divided by the total length of
reference truth and the weight returned is the tot... |
Average log-perplexity excluding padding 0s. No smoothing. | def padded_neg_log_perplexity(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Average log-perplexity exluding padding 0s. No smoothing."""
num, den = common_layers.padded_cross_entropy(
predictions, labels, 0.0, weights_fn=weights_... |
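The return here is cut off; given the `(numerator, denominator)` convention the other metrics use (and the `(-num, den)` return visible in `dmol_neg_log_perplexity` below), a hedged completion is:

```python
# Assumed completion: negate the cross entropy so higher is better, and keep
# the weight denominator for averaging (reduce_sum=False is our assumption;
# weights_fn and common_layers come from the surrounding module).
num, den = common_layers.padded_cross_entropy(
    predictions, labels, 0.0, weights_fn=weights_fn, reduce_sum=False)
return (-num, den)
```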
Average log-perplexity with custom targets_mask. | def padded_neg_log_perplexity_with_masking(
predictions,
labels,
features,
weights_fn=None):
"""Average log-perplexity with custom targets_mask."""
del weights_fn
if "targets_mask" not in features:
raise ValueError("masked_neg_log_perplexity requires targets_mask feature")
# Features are 4 ... |
Average log-perplexity excluding padding 0s. No smoothing. | def dmol_neg_log_perplexity(predictions,
labels,
weights_fn=None):
"""Average log-perplexity excluding padding 0s. No smoothing."""
del weights_fn # Unused
num, den = common_layers.dml_loss(
predictions, labels, reduce_sum=False)
return (-num, den) |
Rounding accuracy for L1/L2 losses: round down the predictions to ints. | def rounding_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Rounding accuracy for L1/L2 losses: round down the predictions to ints."""
outputs = tf.squeeze(tf.to_int32(predictions))
labels = tf.squeeze(labels)
weights = weights_fn(labels)
... |
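A hedged sketch of how the truncated comparison plausibly finishes: cast labels to ints and score exact matches under the weight mask (`common_layers` is assumed from the surrounding module):

```python
import tensorflow as tf

def rounding_accuracy_sketch(predictions, labels,
                             weights_fn=common_layers.weights_nonzero):
  """Per-position accuracy after truncating float predictions to ints."""
  outputs = tf.squeeze(tf.to_int32(predictions))
  labels = tf.squeeze(labels)
  weights = weights_fn(labels)
  labels = tf.to_int32(labels)
  return tf.to_float(tf.equal(outputs, labels)), weights
```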
Percentage of times that predictions match labels on non-0s. | def padded_accuracy(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Percentage of times that predictions matches labels on non-0s."""
# If the last dimension is 1 then we're using L1/L2 loss.
if common_layers.shape_list(predictions)[-1] == 1:
return r... |
Used to evaluate the VQA accuracy.
Let n be the number of times that predictions appear in labels; the final score
is min(n/k, 1).
Refer to https://arxiv.org/pdf/1505.00468.pdf.
Args:
predictions: A tensor with shape [batch_size, 1, 1, 1, vocab_size].
labels: A tensor with shape [batch_size, length, 1, 1].
... | def multilabel_accuracy_matchk(predictions,
labels,
k,
weights_fn=common_layers.weights_nonzero):
"""Used to evaluate the VQA accuracy.
Let n be the number of times that predictions appear in labels; the final score
is min(n/k, 1... |
Precision of set predictions.
Args:
predictions : A Tensor of scores of shape [batch, nlabels].
labels: A Tensor of int32s giving true set elements,
of shape [batch, seq_length].
weights_fn: A function to weight the elements.
Returns:
hits: A Tensor of shape [batch, nlabels].
weights: A ... | def set_precision(predictions, labels,
weights_fn=common_layers.weights_nonzero):
"""Precision of set predictions.
Args:
predictions : A Tensor of scores of shape [batch, nlabels].
labels: A Tensor of int32s giving true set elements,
of shape [batch, seq_length].
weights_fn: A f... |
Reshapes predictions and passes it to tensorboard.
Args:
predictions : The predicted image (logits).
targets : The ground truth.
hparams: model hparams.
Returns:
summary_proto: containing the summary images.
weights: A Tensor of zeros of the same shape as predictions. | def image_summary(predictions, targets, hparams):
"""Reshapes predictions and passes it to tensorboard.
Args:
predictions : The predicted image (logits).
targets : The ground truth.
hparams: model hparams.
Returns:
summary_proto: containing the summary images.
weights: A Tensor of zeros of t... |
Calculate softmax cross entropy given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
cross-entropy (scalar), weight... | def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None):
"""Calculate softmax cross entropy given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels a... |
Calculate accuracy for a set, given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
accuracy (scalar), weights | def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):
"""Calculate accuracy for a set, given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weig... |
Calculate recall for a set, given one-hot labels and logits.
Predictions are converted to one-hot,
as predictions[example][arg-max(example)] = 1
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes... | def sigmoid_recall_one_hot(logits, labels, weights_fn=None):
"""Calculate recall for a set, given one-hot labels and logits.
Predictions are converted to one-hot,
as predictions[example][arg-max(example)] = 1
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batc... |
Calculate sigmoid cross entropy for one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
cross_entropy (scalar), weights | def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None):
"""Calculate sigmoid cross entropy for one-hot lanels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and... |
Calculate ROC AUC.
Requires binary classes.
Args:
logits: Tensor of size [batch_size, 1, 1, num_classes]
labels: Tensor of size [batch_size, 1, 1, num_classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
ROC AUC (scalar), weights | def roc_auc(logits, labels, weights_fn=None):
"""Calculate ROC AUC.
Requires binary classes.
Args:
logits: Tensor of size [batch_size, 1, 1, num_classes]
labels: Tensor of size [batch_size, 1, 1, num_classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
ROC A... |
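A hedged sketch of the truncated metric using the streaming `tf.metrics.auc` op (the cast to float is our addition, since `tf.metrics.auc` expects floating-point predictions):

```python
import tensorflow as tf

def roc_auc_sketch(logits, labels, weights_fn=None):
  """Streaming ROC AUC over argmax'd binary predictions."""
  del weights_fn  # Unused.
  with tf.variable_scope("roc_auc", values=[logits, labels]):
    predictions = tf.to_float(tf.argmax(logits, axis=-1))
    _, auc = tf.metrics.auc(labels, predictions, curve="ROC")
    return auc, tf.constant(1.0)
```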
Creates the evaluation metrics for the model.
Args:
problems: List of Problem instances.
model_hparams: a set of hparams.
Returns:
dict<metric name, metric function>. The metric functions have signature
(Tensor predictions, features) -> (metric Tensor, update op), where features
is a dict with... | def create_evaluation_metrics(problems, model_hparams):
"""Creates the evaluation metrics for the model.
Args:
problems: List of Problem instances.
model_hparams: a set of hparams.
Returns:
dict<metric name, metric function>. The metric functions have signature
(Tensor predictions, features) -> ... |
See create_eager_metrics. | def create_eager_metrics_for_problem(problem, model_hparams):
"""See create_eager_metrics."""
metric_fns = problem.eval_metric_fns(model_hparams)
problem_hparams = problem.get_hparams(model_hparams)
target_modality = problem_hparams.modality["targets"]
weights_fn = model_hparams.weights_fn.get(
"targets... |
Create metrics accumulators and averager for Eager mode.
Args:
metric_names: list<str> from Metrics enum
weights_fn: function that takes labels and returns a weights mask. Defaults
to weights of all 1, i.e. common_layers.weights_all. Use
common_layers.weights_nonzero if labels have 0-padding.
... | def create_eager_metrics(metric_names, weights_fn=common_layers.weights_all):
"""Create metrics accumulators and averager for Eager mode.
Args:
metric_names: list<str> from Metrics enum
weights_fn: function that takes labels and returns a weights mask. Defaults
to weights of all 1, i.e. common_layers... |
Create metrics accumulators and averager for Eager mode.
Args:
metric_fns: dict<metric name, metric function>
weights_fn: function that takes labels and returns a weights mask. Defaults
to weights of all 1, i.e. common_layers.weights_all. Use
common_layers.weights_nonzero if labels have 0-padding... | def create_eager_metrics_internal(metric_fns,
weights_fn=common_layers.weights_all):
"""Create metrics accumulators and averager for Eager mode.
Args:
metric_fns: dict<metric name, metric function>
weights_fn: function that takes labels and returns a weights mask. Defaults... |
Calculate word error rate.
Args:
raw_predictions: The raw predictions.
labels: The actual labels.
lookup: A tf.constant mapping indices to output tokens.
weights_fn: Weighting function.
Returns:
The word error rate. | def word_error_rate(raw_predictions,
labels,
lookup=None,
weights_fn=common_layers.weights_nonzero):
"""Calculate word error rate.
Args:
raw_predictions: The raw predictions.
labels: The actual labels.
lookup: A tf.constant mapping indices to ... |
Calculate pearson correlation coefficient.
Args:
predictions: The raw predictions.
labels: The actual labels.
weights_fn: Weighting function.
Returns:
The pearson correlation coefficient. | def pearson_correlation_coefficient(predictions, labels, weights_fn=None):
"""Calculate pearson correlation coefficient.
Args:
predictions: The raw predictions.
labels: The actual labels.
weights_fn: Weighting function.
Returns:
The pearson correlation coefficient.
"""
del weights_fn
_, pe... |
Prepare one shard of the model for the decoder.
Args:
targets: a Tensor.
hparams: run hyperparameters
Returns:
decoder_input: a Tensor, bottom of decoder stack
decoder_self_attention_bias: a Tensor, containing large negative values
to implement masked attention and possibly biases for diagonal... | def attention_lm_prepare_decoder(targets, hparams):
"""Prepare one shard of the model for the decoder.
Args:
targets: a Tensor.
hparams: run hyperparameters
Returns:
decoder_input: a Tensor, bottom of decoder stack
decoder_self_attention_bias: a Tensor, containing large negative values
to im... |
A stack of attention_lm layers.
Args:
decoder_input: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
Returns:
y: a Tensor | def attention_lm_decoder(decoder_input,
decoder_self_attention_bias,
hparams,
name="decoder"):
"""A stack of attention_lm layers.
Args:
decoder_input: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention
(see c... |
Set of hyperparameters. | def attention_lm_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 1024
hparams.batch_size = 8192
hparams.max_length = 256
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.lea... |
Cheap model.
on lm1b_32k:
45M params
2 steps/sec on [GeForce GTX TITAN X]
Returns:
an hparams object. | def attention_lm_small():
"""Cheap model.
on lm1b_32k:
45M params
2 steps/sec on [GeForce GTX TITAN X]
Returns:
an hparams object.
"""
hparams = attention_lm_base()
hparams.num_hidden_layers = 4
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.layer_prepostprocess_dropout ... |
Version to use for seq2seq. | def attention_lm_translation():
"""Version to use for seq2seq."""
hparams = attention_lm_base()
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.learning_rate = 0.4
hparams.prepend_mode = "prepend_inputs_masked_attention"
hparams.max_length = 512
hparams.label_sm... |
BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
predictions: tensor, model predicti... | def bleu_score(predictions, labels, **unused_kwargs):
"""BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have be... |
Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
method.
Returns:
The Counter containing all n-grams up to max_order in segment
with... | def _get_ngrams(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
method.
Returns:
The Counter containing all n... |
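This is the canonical BLEU n-gram counter; the truncated body almost certainly follows the standard loop:

```python
import collections

def _get_ngrams_sketch(segment, max_order):
  """Count every n-gram of order 1..max_order in a token list."""
  ngram_counts = collections.Counter()
  for order in range(1, max_order + 1):
    for i in range(len(segment) - order + 1):
      ngram_counts[tuple(segment[i:i + order])] += 1
  return ngram_counts
```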
r"""Tokenize a string following the official BLEU implementation.
See https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983
In our case, the input string is expected to be just one line
and no HTML entities de-escaping is needed.
So we just tokenize on punc... | def bleu_tokenize(string):
r"""Tokenize a string following the official BLEU implementation.
See https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983
In our case, the input string is expected to be just one line
and no HTML entities de-escaping is needed.
... |
Compute BLEU for two files (reference and hypothesis translation). | def bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False):
"""Compute BLEU for two files (reference and hypothesis translation)."""
ref_lines = text_encoder.native_to_unicode(
tf.gfile.Open(ref_filename, "r").read()).split("\n")
hyp_lines = text_encoder.native_to_unicode(
tf.gfile.Open(hyp_fi... |
Glob twice, first time possibly catching `NotFoundError`.
tf.gfile.Glob may crash with
```
tensorflow.python.framework.errors_impl.NotFoundError:
xy/model.ckpt-1130761_temp_9cb4cb0b0f5f4382b5ea947aadfb7a40;
No such file or directory
```
Standard glob.glob does not have this bug, but does not handle mul... | def _try_twice_tf_glob(pattern):
"""Glob twice, first time possibly catching `NotFoundError`.
tf.gfile.Glob may crash with
```
tensorflow.python.framework.errors_impl.NotFoundError:
xy/model.ckpt-1130761_temp_9cb4cb0b0f5f4382b5ea947aadfb7a40;
No such file or directory
```
Standard glob.glob does not ... |
Return list of StepFiles sorted by step from files at path_prefix. | def _read_stepfiles_list(path_prefix, path_suffix=".index", min_steps=0):
"""Return list of StepFiles sorted by step from files at path_prefix."""
stepfiles = []
for filename in _try_twice_tf_glob(path_prefix + "*-[0-9]*" + path_suffix):
basename = filename[:-len(path_suffix)] if path_suffix else filename
... |
Continuously yield new files with steps in filename as they appear.
This is useful for checkpoint files or other files whose names differ just in
an integer marking the number of steps and match the wildcard path_prefix +
"*-[0-9]*" + path_suffix.
Unlike `tf.contrib.training.checkpoints_iterator`, this implem... | def stepfiles_iterator(path_prefix, wait_minutes=0, min_steps=0,
path_suffix=".index", sleep_sec=10):
"""Continuously yield new files with steps in filename as they appear.
This is useful for checkpoint files or other files whose names differ just in
an integer marking the number of steps ... |
Extract the VQA V2 annotation files to directory unless it's there. | def _get_vqa_v2_annotations(directory,
annotation_url,
annotation_filename="vqa_v2.tar.gz"):
"""Extract the VQA V2 annotation files to directory unless it's there."""
annotation_file = generator_utils.maybe_download_from_drive(
directory, annotation_file... |
Extract the VQA V2 image data set to directory unless it's there. | def _get_vqa_v2_image_raw_dataset(directory, image_root_url, image_urls):
"""Extract the VQA V2 image data set to directory unless it's there."""
for url in image_urls:
filename = os.path.basename(url)
download_url = os.path.join(image_root_url, url)
path = generator_utils.maybe_download(directory, file... |
Extract the VQA V2 feature data set to directory unless it's there. | def _get_vqa_v2_image_feature_dataset(
directory, feature_url, feature_filename="mscoco_feat.tar.gz"):
"""Extract the VQA V2 feature data set to directory unless it's there."""
feature_file = generator_utils.maybe_download_from_drive(
directory, feature_filename, feature_url)
with tarfile.open(feature_f... |
Helper function for raising a value error for bad assignment. | def _parse_fail(name, var_type, value, values):
"""Helper function for raising a value error for bad assignment."""
raise ValueError(
'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' %
(name, var_type.__name__, value, values)) |
Update results_dictionary with a scalar value.
Used to update the results_dictionary to be returned by parse_values when
encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("s" or "arr").
parse_fn: Function for p... | def _process_scalar_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary with a scalar value.
Used to update the results_dictionary to be returned by parse_values when
encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".)
Mu... |
Update results_dictionary from a list of values.
Used to update results_dictionary to be returned by parse_values when
encountering a clause with a list RHS (e.g. "arr=[1,2,3]".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("arr").
parse_fn: Function for parsing individual... | def _process_list_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary from a list of values.
Used to update results_dictionary to be returned by parse_values when
encountering a clause with a list RHS (e.g. "arr=[1,2,3]".)
Mutates results_... |
Cast hparam to the provided type, if compatible.
Args:
name: Name of the hparam to be cast.
param_type: The type of the hparam.
value: The value to be cast, if compatible.
Returns:
The result of casting `value` to `param_type`.
Raises:
ValueError: If the type of `value` is not compatible wi... | def _cast_to_type_if_compatible(name, param_type, value):
"""Cast hparam to the provided type, if compatible.
Args:
name: Name of the hparam to be cast.
param_type: The type of the hparam.
value: The value to be cast, if compatible.
Returns:
The result of casting `value` to `param_type`.
Rais... |
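A hedged sketch of the compatibility checks such a caster typically performs (modeled on the `tf.contrib.training.HParams` version; details of the truncated original may differ):

```python
import numbers
import six

def _cast_to_type_if_compatible_sketch(name, param_type, value):
  """Cast value to param_type, rejecting lossy or surprising conversions."""
  fail_msg = ("Could not cast hparam '%s' of type '%s' from value %r" %
              (name, param_type, value))
  if issubclass(param_type, type(None)):
    return value
  # Avoid converting a non-string type to a string.
  if (issubclass(param_type, (six.string_types, six.binary_type)) and
      not isinstance(value, (six.string_types, six.binary_type))):
    raise ValueError(fail_msg)
  # Avoid converting between booleans and numbers/strings in either direction.
  if issubclass(param_type, bool) != isinstance(value, bool):
    raise ValueError(fail_msg)
  # Avoid lossy float -> int conversion (int -> float is fine).
  if (issubclass(param_type, numbers.Integral) and
      not isinstance(value, numbers.Integral)):
    raise ValueError(fail_msg)
  return param_type(value)
```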
Parses hyperparameter values from a string into a python map.
`values` is a string containing comma-separated `name=value` pairs.
For each pair, the value of the hyperparameter named `name` is set to
`value`.
If a hyperparameter name appears multiple times in `values`, a ValueError
is raised (e.g. 'a=1,a=2'... | def parse_values(values, type_map, ignore_unknown=False):
"""Parses hyperparameter values from a string into a python map.
`values` is a string containing comma-separated `name=value` pairs.
For each pair, the value of the hyperparameter named `name` is set to
`value`.
If a hyperparameter name appears multi... |
Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid. | def add_hparam(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of... |
Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
KeyError: If the hyperparameter doesn't exist.
... | def set_hparam(self, name, value):
"""Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
KeyError:... |
Removes the hyperparameter with key 'name'.
Does nothing if it isn't present.
Args:
name: Name of the hyperparameter. | def del_hparam(self, name):
"""Removes the hyperparameter with key 'name'.
Does nothing if it isn't present.
Args:
name: Name of the hyperparameter.
"""
if hasattr(self, name):
delattr(self, name)
del self._hparam_types[name] |
Override existing hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where 'value'
must follow the syntax described above.
Returns:
The `HParams` ... | def parse(self, values):
"""Override existing hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where 'value'
must follow the syntax described above.
... |
Override existing hyperparameter values, parsing new values from a dictionary.
Args:
values_dict: Dictionary of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_dict` doesn't exist.
ValueError: If `values_dict` cannot be parsed. | def override_from_dict(self, values_dict):
"""Override existing hyperparameter values, parsing new values from a dictionary.
Args:
values_dict: Dictionary of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_dict` doesn't exist.
... |
Serializes the hyperparameters into JSON.
Args:
indent: If a non-negative integer, JSON array elements and object members
will be pretty-printed with that indent level. An indent level of 0, or
negative, will only insert newlines. `None` (the default) selects the
most compact represen... | def to_json(self, indent=None, separators=None, sort_keys=False):
"""Serializes the hyperparameters into JSON.
Args:
indent: If a non-negative integer, JSON array elements and object members
will be pretty-printed with that indent level. An indent level of 0, or
negative, will only insert... |
Override existing hyperparameter values, parsing new values from a json object.
Args:
values_json: String containing a json object of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_json` doesn't exist.
ValueError: If `values_jso... | def parse_json(self, values_json):
"""Override existing hyperparameter values, parsing new values from a json object.
Args:
values_json: String containing a json object of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_json` doesn... |
Return the hyperparameter values as a Python dictionary.
Returns:
A dictionary with hyperparameter names as keys. The values are the
hyperparameter values. | def values(self):
"""Return the hyperparameter values as a Python dictionary.
Returns:
A dictionary with hyperparameter names as keys. The values are the
hyperparameter values.
"""
return {n: getattr(self, n) for n in self._hparam_types.keys()} |
Returns the value of `key` if it exists, else `default`. | def get(self, key, default=None):
"""Returns the value of `key` if it exists, else `default`."""
if key in self._hparam_types:
# Ensure that default is compatible with the parameter type.
if default is not None:
param_type, is_param_list = self._hparam_types[key]
type_str = 'list<%s>... |
Returns the field name given parameter type and is_list.
Args:
param_type: Data type of the hparam.
is_list: Whether this is a list.
Returns:
A string representation of the field name.
Raises:
ValueError: If parameter type is not recognized. | def _get_kind_name(param_type, is_list):
"""Returns the field name given parameter type and is_list.
Args:
param_type: Data type of the hparam.
is_list: Whether this is a list.
Returns:
A string representation of the field name.
Raises:
ValueError: If parameter type is not rec... |
Returns the visualizations for query.
Args:
query: The query to process.
Returns:
A dictionary of results with processing and graph visualizations. | def process(self, query):
"""Returns the visualizations for query.
Args:
query: The query to process.
Returns:
A dictionary of results with processing and graph visualizations.
"""
tf.logging.info("Processing new query [%s]" %query)
# Create the new TFDBG hook directory.
hook_... |