Code stringlengths 103 85.9k | Summary listlengths 0 94 |
|---|---|
Please provide a description of the function:def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):
assert length_bucket_step > 1.0
x = min_length
boundaries = []
while x < max_length:
boundaries.append(x)
x = max(x + 1, int(x * length_bucket_step))
return boundaries | [
"A default set of length-bucket boundaries."
] |
Please provide a description of the function:def batching_scheme(batch_size,
max_length,
min_length_bucket,
length_bucket_step,
drop_long_sequences=False,
shard_multiplier=1,
length_multiplier=1,
... | [
"A batching scheme based on model hyperparameters.\n\n Every batch contains a number of sequences divisible by `shard_multiplier`.\n\n Args:\n batch_size: int, total number of tokens in a batch.\n max_length: int, sequences longer than this will be skipped. Defaults to\n batch_size.\n min_length_buc... |
Please provide a description of the function:def hparams_to_batching_scheme(hparams,
drop_long_sequences=False,
shard_multiplier=1,
length_multiplier=1):
return batching_scheme(
batch_size=hparams.batch_size,
m... | [
"Wrapper around _batching_scheme with hparams."
] |
Please provide a description of the function:def pad_for_tpu(shapes_dict, hparams, max_length):
padded_shapes = {}
def get_filler(specified_max_length):
if not specified_max_length:
return max_length
return min(specified_max_length, max_length)
inputs_none_filler = get_filler(hparams.max_input_... | [
"Pads unknown features' dimensions for TPU."
] |
Please provide a description of the function:def standardize_shapes(features, batch_size=None):
for fname in ["inputs", "targets"]:
if fname not in features:
continue
f = features[fname]
while len(f.get_shape()) < 4:
f = tf.expand_dims(f, axis=-1)
features[fname] = f
if batch_size:
... | [
"Set the right shapes for the features."
] |
Please provide a description of the function:def _file_num_records_cached(filename):
# Cache the result, as this is expensive to compute
if filename in _file_num_records_cache:
return _file_num_records_cache[filename]
ret = 0
for _ in tf.python_io.tf_record_iterator(filename):
ret += 1
_file_num_re... | [
"Return the number of TFRecords in a file."
] |
Please provide a description of the function:def pad_batch(features, batch_multiple):
feature = list(features.items())[0][1]
batch_size = tf.shape(feature)[0]
mod = batch_size % batch_multiple
has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)
batch_padding = batch_multiple * has_mod - mod
padded_featur... | [
"Pad batch dim of features to nearest multiple of batch_multiple."
] |
Please provide a description of the function:def input_fn(dataset,
filepattern,
skip_random_fraction_when_training,
batch_size_means_tokens_param,
batch_size_multiplier,
max_length,
mode,
hparams,
data_dir=None,
... | [
"Builds input pipeline for problem.\n\n Args:\n dataset: the dataset to make input function from.\n filepattern: the pattern of files to read from.\n skip_random_fraction_when_training: whether to skip randomly when training.\n batch_size_means_tokens_param: whether batch size should mean tokens.\n ... |
Please provide a description of the function:def generate_shard_args(outfiles, num_examples):
num_shards = len(outfiles)
num_examples_per_shard = num_examples // num_shards
start_idxs = [i * num_examples_per_shard for i in range(num_shards)]
end_idxs = list(start_idxs)
end_idxs.pop(0)
end_idxs.append(num... | [
"Generate start and end indices per outfile."
] |
Please provide a description of the function:def dataset_generator(filepath,
dataset,
chunk_size=1,
start_idx=None,
end_idx=None):
encoder = dna_encoder.DNAEncoder(chunk_size=chunk_size)
with h5py.File(filepath, "r") as h5_fi... | [
"Generate example dicts."
] |
Please provide a description of the function:def to_example_dict(encoder, inputs, mask, outputs):
# Inputs
bases = []
input_ids = []
last_idx = -1
for row in np.argwhere(inputs):
idx, base_id = row
idx, base_id = int(idx), int(base_id)
assert idx > last_idx # if not, means 2 True values in 1 r... | [
"Convert single h5 record to an example dict."
] |
def linear_interpolate(tensor1, tensor2, coeffs):
  """Linearly interpolate between two tensors at each coefficient.

  Args:
    tensor1: 4-D Tensor, shape=(NHWC).
    tensor2: 4-D Tensor, shape=(NHWC).
    coeffs: list of floats.
  Returns:
    Tensor with the interpolation at coeffs[i] stacked along axis 0.
  """
  # tensor1 + c * (tensor2 - tensor1) sweeps from tensor1 (c=0) to
  # tensor2 (c=1).
  difference = tensor2 - tensor1
  interpolations = [tensor1 + coeff * difference for coeff in coeffs]
  return tf.concat(interpolations, axis=0)
"Linearly interpolate between two tensors at coeff.\n\n Args:\n tensor1: 4-D Tensor, shape=(NHWC)\n tensor2: 4-D Tensor, shape=(NHWC)\n coeffs: list of floats.\n Returns:\n interp_latents: 5-D Tensor, with interp_latents[i] representing\n interpolations at coeffs[i].\n ... |
Please provide a description of the function:def linear_interpolate_rank(tensor1, tensor2, coeffs, rank=1):
# sum across space, max across channels.
_, _, _, num_channels = common_layers.shape_list(tensor1)
diff_sq_sum = tf.reduce_sum((tensor1 - tensor2)**2, axis=(0, 1, 2))
_, feature_ranks = tf.math.top_k(d... | [
"Linearly interpolate channel at \"rank\" between two tensors.\n\n The channels are ranked according to their L2 norm between tensor1[channel]\n and tensor2[channel].\n\n Args:\n tensor1: 4-D Tensor, NHWC\n tensor2: 4-D Tensor, NHWC\n coeffs: list of floats.\n rank: integer.\n Returns:\n interp_l... |
def postprocess(x, n_bits_x=8):
  """Convert x from [-0.5, 0.5] to uint8 pixel values in [0, 255].

  Args:
    x: 3-D or 4-D Tensor normalized between [-0.5, 0.5].
    n_bits_x: number of bits per output pixel; defaults to 8
      (256 possible values).
  Returns:
    3-D or 4-D uint8 Tensor representing images or videos.
  """
  # Replace NaNs / infinities with ones so the cast below is well-defined.
  x = tf.where(tf.is_finite(x), x, tf.ones_like(x))
  # Shift [-0.5, 0.5] into [0, 1], then scale to the pixel range.
  x = tf.clip_by_value(x, -0.5, 0.5) + 0.5
  x = x * (2**n_bits_x)
  return tf.cast(tf.clip_by_value(x, 0, 255), dtype=tf.uint8)
"Converts x from [-0.5, 0.5], to [0, 255].\n\n Args:\n x: 3-D or 4-D Tensor normalized between [-0.5, 0.5]\n n_bits_x: Number of bits representing each pixel of the output.\n Defaults to 8, to default to 256 possible values.\n Returns:\n x: 3-D or 4-D Tensor representing images or videos.\n ... |
Please provide a description of the function:def get_cond_latents_at_level(cond_latents, level, hparams):
if cond_latents:
if hparams.latent_dist_encoder in ["conv_net", "conv3d_net"]:
return [cond_latent[level] for cond_latent in cond_latents]
elif hparams.latent_dist_encoder in ["pointwise", "conv_... | [
"Returns a single or list of conditional latents at level 'level'."
] |
Please provide a description of the function:def check_cond_latents(cond_latents, hparams):
if cond_latents is None:
return
if not isinstance(cond_latents[0], list):
cond_latents = [cond_latents]
exp_num_latents = hparams.num_cond_latents
if hparams.latent_dist_encoder == "conv_net":
exp_num_late... | [
"Shape checking for cond_latents."
] |
Please provide a description of the function:def get_variable_ddi(name, shape, initial_value, dtype=tf.float32, init=False,
trainable=True):
# If init is a tf bool: w is assigned dynamically at runtime.
# If init is a python bool: then w is determined during graph construction.
w = tf.get_... | [
"Wrapper for data-dependent initialization."
] |
def get_dropout(x, rate=0.0, init=True):
  """Apply dropout to x with dropout_rate = rate.

  Zero dropout is applied during init (or when rate is zero).

  Args:
    x: 4-D Tensor, shape=(NHWC).
    rate: dropout rate.
    init: whether this is the initialization pass (no dropout).
  Returns:
    x: activations after dropout.
  """
  if rate == 0 or init:
    return x
  # training=True: dropout is always active outside of init.
  return tf.layers.dropout(x, rate=rate, training=True)
"Dropout x with dropout_rate = rate.\n\n Apply zero dropout during init or prediction time.\n\n Args:\n x: 4-D Tensor, shape=(NHWC).\n rate: Dropout rate.\n init: Initialization.\n Returns:\n x: activations after dropout.\n "
] |
Please provide a description of the function:def actnorm_3d(name, x, logscale_factor=3.):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x = tf.unstack(x, axis=1)
x_normed = []
for ind, x_step in enumerate(x):
x_step, _ = actnorm("actnorm_%d" % ind, x_step,
logscale_... | [
"Applies actnorm to each time-step independently.\n\n There are a total of 2*n_channels*n_steps parameters learnt.\n\n Args:\n name: variable scope.\n x: 5-D Tensor, (NTHWC)\n logscale_factor: Increases the learning rate of the scale by\n logscale_factor.\n Returns:\n x: 5-D Tenso... |
Please provide a description of the function:def actnorm(name, x, logscale_factor=3., reverse=False, init=False,
trainable=True):
var_arg_scope = arg_scope([get_variable_ddi], trainable=trainable)
var_scope = tf.variable_scope(name, reuse=tf.AUTO_REUSE)
with var_scope, var_arg_scope:
if not re... | [
"x_{ij} = s x x_{ij} + b. Per-channel scaling and bias.\n\n If init is set to True, the scaling and bias are initialized such\n that the mean and variance of the output activations of the first minibatch\n are zero and one respectively.\n\n Args:\n name: variable scope.\n x: input\n logscale_factor: Us... |
Please provide a description of the function:def actnorm_center(name, x, reverse=False, init=False):
shape = common_layers.shape_list(x)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
assert len(shape) == 2 or len(shape) == 4
if len(shape) == 2:
x_mean = tf.reduce_mean(x, [0], keepdims=True)
... | [
"Add a bias to x.\n\n Initialize such that the output of the first minibatch is zero centered\n per channel.\n\n Args:\n name: scope\n x: 2-D or 4-D Tensor.\n reverse: Forward or backward operation.\n init: data-dependent initialization.\n\n Returns:\n x_center: (x + b), if reverse is True and (x... |
Please provide a description of the function:def actnorm_scale(name, x, logscale_factor=3., reverse=False, init=False):
x_shape = common_layers.shape_list(x)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
# Variance initialization logic.
assert len(x_shape) == 2 or len(x_shape) == 4
if len(x_sha... | [
"Per-channel scaling of x."
] |
Please provide a description of the function:def invertible_1x1_conv(name, x, reverse=False):
_, height, width, channels = common_layers.shape_list(x)
w_shape = [channels, channels]
# Random rotation-matrix Q
random_matrix = np.random.rand(channels, channels)
np_w = scipy.linalg.qr(random_matrix)[0].astyp... | [
"1X1 convolution on x.\n\n The 1X1 convolution is parametrized as P*L*(U + sign(s)*exp(log(s))) where\n 1. P is a permutation matrix.\n 2. L is a lower triangular matrix with diagonal entries unity.\n 3. U is a upper triangular matrix where the diagonal entries zero.\n 4. s is a vector.\n\n sign(s) and P are ... |
Please provide a description of the function:def add_edge_bias(x, filter_size):
x_shape = common_layers.shape_list(x)
if filter_size[0] == 1 and filter_size[1] == 1:
return x
a = (filter_size[0] - 1) // 2 # vertical padding size
b = (filter_size[1] - 1) // 2 # horizontal padding size
padding = [[0, 0... | [
"Pad x and concatenates an edge bias across the depth of x.\n\n The edge bias can be thought of as a binary feature which is unity when\n the filter is being convolved over an edge and zero otherwise.\n\n Args:\n x: Input tensor, shape (NHWC)\n filter_size: filter_size to determine padding.\n Returns:\n ... |
Please provide a description of the function:def time_pad(x, filter_size, dilations):
x_shape = common_layers.shape_list(x)
if filter_size == [1, 1, 1]:
return x
_, h, w = filter_size
eff_h = h + (h - 1)*(dilations[2] - 1)
eff_w = w + (w - 1)*(dilations[3] - 1)
a = (eff_h - 1) // 2 # vertical paddin... | [
"Pad left across time and pad valid across the spatial components.\n\n Also concats a binary feature that indicates if a feature is padded or not.\n\n Args:\n x: 5-D Tensor, (NTHWC)\n filter_size: list of ints\n dilations: list of ints, dilations - 1 specifies the number of holes\n between ... |
Please provide a description of the function:def conv(name, x, output_channels, filter_size=None, stride=None,
logscale_factor=3.0, apply_actnorm=True, conv_init="default",
dilations=None):
if conv_init == "zeros" and apply_actnorm:
raise ValueError("apply_actnorm is unstable when init is set... | [
"Convolutional layer with edge bias padding and optional actnorm.\n\n If x is 5-dimensional, actnorm is applied independently across every\n time-step.\n\n Args:\n name: variable scope.\n x: 4-D Tensor or 5-D Tensor of shape NHWC or NTHWC\n output_channels: Number of output channels.\n filter_size: l... |
Please provide a description of the function:def conv_block(name, x, mid_channels, dilations=None, activation="relu",
dropout=0.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
is_2d = len(x_shape) == 4
num_steps = x_shape[1]
if is_2d:
... | [
"2 layer conv block used in the affine coupling layer.\n\n Args:\n name: variable scope.\n x: 4-D or 5-D Tensor.\n mid_channels: Output channels of the second layer.\n dilations: Optional, list of integers.\n activation: relu or gatu.\n If relu, the second layer is relu(W*x)\n If gatu, the... |
Please provide a description of the function:def dilated_conv_stack(name, x, mid_channels, output_channels,
dilation_rates, activation="relu",
dropout=0.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
output = 0.0
for dil_ind, dil_rate in enumerate(dilat... | [
"Dilated convolutional stack.\n\n Features at different rates are computed independently using a 3 layer\n convolutional stack and added.\n\n Args:\n name: variable scope.\n x: 5-D Tensor.\n mid_channels: Number of output channels of the first layer in the conv\n stack.\n output_chan... |
Please provide a description of the function:def conv_stack(name, x, mid_channels, output_channels, dilations=None,
activation="relu", dropout=0.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x = conv_block("conv_block", x, mid_channels=mid_channels,
dilations=dilatio... | [
"3-layer convolutional stack.\n\n Args:\n name: variable scope.\n x: 5-D Tensor.\n mid_channels: Number of output channels of the first layer.\n output_channels: Number of output channels.\n dilations: Dilations to apply in the first 3x3 layer and the last 3x3 layer.\n By default, appl... |
Please provide a description of the function:def additive_coupling(name, x, mid_channels=512, reverse=False,
activation="relu", dropout=0.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
output_channels = common_layers.shape_list(x)[-1] // 2
x1, x2 = tf.split(x, num_or_size_spli... | [
"Reversible additive coupling layer.\n\n Args:\n name: variable scope.\n x: 4-D Tensor, shape=(NHWC).\n mid_channels: number of channels in the coupling layer.\n reverse: Forward or reverse operation.\n activation: \"relu\" or \"gatu\"\n dropout: default, 0.0\n Returns:\n output: 4-D Tensor, ... |
Please provide a description of the function:def affine_coupling(name, x, mid_channels=512, activation="relu",
reverse=False, dropout=0.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)
#... | [
"Reversible affine coupling layer.\n\n Args:\n name: variable scope.\n x: 4-D Tensor.\n mid_channels: number of channels in the coupling layer.\n activation: Can be either \"relu\" or \"gatu\".\n reverse: Forward or reverse operation.\n dropout: default, 0.0\n Returns:\n output: x shifted and... |
Please provide a description of the function:def squeeze(name, x, factor=2, reverse=True):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
shape = common_layers.shape_list(x)
if factor == 1:
return x
height = int(shape[1])
width = int(shape[2])
n_channels = int(shape[3])
if not re... | [
"Block-wise spatial squeezing of x to increase the number of channels.\n\n Args:\n name: Used for variable scoping.\n x: 4-D Tensor of shape (batch_size X H X W X C)\n factor: Factor by which the spatial dimensions should be squeezed.\n reverse: Squueze or unsqueeze operation.\n\n Returns:\n x: 4-D... |
Please provide a description of the function:def get_dilation_rates(hparams, width):
# dil_rate=1 means no dilation.
allowed_dilations = [[1]*5]
apply_dilations = hparams.get("latent_apply_dilations", False)
dilation_rates = hparams.get("latent_dilation_rates", [1, 3])
if apply_dilations:
for rate in d... | [
"Get a list of valid dilation rates.\n\n Args:\n hparams: HParams.\n width: spatial dimension. Ensures that the effective filter size is\n not larger than the spatial dimension.\n Returns:\n allowed_dilations: A list of dilation rates.\n "
] |
Please provide a description of the function:def temporal_latent_to_dist(name, x, hparams, output_channels=None):
_, _, width, _, res_channels = common_layers.shape_list(x)
if output_channels is None:
output_channels = res_channels
dilation_rates = get_dilation_rates(hparams, width)
with tf.variable_scop... | [
"Network that maps a time-indexed list of 3-D latents to a gaussian.\n\n Args:\n name: variable scope.\n x: List of 4-D Tensors indexed by time, (NHWC)\n hparams: tf.contrib.training.Hparams.\n output_channels: int, Number of channels of the output gaussian mean.\n Returns:\n dist: tfp.distribution... |
Please provide a description of the function:def single_conv_dist(name, x, output_channels=None):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
if output_channels is None:
output_channels = x_shape[-1]
mean_log_scale = conv("conv2d", x, output_channels=2... | [
"A 3x3 convolution mapping x to a standard normal distribution at init.\n\n Args:\n name: variable scope.\n x: 4-D Tensor.\n output_channels: number of channels of the mean and std.\n "
] |
Please provide a description of the function:def latent_to_dist(name, x, hparams, output_channels=None):
architecture = hparams.get("latent_architecture", "single_conv")
depth = hparams.get("latent_encoder_depth", 1)
pre_output_channels = hparams.get("latent_pre_output_channels", 512)
width = hparams.get("la... | [
"Map latent to the mean and log-scale of a Gaussian.\n\n Args:\n name: variable scope.\n x: 4-D Tensor of shape (NHWC)\n hparams: HParams.\n latent_architecture - can be \"single_conv\", \"glow_nn\" or \"glow_resnet\",\n default = single_conv\n latent_encoder_depth - i... |
def noise_op(latents, hparams):
  """Add isotropic gaussian noise to each latent during training.

  Args:
    latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC).
    hparams: HParams with latent_noise (stddev) and mode.
  Returns:
    latents, with gaussian noise added when training and
    latent_noise > 0; unchanged otherwise.
  """
  is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
  if hparams.latent_noise == 0 or not is_training:
    return latents
  noise = tf.random_normal(
      common_layers.shape_list(latents), stddev=hparams.latent_noise)
  return latents + noise
"Adds isotropic gaussian-noise to each latent.\n\n Args:\n latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC).\n hparams: HParams.\n Returns:\n latents: latents with isotropic gaussian noise appended.\n "
] |
Please provide a description of the function:def merge_level_and_latent_dist(level_dist, latent_dist,
merge_std="prev_level"):
level_mean, level_std = level_dist.loc, level_dist.scale
latent_mean, latent_std = latent_dist.loc, latent_dist.scale
new_mean = level_mean + latent_mea... | [
"Merge level_dist and latent_dist.\n\n new_dist ~ N(level_dist.mean + latent_dis.mean, std) where std is determined\n according to merge_std.\n\n Args:\n level_dist: instance of tfp.distributions.Normal\n latent_dist: instance of tfp.distributions.Normal\n merge_std: can be \"prev_level\", \"prev_step\"... |
Please provide a description of the function:def level_cond_prior(prior_dist, z, latent, hparams, state):
latent_dist_encoder = hparams.get("latent_dist_encoder", None)
latent_skip = hparams.get("latent_skip", False)
if latent_dist_encoder == "pointwise":
last_latent = latent
merge_std = hparams.level_... | [
"Returns a conditional prior for each level.\n\n Args:\n prior_dist: Distribution conditioned on the previous levels.\n z: Tensor, output of the previous levels.\n latent: Tensor or a list of tensors to condition the latent_distribution.\n hparams: next_frame_glow hparams.\n state: Current LSTM stat... |
Please provide a description of the function:def compute_prior(name, z, latent, hparams, condition=False, state=None,
temperature=1.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if isinstance(condition, bool):
condition = tf.constant(condition, dtype=tf.bool)
prior_dist = s... | [
"Distribution on z_t conditioned on z_{t-1} and latent.\n\n Args:\n name: variable scope.\n z: 4-D Tensor.\n latent: optional,\n if hparams.latent_dist_encoder == \"pointwise\", this is a list\n of 4-D Tensors of length hparams.num_cond_latents.\n else, this is just a 4-D ... |
Please provide a description of the function:def split(name, x, reverse=False, eps=None, eps_std=None, cond_latents=None,
hparams=None, state=None, condition=False, temperature=1.0):
# TODO(mechcoder) Change the return type to be a dict.
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if not rev... | [
"Splits / concatenates x into x1 and x2 across number of channels.\n\n For the forward pass, x2 is assumed be gaussian,\n i.e P(x2 | x1) ~ N(mu, sigma) where mu and sigma are the outputs of\n a network conditioned on x1 and optionally on cond_latents.\n For the reverse pass, x2 is determined from mu(x1) and sig... |
Please provide a description of the function:def revnet_step(name, x, hparams, reverse=True):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if hparams.coupling == "additive":
coupling_layer = functools.partial(
additive_coupling, name="additive", reverse=reverse,
mid_channels=hpa... | [
"One step of glow generative flow.\n\n Actnorm + invertible 1X1 conv + affine_coupling.\n\n Args:\n name: used for variable scope.\n x: input\n hparams: coupling_width is the only hparam that is being used in\n this function.\n reverse: forward or reverse pass.\n Returns:\n z: Output o... |
Please provide a description of the function:def revnet(name, x, hparams, reverse=True):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
steps = np.arange(hparams.depth)
if reverse:
steps = steps[::-1]
objective = 0.0
for step in steps:
x, curr_obj = revnet_step(
"revnet_s... | [
"'hparams.depth' steps of generative flow.\n\n Args:\n name: variable scope for the revnet block.\n x: 4-D Tensor, shape=(NHWC).\n hparams: HParams.\n reverse: bool, forward or backward pass.\n Returns:\n x: 4-D Tensor, shape=(NHWC).\n objective: float.\n "
] |
Please provide a description of the function:def scale_gaussian_prior(name, z, logscale_factor=3.0, trainable=True):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
z_shape = common_layers.shape_list(z)
latent_multiplier = tf.get_variable(
"latent_multiplier", shape=z_shape, dtype=tf.float32,
... | [
"Returns N(s^i * z^i, std^i) where s^i and std^i are pre-component.\n\n s^i is a learnable parameter with identity initialization.\n std^i is optionally learnable with identity initialization.\n\n Args:\n name: variable scope.\n z: input_tensor\n logscale_factor: equivalent to scaling up the learning_ra... |
Please provide a description of the function:def top_prior(name, z_shape, learn_prior="normal", temperature=1.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
h = tf.zeros(z_shape, dtype=tf.float32)
if learn_prior == "normal":
prior_dist = tfp.distributions.Normal(h, tf.exp(h))
elif learn_p... | [
"Unconditional prior distribution.\n\n Args:\n name: variable scope\n z_shape: Shape of the mean / scale of the prior distribution.\n learn_prior: Possible options are \"normal\" and \"single_conv\".\n If set to \"single_conv\", the gaussian is parametrized by a\n single co... |
Please provide a description of the function:def uniform_binning_correction(x, n_bits=8):
n_bins = 2**n_bits
batch_size, height, width, n_channels = common_layers.shape_list(x)
hwc = float(height * width * n_channels)
x = x + tf.random_uniform(
shape=(batch_size, height, width, n_channels),
minv... | [
"Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).\n\n Args:\n x: 4-D Tensor of shape (NHWC)\n n_bits: optional.\n Returns:\n x: x ~ U(x, x + 1.0 / 256)\n objective: Equivalent to -q(x)*log(q(x)).\n "
] |
Please provide a description of the function:def encoder_decoder(name, x, hparams, eps=None, reverse=False,
cond_latents=None, condition=False, states=None,
temperature=1.0):
# TODO(mechcoder) Change return_type to a dict to be backward compatible.
with tf.variable_scope(n... | [
"Glow encoder-decoder. n_levels of (Squeeze + Flow + Split.) operations.\n\n Args:\n name: variable scope.\n x: 4-D Tensor, shape=(NHWC).\n hparams: HParams.\n eps: Stores (glow(x) - mu) / sigma during the forward pass.\n Used only to test if the network is reversible.\n reverse: Forward or ... |
Please provide a description of the function:def bfloat16_activations_var_getter(getter, *args, **kwargs):
requested_dtype = kwargs["dtype"]
if requested_dtype == tf.bfloat16:
kwargs["dtype"] = tf.float32
var = getter(*args, **kwargs)
# This if statement is needed to guard the cast, because batch norm
... | [
"A custom getter function for float32 parameters and bfloat16 activations.\n\n Args:\n getter: custom getter\n *args: arguments\n **kwargs: keyword arguments\n Returns:\n variables with the correct dtype.\n Raises:\n KeyError: if \"dtype\" is not provided as a kwarg.\n "
] |
Please provide a description of the function:def float16_activations_var_getter(getter, *args, **kwargs):
requested_dtype = kwargs["dtype"]
if requested_dtype == tf.float16:
kwargs["dtype"] = tf.float32
if requested_dtype == tf.float32:
requested_dtype = tf.float16
var = getter(*args, **kwargs)
#... | [
"A custom getter function for float32 parameters and float16 activations.\n\n This function ensures the following:\n 1. All variables requested with type fp16 are stored as type fp32.\n 2. All variables requested with type fp32 are returned as type fp16.\n See https://docs.nvidia.com/deeplearning/sdk/mixed-... |
Please provide a description of the function:def simulated_quantize(x, num_bits, noise):
shape = x.get_shape().as_list()
if not (len(shape) >= 2 and shape[-1] > 1):
return x
max_abs = tf.reduce_max(tf.abs(x), -1, keepdims=True) + 1e-9
max_int = 2 ** (num_bits - 1) - 1
scale = max_abs / max_int
x /= s... | [
"Simulate quantization to num_bits bits, with externally-stored scale.\n\n num_bits is the number of bits used to store each value.\n noise is a float32 Tensor containing values in [0, 1).\n Each value in noise should take different values across\n different steps, approximating a uniform distribution over [0, ... |
Please provide a description of the function:def noise_from_step_num():
step = tf.to_int32(tf.train.get_or_create_global_step()) + 1
phi = ((5 ** 0.5) - 1) / 2
# Naive computation tf.mod(phi * step, 1.0) in float32 would be disastrous
# due to loss of precision when the step number gets large.
# Computatio... | [
"Quantization noise equal to (phi * (step_num + 1)) mod 1.0.\n\n Not using random_uniform here due to a problem on TPU in that random seeds\n are not respected, which may cause the parameters on different replicas\n to go out-of-sync.\n\n Returns:\n a float32 scalar\n "
] |
def _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2):
  """Round x to cand1 or cand2 in an unbiased (stochastic) way.

  cand1 and cand2 are the same shape as x; for every element of x they
  should be the two closest bfloat16 values (order does not matter, but
  they must differ from each other).

  Args:
    x: float32 Tensor.
    noise: float32 Tensor with values in [0, 1), broadcastable to x.
    cand1: bfloat16 candidate Tensor.
    cand2: bfloat16 candidate Tensor.
  Returns:
    Tensor of cand1/cand2 values, selected stochastically.
  """
  lo = tf.to_float(cand1)
  hi = tf.to_float(cand2)
  # Fractional position of x between the two candidates; comparing it to
  # uniform noise makes the expected rounded value equal x.
  fractional_part = (x - lo) / (hi - lo)
  return tf.where(tf.greater(fractional_part, noise), cand2, cand1)
"Round-off x to cand1 or to cand2 in an unbiased way.\n\n Cand1 and cand2 are the same shape as x.\n For every element of x, the corresponding elements of cand1 and cand2 should\n be the two closest bfloat16 values to x. Order does not matter.\n cand1 and cand2 must differ from each other.\n\n Args:\n x: A... |
Please provide a description of the function:def _to_bfloat16_unbiased(x, noise):
x_sign = tf.sign(x)
# Make sure x is positive. If it is zero, the two candidates are identical.
x = x * x_sign + 1e-30
cand1 = tf.to_bfloat16(x)
cand1_f = tf.to_float(cand1)
# This relies on the fact that for a positive bf... | [
"Convert a float32 to a bfloat16 using randomized roundoff.\n\n Args:\n x: A float32 Tensor.\n noise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x)\n Returns:\n A float32 Tensor.\n "
] |
Please provide a description of the function:def custom_getter(self, activation_dtype=tf.bfloat16):
def getter_fn(getter, *args, **kwargs):
requested_dtype = kwargs["dtype"]
if requested_dtype in (tf.bfloat16, tf.float32):
kwargs["dtype"] = tf.bfloat16
kwargs["initializer"] = _Encod... | [
"A custom getter that uses the encoding for bfloat16 and float32 vars.\n\n When a bfloat16 or float32 variable is requsted, an encoded float16\n varaible is created, which is then decoded and cast to a bfloat16\n activation.\n\n Args:\n activation_dtype: a dtype to which to convert the decoded valu... |
Please provide a description of the function:def load_videos(template, video_length, frame_shape):
filenames = tf.gfile.Glob(template)
if not filenames:
raise ValueError("no files found.")
filenames = sorted(filenames)
dataset_len = len(filenames)
filenames = tf.constant(filenames)
dataset = tf.data.... | [
"Loads videos from files.\n\n Args:\n template: template string for listing the image files.\n video_length: length of the video.\n frame_shape: shape of each frame.\n\n Returns:\n dataset: the tf dataset frame by frame.\n dataset_len: number of the items which is the number of image files.\n\n Ra... |
Please provide a description of the function:def psnr_and_ssim(output, target):
output = tf.cast(output, dtype=tf.int32)
target = tf.cast(target, dtype=tf.int32)
psnr = tf.image.psnr(output, target, max_val=255)
ssim = tf.image.ssim(output, target, max_val=255)
return psnr, ssim | [
"Compute the PSNR and SSIM.\n\n Args:\n output: 4-D Tensor, shape=(num_frames, height, width, num_channels)\n target: 4-D Tensor, shape=(num_frames, height, width, num_channels)\n Returns:\n psnr: 1-D Tensor, shape=(num_frames,)\n ssim: 1-D Tensor, shape=(num_frames,)\n "
] |
Please provide a description of the function:def get_zipped_dataset_from_predictions(predictions):
targets = stack_data_given_key(predictions, "targets")
outputs = stack_data_given_key(predictions, "outputs")
num_videos, num_steps = targets.shape[:2]
# Truncate output time-steps to match target time-steps
... | [
"Creates dataset from in-memory predictions."
] |
Please provide a description of the function:def compute_one_decoding_video_metrics(iterator, feed_dict, num_videos):
output, target = iterator.get_next()
metrics = psnr_and_ssim(output, target)
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
initalizer = iterator._initializer #... | [
"Computes the average of all the metric for one decoding.\n\n Args:\n iterator: dataset iterator.\n feed_dict: feed dict to initialize iterator.\n num_videos: number of videos.\n\n Returns:\n all_psnr: 2-D Numpy array, shape=(num_samples, num_frames)\n all_ssim: 2-D Numpy array, shape=(num_samples,... |
Please provide a description of the function:def reduce_to_best_decode(metrics, reduce_func):
num_videos = metrics.shape[1]
# Take mean of the metric across the frames to approximate the video
# closest to the ground truth.
mean_across_frames = np.mean(metrics, axis=-1)
# For every sample, use the decode ... | [
"Extracts the best-decode from the metrics according to reduce_func.\n\n Args:\n metrics: 3-D numpy array, shape=(num_decodes, num_samples, num_frames)\n reduce_func: callable, np.argmax or np.argmin.\n Returns:\n best_metrics: 2-D numpy array, shape=(num_samples, num_frames).\n best_decode_ind: 1-D n... |
Please provide a description of the function:def compute_all_metrics_statistics(all_results):
statistics = {}
decode_inds = {}
all_metrics = all_results.keys()
for key in all_metrics:
values = all_results[key]
statistics[key + "_MEAN"] = np.mean(values, axis=0)
statistics[key + "_STD"] = np.std(... | [
"Computes statistics of metrics across multiple decodings.\n\n Args:\n all_results: dict of 3-D numpy arrays.\n Each array has shape=(num_decodes, num_samples, num_frames).\n Returns:\n statistics: dict of 1-D numpy arrays, shape=(num_frames).\n First the statistic (max/mean/s... |
Please provide a description of the function:def compute_video_metrics_from_predictions(predictions, decode_hparams):
all_results = {}
ssim_all_decodes, psnr_all_decodes = [], []
for single_decode in predictions:
args = get_zipped_dataset_from_predictions(single_decode)
psnr_single, ssim_single = com... | [
"Computes metrics from predictions.\n\n Args:\n predictions: list of list of dicts.\n outer length: num_decodes, inner_length: num_samples\n decode_hparams: Decode hparams. instance of HParams.\n Returns:\n statistics: dict of Tensors, key being the metric with each Tensor\n ... |
Please provide a description of the function:def compute_video_metrics_from_png_files(
output_dirs, problem_name, video_length, frame_shape):
ssim_all_decodes, psnr_all_decodes = [], []
for output_dir in output_dirs:
output_files, target_files = get_target_and_output_filepatterns(
output_dir, pro... | [
"Computes the average of all the metric for one decoding.\n\n This function assumes that all the predicted and target frames\n have been saved on the disk and sorting them by name will result\n to consecutive frames saved in order.\n\n Args:\n output_dirs: directory with all the saved frames.\n problem_na... |
Please provide a description of the function:def compute_and_save_video_metrics(
output_dirs, problem_name, video_length, frame_shape):
statistics, all_results = compute_video_metrics_from_png_files(
output_dirs, problem_name, video_length, frame_shape)
for results, output_dir in zip(all_results, outpu... | [
"Compute and saves the video metrics."
] |
Please provide a description of the function:def swap_time_and_batch_axes(inputs):
transposed_axes = tf.concat([[1, 0], tf.range(2, tf.rank(inputs))], axis=0)
return tf.transpose(inputs, transposed_axes) | [
"Swaps time and batch axis (the first two axis)."
] |
Please provide a description of the function:def encode_to_shape(inputs, shape, scope):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
w, h = shape[1], shape[2]
x = inputs
x = tfl.flatten(x)
x = tfl.dense(x, w * h, activation=None, name="enc_dense")
x = tf.reshape(x, (-1, w, h, 1))
ret... | [
"Encode the given tensor to given image shape."
] |
Please provide a description of the function:def decode_to_shape(inputs, shape, scope):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
x = inputs
x = tfl.flatten(x)
x = tfl.dense(x, shape[2], activation=None, name="dec_dense")
x = tf.expand_dims(x, axis=1)
return x | [
"Encode the given tensor to given image shape."
] |
Please provide a description of the function:def basic_lstm(inputs, state, num_units, name=None):
input_shape = common_layers.shape_list(inputs)
# reuse parameters across time-steps.
cell = tf.nn.rnn_cell.BasicLSTMCell(
num_units, name=name, reuse=tf.AUTO_REUSE)
if state is None:
state = cell.zero_... | [
"Basic LSTM."
] |
Please provide a description of the function:def lstm_cell(inputs,
state,
num_units,
use_peepholes=False,
cell_clip=0.0,
initializer=None,
num_proj=None,
num_unit_shards=None,
num_proj_shards=None,
... | [
"Full LSTM cell."
] |
Please provide a description of the function:def conv_lstm_2d(inputs, state, output_channels,
kernel_size=5, name=None, spatial_dims=None):
input_shape = common_layers.shape_list(inputs)
batch_size, input_channels = input_shape[0], input_shape[-1]
if spatial_dims is None:
input_shape = inp... | [
"2D Convolutional LSTM."
] |
Please provide a description of the function:def scheduled_sample_count(ground_truth_x,
generated_x,
batch_size,
scheduled_sample_var):
num_ground_truth = scheduled_sample_var
idx = tf.random_shuffle(tf.range(batch_size))
ground_t... | [
"Sample batch with specified mix of groundtruth and generated data points.\n\n Args:\n ground_truth_x: tensor of ground-truth data points.\n generated_x: tensor of generated data points.\n batch_size: batch size\n scheduled_sample_var: number of ground-truth examples to include in batch.\n Returns:\n ... |
Please provide a description of the function:def inject_additional_input(layer, inputs, name, mode="concat"):
layer_shape = common_layers.shape_list(layer)
input_shape = common_layers.shape_list(inputs)
zeros_mask = tf.zeros(layer_shape, dtype=tf.float32)
if mode == "concat":
emb = encode_to_shape(inputs... | [
"Injects the additional input into the layer.\n\n Args:\n layer: layer that the input should be injected to.\n inputs: inputs to be injected.\n name: TF scope name.\n mode: how the infor should be added to the layer:\n \"concat\" concats as additional channels.\n \"multiplicative\" broadcasts... |
Please provide a description of the function:def scheduled_sample_prob(ground_truth_x,
generated_x,
batch_size,
scheduled_sample_var):
probability_threshold = scheduled_sample_var
probability_of_generated = tf.random_uniform([batch_siz... | [
"Probability based scheduled sampling.\n\n Args:\n ground_truth_x: tensor of ground-truth data points.\n generated_x: tensor of generated data points.\n batch_size: batch size\n scheduled_sample_var: probability of choosing from ground_truth.\n Returns:\n New batch with randomly selected data point... |
Please provide a description of the function:def dna_transformation(prev_image, dna_input, dna_kernel_size, relu_shift):
# Construct translated images.
prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
image_height = int(prev_image.get_shape()[1])
image_width = int(prev_image.get_shape()[... | [
"Apply dynamic neural advection to previous image.\n\n Args:\n prev_image: previous image to be transformed.\n dna_input: hidden lyaer to be used for computing DNA transformation.\n dna_kernel_size: dna kernel size.\n relu_shift: shift for ReLU function.\n Returns:\n List of images transformed by t... |
Please provide a description of the function:def cdna_transformation(prev_image, cdna_input, num_masks, color_channels,
dna_kernel_size, relu_shift):
batch_size = tf.shape(cdna_input)[0]
height = int(prev_image.get_shape()[1])
width = int(prev_image.get_shape()[2])
# Predict kernels ... | [
"Apply convolutional dynamic neural advection to previous image.\n\n Args:\n prev_image: previous image to be transformed.\n cdna_input: hidden lyaer to be used for computing CDNA kernels.\n num_masks: number of masks and hence the number of CDNA transformations.\n color_channels: the number of color c... |
Please provide a description of the function:def vgg_layer(inputs,
nout,
kernel_size=3,
activation=tf.nn.leaky_relu,
padding="SAME",
is_training=True,
has_batchnorm=False,
scope=None):
with tf.variable_scope(scope):
... | [
"A layer of VGG network with batch norm.\n\n Args:\n inputs: image tensor\n nout: number of output channels\n kernel_size: size of the kernel\n activation: activation function\n padding: padding of the image\n is_training: whether it is training mode or not\n has_batchnorm: whether batchnorm i... |
Please provide a description of the function:def tile_and_concat(image, latent, concat_latent=True):
if not concat_latent:
return image
image_shape = common_layers.shape_list(image)
latent_shape = common_layers.shape_list(latent)
height, width = image_shape[1], image_shape[2]
latent_dims = latent_shape... | [
"Tile latent and concatenate to image across depth.\n\n Args:\n image: 4-D Tensor, (batch_size X height X width X channels)\n latent: 2-D Tensor, (batch_size X latent_dims)\n concat_latent: If set to False, the image is returned as is.\n\n Returns:\n concat_latent: 4-D Tensor, (batch_size X height X w... |
Please provide a description of the function:def _encode_gif(images, fps):
writer = WholeVideoWriter(fps)
writer.write_multi(images)
return writer.finish() | [
"Encodes numpy images into gif string.\n\n Args:\n images: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape\n `[time, height, width, channels]` where `channels` is 1 or 3.\n fps: frames per second of the animation\n\n Returns:\n The encoded gif string.\n\n Raises:\n IOError: If the ff... |
Please provide a description of the function:def ffmpeg_works():
images = np.zeros((2, 32, 32, 3), dtype=np.uint8)
try:
_encode_gif(images, 2)
return True
except (IOError, OSError):
return False | [
"Tries to encode images with ffmpeg to check if it works."
] |
Please provide a description of the function:def py_gif_summary(tag, images, max_outputs, fps, return_summary_value=False):
images = np.asarray(images)
if images.dtype != np.uint8:
raise ValueError("Tensor must have dtype uint8 for gif summary.")
if images.ndim != 5:
raise ValueError("Tensor must be 5-... | [
"Outputs a `Summary` protocol buffer with gif animations.\n\n Args:\n tag: Name of the summary.\n images: A 5-D `uint8` `np.array` of shape `[batch_size, time, height, width,\n channels]` where `channels` is 1 or 3.\n max_outputs: Max number of batch elements to generate gifs for.\n fps: frames pe... |
Please provide a description of the function:def gif_summary(name, tensor, max_outputs=3, fps=10, collections=None,
family=None):
tensor = tf.convert_to_tensor(tensor)
if len(tensor.get_shape()) != 5:
raise ValueError("Assuming videos given as tensors in the format "
"[ba... | [
"Outputs a `Summary` protocol buffer with gif animations.\n\n Args:\n name: Name of the summary.\n tensor: A 5-D `uint8` `Tensor` of shape `[batch_size, time, height, width,\n channels]` where `channels` is 1 or 3.\n max_outputs: Max number of batch elements to generate gifs for.\n fps: frames per... |
Please provide a description of the function:def conv_latent_tower(images, time_axis, latent_channels=1, min_logvar=-5,
is_training=False, random_latent=False,
tiny_mode=False, small_mode=False):
conv_size = tinyify([32, 64, 64], tiny_mode, small_mode)
with tf.variable... | [
"Builds convolutional latent tower for stochastic model.\n\n At training time this tower generates a latent distribution (mean and std)\n conditioned on the entire video. This latent variable will be fed to the\n main tower as an extra variable to be used for future frames prediction.\n At inference time, the t... |
Please provide a description of the function:def beta_schedule(schedule, global_step, final_beta, decay_start, decay_end):
if decay_start > decay_end:
raise ValueError("decay_end is smaller than decay_end.")
# Since some of the TF schedules do not support incrementing a value,
# in all of the schedules, w... | [
"Get KL multiplier (beta) based on the schedule."
] |
Please provide a description of the function:def extract_random_video_patch(videos, num_frames=-1):
if num_frames == -1:
return videos
batch_size, num_total_frames, h, w, c = common_layers.shape_list(videos)
if num_total_frames < num_frames:
raise ValueError("Expected num_frames <= %d, got %d" %
... | [
"For every video, extract a random consecutive patch of num_frames.\n\n Args:\n videos: 5-D Tensor, (NTHWC)\n num_frames: Integer, if -1 then the entire video is returned.\n Returns:\n video_patch: 5-D Tensor, (NTHWC) with T = num_frames.\n Raises:\n ValueError: If num_frames is greater than the numb... |
Please provide a description of the function:def write_multi(self, frames, encoded_frames=None):
if encoded_frames is None:
# Infinite iterator.
encoded_frames = iter(lambda: None, 1)
for (frame, encoded_frame) in zip(frames, encoded_frames):
self.write(frame, encoded_frame) | [
"Writes multiple video frames."
] |
Please provide a description of the function:def __init_ffmpeg(self, image_shape):
import itertools # pylint: disable=g-import-not-at-top
from subprocess import Popen, PIPE # pylint: disable=g-import-not-at-top,g-multiple-import,g-importing-member
ffmpeg = "ffmpeg"
height, width, channels = image... | [
"Initializes ffmpeg to write frames."
] |
Please provide a description of the function:def _start_reader_thread(self, stream, chunks):
import io # pylint: disable=g-import-not-at-top
import threading # pylint: disable=g-import-not-at-top
def target():
while True:
chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
if not chunk:... | [
"Starts a thread for reading output from FFMPEG.\n\n The thread reads consecutive chunks from the stream and saves them in\n the given list.\n\n Args:\n stream: output stream of the FFMPEG process.\n chunks: list to save output chunks to.\n\n Returns:\n Thread\n "
] |
Please provide a description of the function:def finish(self):
if self.proc is None:
return None
self.proc.stdin.close()
for thread in (self._out_thread, self._err_thread):
thread.join()
(out, err) = [
b"".join(chunks) for chunks in (self._out_chunks, self._err_chunks)
]
... | [
"Finishes transconding and returns the video.\n\n Returns:\n bytes\n\n Raises:\n IOError: in case of transcoding error.\n "
] |
Please provide a description of the function:def validate_flags():
if FLAGS.cloud_mlengine_model_name:
assert not FLAGS.server
assert not FLAGS.servable_name
else:
assert FLAGS.server
assert FLAGS.servable_name | [
"Validates flags are set to acceptable values."
] |
Please provide a description of the function:def make_request_fn():
if FLAGS.cloud_mlengine_model_name:
request_fn = serving_utils.make_cloud_mlengine_request_fn(
credentials=GoogleCredentials.get_application_default(),
model_name=FLAGS.cloud_mlengine_model_name,
version=FLAGS.cloud_mle... | [
"Returns a request function."
] |
Please provide a description of the function:def encoder(self, inputs, n_layers=3):
latent_dims = self.hparams.z_dim
shape_as_list = inputs.shape.as_list()
if len(shape_as_list) != 5:
raise ValueError("Expected inputs to be a 5-D, got %d" %
len(shape_as_list))
if input... | [
"Convnet that encodes inputs into mean and std of a gaussian.\n\n Args:\n inputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels)\n n_layers: Number of layers.\n\n Returns:\n z_mu: Mean of the latent gaussians.\n z_log_var: log(var) of the latent gaussians.\n\n Raises:\n... |
Please provide a description of the function:def get_fc_dimensions(self, strides, kernel_sizes):
output_height, output_width, _ = self.hparams.problem.frame_shape
output_steps = self.hparams.video_num_target_frames
output_shape = np.array([output_steps, output_height, output_width])
for curr_stride... | [
"Get expected fully connected shape after a series of convolutions."
] |
Please provide a description of the function:def discriminator(self, frames):
ndf = self.hparams.num_discriminator_filters
frames = tf.stack(frames)
# Switch from time-major axis to batch-major axis.
frames = common_video.swap_time_and_batch_axes(frames)
# 3-D Conv-net mapping inputs to activ... | [
"3-D SNGAN discriminator.\n\n Args:\n frames: a list of batch-major tensors indexed by time.\n\n Returns:\n logits: 1-D Tensor with shape=batch_size.\n Positive logits imply that the discriminator thinks that it\n belongs to the true class.\n "
] |
Please provide a description of the function:def d_step(self, true_frames, gen_frames):
hparam_to_disc_loss = {
"least_squares": gan_losses.least_squares_discriminator_loss,
"cross_entropy": gan_losses.modified_discriminator_loss,
"wasserstein": gan_losses.wasserstein_discriminator_loss... | [
"Performs the discriminator step in computing the GAN loss.\n\n Applies stop-gradient to the generated frames while computing the\n discriminator loss to make sure that the gradients are not back-propagated\n to the generator. This makes sure that only the discriminator is updated.\n\n Args:\n true... |
Please provide a description of the function:def g_step(self, gen_frames, fake_logits_stop):
hparam_to_gen_loss = {
"least_squares": gan_losses.least_squares_generator_loss,
"cross_entropy": gan_losses.modified_generator_loss,
"wasserstein": gan_losses.wasserstein_generator_loss
}
... | [
"Performs the generator step in computing the GAN loss.\n\n Args:\n gen_frames: Generated frames\n fake_logits_stop: Logits corresponding to the generated frames as per\n the discriminator. Assumed to have a stop-gradient term.\n Returns:\n gan_g_loss_pos_d: Loss.\n ... |
Please provide a description of the function:def get_gan_loss(self, true_frames, gen_frames, name):
# D - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
gan_d_loss, _, fake_logits_stop = self.d_step(
true_frames, gen_frames)
# G - STEP
with tf.variable... | [
"Get the discriminator + generator loss at every step.\n\n This performs an 1:1 update of the discriminator and generator at every\n step.\n\n Args:\n true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)\n Assumed to be ground truth.\n gen_frames: 5-D Tensor of shap... |
Please provide a description of the function:def get_extra_loss(self, latent_means=None, latent_stds=None,
true_frames=None, gen_frames=None):
if not self.is_training:
return 0.0
vae_loss, d_vae_loss, d_gan_loss = 0.0, 0.0, 0.0
# Use sv2p's KL divergence computation.
if ... | [
"Gets extra loss from VAE and GAN."
] |
Please provide a description of the function:def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides,
scope):
padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]
# tf.nn.conv3d accepts a list of 5 values for strides
# with first and last value equal to 1
if is... | [
"Pad, apply 3-D convolution and leaky relu."
] |
Please provide a description of the function:def weight(w, sparsity):
w_shape = common_layers.shape_list(w)
k = int(np.prod(w_shape[:-1]))
count = tf.to_int32(k * sparsity)
mask = common_layers.weight_targeting(w, count)
return (1 - mask) * w | [
"Weight-level magnitude pruning."
] |
Please provide a description of the function:def unit(w, sparsity):
w_shape = common_layers.shape_list(w)
count = tf.to_int32(w_shape[-1] * sparsity)
mask = common_layers.unit_targeting(w, count)
return (1 - mask) * w | [
"Unit-level magnitude pruning."
] |
Please provide a description of the function:def sparsify(sess, eval_model, pruning_strategy, pruning_params):
weights = tf.trainable_variables()
def should_prune(name):
in_whitelist = not pruning_params.white_list or any(
e in name for e in pruning_params.white_list)
in_blacklist = any(e i... | [
"Prune the weights of a model and evaluate.",
"Whether to prune a weight or not."
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.