def brelu(x):
  """Bipolar ReLU as in https://arxiv.org/abs/1709.04054."""
  x_shape = shape_list(x)
  x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1)
  y1 = tf.nn.relu(x1)
  y2 = -tf.nn.relu(-x2)
  return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape)
def belu(x):
  """Bipolar ELU as in https://arxiv.org/abs/1709.04054."""
  x_shape = shape_list(x)
  x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1)
  y1 = tf.nn.elu(x1)
  y2 = -tf.nn.elu(-x2)
  return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape)
def gelu(x):
  """Gaussian Error Linear Unit.

  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    x: float Tensor to perform activation.

  Returns:
    x with the GELU activation applied.
  """
  cdf = 0.5 * (1.0 + tf.tanh(
      (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
  return x * cdf
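For a quick sanity check of what the tanh approximation above computes, here is a NumPy re-implementation of the same formula (illustrative only; values are approximate):

import numpy as np

def gelu_numpy(x):
  # Same tanh approximation as the TF version above.
  cdf = 0.5 * (1.0 + np.tanh(
      np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
  return x * cdf

gelu_numpy(np.array([-1.0, 0.0, 1.0]))  # -> approx. [-0.159, 0.0, 0.841]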
def nac(x, depth, name=None, reuse=None):
  """NAC as in https://arxiv.org/abs/1808.00508."""
  with tf.variable_scope(name, default_name="nac", values=[x], reuse=reuse):
    x_shape = shape_list(x)
    w = tf.get_variable("w", [x_shape[-1], depth])
    m = tf.get_variable("m", [x_shape[-1], depth])
    w = tf.tanh(w) * tf.nn.sigmoid(m)
    x_flat = tf.reshape(x, [-1, x_shape[-1]])
    res_flat = tf.matmul(x_flat, w)
    return tf.reshape(res_flat, x_shape[:-1] + [depth])
def nalu(x, depth, epsilon=1e-30, name=None, reuse=None):
  """NALU as in https://arxiv.org/abs/1808.00508."""
  with tf.variable_scope(name, default_name="nalu", values=[x], reuse=reuse):
    x_shape = shape_list(x)
    x_flat = tf.reshape(x, [-1, x_shape[-1]])
    gw = tf.get_variable("w", [x_shape[-1], depth])
    g = tf.nn.sigmoid(tf.matmul(x_flat, gw))
    g = tf.reshape(g, x_shape[:-1] + [depth])
    a = nac(x, depth, name="nac_lin")
    log_x = tf.log(tf.abs(x) + epsilon)
    m = nac(log_x, depth, name="nac_log")
    return g * a + (1 - g) * tf.exp(m)
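A minimal usage sketch, assuming TensorFlow 1.x graph mode and that nac/nalu above are in scope (they mirror tensor2tensor's common_layers); shapes are illustrative:

import tensorflow as tf

x = tf.placeholder(tf.float32, [16, 8])      # illustrative batch of 8-dim inputs
y = nalu(x, depth=4, name="arithmetic_unit")  # gate mixes the additive (nac_lin)
                                              # and log-space (nac_log) paths; shape [16, 4]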
def argmax_with_score(logits, axis=None):
  """Argmax along with the value."""
  axis = axis or len(logits.get_shape()) - 1
  predictions = tf.argmax(logits, axis=axis)

  logits_shape = shape_list(logits)
  prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
  prefix_size = 1
  for d in prefix_shape:
    prefix_size *= d

  # Flatten to extract scores
  flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
  flat_predictions = tf.reshape(predictions, [prefix_size])
  flat_indices = tf.stack(
      [tf.range(tf.to_int64(prefix_size)),
       tf.to_int64(flat_predictions)],
      axis=1)
  flat_scores = tf.gather_nd(flat_logits, flat_indices)

  # Unflatten
  scores = tf.reshape(flat_scores, prefix_shape)

  return predictions, scores
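Illustrative call, assuming TF 1.x (eager or inside a session) and the shape_list helper used above:

logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.1]])
ids, scores = argmax_with_score(logits)  # ids -> [1, 0], scores -> [2.0, 1.5]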
Compute the k-th top element of x on the last axis iteratively. This assumes values in x are non-negative, rescale if needed. It is often faster than tf.nn.top_k for small k, especially if k < 30. Note: this does not support back-propagation, it stops gradients! Args: x: a Tensor of non-negative numbers of type float. k: a python integer. Returns: a float tensor of the same shape as x but with 1 on the last axis that contains the k-th largest number in x. def top_kth_iterative(x, k): """Compute the k-th top element of x on the last axis iteratively. This assumes values in x are non-negative, rescale if needed. It is often faster than tf.nn.top_k for small k, especially if k < 30. Note: this does not support back-propagation, it stops gradients! Args: x: a Tensor of non-negative numbers of type float. k: a python integer. Returns: a float tensor of the same shape as x but with 1 on the last axis that contains the k-th largest number in x. """ # The iterative computation is as follows: # # cur_x = x # for _ in range(k): # top_x = maximum of elements of cur_x on the last axis # cur_x = cur_x where cur_x < top_x and 0 everywhere else (top elements) # # We encode this computation in a TF graph using tf.foldl, so the inner # part of the above loop is called "next_x" and tf.foldl does the loop. def next_x(cur_x, _): top_x = tf.reduce_max(cur_x, axis=-1, keep_dims=True) return cur_x * to_float(cur_x < top_x) # We only do k-1 steps of the loop and compute the final max separately. fin_x = tf.foldl(next_x, tf.range(k - 1), initializer=tf.stop_gradient(x), parallel_iterations=2, back_prop=False) return tf.stop_gradient(tf.reduce_max(fin_x, axis=-1, keep_dims=True))
def top_1_tpu(inputs):
  """find max and argmax over the last dimension.

  Works well on TPU

  Args:
    inputs: A tensor with shape [..., depth]

  Returns:
    values: a Tensor with shape [...]
    indices: a Tensor with shape [...]
  """
  inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
  mask = tf.to_int32(tf.equal(inputs_max, inputs))
  index = tf.range(tf.shape(inputs)[-1]) * mask
  return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)
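A small example, assuming TF 1.x; note that ties are resolved towards the highest index because of the final reduce_max over index * mask:

x = tf.constant([[1.0, 5.0, 3.0],
                 [2.0, 2.0, 0.5]])
values, indices = top_1_tpu(x)  # values -> [5.0, 2.0], indices -> [1, 1]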
def index_last_dim_with_indices(x, indices):
  """Use indices to index into the last axis of x.

  This can be useful for recovering the actual probabilities of a sample from a
  probability distribution.

  Args:
    x: Tensor, n-d.
    indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
      dimensions of x. The values of indices will be used to index into the
      last axis of x.

  Returns:
    Tensor, (n-1)-d.
  """
  assert len(x.shape) == len(indices.shape) + 1

  x_shape = shape_list(x)
  vocab_size = x_shape[-1]

  flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])
  flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])

  idx = tf.stack(
      [
          tf.range(tf.to_int64(shape_list(flat_indices)[0])),
          tf.to_int64(flat_indices)
      ],
      axis=1)
  flat_x_idx = tf.gather_nd(flat_x, idx)

  x_idx = tf.reshape(flat_x_idx, x_shape[:-1])

  return x_idx
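Illustrative call, assuming TF 1.x and the shape_list/list_product helpers used above:

probs = tf.constant([[0.1, 0.7, 0.2],
                     [0.6, 0.3, 0.1]])
samples = tf.constant([1, 0], dtype=tf.int64)
picked = index_last_dim_with_indices(probs, samples)  # -> [0.7, 0.6]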
def should_generate_summaries():
  """Is this an appropriate context to generate summaries.

  Returns:
    a boolean
  """
  name_scope = tf.contrib.framework.get_name_scope()
  if name_scope and "while/" in name_scope:
    # Summaries don't work well within tf.while_loop()
    return False
  if tf.get_variable_scope().reuse:
    # Avoid generating separate summaries for different data shards
    return False
  return True
def reshape_like(a, b):
  """Reshapes a to match the shape of b in all but the last dimension."""
  ret = tf.reshape(a, tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0))
  if not tf.executing_eagerly():
    ret.set_shape(b.get_shape().as_list()[:-1] +
                  a.get_shape().as_list()[-1:])
  return ret
Summarize the video using image summaries starting with prefix. def summarize_video(video, prefix, max_outputs=1): """Summarize the video using image summaries starting with prefix.""" video_shape = shape_list(video) if len(video_shape) != 5: raise ValueError("Assuming videos given as tensors in the format " "[batch, time, height, width, channels] but got one " "of shape: %s" % str(video_shape)) if tf.executing_eagerly(): return if video.get_shape().as_list()[1] is None: tf.summary.image( "%s_last_frame" % prefix, tf.cast(video[:, -1, :, :, :], tf.uint8), max_outputs=max_outputs) else: for k in range(video_shape[1]): tf.summary.image( "%s_frame_%d" % (prefix, k), tf.cast(video[:, k, :, :, :], tf.uint8), max_outputs=max_outputs)
def cast_like(x, y):
  """Cast x to y's dtype, if necessary."""
  x = tf.convert_to_tensor(x)
  y = tf.convert_to_tensor(y)

  if x.dtype.base_dtype == y.dtype.base_dtype:
    return x

  cast_x = tf.cast(x, y.dtype)
  if cast_x.device != x.device:
    x_name = "(eager Tensor)"
    try:
      x_name = x.name
    except AttributeError:
      pass
    tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'",
                       x_name, x.device, cast_x.device)
  return cast_x
Pad x to be even-sized on axis 1 and 2, but only if necessary. def make_even_size(x): """Pad x to be even-sized on axis 1 and 2, but only if necessary.""" x_shape = x.get_shape().as_list() assert len(x_shape) > 2, "Only 3+-dimensional tensors supported." shape = [dim if dim is not None else -1 for dim in x_shape] new_shape = x_shape # To make sure constant shapes remain constant. if x_shape[1] is not None: new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5)) if x_shape[2] is not None: new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5)) if shape[1] % 2 == 0 and shape[2] % 2 == 0: return x if shape[1] % 2 == 0: x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2) x.set_shape(new_shape) return x if shape[2] % 2 == 0: x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1) x.set_shape(new_shape) return x x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1) x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2) x.set_shape(new_shape) return x
Loss inspired by the sliced WGAN paper: https://arxiv.org/abs/1804.01947. Puts input1 and input2 through the provided discriminator to get logits. Then, computes num_vecs random projections of the logits, sorts them on the batch dimension and returns the L2 loss between the sorted vectors. See the above-mentioned paper for the reasoning behind it. Args: input1: first discriminator inputs. input2: second discriminator inputs. discriminator: inputs -> logits function. num_vecs: how many random vectors to use for projections. do_random_vecs: whether to use random vectors or just tanh of the logits. do_tanh: if true (default) we'll also just use tanh of the logits. return_logits: Whether or not to return the logits. Returns: The generator loss, i.e., the sliced approximation of the distance between the projected distributions (warning: discriminator should maximize it). def sliced_gan_loss(input1, input2, discriminator, num_vecs, do_random_vecs=True, do_tanh=True, return_logits=False): """Loss inspired by the sliced WGAN paper: https://arxiv.org/abs/1804.01947. Puts input1 and input2 through the provided discriminator to get logits. Then, computes num_vecs random projections of the logits, sorts them on the batch dimension and returns the L2 loss between the sorted vectors. See the above-mentioned paper for the reasoning behind it. Args: input1: first discriminator inputs. input2: second discriminator inputs. discriminator: inputs -> logits function. num_vecs: how many random vectors to use for projections. do_random_vecs: whether to use random vectors or just tanh of the logits. do_tanh: if true (default) we'll also just use tanh of the logits. return_logits: Whether or not to return the logits. Returns: The generator loss, i.e., the sliced approximation of the distance between the projected distributions (warning: discriminator should maximize it). """ with tf.variable_scope("sliced_gan"): with tf.variable_scope("discriminator"): logits1 = discriminator(input1) with tf.variable_scope("discriminator", reuse=True): logits2 = discriminator(input2) if do_random_vecs: random_vecs = tf.nn.l2_normalize( tf.random_uniform([shape_list(logits1)[-1], num_vecs]), axis=0) def get_sorted_projections(x): """Make projections of x and sort them on the batch dimension.""" x = tf.reshape(x, [-1, shape_list(x)[-1]]) batch_size = shape_list(x)[0] if do_random_vecs and do_tanh: n = tf.nn.l2_normalize(x, axis=1) proj = tf.concat([tf.matmul(n, random_vecs), tf.tanh(n)], axis=1) elif do_random_vecs: n = tf.nn.l2_normalize(x, axis=1) proj = tf.matmul(n, random_vecs) else: proj = tf.tanh(x) proj = tf.transpose(proj, [1, 0]) # [num_vecs, batch] after this. if is_xla_compiled(): proj_dtype = proj.dtype proj = tf.cast(proj, tf.bfloat16) # Currently TPU only supports 1-D top_k calls. map_fn = lambda x: tf.nn.top_k(x, k=batch_size, sorted=True)[0] values = tf.map_fn(map_fn, proj) values = tf.cast(values, proj_dtype) else: values, _ = tf.nn.top_k(proj, k=batch_size, sorted=True) return values proj1 = get_sorted_projections(logits1) proj2 = get_sorted_projections(logits2) dist = tf.reduce_mean(tf.squared_difference(proj1, proj2)) if return_logits: return dist, logits1, logits2 return dist
Discriminator architecture based on InfoGAN. def deep_discriminator(x, batch_norm, is_training, filters=64, filter_size=4, stride=2, output_size=1024): """Discriminator architecture based on InfoGAN.""" with tf.variable_scope( "discriminator", initializer=tf.random_normal_initializer(stddev=0.02)): batch_size, height, width = shape_list(x)[:3] # pylint: disable=unbalanced-tuple-unpacking net = layers().Conv2D( filters, filter_size, strides=stride, padding="SAME", name="conv1")(x) net = lrelu(net) net = layers().Conv2D( 2 * filters, filter_size, strides=stride, padding="SAME", name="conv2")(net) # [bs, h/4, w/4, 128] if batch_norm: net = layers().BatchNormalization( training=is_training, momentum=0.999, name="d_bn2")(net) net = lrelu(net) size = height * width x_shape = x.get_shape().as_list() if x_shape[1] is None or x_shape[2] is None: net = tf.reduce_mean(net, axis=[1, 2]) else: net = tf.reshape(net, [batch_size, size * 8]) net = layers().Dense(output_size, name="d_fc3")(net) if batch_norm: net = layers().BatchNormalization( training=is_training, momentum=0.999, name="d_bn3")(net) net = lrelu(net) return net
def instance_norm(x):
  """Instance normalization layer."""
  with tf.variable_scope("instance_norm"):
    epsilon = 1e-5
    mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
    scale = tf.get_variable(
        "scale", [x.get_shape()[-1]],
        initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
    offset = tf.get_variable(
        "offset", [x.get_shape()[-1]],
        initializer=tf.constant_initializer(0.0))
    out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset
    return out
Generalized convolution layer. def general_conv(x, num_filters=64, filter_size=7, stride=1, stddev=0.02, padding="VALID", name="conv", do_norm="instance", do_relu=True, relufactor=0): """Generalized convolution layer.""" with tf.variable_scope(name): x = layers().Conv2D( num_filters, filter_size, stride, padding, activation=None, kernel_initializer=tf.truncated_normal_initializer(stddev=stddev), bias_initializer=tf.constant_initializer(0.0))(x) if do_norm == "layer": x = layer_norm(x) elif do_norm == "instance": x = instance_norm(x) if do_relu: if relufactor == 0: x = tf.nn.relu(x, "relu") else: x = lrelu(x, leak=relufactor) return x
def patch_discriminator(x, filters=64, filter_size=5, n=4,
                        name="patch_discrim"):
  """Patch discriminator."""
  with tf.variable_scope(name):
    x_shape = shape_list(x)
    spatial_dims = [x_shape[1] // 4, x_shape[2] // 4]
    x = tf.random_crop(x, [x_shape[0]] + spatial_dims + [x_shape[3]])
    for i in range(n):
      x = general_conv(
          x=x,
          num_filters=filters * 2**i,
          filter_size=filter_size,
          stride=2 if i != n - 1 else 1,
          stddev=0.02,
          padding="SAME",
          name="c%d" % i,
          do_norm="instance" if i != 0 else False,
          do_relu=i != n - 1,
          relufactor=0.2)
    x = tf.reduce_mean(x, [1, 2])
    return x
def mean_with_attention(x, name, num_heads=4):
  """Mean and attention to reduce spatial dimensions."""
  with tf.variable_scope(name):
    shape = shape_list(x)
    m = tf.reduce_mean(x, [1, 2])
    a = layers().Dense(num_heads, name="mean_attn")(x)
    s = tf.reshape(a, [shape[0], -1, num_heads])
    s = tf.nn.softmax(s, axis=1)
    s = tf.reshape(s, shape[:-1] + [1, num_heads])
    am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2])
    l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1)
    return layers().Dense(2 * shape[-1], name="mean_attn_final")(
        tf.reshape(l, [shape[0], (num_heads + 1) * shape[-1]]))
def single_discriminator(x, filters=128, kernel_size=8,
                         strides=4, pure_mean=False):
  """A simple single-layer convolutional discriminator."""
  with tf.variable_scope("discriminator"):
    net = layers().Conv2D(
        filters, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
    if pure_mean:
      net = tf.reduce_mean(net, [1, 2])
    else:
      net = mean_with_attention(net, "mean_with_attention")
    return net
A convolutional discriminator with 2 layers and concatenated output. def double_discriminator(x, filters1=128, filters2=None, kernel_size=8, strides=4, pure_mean=False): """A convolutional discriminator with 2 layers and concatenated output.""" if filters2 is None: filters2 = 4 * filters1 with tf.variable_scope("discriminator"): batch_size = shape_list(x)[0] net = layers().Conv2D( filters1, kernel_size, strides=strides, padding="SAME", name="conv1")(x) if pure_mean: net1 = tf.reduce_mean(net, [1, 2]) else: net1 = mean_with_attention(net, "mean_with_attention1") tf.reshape(net, [batch_size, -1]) net = tf.nn.relu(net) net = layers().Conv2D( filters2, kernel_size, strides=strides, padding="SAME", name="conv2")(x) if pure_mean: net2 = tf.reduce_mean(net, [1, 2]) else: net2 = mean_with_attention(net, "mean_with_attention2") return tf.concat([net1, net2], axis=-1)
def upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR):
  """Upscaling the image by a factor of f."""
  height, width = shape_list(inputs)[1:3]  # pylint: disable=unbalanced-tuple-unpacking
  return tf.image.resize_images(inputs, (height * f, width * f), method)
Upsamples the given inputs. Args: net: A Tensor of size [batch_size, height, width, filters]. num_outputs: The number of output filters. stride: A list of 2 scalars or a 1x2 Tensor indicating the scale, relative to the inputs, of the output dimensions. For example, if kernel size is [2, 3], then the output height and width will be twice and three times the input size. method: The upsampling method: 'nn_upsample_conv', 'bilinear_upsample_conv', or 'conv2d_transpose'. Returns: A Tensor which was upsampled using the specified method. Raises: ValueError: if `method` is not recognized. def cyclegan_upsample(net, num_outputs, stride, method="conv2d_transpose"): """Upsamples the given inputs. Args: net: A Tensor of size [batch_size, height, width, filters]. num_outputs: The number of output filters. stride: A list of 2 scalars or a 1x2 Tensor indicating the scale, relative to the inputs, of the output dimensions. For example, if kernel size is [2, 3], then the output height and width will be twice and three times the input size. method: The upsampling method: 'nn_upsample_conv', 'bilinear_upsample_conv', or 'conv2d_transpose'. Returns: A Tensor which was upsampled using the specified method. Raises: ValueError: if `method` is not recognized. """ with tf.variable_scope("upconv"): net_shape = tf.shape(net) height = net_shape[1] width = net_shape[2] # Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a # 3x3 "valid" convolution produce an output with the same dimension as the # input. spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]]) if method == "nn_upsample_conv": net = tf.image.resize_nearest_neighbor( net, [stride[0] * height, stride[1] * width]) net = tf.pad(net, spatial_pad_1, "REFLECT") net = layers().Conv2D( num_outputs, (3, 3), activation=tf.nn.relu)(net) elif method == "bilinear_upsample_conv": net = tf.image.resize_bilinear(net, [stride[0] * height, stride[1] * width]) net = tf.pad(net, spatial_pad_1, "REFLECT") net = layers().Conv2D( num_outputs, (3, 3), activation=tf.nn.relu)(net) elif method == "conv2d_transpose": # This corrects 1 pixel offset for images with even width and height. # conv2d is left aligned and conv2d_transpose is right aligned for even # sized images (while doing "SAME" padding). # Note: This doesn"t reflect actual model in paper. net = layers().Conv2DTranspose( num_outputs, (3, 3), strides=stride, activation=tf.nn.relu)(net) net = net[:, 1:, 1:, :] else: raise ValueError("Unknown method: [%s]" % method) return net
def weight_targeting(w, k):
  """Weight-level magnitude pruning."""
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])

  transpose_w = tf.transpose(w)
  thres = tf.contrib.framework.sort(tf.abs(transpose_w), axis=1)[:, k]
  mask = to_float(thres[None, :] >= tf.abs(w))

  return tf.reshape(mask, w_shape)
def unit_targeting(w, k):
  """Unit-level magnitude pruning."""
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])

  norm = tf.norm(w, axis=0)
  thres = tf.contrib.framework.sort(norm, axis=0)[k]
  mask = to_float(thres >= norm)[None, :]
  mask = tf.tile(mask, [size, 1])

  return tf.reshape(mask, w_shape)
Apply targeted dropout to the weights of a convolution. def td_conv(inputs, filters, kernel_size, targeting_count, targeting_fn, keep_prob, is_training, do_prune=True, strides=(1, 1), padding="valid", data_format="channels_last", dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=tf.zeros_initializer(), name=None, reuse=None): """Apply targeted dropout to the weights of a convolution.""" with tf.variable_scope(name, default_name="td_conv", reuse=reuse): nhwc = data_format == "channels_last" in_dim = shape_list(inputs)[-1] if nhwc else shape_list(inputs)[1] kernel_shape = [kernel_size, kernel_size, in_dim, filters] w = tf.get_variable( "DW", shape=kernel_shape, initializer=kernel_initializer) if use_bias: b = tf.get_variable("b", shape=[filters], initializer=bias_initializer) if keep_prob < 1.0: w = targeted_dropout( w, targeting_count, keep_prob, targeting_fn, is_training, do_prune=do_prune) if isinstance(strides, int): strides = [strides, strides] if isinstance(dilation_rate, int): dilation_rate = [dilation_rate, dilation_rate] if nhwc: strides = [1, strides[0], strides[1], 1] dilation_rate = [1, dilation_rate[0], dilation_rate[1], 1] else: strides = [1, 1, strides[0], strides[1]] dilation_rate = [1, 1, dilation_rate[0], dilation_rate[1]] y = tf.nn.conv2d( inputs, w, strides, padding, data_format="NHWC" if nhwc else "NCHW", dilations=dilation_rate, name=None) if use_bias: y += b if activation: y = activation(y) return y
Applies targeted dropout. Applies dropout at a rate of `1 - keep_prob` to only those elements of `inputs` marked by `targeting_fn`. See below and paper for more detail: "Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang, Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton. Args: inputs: Tensor, inputs to apply targeted dropout to. k: Scalar Tensor or python scalar, sets the number of elements to target in `inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with second argument of `targeting_fn`. keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument. targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a boolean mask the same shape as `inputs` where True indicates an element will be dropped, and False not. is_training: bool, indicates whether currently training. do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)` elements of `inputs` expected to be dropped each forwards pass. Returns: Tensor, same shape and dtype as `inputs`. def targeted_dropout(inputs, k, keep_prob, targeting_fn, is_training, do_prune=False): """Applies targeted dropout. Applies dropout at a rate of `1 - keep_prob` to only those elements of `inputs` marked by `targeting_fn`. See below and paper for more detail: "Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang, Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton. Args: inputs: Tensor, inputs to apply targeted dropout to. k: Scalar Tensor or python scalar, sets the number of elements to target in `inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with second argument of `targeting_fn`. keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument. targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a boolean mask the same shape as `inputs` where True indicates an element will be dropped, and False not. is_training: bool, indicates whether currently training. do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)` elements of `inputs` expected to be dropped each forwards pass. Returns: Tensor, same shape and dtype as `inputs`. """ if not is_training and do_prune: k = tf.round(to_float(k) * to_float(1. - keep_prob)) mask = targeting_fn(inputs, k) mask = tf.cast(mask, inputs.dtype) if is_training: return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask elif do_prune: return inputs * (1 - mask) else: return inputs
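A sketch of how the pieces above fit together during training, assuming TF 1.x and the to_float/shape_list helpers from this module; the variable shape and k are illustrative:

w = tf.get_variable("w", [256, 128])
dropped_w = targeted_dropout(
    w,
    k=64,                        # target the 64 lowest-magnitude output units
    keep_prob=0.5,               # drop targeted units with probability 0.5
    targeting_fn=unit_targeting,  # or weight_targeting for per-weight pruning
    is_training=True)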
def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0):
  """KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1).

  Args:
    mu: mu parameter of the distribution.
    log_var: log(var) parameter of the distribution.
    mu_p: optional mu from a learned prior distribution
    log_var_p: optional log(var) from a learned prior distribution

  Returns:
    the KL loss.
  """
  batch_size = shape_list(mu)[0]
  prior_distribution = tfp.distributions.Normal(
      mu_p, tf.exp(tf.multiply(0.5, log_var_p)))
  posterior_distribution = tfp.distributions.Normal(
      mu, tf.exp(tf.multiply(0.5, log_var)))
  kld = tfp.distributions.kl_divergence(posterior_distribution,
                                        prior_distribution)
  return tf.reduce_sum(kld) / to_float(batch_size)
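For the default standard-normal prior (mu_p=0, log_var_p=0), the summed KL has the usual closed form; a NumPy sketch of that identity (the function above additionally divides by the batch size):

import numpy as np

mu = np.array([0.5, -0.2])
log_var = np.array([0.1, -0.3])
# sum_i KL( N(mu_i, exp(log_var_i)) || N(0, 1) )
kl = 0.5 * np.sum(np.exp(log_var) + mu ** 2 - 1.0 - log_var)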
def to_tensor(self):
  """Convert to Tensor."""
  a_shape = shape_list(self.a)
  b_shape = shape_list(self.b)
  inner_dim = b_shape[1]
  result_dim = b_shape[0]
  flat_a = tf.reshape(self.a, [-1, inner_dim])
  product = tf.matmul(flat_a, self.b, transpose_b=True)
  product_shape = a_shape[:-1] + [result_dim]
  product = tf.reshape(product, product_shape)
  product.set_shape(self.a.get_shape().as_list()[:-1] +
                    [self.b.get_shape()[0]])
  return product
def _compute_weights(self):
  """Generate weights with normalization."""
  with tf.variable_scope("compute_weights"):
    self.layer.kernel = tf.nn.l2_normalize(
        self.layer.v, axis=self.norm_axes) * self.layer.g
def _init_norm(self, weights):
  """Set the norm of the weight vector."""
  with tf.variable_scope("init_norm"):
    flat = tf.reshape(weights, [-1, self.layer_depth])
    return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,))
def _data_dep_init(self, inputs):
  """Data dependent initialization for eager execution."""
  with tf.variable_scope("data_dep_init"):
    # Generate data dependent init values
    activation = self.layer.activation
    self.layer.activation = None
    x_init = self.layer.call(inputs)
    m_init, v_init = tf.moments(x_init, self.norm_axes)
    scale_init = 1. / tf.sqrt(v_init + 1e-10)

  # Assign data dependent init values
  self.layer.g = self.layer.g * scale_init
  self.layer.bias = (-m_init * scale_init)
  self.layer.activation = activation
  self.initialized = True
Build `Layer`. def build(self, input_shape=None): """Build `Layer`.""" input_shape = tf.TensorShape(input_shape).as_list() self.input_spec = layers().InputSpec(shape=input_shape) if not self.layer.built: self.layer.build(input_shape) self.layer.built = False if not hasattr(self.layer, "kernel"): raise ValueError("`WeightNorm` must wrap a layer that" " contains a `kernel` for weights") # The kernel's filter or unit dimension is -1 self.layer_depth = int(self.layer.kernel.shape[-1]) self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1)) self.layer.v = self.layer.kernel self.layer.g = self.layer.add_variable( name="g", shape=(self.layer_depth,), initializer=tf.ones_initializer, dtype=self.layer.kernel.dtype, trainable=True) # with ops.control_dependencies([self.layer.g.assign( # self._init_norm(self.layer.v))]): # self._compute_weights() self._compute_weights() self.layer.built = True super(WeightNorm, self).build() self.built = True
def call(self, inputs):
  """Call `Layer`."""
  # if context.executing_eagerly():
  #   if not self.initialized:
  #     self._data_dep_init(inputs)
  self._compute_weights()  # Recompute weights for each forward pass

  output = self.layer.call(inputs)
  return output
def compute_mean_reward(rollouts, clipped):
  """Calculate mean rewards from given epoch."""
  reward_name = "reward" if clipped else "unclipped_reward"
  rewards = []
  for rollout in rollouts:
    if rollout[-1].done:
      rollout_reward = sum(getattr(frame, reward_name) for frame in rollout)
      rewards.append(rollout_reward)
  if rewards:
    mean_rewards = np.mean(rewards)
  else:
    mean_rewards = 0
  return mean_rewards
Evaluate the PPO agent in the real environment. def evaluate_single_config( hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn=_eval_fn_with_learner ): """Evaluate the PPO agent in the real environment.""" tf.logging.info("Evaluating metric %s", get_metric_name( sampling_temp, max_num_noops, clipped=False )) eval_hparams = trainer_lib.create_hparams(hparams.base_algo_params) env = setup_env( hparams, batch_size=hparams.eval_batch_size, max_num_noops=max_num_noops, rl_env_max_episode_steps=hparams.eval_rl_env_max_episode_steps, env_name=hparams.rl_env_name) env.start_new_epoch(0) eval_fn(env, hparams, eval_hparams, agent_model_dir, sampling_temp) rollouts = env.current_epoch_rollouts() env.close() return tuple( compute_mean_reward(rollouts, clipped) for clipped in (True, False) )
def evaluate_all_configs(
    hparams, agent_model_dir, eval_fn=_eval_fn_with_learner
):
  """Evaluate the agent with multiple eval configurations."""
  metrics = {}
  # Iterate over all combinations of sampling temperatures and whether to do
  # initial no-ops.
  for sampling_temp in hparams.eval_sampling_temps:
    # Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration.
    for max_num_noops in set([hparams.eval_max_num_noops, 0]):
      scores = evaluate_single_config(
          hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn
      )
      for (score, clipped) in zip(scores, (True, False)):
        metric_name = get_metric_name(sampling_temp, max_num_noops, clipped)
        metrics[metric_name] = score

  return metrics
Evaluate the world model (reward accuracy). def evaluate_world_model( real_env, hparams, world_model_dir, debug_video_path, split=tf.estimator.ModeKeys.EVAL, ): """Evaluate the world model (reward accuracy).""" frame_stack_size = hparams.frame_stack_size rollout_subsequences = [] def initial_frame_chooser(batch_size): assert batch_size == len(rollout_subsequences) return np.stack([ [frame.observation.decode() for frame in subsequence[:frame_stack_size]] # pylint: disable=g-complex-comprehension for subsequence in rollout_subsequences ]) env_fn = rl.make_simulated_env_fn_from_hparams( real_env, hparams, batch_size=hparams.wm_eval_batch_size, initial_frame_chooser=initial_frame_chooser, model_dir=world_model_dir ) sim_env = env_fn(in_graph=False) subsequence_length = int( max(hparams.wm_eval_rollout_ratios) * hparams.simulated_rollout_length ) rollouts = real_env.current_epoch_rollouts( split=split, minimal_rollout_frames=(subsequence_length + frame_stack_size) ) video_writer = common_video.WholeVideoWriter( fps=10, output_path=debug_video_path, file_format="avi" ) reward_accuracies_by_length = { int(ratio * hparams.simulated_rollout_length): [] for ratio in hparams.wm_eval_rollout_ratios } for _ in range(hparams.wm_eval_num_batches): rollout_subsequences[:] = random_rollout_subsequences( rollouts, hparams.wm_eval_batch_size, subsequence_length + frame_stack_size ) eval_subsequences = [ subsequence[(frame_stack_size - 1):] for subsequence in rollout_subsequences ] # Check that the initial observation is the same in the real and simulated # rollout. sim_init_obs = sim_env.reset() def decode_real_obs(index): return np.stack([ subsequence[index].observation.decode() for subsequence in eval_subsequences # pylint: disable=cell-var-from-loop ]) real_init_obs = decode_real_obs(0) assert np.all(sim_init_obs == real_init_obs) debug_frame_batches = [] def append_debug_frame_batch(sim_obs, real_obs, sim_cum_rews, real_cum_rews, sim_rews, real_rews): """Add a debug frame.""" rews = [[sim_cum_rews, sim_rews], [real_cum_rews, real_rews]] headers = [] for j in range(len(sim_obs)): local_nps = [] for i in range(2): img = PIL_Image().new("RGB", (sim_obs.shape[-2], 11),) draw = PIL_ImageDraw().Draw(img) draw.text((0, 0), "c:{:3}, r:{:3}".format(int(rews[i][0][j]), int(rews[i][1][j])), fill=(255, 0, 0)) local_nps.append(np.asarray(img)) local_nps.append(np.zeros_like(local_nps[0])) headers.append(np.concatenate(local_nps, axis=1)) errs = absolute_hinge_difference(sim_obs, real_obs) headers = np.stack(headers) debug_frame_batches.append( # pylint: disable=cell-var-from-loop np.concatenate([headers, np.concatenate([sim_obs, real_obs, errs], axis=2)], axis=1) ) append_debug_frame_batch(sim_init_obs, real_init_obs, np.zeros(hparams.wm_eval_batch_size), np.zeros(hparams.wm_eval_batch_size), np.zeros(hparams.wm_eval_batch_size), np.zeros(hparams.wm_eval_batch_size)) (sim_cum_rewards, real_cum_rewards) = ( np.zeros(hparams.wm_eval_batch_size) for _ in range(2) ) for i in range(subsequence_length): actions = [subsequence[i].action for subsequence in eval_subsequences] (sim_obs, sim_rewards, _) = sim_env.step(actions) sim_cum_rewards += sim_rewards real_rewards = np.array([ subsequence[i + 1].reward for subsequence in eval_subsequences ]) real_cum_rewards += real_rewards for (length, reward_accuracies) in six.iteritems( reward_accuracies_by_length ): if i + 1 == length: reward_accuracies.append( np.sum(sim_cum_rewards == real_cum_rewards) / len(real_cum_rewards) ) real_obs = decode_real_obs(i + 1) 
append_debug_frame_batch(sim_obs, real_obs, sim_cum_rewards, real_cum_rewards, sim_rewards, real_rewards) for debug_frames in np.stack(debug_frame_batches, axis=1): debug_frame = None for debug_frame in debug_frames: video_writer.write(debug_frame) if debug_frame is not None: # Append two black frames for aesthetics. for _ in range(2): video_writer.write(np.zeros_like(debug_frame)) video_writer.finish_to_disk() return { "reward_accuracy/at_{}".format(length): np.mean(reward_accuracies) for (length, reward_accuracies) in six.iteritems( reward_accuracies_by_length ) }
def summarize_metrics(eval_metrics_writer, metrics, epoch):
  """Write metrics to summary."""
  for (name, value) in six.iteritems(metrics):
    summary = tf.Summary()
    summary.value.add(tag=name, simple_value=value)
    eval_metrics_writer.add_summary(summary, epoch)
  eval_metrics_writer.flush()
def full_game_name(short_name):
  """CamelCase game name with mode suffix.

  Args:
    short_name: snake_case name without mode e.g "crazy_climber"

  Returns:
    full game name e.g. "CrazyClimberNoFrameskip-v4"
  """
  camel_game_name = misc_utils.snakecase_to_camelcase(short_name)
  full_name = camel_game_name + ATARI_GAME_MODE
  return full_name
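Illustrative call, assuming ATARI_GAME_MODE is the "NoFrameskip-v4" suffix implied by the docstring:

full_game_name("crazy_climber")  # -> "CrazyClimberNoFrameskip-v4"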
def setup_env(hparams,
              batch_size,
              max_num_noops,
              rl_env_max_episode_steps=-1,
              env_name=None):
  """Setup."""
  if not env_name:
    env_name = full_game_name(hparams.game)

  maxskip_envs = should_apply_max_and_skip_env(hparams)

  env = T2TGymEnv(
      base_env_name=env_name,
      batch_size=batch_size,
      grayscale=hparams.grayscale,
      should_derive_observation_space=hparams
      .rl_should_derive_observation_space,
      resize_width_factor=hparams.resize_width_factor,
      resize_height_factor=hparams.resize_height_factor,
      rl_env_max_episode_steps=rl_env_max_episode_steps,
      max_num_noops=max_num_noops,
      maxskip_envs=maxskip_envs,
      sticky_actions=hparams.sticky_actions
  )
  return env
def update_hparams_from_hparams(target_hparams, source_hparams, prefix):
  """Copy a subset of hparams to target_hparams."""
  for (param_name, param_value) in six.iteritems(source_hparams.values()):
    if param_name.startswith(prefix):
      target_hparams.set_hparam(param_name[len(prefix):], param_value)
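Illustrative example, assuming an HParams class like the one used elsewhere in this codebase (hparam.HParams); only keys with the prefix are copied, with the prefix stripped:

source = hparam.HParams(ppo_learning_rate=1e-4, ppo_epochs_num=10, other=1)
target = hparam.HParams(learning_rate=0.0, epochs_num=0)
update_hparams_from_hparams(target, source, "ppo_")
# target.learning_rate -> 1e-4, target.epochs_num -> 10; "other" is ignored.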
def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length):
  """Chooses a random frame sequence of given length from a set of rollouts."""
  def choose_subsequence():
    # TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over
    # frames and not rollouts.
    rollout = random.choice(rollouts)
    try:
      from_index = random.randrange(len(rollout) - subsequence_length + 1)
    except ValueError:
      # Rollout too short; repeat.
      return choose_subsequence()
    return rollout[from_index:(from_index + subsequence_length)]
  return [choose_subsequence() for _ in range(num_subsequences)]
Make frame chooser. Args: real_env: T2TEnv to take initial frames from. frame_stack_size (int): Number of consecutive frames to extract. simulation_random_starts (bool): Whether to choose frames at random. simulation_flip_first_random_for_beginning (bool): Whether to flip the first frame stack in every batch for the frames at the beginning. split (tf.estimator.ModeKeys or None): Data split to take the frames from, None means use all frames. Returns: Function batch_size -> initial_frames. def make_initial_frame_chooser( real_env, frame_stack_size, simulation_random_starts, simulation_flip_first_random_for_beginning, split=tf.estimator.ModeKeys.TRAIN, ): """Make frame chooser. Args: real_env: T2TEnv to take initial frames from. frame_stack_size (int): Number of consecutive frames to extract. simulation_random_starts (bool): Whether to choose frames at random. simulation_flip_first_random_for_beginning (bool): Whether to flip the first frame stack in every batch for the frames at the beginning. split (tf.estimator.ModeKeys or None): Data split to take the frames from, None means use all frames. Returns: Function batch_size -> initial_frames. """ initial_frame_rollouts = real_env.current_epoch_rollouts( split=split, minimal_rollout_frames=frame_stack_size, ) def initial_frame_chooser(batch_size): """Frame chooser.""" deterministic_initial_frames =\ initial_frame_rollouts[0][:frame_stack_size] if not simulation_random_starts: # Deterministic starts: repeat first frames from the first rollout. initial_frames = [deterministic_initial_frames] * batch_size else: # Random starts: choose random initial frames from random rollouts. initial_frames = random_rollout_subsequences( initial_frame_rollouts, batch_size, frame_stack_size ) if simulation_flip_first_random_for_beginning: # Flip first entry in the batch for deterministic initial frames. initial_frames[0] = deterministic_initial_frames return np.stack([ [frame.observation.decode() for frame in initial_frame_stack] # pylint: disable=g-complex-comprehension for initial_frame_stack in initial_frames ]) return initial_frame_chooser
def absolute_hinge_difference(arr1, arr2, min_diff=10, dtype=np.uint8):
  """Point-wise, hinge loss-like, difference between arrays.

  Args:
    arr1: integer array to compare.
    arr2: integer array to compare.
    min_diff: minimal difference taken into consideration.
    dtype: dtype of returned array.

  Returns:
    array
  """
  diff = np.abs(arr1.astype(np.int) - arr2, dtype=np.int)
  return np.maximum(diff - min_diff, 0).astype(dtype)
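A NumPy example: absolute differences of at most min_diff are treated as zero.

import numpy as np

a = np.array([10, 30, 200], dtype=np.uint8)
b = np.array([12, 10, 100], dtype=np.uint8)
absolute_hinge_difference(a, b)  # -> array([0, 10, 90], dtype=uint8)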
def augment_observation(
    observation, reward, cum_reward, frame_index, bar_color=None,
    header_height=27
):
  """Augments an observation with debug info."""
  img = PIL_Image().new(
      "RGB", (observation.shape[1], header_height,)
  )
  draw = PIL_ImageDraw().Draw(img)
  draw.text(
      (1, 0), "c:{:3}, r:{:3}".format(int(cum_reward), int(reward)),
      fill=(255, 0, 0)
  )
  draw.text(
      (1, 15), "f:{:3}".format(int(frame_index)),
      fill=(255, 0, 0)
  )
  header = np.copy(np.asarray(img))
  del img
  if bar_color is not None:
    header[0, :, :] = bar_color
  return np.concatenate([header, observation], axis=0)
Runs a batch of rollouts from given initial observations. def run_rollouts( env, agent, initial_observations, step_limit=None, discount_factor=1.0, log_every_steps=None, video_writers=(), color_bar=False, many_rollouts_from_each_env=False ): """Runs a batch of rollouts from given initial observations.""" assert step_limit is not None or not many_rollouts_from_each_env, ( "When collecting many rollouts from each environment, time limit must " "be set." ) num_dones = 0 first_dones = np.array([False] * env.batch_size) observations = initial_observations step_index = 0 cum_rewards = np.zeros(env.batch_size) for (video_writer, obs_stack) in zip(video_writers, initial_observations): for (i, ob) in enumerate(obs_stack): debug_frame = augment_observation( ob, reward=0, cum_reward=0, frame_index=(-len(obs_stack) + i + 1), bar_color=((0, 255, 0) if color_bar else None) ) video_writer.write(debug_frame) def proceed(): if step_index < step_limit: return num_dones < env.batch_size or many_rollouts_from_each_env else: return False while proceed(): act_kwargs = {} if agent.needs_env_state: act_kwargs["env_state"] = env.state actions = agent.act(observations, **act_kwargs) (observations, rewards, dones) = env.step(actions) observations = list(observations) now_done_indices = [] for (i, done) in enumerate(dones): if done and (not first_dones[i] or many_rollouts_from_each_env): now_done_indices.append(i) first_dones[i] = True num_dones += 1 if now_done_indices: # Unless many_rollouts_from_each_env, reset only envs done the first time # in this timestep to ensure that we collect exactly 1 rollout from each # env. reset_observations = env.reset(now_done_indices) for (i, observation) in zip(now_done_indices, reset_observations): observations[i] = observation observations = np.array(observations) cum_rewards[~first_dones] = ( cum_rewards[~first_dones] * discount_factor + rewards[~first_dones] ) step_index += 1 for (video_writer, obs_stack, reward, cum_reward, done) in zip( video_writers, observations, rewards, cum_rewards, first_dones ): if done: continue ob = obs_stack[-1] debug_frame = augment_observation( ob, reward=reward, cum_reward=cum_reward, frame_index=step_index, bar_color=((255, 0, 0) if color_bar else None) ) video_writer.write(debug_frame) # TODO(afrozm): Clean this up with tf.logging.log_every_n if log_every_steps is not None and step_index % log_every_steps == 0: tf.logging.info("Step %d, mean_score: %f", step_index, cum_rewards.mean()) return (observations, cum_rewards)
def set_initial_state(self, initial_state, initial_frames):
  """Sets the state that will be used on next reset."""
  self.env.set_initial_state(initial_state, initial_frames)
  self._initial_frames = initial_frames
Download corpora if necessary and unzip them. Args: tmp_dir: directory containing dataset. dataset_split: whether we're in train/dev/test mode. Returns: List of all files generated and path to file containing train/dev/test split info. def _maybe_download_corpora(tmp_dir, dataset_split): """Download corpora if necessary and unzip them. Args: tmp_dir: directory containing dataset. dataset_split: whether we're in train/dev/test mode. Returns: List of all files generated and path to file containing train/dev/test split info. """ cnn_filename = "cnn_stories.tgz" cnn_finalpath = os.path.join(tmp_dir, "cnn/stories/") dailymail_filename = "dailymail_stories.tgz" dailymail_finalpath = os.path.join(tmp_dir, "dailymail/stories/") if not tf.gfile.Exists(cnn_finalpath): cnn_file = generator_utils.maybe_download_from_drive( tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL) with tarfile.open(cnn_file, "r:gz") as cnn_tar: cnn_tar.extractall(tmp_dir) if not tf.gfile.Exists(dailymail_finalpath): dailymail_file = generator_utils.maybe_download_from_drive( tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL) with tarfile.open(dailymail_file, "r:gz") as dailymail_tar: dailymail_tar.extractall(tmp_dir) cnn_files = tf.gfile.Glob(cnn_finalpath + "*") dailymail_files = tf.gfile.Glob(dailymail_finalpath + "*") all_files = cnn_files + dailymail_files if dataset_split == problem.DatasetSplit.TRAIN: urls_path = generator_utils.maybe_download(tmp_dir, "all_train.txt", _TRAIN_URLS) elif dataset_split == problem.DatasetSplit.EVAL: urls_path = generator_utils.maybe_download(tmp_dir, "all_val.txt", _DEV_URLS) else: urls_path = generator_utils.maybe_download(tmp_dir, "all_test.txt", _TEST_URLS) return all_files, urls_path
def example_splits(url_file, all_files):
  """Generate splits of the data."""
  def generate_hash(inp):
    """Generate a sha1 hash to match the raw url to the filename extracted."""
    h = hashlib.sha1()
    h.update(inp)
    return h.hexdigest()

  all_files_map = {f.split("/")[-1]: f for f in all_files}

  urls = [line.strip().encode("utf-8") for line in tf.gfile.Open(url_file)]

  filelist = []
  for url in urls:
    url_hash = generate_hash(url)
    filename = url_hash + ".story"
    if filename not in all_files_map:
      tf.logging.info("Missing file: %s" % url)
      continue
    filelist.append(all_files_map[filename])

  tf.logging.info("Found %d examples" % len(filelist))

  return filelist
Generate examples. def example_generator(all_files, urls_path, sum_token): """Generate examples.""" def fix_run_on_sents(line): if u"@highlight" in line: return line if not line: return line if line[-1] in END_TOKENS: return line return line + u"." filelist = example_splits(urls_path, all_files) story_summary_split_token = u" <summary> " if sum_token else " " for story_file in filelist: story = [] summary = [] reading_highlights = False for line in tf.gfile.Open(story_file, "rb"): line = text_encoder.to_unicode_utf8(line.strip()) line = fix_run_on_sents(line) if not line: continue elif line.startswith(u"@highlight"): if not story: break # No article text. reading_highlights = True elif reading_highlights: summary.append(line) else: story.append(line) if (not story) or not summary: continue yield " ".join(story) + story_summary_split_token + " ".join(summary)
Write text to files. def write_raw_text_to_files(all_files, urls_path, dataset_split, tmp_dir): """Write text to files.""" def write_to_file(all_files, urls_path, tmp_dir, filename): """Write text to files.""" with io.open( os.path.join(tmp_dir, filename + ".source"), "w", encoding="utf-8") as fstory: with io.open( os.path.join(tmp_dir, filename + ".target"), "w", encoding="utf-8") as fsummary: for example in example_generator(all_files, urls_path, sum_token=True): story, summary = _story_summary_split(example) fstory.write(story + "\n") fsummary.write(summary + "\n") if dataset_split == problem.DatasetSplit.TRAIN: filename = "cnndm.train" elif dataset_split == problem.DatasetSplit.EVAL: filename = "cnndm.dev" else: filename = "cnndm.test" tf.logging.info("Writing %s" % filename) write_to_file(all_files, urls_path, tmp_dir, filename)
def infer_last_epoch_num(data_dir):
  """Infer highest epoch number from file names in data_dir."""
  names = os.listdir(data_dir)
  epochs_str = [re.findall(pattern=r".*\.(-?\d+)$", string=name)
                for name in names]
  epochs_str = sum(epochs_str, [])
  return max([int(epoch_str) for epoch_str in epochs_str])
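Illustrative regex behavior (file names here are hypothetical); the trailing ".<number>" suffix is what gets extracted and maximized:

import re

re.findall(pattern=r".*\.(-?\d+)$", string="gym_pong.frames.7")     # -> ["7"]
re.findall(pattern=r".*\.(-?\d+)$", string="gym_pong.frames.-999")  # -> ["-999"]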
def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None):
  """Load T2TGymEnv with data from one epoch.

  Args:
    hparams: hparams.
    data_dir: data directory.
    which_epoch_data: data from which epoch to load.

  Returns:
    env.
  """
  t2t_env = rl_utils.setup_env(
      hparams, batch_size=hparams.real_batch_size,
      max_num_noops=hparams.max_num_noops
  )
  # Load data.
  if which_epoch_data is not None:
    if which_epoch_data == "last":
      which_epoch_data = infer_last_epoch_num(data_dir)
    assert isinstance(which_epoch_data, int), \
        "{}".format(type(which_epoch_data))
    t2t_env.start_new_epoch(which_epoch_data, data_dir)
  else:
    t2t_env.start_new_epoch(-999)
  return t2t_env
def infer_game_name_from_filenames(data_dir, snake_case=True):
  """Infer name from filenames."""
  names = os.listdir(data_dir)
  game_names = [re.findall(pattern=r"^Gym(.*)NoFrameskip", string=name)
                for name in names]
  assert game_names, "No data files found in {}".format(data_dir)
  game_names = sum(game_names, [])
  game_name = game_names[0]
  assert all(game_name == other for other in game_names), \
      "There are multiple different game names in {}".format(data_dir)
  if snake_case:
    game_name = camelcase_to_snakecase(game_name)
  return game_name
def wrap_with_monitor(env, video_dir):
  """Wrap environment with gym.Monitor.

  Video recording provided by Monitor requires
    1) both height and width of observation to be even numbers.
    2) rendering of environment

  Args:
    env: environment.
    video_dir: video directory.

  Returns:
    wrapped environment.
  """
  env = ExtendToEvenDimentions(env)
  env = RenderObservations(env)  # pylint: disable=redefined-variable-type
  env = gym.wrappers.Monitor(env, video_dir, force=True,
                             video_callable=lambda idx: True,
                             write_upon_reset=True)
  return env
Create SimulatedEnv with minimal subset of hparams. def create_simulated_env( output_dir, grayscale, resize_width_factor, resize_height_factor, frame_stack_size, generative_model, generative_model_params, random_starts=True, which_epoch_data="last", **other_hparams ): """"Create SimulatedEnv with minimal subset of hparams.""" # We need these, to initialize T2TGymEnv, but these values (hopefully) have # no effect on player. a_bit_risky_defaults = { "game": "pong", # assumes that T2TGymEnv has always reward_range (-1,1) "real_batch_size": 1, "rl_env_max_episode_steps": -1, "max_num_noops": 0 } for key in a_bit_risky_defaults: if key not in other_hparams: other_hparams[key] = a_bit_risky_defaults[key] hparams = hparam.HParams( grayscale=grayscale, resize_width_factor=resize_width_factor, resize_height_factor=resize_height_factor, frame_stack_size=frame_stack_size, generative_model=generative_model, generative_model_params=generative_model_params, **other_hparams ) return load_data_and_make_simulated_env( output_dir, wm_dir=None, hparams=hparams, which_epoch_data=which_epoch_data, random_starts=random_starts)
def infer_paths(output_dir, **subdirs):
  """Infers standard paths to policy and model directories.

  Example:
    >>> infer_paths("/some/output/dir/", policy="", model="custom/path")
    {"policy": "/some/output/dir/policy", "model": "custom/path",
     "output_dir": "/some/output/dir/"}

  Args:
    output_dir: output directory.
    **subdirs: sub-directories.

  Returns:
    a dictionary with the directories.
  """
  directories = {}
  for name, path in six.iteritems(subdirs):
    directories[name] = path if path else os.path.join(output_dir, name)
  directories["output_dir"] = output_dir
  return directories
def add_to_initial_stack(self, frame):
  """Adds new frame to (initial) frame stack, removes last one."""
  if not self._setable_initial_frames:
    raise ValueError(
        "This instance does not allow to manually set initial frame stack.")
  assert_msg = "{}, {}".format(frame.shape, self._initial_frames.shape[:1])
  assert frame.shape == self._initial_frames.shape[2:], assert_msg
  initial_frames = np.roll(self._initial_frames, shift=-1, axis=1)
  initial_frames[0, -1, ...] = frame
  self._initial_frames = initial_frames
def observation(self, frame):
  """Add single zero row/column to observation if needed."""
  if frame.shape == self.observation_space.shape:
    return frame
  else:
    extended_frame = np.zeros(self.observation_space.shape,
                              self.observation_space.dtype)
    assert self.HW_AXES == (0, 1)
    extended_frame[:frame.shape[0], :frame.shape[1]] = frame
    return extended_frame
def infer(self, ob):
  """Add new observation to frame stack and infer policy.

  Args:
    ob: array of shape (height, width, channels)

  Returns:
    logits and vf.
  """
  self._add_to_stack(ob)
  logits, vf = self.infer_from_frame_stack(self._frame_stack)
  return logits, vf
def infer_from_frame_stack(self, ob_stack):
  """Infer policy from stack of observations.

  Args:
    ob_stack: array of shape (1, frame_stack_size, height, width, channels)

  Returns:
    logits and vf.
  """
  logits, vf = self.sess.run([self.logits_t, self.value_function_t],
                             feed_dict={self.obs_t: ob_stack})
  return logits, vf
def _normalize_string(raw_str):
  """Normalizes the string using tokenizer.encode.

  Args:
    raw_str: the input string

  Returns:
    A string which is ready to be tokenized using split()
  """
  return " ".join(
      token.strip()
      for token in tokenizer.encode(text_encoder.native_to_unicode(raw_str)))
def _prepare_babi_data(tmp_dir, data_dir):
  """Downloads and extracts the dataset.

  Args:
    tmp_dir: temp directory to download and extract the dataset
    data_dir: The base directory where data and vocab files are stored.

  Returns:
    tmp_dir: temp directory containing the raw data.
  """
  if not tf.gfile.Exists(data_dir):
    tf.gfile.MakeDirs(data_dir)

  file_path = os.path.join(tmp_dir, _TAR)
  headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) "
                           "AppleWebKit/537.36 (KHTML, like Gecko) "
                           "Chrome/63.0.3239.132 Safari/537.36"}
  resp = requests.get(_URL, headers=headers)
  with open(file_path, "wb") as f:
    f.write(resp.content)

  tar = tarfile.open(file_path)
  tar.extractall(tmp_dir)
  tar.close()

  return tmp_dir
Parsing the bAbi dataset (train and test). Args: tmp_dir: temp directory to download and extract the dataset babi_task_id: babi task id subset: babi subset dataset_split: dataset split (train or eval) joint_training: if training the model on all tasks. Returns: babi_instances: set of training examples, each a dict containing a story, a question and an answer. babi_lines: all the texts in the data separated based on their appearance in the stories, questions, or answers. def _babi_parser(tmp_dir, babi_task_id, subset, dataset_split, joint_training=True): """Parsing the bAbi dataset (train and test). Args: tmp_dir: temp directory to download and extract the dataset babi_task_id: babi task id subset: babi subset dataset_split: dataset split (train or eval) joint_training: if training the model on all tasks. Returns: babi_instances: set of training examples, each a dict containing a story, a question and an answer. babi_lines: all the texts in the data separated based on their appearance in the stories, questions, or answers. """ def _data_file(mode, task_id): """Generates the path to the data file for the given mode(train/test). Args: mode: either train or test for bAbi dataset task_id: babi task id Returns: data file path """ file_name = (_TASKS[task_id] + "_{}.txt") return os.path.join(_DIR_NAME, subset, file_name.format(mode)) def _all_task_raw_data_generator(tmp_dir, data_file, dataset_split): """Prepares raw data for all tasks to gether.. Args: tmp_dir: temp directory data_file: data file dataset_split: dataset split """ tf.logging.info("Preparing dataset of all task together") globe_name = ("*_{}.txt") mode_name = "test" if dataset_split == problem.DatasetSplit.TRAIN: mode_name = "train" files_name = os.path.join( tmp_dir, _DIR_NAME, subset, globe_name.format(mode_name)) with tf.gfile.GFile(data_file, "wb") as outfile: for filename in tf.gfile.Glob(files_name): if filename == data_file: # don"t want to copy the output into the output continue with tf.gfile.GFile(filename, "rb") as readfile: shutil.copyfileobj(readfile, outfile) def _parse_answer(answer): if (joint_training or babi_task_id in ["qa8", "qa19", "qa0" ]): # "lists-sets" or "path finding" return "".join([d for d in answer.split(",")]) # as a single token! else: return answer if dataset_split == problem.DatasetSplit.TRAIN: babi_train_task_id = "qa0" if joint_training else babi_task_id data_file = os.path.join(tmp_dir, _data_file("train", babi_train_task_id)) else: data_file = os.path.join(tmp_dir, _data_file("test", babi_task_id)) if ((babi_task_id == "qa0" or joint_training) and not tf.gfile.Exists(os.path.join(tmp_dir, data_file))): _all_task_raw_data_generator(tmp_dir, data_file, dataset_split) tf.logging.info("Parsing %s into training/testing instances...", data_file) babi_instances = [] with tf.gfile.GFile(data_file, mode="r") as f: story = [] for line in f: line_num, line = line.strip().split(" ", 1) if int(line_num) == 1: story = [] if "\t" in line: question, answer, _ = line.split("\t") question = _normalize_string(question) substories = [s for s in story if s] answer = _parse_answer(answer) instance = { FeatureNames.STORY: substories, FeatureNames.QUESTION: question, FeatureNames.ANSWER: answer } babi_instances.append(instance) story.append("") else: story.append(_normalize_string(line)) return babi_instances
It dynamically instantiates a class for each bAbi subset-task pair. @registry.register_problem class BabiQaConcatAllTasks_10k(EditSequenceRegexProblem): @property def babi_task_id(self): return "qa0" @property def babi_subset(self): return "en-10k" It does not put the classes into the global namespace, so to access the class we rely on the registry or this module's REGISTERED_PROBLEMS list. It will be available as registry.problem("babi_qa_concat_all_tasks_10k") i.e., change camel case to snake case. Numbers are considered lower case characters for these purposes.
def _register_babi_problems():
  """It dynamically instantiates a class for each bAbi subset-task pair.

    @registry.register_problem
    class BabiQaConcatAllTasks_10k(EditSequenceRegexProblem):
      @property
      def babi_task_id(self):
        return "qa0"

      @property
      def babi_subset(self):
        return "en-10k"

  It does not put the classes into the global namespace, so to access the class
  we rely on the registry or this module's REGISTERED_PROBLEMS list.
  It will be available as registry.problem("babi_qa_concat_all_tasks_10k")
  i.e., change camel case to snake case. Numbers are considered lower case
  characters for these purposes.
  """
  for (subset, subset_suffix) in [("en", "_1k"), ("en-10k", "_10k")]:
    for problem_name, babi_task_id in six.iteritems(_problems_to_register()):
      problem_class = type("BabiQaConcat" + problem_name + subset_suffix,
                           (BabiQaConcat,), {
                               "babi_task_id": babi_task_id,
                               "babi_subset": subset
                           })
      registry.register_problem(problem_class)
      REGISTERED_PROBLEMS.append(problem_class.name)
Builds encoder for the given class labels. Args: data_dir: data directory Returns: An encoder for class labels. def get_labels_encoder(self, data_dir): """Builds encoder for the given class labels. Args: data_dir: data directory Returns: An encoder for class labels. """ label_filepath = os.path.join(data_dir, self.vocab_filename) return text_encoder.TokenTextEncoder(label_filepath)
A generator that yields encoded samples. Args: data_dir: data directory tmp_dir: temp directory dataset_split: dataset split Yields: A dict of encoded "inputs", "context", and "targets".
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
    """A generator that yields encoded samples.

    Args:
      data_dir: data directory
      tmp_dir: temp directory
      dataset_split: dataset split

    Yields:
      A dict of encoded "inputs", "context", and "targets".
    """
    generator = self.generate_samples(data_dir, tmp_dir, dataset_split)
    encoder = self.get_or_create_vocab(data_dir, tmp_dir)
    label_encoder = self.get_labels_encoder(data_dir)
    for sample in generator:
      inputs = encoder.encode(sample["inputs"])
      inputs.append(text_encoder.EOS_ID)
      context = encoder.encode(sample["context"])
      context.append(text_encoder.EOS_ID)
      targets = label_encoder.encode(sample["targets"])
      sample["targets"] = targets
      yield {"inputs": inputs, "context": context, "targets": targets}
Return a dict for encoding and decoding inference input/output. Args: data_dir: data directory Returns: A dict of <feature name, TextEncoder>. def feature_encoders(self, data_dir): """Return a dict for encoding and decoding inference input/output. Args: data_dir: data directory Returns: A dict of <feature name, TextEncoder>. """ encoders = (super(BabiQa, self).feature_encoders(data_dir)) label_encoder = self.get_labels_encoder(data_dir) encoders["targets"] = label_encoder # bAbi as a classification task return encoders
Sets the problem hparams on `defaults`: bAbi targets are treated as class labels. Args: defaults: default hyperparameters unused_model_hparams: model hyperparameters
def hparams(self, defaults, unused_model_hparams):
    """Sets the problem hparams on `defaults`: targets are class labels.

    Args:
      defaults: default hyperparameters
      unused_model_hparams: model hyperparameters
    """
    (super(BabiQa, self).hparams(defaults, unused_model_hparams))
    p = defaults
    num_classes = self._encoders["targets"].vocab_size
    p.modality = {"targets": modalities.ModalityType.CLASS_LABEL}
    p.vocab_size = {"targets": num_classes}
Splits of data to produce and number the output shards for each. def dataset_splits(self): """Splits of data to produce and number the output shards for each.""" return [{ "split": problem.DatasetSplit.TRAIN, "shards": self.num_train_shards, }, { "split": problem.DatasetSplit.EVAL, "shards": self.num_eval_shards, }, { "split": problem.DatasetSplit.TEST, "shards": self.num_test_shards, }]
Traverses directory collecting input and target files.
def _collect_data(directory, input_ext, transcription_ext):
  """Traverses directory collecting input and target files."""
  # Dictionary from string to tuple of strings
  # key: the filepath to a datafile including the datafile's basename. Example,
  #  if the datafile was "/path/to/datafile.wav" then the key would be
  #  "/path/to/datafile"
  # value: a tuple of strings (media_base, media_filepath, label)
  data_files = {}
  for root, _, filenames in os.walk(directory):
    transcripts = [filename for filename in filenames
                   if transcription_ext in filename]
    for transcript in transcripts:
      transcript_path = os.path.join(root, transcript)
      with open(transcript_path, "r") as transcript_file:
        for transcript_line in transcript_file:
          line_contents = transcript_line.strip().split(" ", 1)
          media_base, label = line_contents
          key = os.path.join(root, media_base)
          assert key not in data_files
          media_name = "%s.%s" % (media_base, input_ext)
          media_path = os.path.join(root, media_name)
          data_files[key] = (media_base, media_path, label)
  return data_files
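A hedged usage sketch, assuming a LibriSpeech-style layout where each directory holds .flac media files plus a transcript file whose lines look like "84-121123-0001 SOME UPPERCASE TRANSCRIPT"; the paths below are illustrative.
data_files = _collect_data("/tmp/librispeech/dev-clean", "flac", "trans.txt")
# A line "84-121123-0001 SOME UPPERCASE TRANSCRIPT" inside
# /tmp/librispeech/dev-clean/84/121123/84-121123.trans.txt would produce:
#   data_files[".../84/121123/84-121123-0001"] ==
#       ("84-121123-0001",
#        ".../84/121123/84-121123-0001.flac",
#        "SOME UPPERCASE TRANSCRIPT")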
Adds to base hparams the attributes for librispeech.
def add_librispeech_hparams(hparams):
  """Adds to base hparams the attributes for librispeech."""
  hparams.batch_size = 36
  hparams.audio_compression = 8
  hparams.hidden_size = 2048
  hparams.max_input_seq_length = 600000
  hparams.max_target_seq_length = 350
  hparams.max_length = hparams.max_input_seq_length
  hparams.min_length_bucket = hparams.max_input_seq_length // 2
  hparams.learning_rate = 0.05
  hparams.train_steps = 5000000
  hparams.num_hidden_layers = 4
  return hparams
Generates linearized trees and tokens from the wsj tree format. It uses the linearized algorithm described in https://arxiv.org/abs/1412.7449. Args: tree_string: tree in wsj format Returns: tuple: (words, linearized tree) def words_and_tags_from_wsj_tree(tree_string): """Generates linearized trees and tokens from the wsj tree format. It uses the linearized algorithm described in https://arxiv.org/abs/1412.7449. Args: tree_string: tree in wsj format Returns: tuple: (words, linearized tree) """ stack, tags, words = [], [], [] for tok in tree_string.strip().split(): if tok[0] == "(": symbol = tok[1:] tags.append(symbol) stack.append(symbol) else: assert tok[-1] == ")" stack.pop() # Pop the POS-tag. while tok[-2] == ")": tags.append("/" + stack.pop()) tok = tok[:-1] words.append(tok[:-1]) return str.join(" ", words), str.join(" ", tags[1:-1])
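A small worked example of the linearization: opening brackets emit non-terminals, each word's POS tag is emitted once, closing brackets emit "/"-prefixed non-terminals, and the outermost symbol is stripped from the tag sequence (tags[1:-1]).
words, tags = words_and_tags_from_wsj_tree(
    "(S (NP (DT the) (NN dog)) (VP (VBZ barks)))")
# words == "the dog barks"
# tags  == "NP DT NN /NP VP VBZ /VP"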
Generator for parsing as a sequence-to-sequence task that uses tokens. This generator assumes the file at tree_path contains one WSJ-format tree per line. Each tree is split into its words (the "inputs") and its linearized parse tags (the "targets"), which are converted to integer ids using the given source and target vocabularies. Args: tree_path: path to the file with WSJ format trees, one per line. source_token_vocab: GenericVocabulary object for source vocabulary. target_token_vocab: GenericVocabulary object for target vocabulary. eos: integer to append at the end of each sequence (default: None). Yields: A dictionary {"inputs": source-line, "targets": target-line} where the lines are integer lists converted from tokens in the file lines.
def token_generator(tree_path, source_token_vocab, target_token_vocab,
                    eos=None):
  """Generator for parsing as a sequence-to-sequence task that uses tokens.

  This generator assumes the file at tree_path contains one WSJ-format tree
  per line. Each tree is split into its words (the "inputs") and its
  linearized parse tags (the "targets"), which are converted to integer ids
  using the given source and target vocabularies.

  Args:
    tree_path: path to the file with WSJ format trees, one per line.
    source_token_vocab: GenericVocabulary object for source vocabulary.
    target_token_vocab: GenericVocabulary object for target vocabulary.
    eos: integer to append at the end of each sequence (default: None).

  Yields:
    A dictionary {"inputs": source-line, "targets": target-line} where
    the lines are integer lists converted from tokens in the file lines.
  """
  eos_list = [] if eos is None else [eos]
  with tf.gfile.GFile(tree_path, mode="r") as tree_file:
    tree_line = tree_file.readline()
    while tree_line:
      source, target = words_and_tags_from_wsj_tree(tree_line)
      source_ints = source_token_vocab.encode(source.strip()) + eos_list
      target_ints = target_token_vocab.encode(target.strip()) + eos_list
      yield {"inputs": source_ints, "targets": target_ints}
      tree_line = tree_file.readline()
Generator for parsing as a sequence-to-sequence task that uses tokens. This generator assumes the files parsing_{train,dev}.trees, which contain trees in WSJ format. Args: data_dir: path to the data directory. tmp_dir: path to temporary storage directory. train: whether we're training or not. source_vocab_size: source vocab size. target_vocab_size: target vocab size. Returns: A generator to a dictionary of inputs and outputs. def parsing_token_generator(data_dir, tmp_dir, train, source_vocab_size, target_vocab_size): """Generator for parsing as a sequence-to-sequence task that uses tokens. This generator assumes the files parsing_{train,dev}.trees, which contain trees in WSJ format. Args: data_dir: path to the data directory. tmp_dir: path to temporary storage directory. train: whether we're training or not. source_vocab_size: source vocab size. target_vocab_size: target vocab size. Returns: A generator to a dictionary of inputs and outputs. """ # TODO(lukaszkaiser): Correct these calls to generate vocabularies. No data # sources are being passed. del (data_dir, tmp_dir, train, source_vocab_size, target_vocab_size) assert False, "Vocabulary generation not implemented"
Aggregate stats in per-shard stats files.
def aggregate_stats(stats_files):
  """Aggregate stats in per-shard stats files."""
  all_stats = {}
  for fname in stats_files:
    with tf.gfile.Open(fname) as f:
      stats = json.loads(f.read())
      # Use .items() so the aggregation works on both Python 2 and 3.
      for k, v in stats.items():
        if k not in all_stats:
          if isinstance(v, list):
            all_stats[k] = []
          else:
            all_stats[k] = 0
        if isinstance(v, list):
          all_stats[k].extend(v)
        else:
          all_stats[k] += v

  stats = all_stats
  ref_coverage = float(stats["total_found_refs"]) / stats["total_original_refs"]
  len_bounds = [0, 2, 10, 100, 1000, 5000, 10000, 20000, 50000, 100000, 1000000]
  len_counts, len_bounds = np.histogram(stats["ref_lengths"], len_bounds)
  len_dist = len_counts.astype(np.float32) / len_counts.sum()
  wiki_coverage = (float(stats["num_wikis_written"]) /
                   stats["total_original_wikis"])
  wikis_skipped_no_ref = (float(stats["wikis_skipped_no_refs"]) /
                          stats["total_original_wikis"])
  wikis_skipped_no_lead = (float(stats["wikis_skipped_short_lead"]) /
                           stats["total_original_wikis"])
  wiki_ref_coverage = [
      float(found) / orig for found, orig
      in zip(stats["wiki_found_refs"], stats["wiki_original_refs"]) if found
  ]
  coverage_bounds = np.arange(21).astype(np.float32) / 20
  coverage_counts, coverage_bounds = np.histogram(wiki_ref_coverage,
                                                  coverage_bounds)
  coverage_dist = coverage_counts.astype(np.float32) / coverage_counts.sum()

  agg_stats = dict(
      total_original_wikis=stats["total_original_wikis"],
      total_original_refs=stats["total_original_refs"],
      wiki_coverage=wiki_coverage,
      wikis_skipped_no_ref=wikis_skipped_no_ref,
      wikis_skipped_no_lead=wikis_skipped_no_lead,
      overall_ref_coverage=ref_coverage,
      per_wiki_ref_coverage_dist=list((coverage_dist * 100).astype(int)),
      per_wiki_ref_coverage_bounds=list((coverage_bounds * 100).astype(int)),
      ref_len_dist=list((len_dist * 100).astype(int)),
      ref_len_bounds=list(len_bounds),
  )
  return agg_stats
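The function assumes each per-shard file is a JSON object keyed by the fields read above; scalar fields are summed and list fields are concatenated across shards. A hedged illustration of one shard's stats (all values invented):
example_shard_stats = {
    "total_original_wikis": 100,      # illustrative values only
    "total_original_refs": 400,
    "total_found_refs": 250,
    "num_wikis_written": 80,
    "wikis_skipped_no_refs": 15,
    "wikis_skipped_short_lead": 5,
    "ref_lengths": [120, 4800, 95000],
    "wiki_found_refs": [3, 0, 5],
    "wiki_original_refs": [4, 2, 5],
}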
Map filename to the task id that created it assuming 1k tasks. def filename_to_task_id(fname): """Map filename to the task id that created it assuming 1k tasks.""" # This matches the order and size in WikisumBase.out_filepaths fname = os.path.basename(fname) shard_id_increment = { "train": 0, "dev": 800, "test": 900, } parts = fname.split("-") split = parts[1] shard_id = parts[2] task_id = int(shard_id) + shard_id_increment[split] return task_id
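A hedged example, assuming shard files follow the "<problem>-<split>-<shard>-of-<num_shards>" naming produced by WikisumBase.out_filepaths (the "wikisum" prefix below is illustrative): a dev shard is offset by 800, so dev shard 5 maps to task id 805.
assert filename_to_task_id("wikisum-dev-00005-of-00100") == 805
assert filename_to_task_id("/some/dir/wikisum-train-00042-of-00800") == 42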
Validate presence and minimum size of files.
def validate_data_files(problem, data_files, min_size):
  """Validate presence and minimum size of files."""
  # Check that all files are present
  data_dir = os.path.split(data_files[0])[0]
  out_filepaths = problem.out_filepaths(data_dir)
  missing_filepaths = set(out_filepaths) - set(data_files)
  if missing_filepaths:
    tf.logging.error("Missing %d data files", len(missing_filepaths))

  # Check that each file is at least min_size bytes
  too_small = []
  for data_file in data_files:
    length = get_length(data_file)
    if length < min_size:
      too_small.append(data_file)
  if too_small:
    tf.logging.error("%d files too small", len(too_small))

  bad_files = too_small + list(missing_filepaths)
  return bad_files
Set of hyperparameters. def distill_resnet_32_to_15_cifar20x5(): """Set of hyperparameters.""" hparams = distill_base() hparams.teacher_model = "resnet" hparams.teacher_hparams = "resnet_cifar_32" hparams.student_model = "resnet" hparams.student_hparams = "resnet_cifar_15" hparams.optimizer_momentum_nesterov = True # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.) hparams.teacher_learning_rate = 0.25 * 128. * 8. / 256. hparams.student_learning_rate = 0.2 * 128. * 8. / 256. hparams.learning_rate_decay_scheme = "piecewise" hparams.add_hparam("learning_rate_boundaries", [40000, 60000, 80000]) hparams.add_hparam("learning_rate_multiples", [0.1, 0.01, 0.001]) hparams.task_balance = 0.28 hparams.distill_temperature = 2.0 hparams.num_classes = 20 return hparams
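As a rough sketch of what the "piecewise" decay scheme above presumably describes (this is an assumption about how T2T interprets learning_rate_boundaries and learning_rate_multiples, not its actual scheduler code), the hparams define a piecewise-constant schedule over the global step:
import tensorflow as tf

base_lr = 0.2 * 128. * 8. / 256.  # student_learning_rate from the hparams above
boundaries = [40000, 60000, 80000]
values = [base_lr * m for m in [1.0, 0.1, 0.01, 0.001]]
global_step = tf.train.get_or_create_global_step()
# Learning rate stays at base_lr until step 40000, then drops by 10x at each
# boundary, ending at base_lr * 0.001 after step 80000.
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)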
Downloads and prepares the dataset. Args: tmp_dir: temp directory data_dir: data directory vocab_size: size of vocabulary vocab_filename: name of vocab file
def _prepare_lambada_data(tmp_dir, data_dir, vocab_size, vocab_filename):
  """Downloads and prepares the dataset.

  Args:
    tmp_dir: temp directory
    data_dir: data directory
    vocab_size: size of vocabulary
    vocab_filename: name of vocab file
  """
  if not tf.gfile.Exists(data_dir):
    tf.gfile.MakeDirs(data_dir)

  file_path = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
  tar_all = tarfile.open(file_path)
  tar_all.extractall(tmp_dir)
  tar_all.close()
  tar_train = tarfile.open(os.path.join(tmp_dir, "train-novels.tar"))
  tar_train.extractall(tmp_dir)
  tar_train.close()

  vocab_path = os.path.join(data_dir, vocab_filename)
  if not tf.gfile.Exists(vocab_path):
    with tf.gfile.GFile(os.path.join(tmp_dir, _VOCAB), "r") as infile:
      reader = csv.reader(infile, delimiter="\t")
      words = [row[0] for row in reader]
      words = [_UNK] + words[:vocab_size]
    with tf.gfile.GFile(vocab_path, "w") as outfile:
      outfile.write("\n".join(words))
Returns the file paths for the given split. Args: tmp_dir: temp directory split: dataset split use_control_set: uses control dataset if true. Returns: list of file paths.
def get_dataset_split(tmp_dir, split, use_control_set):
  """Returns the file paths for the given split.

  Args:
    tmp_dir: temp directory
    split: dataset split
    use_control_set: uses control dataset if true.

  Returns:
    list of file paths.
  """
  if not use_control_set:
    dataset_split = {
        problem.DatasetSplit.TRAIN: [
            f for f in tf.gfile.Glob(
                os.path.join(tmp_dir, "train-novels/*/*.txt"))
        ],
        problem.DatasetSplit.EVAL: [
            os.path.join(tmp_dir, "lambada_development_plain_text.txt")
        ],
        problem.DatasetSplit.TEST: [
            os.path.join(tmp_dir, "lambada_test_plain_text.txt")
        ]
    }
  else:
    dataset_split = {
        problem.DatasetSplit.TRAIN: [
            f for f in tf.gfile.Glob(
                os.path.join(tmp_dir, "train-novels/*/*.txt"))
        ],
        problem.DatasetSplit.EVAL: [
            os.path.join(tmp_dir, "lambada_control_test_data_plain_text.txt")
        ],
    }
  return dataset_split[split]
Determine the minimum sequence length given a dataset_split. Args: dataset_split: A problem.DatasetSplit. Returns: The minimum length that a sequence can be for this dataset_split. def min_sequence_length(self, dataset_split): """Determine the minimum sequence length given a dataset_split. Args: dataset_split: A problem.DatasetSplit. Returns: The minimum length that a sequence can be for this dataset_split. """ return { problem.DatasetSplit.TRAIN: 8, problem.DatasetSplit.EVAL: 65, problem.DatasetSplit.TEST: 65 }[dataset_split]
Determine the maximum sequence length given a dataset_split. Args: dataset_split: A problem.DatasetSplit. Returns: The maximum length that a sequence can be for this dataset_split. def max_sequence_length(self, dataset_split): """Determine the maximum sequence length given a dataset_split. Args: dataset_split: A problem.DatasetSplit. Returns: The maximum length that a sequence can be for this dataset_split. """ return { problem.DatasetSplit.TRAIN: 64, problem.DatasetSplit.EVAL: 128, problem.DatasetSplit.TEST: 128 }[dataset_split]
Determine the dataset size given a dataset_split. Args: dataset_split: A problem.DatasetSplit. Returns: The desired number of samples for this dataset_split.
def num_samples(self, dataset_split):
    """Determine the dataset size given a dataset_split.

    Args:
      dataset_split: A problem.DatasetSplit.

    Returns:
      The desired number of samples for this dataset_split.
    """
    return {
        problem.DatasetSplit.TRAIN: 1000000,
        problem.DatasetSplit.EVAL: 10000,
        problem.DatasetSplit.TEST: 10000
    }[dataset_split]
Yields successive checkpoints from model_dir. Args: model_dir: The directory in which checkpoints are saved. timeout_mins: The maximum amount of time in minutes to wait between checkpoints. Set this to -1 to wait indefinitely. Yields: last_ckpt: a new checkpoint path, or None if the timeout was reached. def next_checkpoint(model_dir, timeout_mins=240): """Yields successive checkpoints from model_dir. Args: model_dir: The directory in which checkpoints are saved. timeout_mins: The maximum amount of time in minutes to wait between checkpoints. Set this to -1 to wait indefinitely. Yields: last_ckpt: a new checkpoint path, or None if the timeout was reached. """ last_ckpt = None timeout_secs = None if timeout_mins != -1: timeout_secs = timeout_mins * 60 while True: last_ckpt = tf.contrib.training.wait_for_new_checkpoint( model_dir, last_ckpt, seconds_to_sleep=60, timeout=timeout_secs) if last_ckpt is None: tf.logging.info( "Eval timeout: no new checkpoints within %dm" % timeout_mins) break yield last_ckpt
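A minimal usage sketch (the model directory and the evaluation call are illustrative): iterate over checkpoints as they appear and stop once no new one shows up within the timeout.
import tensorflow as tf

for ckpt_path in next_checkpoint("/tmp/t2t_train/model", timeout_mins=60):
  tf.logging.info("New checkpoint to evaluate: %s", ckpt_path)
  # e.g. estimator.evaluate(eval_input_fn, checkpoint_path=ckpt_path)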
Yields successive, not-yet-decoded checkpoints from model_dir.
def next_undecoded_checkpoint(model_dir, timeout_mins=240):
  """Yields successive, not-yet-decoded checkpoints from model_dir."""
  last_ckpt = None
  last_step = 0
  while True:
    # Get the latest checkpoint.
    last_ckpt = tf.contrib.training.wait_for_new_checkpoint(
        model_dir, last_ckpt, seconds_to_sleep=60, timeout=60 * timeout_mins)
    # Get all the checkpoints from the model dir.
    ckpt_path = tf.train.get_checkpoint_state(model_dir)
    all_model_checkpoint_paths = ckpt_path.all_model_checkpoint_paths
    ckpt_step = np.inf
    next_ckpt = None
    # Find the next checkpoint to eval based on last_step.
    for ckpt in all_model_checkpoint_paths:
      step = int(os.path.basename(ckpt).split("-")[1])
      if step > last_step and step < ckpt_step:
        ckpt_step = step
        next_ckpt = ckpt

    # If all the checkpoints have been evaluated.
    if last_ckpt is None and next_ckpt is None:
      tf.logging.info(
          "Eval timeout: no new checkpoints within %dm" % timeout_mins)
      break

    if next_ckpt is not None:
      last_step = ckpt_step
      last_ckpt = next_ckpt

    yield last_ckpt
The TensorFlow Session config to use. def create_session_config(log_device_placement=False, enable_graph_rewriter=False, gpu_mem_fraction=0.95, use_tpu=False, xla_jit_level=tf.OptimizerOptions.OFF, inter_op_parallelism_threads=0, intra_op_parallelism_threads=0): """The TensorFlow Session config to use.""" if use_tpu: graph_options = tf.GraphOptions() else: if enable_graph_rewriter: rewrite_options = rewriter_config_pb2.RewriterConfig() rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON graph_options = tf.GraphOptions(rewrite_options=rewrite_options) else: graph_options = tf.GraphOptions( optimizer_options=tf.OptimizerOptions( opt_level=tf.OptimizerOptions.L1, do_function_inlining=False, global_jit_level=xla_jit_level)) gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction) config = tf.ConfigProto( allow_soft_placement=True, graph_options=graph_options, gpu_options=gpu_options, log_device_placement=log_device_placement, inter_op_parallelism_threads=inter_op_parallelism_threads, intra_op_parallelism_threads=intra_op_parallelism_threads, isolate_session_state=True) return config
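A minimal sketch of using the config for a local (non-TPU) session; the memory fraction is illustrative.
import tensorflow as tf

session_config = create_session_config(gpu_mem_fraction=0.5)
with tf.Session(config=session_config) as sess:
  sess.run(tf.no_op())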
Create RunConfig, TPUConfig, and Parallelism object. def create_run_config(model_name, master="", model_dir=None, iterations_per_loop=1000, num_shards=8, log_device_placement=False, save_checkpoints_steps=1000, save_checkpoints_secs=None, keep_checkpoint_max=20, keep_checkpoint_every_n_hours=10000, num_gpus=1, gpu_order="", num_async_replicas=1, enable_graph_rewriter=False, gpu_mem_fraction=0.95, no_data_parallelism=False, optionally_use_dist_strat=False, daisy_chain_variables=True, schedule="continuous_train_and_eval", worker_job="/job:localhost", worker_id=0, ps_replicas=0, ps_job="/job:ps", ps_gpu=0, random_seed=None, sync=False, tpu_infeed_sleep_secs=None, use_tpu=False, use_tpu_estimator=False, xla_jit_level=tf.OptimizerOptions.OFF, inter_op_parallelism_threads=0, log_step_count_steps=100, intra_op_parallelism_threads=0, tpu_config_extra_kwargs=None, cloud_tpu_name=""): """Create RunConfig, TPUConfig, and Parallelism object.""" session_config = create_session_config( log_device_placement=log_device_placement, enable_graph_rewriter=enable_graph_rewriter, gpu_mem_fraction=gpu_mem_fraction, use_tpu=use_tpu, xla_jit_level=xla_jit_level, inter_op_parallelism_threads=inter_op_parallelism_threads, intra_op_parallelism_threads=intra_op_parallelism_threads) run_config_args = { "master": master, "evaluation_master": master, "model_dir": model_dir, "session_config": session_config, "save_summary_steps": 100, "save_checkpoints_steps": save_checkpoints_steps, "save_checkpoints_secs": save_checkpoints_secs, "keep_checkpoint_max": keep_checkpoint_max, "keep_checkpoint_every_n_hours": keep_checkpoint_every_n_hours, "tf_random_seed": random_seed, "log_step_count_steps": log_step_count_steps } if save_checkpoints_secs: del run_config_args["save_checkpoints_steps"] run_config_cls = tf.contrib.learn.RunConfig if use_tpu or use_tpu_estimator: # If using TPUEstimator, use TPU RunConfig, add TPUConfig, and add # additional args. tpu_config_kwargs = { "iterations_per_loop": iterations_per_loop, "num_shards": num_shards, "per_host_input_for_training": True, "initial_infeed_sleep_secs": tpu_infeed_sleep_secs, } if tpu_config_extra_kwargs is not None: tpu_config_kwargs.update(tpu_config_extra_kwargs) run_config_cls = tf.contrib.tpu.RunConfig tpu_config = tf.contrib.tpu.TPUConfig( **tpu_config_kwargs) run_config_args["tpu_config"] = tpu_config if not master and "KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS" in os.environ: # If running on TPU but no master is set and the KUBE env var is present # then we're running on ML Engine. Set the master. 
run_config_args["master"] = os.environ[ "KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS"] run_config_args["evaluation_master"] = run_config_args["master"] elif not master and cloud_tpu_name: # Update run_config to use cluster instead of master/evaluation_master # as we need the cluster spec to use Cloud Pods tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( cloud_tpu_name) run_config_args["cluster"] = tpu_cluster_resolver del run_config_args["master"] del run_config_args["evaluation_master"] elif is_cloud_async_distributed(): run_config_cls = tf.estimator.RunConfig del run_config_args["master"] del run_config_args["evaluation_master"] config = run_config_cls(**run_config_args) # If not using TPU, add device info for data_parallelism config.use_tpu = use_tpu if not use_tpu: config.t2t_device_info = { "num_async_replicas": num_async_replicas, } use_distribution_strategy = ( optionally_use_dist_strat and t2t_model.T2TModel.has_symmetric_shards(model_name) and not no_data_parallelism and ps_replicas == 0 and ps_gpu == 0 and num_async_replicas == 1) if use_distribution_strategy: tf.logging.info( "Configuring MirroredStrategy DistributionStrategy to replicate the " "model." ) distribution = tf.contrib.distribute.MirroredStrategy() config = config.replace(train_distribute=distribution) config.data_parallelism = None else: tf.logging.info("Configuring DataParallelism to replicate the model.") config.data_parallelism = devices.data_parallelism( daisy_chain_variables=daisy_chain_variables, ps_replicas=ps_replicas, ps_job=ps_job, ps_gpu=ps_gpu, schedule=schedule, sync=sync, worker_gpu=num_gpus, worker_replicas=num_async_replicas, worker_id=worker_id, gpu_order=gpu_order, worker_job=worker_job, no_data_parallelism=no_data_parallelism) return config
Create a T2T Estimator. def create_estimator(model_name, hparams, run_config, schedule="train_and_evaluate", decode_hparams=None, use_tpu=False, use_tpu_estimator=False, use_xla=False): """Create a T2T Estimator.""" model_fn = t2t_model.T2TModel.make_estimator_model_fn( model_name, hparams, decode_hparams=decode_hparams, use_tpu=use_tpu) del use_xla if use_tpu or use_tpu_estimator: problem = hparams.problem batch_size = ( problem.tpu_batch_size_per_shard(hparams) * run_config.tpu_config.num_shards) mlperf_log.transformer_print( key=mlperf_log.INPUT_BATCH_SIZE, value=batch_size) if getattr(hparams, "mtf_mode", False): batch_size = problem.tpu_batch_size_per_shard(hparams) predict_batch_size = batch_size if decode_hparams and decode_hparams.batch_size: predict_batch_size = decode_hparams.batch_size if decode_hparams and run_config.tpu_config: decode_hparams.add_hparam("iterations_per_loop", run_config.tpu_config.iterations_per_loop) estimator = tf.contrib.tpu.TPUEstimator( model_fn=model_fn, model_dir=run_config.model_dir, config=run_config, use_tpu=use_tpu, train_batch_size=batch_size, eval_batch_size=batch_size if "eval" in schedule else None, predict_batch_size=predict_batch_size, experimental_export_device_assignment=True) else: estimator = tf.estimator.Estimator( model_fn=model_fn, model_dir=run_config.model_dir, config=run_config, ) return estimator
Create train and eval hooks for Experiment. def create_hooks(use_tfdbg=False, use_dbgprofile=False, dbgprofile_kwargs=None, use_validation_monitor=False, validation_monitor_kwargs=None, use_early_stopping=False, early_stopping_kwargs=None): """Create train and eval hooks for Experiment.""" train_hooks = [] eval_hooks = [] if use_tfdbg: hook = debug.LocalCLIDebugHook() train_hooks.append(hook) eval_hooks.append(hook) if use_dbgprofile: # Recorded traces can be visualized with chrome://tracing/ # The memory/tensor lifetime is also profiled tf.logging.info("Using ProfilerHook") defaults = dict(save_steps=10, show_dataflow=True, show_memory=True) defaults.update(dbgprofile_kwargs) train_hooks.append(tf.train.ProfilerHook(**defaults)) if use_validation_monitor: tf.logging.info("Using ValidationMonitor") train_hooks.append( tf.contrib.learn.monitors.ValidationMonitor( hooks=eval_hooks, **validation_monitor_kwargs)) if use_early_stopping: tf.logging.info("Using EarlyStoppingHook") hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs) # Adding to both training and eval so that eval aborts as well train_hooks.append(hook) eval_hooks.append(hook) return train_hooks, eval_hooks
Create Experiment. def create_experiment( run_config, hparams, model_name, problem_name, data_dir, train_steps, eval_steps, min_eval_frequency=2000, eval_throttle_seconds=600, schedule="train_and_evaluate", export=False, decode_hparams=None, use_tfdbg=False, use_dbgprofile=False, eval_early_stopping_steps=None, eval_early_stopping_metric=None, eval_early_stopping_metric_delta=None, eval_early_stopping_metric_minimize=True, eval_timeout_mins=240, eval_use_test_set=False, use_tpu=False, use_tpu_estimator=False, use_xla=False, additional_train_hooks=None, additional_eval_hooks=None, warm_start_from=None, decode_from_file="", decode_to_file="", decode_reference="", std_server_protocol=None): """Create Experiment.""" # HParams hparams.add_hparam("model_dir", run_config.model_dir) hparams.add_hparam("data_dir", data_dir) hparams.add_hparam("train_steps", train_steps) hparams.add_hparam("eval_steps", eval_steps) hparams.add_hparam("schedule", schedule) hparams.add_hparam("warm_start_from", warm_start_from) hparams.add_hparam("std_server_protocol", std_server_protocol) hparams.add_hparam("eval_freq_in_steps", min_eval_frequency) hparams.add_hparam("eval_timeout_mins", eval_timeout_mins) if decode_hparams is not None: decode_hparams.add_hparam("decode_from_file", decode_from_file) if decode_to_file and not decode_hparams.decode_to_file: decode_hparams.decode_to_file = decode_to_file if decode_reference and not decode_hparams.decode_reference: decode_hparams.decode_reference = decode_reference add_problem_hparams(hparams, problem_name) # Estimator estimator = create_estimator( model_name, hparams, run_config, schedule=schedule, decode_hparams=decode_hparams, use_tpu=use_tpu, use_tpu_estimator=use_tpu_estimator, use_xla=use_xla) # Input fns from Problem problem = hparams.problem train_input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.TRAIN, hparams) dataset_split = "test" if eval_use_test_set else None dataset_kwargs = {"dataset_split": dataset_split} eval_input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.EVAL, hparams, dataset_kwargs=dataset_kwargs) # Export exporter = None if export: def compare_fn(best_eval_result, current_eval_result): metric = eval_early_stopping_metric or "loss" return current_eval_result[metric] < best_eval_result[metric] def serving_input_receiver_fn(hparams, decode_hparams, use_tpu): return problem.serving_input_fn(hparams, decode_hparams, use_tpu) exporter = tf.estimator.BestExporter( name="best", serving_input_receiver_fn=serving_input_receiver_fn, compare_fn=compare_fn, assets_extra=problem.export_assets) # Hooks validation_monitor_kwargs = dict( input_fn=eval_input_fn, eval_steps=eval_steps, every_n_steps=min_eval_frequency, early_stopping_rounds=eval_early_stopping_steps, early_stopping_metric=eval_early_stopping_metric, early_stopping_metric_minimize=eval_early_stopping_metric_minimize) dbgprofile_kwargs = {"output_dir": run_config.model_dir} early_stopping_kwargs = dict( events_dir=os.path.join(run_config.model_dir, "eval_continuous"), tag=eval_early_stopping_metric, num_plateau_steps=eval_early_stopping_steps, plateau_decrease=eval_early_stopping_metric_minimize, plateau_delta=eval_early_stopping_metric_delta, every_n_steps=min_eval_frequency) # Eval on TPU Pods is not supported yet if use_tpu and run_config.tpu_config.num_shards > 8 and "eval" in schedule: raise ValueError("Eval is not currently supported on a TPU Pod") # In-process eval (and possible early stopping) if schedule == "continuous_train_and_eval" and min_eval_frequency: 
tf.logging.warn("ValidationMonitor only works with " "--schedule=train_and_evaluate") use_validation_monitor = ( schedule == "train_and_evaluate" and min_eval_frequency) # Distributed early stopping local_schedules = ["train_and_evaluate", "continuous_train_and_eval"] use_early_stopping = ( schedule not in local_schedules and eval_early_stopping_steps) train_hooks, eval_hooks = create_hooks( use_tfdbg=use_tfdbg, use_dbgprofile=use_dbgprofile, dbgprofile_kwargs=dbgprofile_kwargs, use_validation_monitor=use_validation_monitor, validation_monitor_kwargs=validation_monitor_kwargs, use_early_stopping=use_early_stopping, early_stopping_kwargs=early_stopping_kwargs) hook_context = HookContext( estimator=estimator, problem=problem, hparams=hparams) train_hooks += t2t_model.T2TModel.get_train_hooks(model_name, hook_context) eval_hooks += t2t_model.T2TModel.get_eval_hooks(model_name, hook_context) if additional_train_hooks: train_hooks += additional_train_hooks if additional_eval_hooks: eval_hooks += additional_eval_hooks train_hooks = tf.contrib.learn.monitors.replace_monitors_with_hooks( train_hooks, estimator) eval_hooks = tf.contrib.learn.monitors.replace_monitors_with_hooks( eval_hooks, estimator) train_spec = tf.estimator.TrainSpec( train_input_fn, max_steps=train_steps, hooks=train_hooks) eval_spec = tf.estimator.EvalSpec( eval_input_fn, steps=eval_steps, hooks=eval_hooks, start_delay_secs=0 if hparams.schedule == "evaluate" else 120, throttle_secs=eval_throttle_seconds, exporters=exporter) return T2TExperiment(estimator, hparams, train_spec, eval_spec, use_validation_monitor, decode_hparams)
Wrapper for canonical experiment_fn. See create_experiment. def create_experiment_fn(*args, **kwargs): """Wrapper for canonical experiment_fn. See create_experiment.""" def experiment_fn(run_config, hparams): return create_experiment(run_config, hparams, *args, **kwargs) return experiment_fn
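A sketch of the intended call pattern: everything except run_config and hparams is bound up front, and the training loop supplies those two later. The model, problem, paths, and step counts below are illustrative.
experiment_fn = create_experiment_fn(
    model_name="transformer",
    problem_name="translate_ende_wmt32k",
    data_dir="/tmp/t2t_data",
    train_steps=100000,
    eval_steps=100)
# Later, typically inside the trainer, this returns a T2TExperiment:
# experiment = experiment_fn(run_config, hparams)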
Restore from a checkpoint. def restore_checkpoint(ckpt_dir, saver, sess, must_restore=False): """Restore from a checkpoint.""" ckpt = tf.train.get_checkpoint_state(ckpt_dir) if must_restore and not ckpt: raise ValueError("No checkpoint found in %s" % ckpt_dir) if not ckpt: return 0 path = ckpt.model_checkpoint_path tf.logging.info("Restoring checkpoint %s", path) saver.restore(sess, path) step = int(path.split("-")[-1]) return step
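A minimal sketch (the directory is illustrative): resume from the latest checkpoint if one exists, and fall back to step 0 when the directory is empty.
import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
saver = tf.train.Saver()
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # start_step is the checkpoint's step, or 0 if nothing was restored.
  start_step = restore_checkpoint("/tmp/t2t_train/model", saver, sess)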
Does eval and decode after training every eval_freq_in_steps. def train_eval_and_decode(self): """Does eval and decode after training every eval_freq_in_steps.""" eval_steps = self._hparams.eval_freq_in_steps packed_dataset = "_packed" in self._hparams.problem.name mlperf_log.transformer_print(key=mlperf_log.TRAIN_LOOP) for i in range(0, self._train_spec.max_steps, eval_steps): mlperf_log.transformer_print( key=mlperf_log.TRAIN_EPOCH, value=i // eval_steps) if packed_dataset and i > 0: problem = registry.problem(self._hparams.problem.name + "_packed") p_hparams = problem.get_hparams(self._hparams) self._hparams.problem = problem self._hparams.problem_hparams = p_hparams self._estimator.train( self._train_spec.input_fn, steps=eval_steps, hooks=self._train_spec.hooks) self._set_eval_dir_name("eval") self._estimator.evaluate( self._eval_spec.input_fn, steps=self._eval_spec.steps, hooks=self._eval_spec.hooks, name="eval") if packed_dataset: problem = registry.problem( self._hparams.problem.name.replace("_packed", "")) p_hparams = problem.get_hparams(self._hparams) self._hparams.problem = problem self._hparams.problem_hparams = p_hparams mlperf_log.transformer_print(key=mlperf_log.EVAL_START) if self._hparams.mlperf_mode: self._decode_hparams.mlperf_decode_step = i + eval_steps self.decode(dataset_split=tf.estimator.ModeKeys.EVAL) d_hparams = self._decode_hparams if self._hparams.mlperf_mode and d_hparams.mlperf_success: mlperf_log.transformer_print( key=mlperf_log.RUN_STOP, value={"success": "true"}) break d_hparams = self._decode_hparams if self._hparams.mlperf_mode and not d_hparams.mlperf_success: mlperf_log.transformer_print( key=mlperf_log.RUN_STOP, value={"success": "false"})
Evaluate until checkpoints stop being produced. def continuous_eval(self): """Evaluate until checkpoints stop being produced.""" for ckpt_path in next_checkpoint(self._hparams.model_dir, self._hparams.eval_timeout_mins): # Skip zero'th step. train_step = decoding.get_step_from_ckpt_path(ckpt_path) if train_step == 0: tf.logging.info("Skipping evaluation at step 0") continue self.evaluate()
Evaluate on train data until checkpoints stop being produced. def continuous_eval_on_train_data(self): """Evaluate on train data until checkpoints stop being produced.""" for ckpt_path in next_checkpoint(self._hparams.model_dir, self._hparams.eval_timeout_mins): # Skip zero'th step. train_step = decoding.get_step_from_ckpt_path(ckpt_path) if train_step == 0: tf.logging.info("Skipping evaluation at step 0") continue self.evaluate_on_train_data()
Starts a TensorFlow server and joins the serving thread. Typically used for parameter servers. Raises: ValueError: if not enough information is available in the estimator's config to create a server. def run_std_server(self): """Starts a TensorFlow server and joins the serving thread. Typically used for parameter servers. Raises: ValueError: if not enough information is available in the estimator's config to create a server. """ config = tf.estimator.RunConfig() server = tf.train.Server( config.cluster_spec, job_name=config.task_type, task_index=config.task_id, protocol=config.protocol) server.join()
Decodes from dataset or file. def decode(self, dataset_split=None, decode_from_file=False, checkpoint_path=None): """Decodes from dataset or file.""" if decode_from_file: decoding.decode_from_file(self._estimator, self._decode_hparams.decode_from_file, self._hparams, self._decode_hparams, self._decode_hparams.decode_to_file) else: decoding.decode_from_dataset( self._estimator, self._hparams.problem.name, self._hparams, self._decode_hparams, dataset_split=dataset_split, checkpoint_path=checkpoint_path)