def local_global_attention(x, self_attention_bias, hparams,
                           q_padding="LEFT", kv_padding="LEFT"):
  """Local and global 1d self attention."""
  with tf.variable_scope("self_local_global_att"):
    [x_global, x_local] = tf.split(x, 2, axis=-1)
    split_hidden_size = int(hparams.hidden_size / 2)
    split_heads = int(hparams.num_heads / 2)
    if self_attention_bias is not None:
      self_attention_bias = get_self_attention_bias(x)
    y_global = common_attention.multihead_attention(
        x_global, None, self_attention_bias,
        hparams.attention_key_channels or split_hidden_size,
        hparams.attention_value_channels or split_hidden_size,
        split_hidden_size, split_heads, hparams.attention_dropout,
        q_filter_width=hparams.q_filter_width,
        kv_filter_width=hparams.kv_filter_width,
        q_padding=q_padding, kv_padding=kv_padding,
        name="global_self_att")
    y_local = common_attention.multihead_attention(
        x_local, None, None,
        hparams.attention_key_channels or split_hidden_size,
        hparams.attention_value_channels or split_hidden_size,
        split_hidden_size, split_heads, hparams.attention_dropout,
        attention_type="local_masked",
        block_length=hparams.block_length,
        block_width=hparams.block_width,
        q_filter_width=hparams.q_filter_width,
        kv_filter_width=hparams.kv_filter_width,
        q_padding=q_padding, kv_padding=kv_padding,
        name="local_self_att")
    y = tf.concat([y_global, y_local], axis=-1)
    return y
def full_self_attention(x, self_attention_bias, hparams,
                        q_padding="LEFT", kv_padding="LEFT"):
  """Full self-attention layer."""
  x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
  if self_attention_bias is not None:
    self_attention_bias = get_self_attention_bias(x)
  with tf.variable_scope("self_att"):
    y = common_attention.multihead_attention(
        x, None, self_attention_bias,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size, hparams.num_heads,
        hparams.attention_dropout,
        q_filter_width=hparams.q_filter_width,
        kv_filter_width=hparams.kv_filter_width,
        q_padding=q_padding, kv_padding=kv_padding,
        name="self_att")
    if is_4d:
      y = tf.reshape(y, [x_shape[0], x_shape[1], x_shape[2], x_shape[3]])
      y.set_shape([None, None, None, hparams.hidden_size])
    return y
def encdec_attention_1d(x, encoder_output,
                        encoder_decoder_attention_bias, hparams):
  """1d encoder-decoder attention."""
  x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
  encoder_output, _, _ = maybe_reshape_4d_to_3d(encoder_output)
  with tf.variable_scope("encdec_attention"):
    # Encoder Decoder attention.
    y = common_attention.multihead_attention(
        x, encoder_output, encoder_decoder_attention_bias,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size, hparams.num_heads,
        hparams.attention_dropout,
        name="encdec_attention")
  if is_4d:
    y = tf.reshape(y, x_shape)
    y.set_shape([None, None, None, hparams.hidden_size])
  return y
def transformer_decoder_layers(inputs, encoder_output, num_layers, hparams,
                               self_attention_bias=None,
                               encoder_decoder_attention_bias=None,
                               attention_type=AttentionType.LOCAL_2D,
                               losses=None,
                               name="transformer"):
  """Multi layer transformer."""
  x = inputs
  x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout)
  if attention_type == AttentionType.DILATED:
    assert len(hparams.gap_sizes) == num_layers
  for layer in range(num_layers):
    with tf.variable_scope("%s_layer_%d" % (name, layer)):
      # self-attention + skip connections
      if attention_type == AttentionType.LOCAL_2D:
        y = local_attention_2d(
            common_layers.layer_preprocess(x, hparams), hparams,
            attention_type="masked_local_attention_2d")
      elif attention_type == AttentionType.LOCAL_1D:
        y = local_attention_1d(
            common_layers.layer_preprocess(x, hparams), hparams,
            attention_type="local_mask_right",
            q_padding="LEFT", kv_padding="LEFT")
      elif attention_type == AttentionType.RELATIVE_LOCAL_1D:
        y = local_attention_1d(
            common_layers.layer_preprocess(x, hparams), hparams,
            attention_type="local_relative_mask_right",
            q_padding="LEFT", kv_padding="LEFT")
      elif attention_type == AttentionType.NON_CAUSAL_1D:
        y = local_attention_1d(
            common_layers.layer_preprocess(x, hparams), hparams,
            attention_type="local_unmasked",
            q_padding="VALID", kv_padding="VALID")
      elif attention_type == AttentionType.LOCAL_BLOCK:
        y = local_within_block_attention(
            common_layers.layer_preprocess(x, hparams),
            self_attention_bias, hparams,
            attention_type="local_within_block_mask_right",
            q_padding="LEFT", kv_padding="LEFT")
      elif attention_type == AttentionType.GLOCAL:
        y = local_global_attention(
            common_layers.layer_preprocess(x, hparams),
            self_attention_bias, hparams,
            q_padding="LEFT", kv_padding="LEFT")
      elif attention_type == AttentionType.DILATED:
        y = dilated_attention_1d(
            common_layers.layer_preprocess(x, hparams), hparams,
            q_padding="LEFT", kv_padding="LEFT",
            gap_size=hparams.gap_sizes[layer])
      elif attention_type == AttentionType.GLOBAL:
        y = full_self_attention(
            common_layers.layer_preprocess(x, hparams),
            self_attention_bias, hparams,
            q_padding="LEFT", kv_padding="LEFT")
      x = common_layers.layer_postprocess(x, y, hparams)
      # enc-dec attention + skip connections
      if encoder_output is not None:
        y = encdec_attention_1d(
            common_layers.layer_preprocess(x, hparams),
            encoder_output, encoder_decoder_attention_bias, hparams)
        x = common_layers.layer_postprocess(x, y, hparams)
      # feed-fwd layers + skip connections
      y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams,
                    losses=losses)
      x = common_layers.layer_postprocess(x, y, hparams)
  return common_layers.layer_preprocess(x, hparams)
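Every sublayer above (attention, enc-dec attention, feed-forward) is wrapped in the same layer_preprocess / layer_postprocess sandwich. The following is a minimal numpy sketch of that residual pattern, assuming the common default of layer norm before the sublayer and a residual add after; it is schematic orientation only, not the t2t implementation.

import numpy as np

def layer_norm(x, eps=1e-6):
  # Normalize over the last (hidden) dimension.
  mean = x.mean(axis=-1, keepdims=True)
  var = x.var(axis=-1, keepdims=True)
  return (x - mean) / np.sqrt(var + eps)

def sublayer(x, f):
  # preprocess (norm) -> sublayer -> postprocess (residual add).
  return x + f(layer_norm(x))

x = np.random.randn(2, 5, 8)  # [batch, length, hidden]
ffn = lambda h: np.maximum(h @ np.random.randn(8, 8), 0.0)  # toy sublayer
out = sublayer(x, ffn)
assert out.shape == x.shape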
def transformer_encoder_layers(inputs, num_layers, hparams,
                               attention_type=AttentionType.GLOBAL,
                               self_attention_bias=None,
                               q_padding="VALID", kv_padding="VALID",
                               name="transformer"):
  """Multi layer transformer encoder."""
  x = inputs
  x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout)
  for layer in range(num_layers):
    # attention layers + skip connections
    with tf.variable_scope("%s_layer_%d" % (name, layer)):
      if attention_type == AttentionType.LOCAL_2D:
        y = local_attention_2d(
            common_layers.layer_preprocess(x, hparams), hparams,
            attention_type="local_attention_2d")
      elif attention_type == AttentionType.LOCAL_1D:
        y = local_attention_1d(
            common_layers.layer_preprocess(x, hparams), hparams,
            attention_type="local_unmasked",
            q_padding=q_padding, kv_padding=kv_padding)
      elif attention_type == AttentionType.GLOBAL:
        y = full_self_attention(
            common_layers.layer_preprocess(x, hparams),
            self_attention_bias, hparams,
            q_padding=q_padding, kv_padding=kv_padding)
      x = common_layers.layer_postprocess(x, y, hparams)
      # feed-fwd layer + skip connections
      y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams)
      x = common_layers.layer_postprocess(x, y, hparams)
  return common_layers.layer_preprocess(x, hparams)
def ffn_layer(x, hparams, losses=None):
  """ffn layer transformer."""
  with tf.variable_scope("ffn"):
    if hparams.ffn_layer == "none":
      return x
    if hparams.ffn_layer == "conv_hidden_relu":
      y = common_layers.dense_relu_dense(
          x, hparams.filter_size, hparams.hidden_size,
          dropout=hparams.relu_dropout)
    elif hparams.ffn_layer == "normed_conv_hidden_relu":
      y = common_layers.normed_conv_hidden_relu(
          x, hparams.norm_type, hparams.layer_norm_epsilon,
          hparams.filter_size, hparams.hidden_size,
          dropout=hparams.relu_dropout, norm_name="convnorm")
    elif hparams.ffn_layer == "self_attention_ffn":
      x_shape = tf.shape(x)
      x = tf.reshape(x, [x_shape[0], -1, hparams.hidden_size])
      y = common_attention.ffn_self_attention_layer(
          x, hparams.filter_size, hparams.hidden_size,
          hparams.num_parts, hparams.attention_dropout, hparams.share_kv)
      y = tf.reshape(y, x_shape)
    elif hparams.ffn_layer == "local_moe_tpu":
      overhead = (hparams.moe_overhead_train
                  if hparams.mode == tf.estimator.ModeKeys.TRAIN
                  else hparams.moe_overhead_eval)
      x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
      y, loss = expert_utils.local_moe_tpu(
          x, hparams.filter_size // 2, hparams.hidden_size,
          hparams.moe_num_experts, overhead=overhead,
          loss_coef=hparams.moe_loss_coef)
      if is_4d:
        y = tf.reshape(y, x_shape)
      if losses is None:
        raise ValueError(
            "transformer_ffn_layer with type local_moe_tpu must pass in "
            "a losses list")
      losses.append(loss)
    else:
      assert hparams.ffn_layer == "glu_ffn"
      y = common_layers.gated_linear_unit_layer(x)
    return y
def get_self_attention_bias(x):
  """Creates masked self attention bias.

  Args:
    x: A tensor of shape [batch, length, depth]

  Returns:
    self_attention_bias: A tensor of shape [1, 1, length, length]
  """
  x_shape = common_layers.shape_list(x)
  self_attention_bias = common_attention.attention_bias_lower_triangle(
      x_shape[1])
  return self_attention_bias
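For intuition, here is a minimal numpy sketch of what such a lower-triangular bias looks like. The [1, 1, length, length] layout and the large negative masking value mirror the conventions above, but this is an illustration, not the common_attention implementation.

import numpy as np

def lower_triangle_bias(length, neg=-1e9):
  # 0 where position j <= i (visible); large negative where j > i (masked).
  band = np.tril(np.ones((length, length)))
  return ((1.0 - band) * neg)[None, None, :, :]  # [1, 1, length, length]

print(lower_triangle_bias(3)[0, 0])
# row i can attend to columns j <= i (bias 0); future columns get -1e9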
def postprocess_image(x, rows, cols, hparams):
  """Postprocessing after decoding.

  Args:
    x: Tensor of shape [batch, ...], where ... can be any rank such that the
      number of elements in x is batch * rows * cols * hparams.hidden_size.
    rows: Integer representing number of rows in a 2-D data point.
    cols: Integer representing number of columns in a 2-D data point.
    hparams: HParams set.

  Returns:
    Tensor of shape [batch, rows, cols, depth], where depth is
    hparams.num_mixtures * 10 if hparams.likelihood is DMOL, otherwise 256.
    In the special case of inference and block raster scan order, it is a
    Tensor of shape [batch, num_blocks_rows, num_block_cols, block_length,
    block_width, depth].
  """
  batch = common_layers.shape_list(x)[0]
  x = tf.reshape(x, [batch, rows, cols, hparams.hidden_size])
  likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
  if likelihood == DistributionType.DMOL:
    depth = hparams.num_mixtures * 10
    targets = tf.layers.dense(x, depth, use_bias=False, activation=None,
                              name="output_conv")
  else:
    depth = 256
    targets = tf.layers.dense(x, depth, use_bias=True, activation=None,
                              name="output_conv")
  if (hparams.mode == tf.estimator.ModeKeys.PREDICT and
      hparams.block_raster_scan):
    y = targets
    yshape = common_layers.shape_list(y)
    block_length = hparams.query_shape[0]
    block_width = hparams.query_shape[1]
    # Break into block row wise.
    y = tf.reshape(y, [batch, yshape[1] // block_length, block_length,
                       yshape[2], depth])
    yshape = common_layers.shape_list(y)
    # Break into blocks width wise.
    y_blocks = tf.reshape(y, [batch, yshape[1], yshape[2],
                              yshape[3] // block_width, block_width, depth])
    # Reshape targets as [batch, num_blocks_rows, num_block_cols,
    # block_length, block_width, depth].
    targets = tf.transpose(y_blocks, [0, 1, 3, 2, 4, 5])
  return targets
def prepare_encoder(inputs, hparams, attention_type="local_1d"):
  """Prepare encoder for images."""
  x = prepare_image(inputs, hparams, name="enc_channels")
  # Add position signals.
  x = add_pos_signals(x, hparams, "enc_pos")
  x_shape = common_layers.shape_list(x)
  if attention_type == "local_1d":
    x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2],
                       hparams.hidden_size])
    x.set_shape([None, None, hparams.hidden_size])
  elif attention_type == "local_2d":
    x.set_shape([None, None, None, hparams.hidden_size])
  return x
def prepare_decoder(targets, hparams):
  """Prepare decoder for images."""
  targets_shape = common_layers.shape_list(targets)
  channels = hparams.num_channels
  curr_infer_length = None

  # During training, images are [batch, IMG_LEN, IMG_LEN, 3].
  # At inference, they are [batch, curr_infer_length, 1, 1].
  if hparams.mode == tf.estimator.ModeKeys.PREDICT:
    curr_infer_length = targets_shape[1]
    if hparams.block_raster_scan:
      assert hparams.img_len*channels % hparams.query_shape[1] == 0
      assert hparams.img_len % hparams.query_shape[0] == 0
      total_block_width = hparams.img_len*channels
      # Decoding is in block raster scan order. We divide the image into
      # hparams.query_shape blocks and then decode each block in raster scan.
      # To make that compatible with our inference pipeline, pad the target so
      # that rows is a multiple of query_shape and columns is a multiple of
      # hparams.img_len*channels.
      curr_infer_length = targets_shape[1]
      block_padding_factor = total_block_width * hparams.query_shape[0]
      targets = tf.pad(targets, [
          [0, 0], [0, -curr_infer_length % block_padding_factor],
          [0, 0], [0, 0]])

      num_blocks = total_block_width // hparams.query_shape[1]
      # Reshape the image to represent blocks.
      target_blocks = tf.reshape(
          targets, [targets_shape[0], -1, num_blocks, hparams.query_shape[0],
                    hparams.query_shape[1]])
      # Transpose to read the image in 2D fashion.
      targets = tf.transpose(target_blocks, [0, 1, 3, 2, 4])
    else:
      # Add padding to make sure the size of targets is a multiple of
      # img_height times number of channels. This is needed for positional
      # encodings and for doing the RGB lookup.
      padding_factor = channels * hparams.img_len
      targets = tf.pad(targets, [
          [0, 0], [0, -curr_infer_length % padding_factor], [0, 0], [0, 0]])
    targets = tf.reshape(targets,
                         [targets_shape[0], -1, hparams.img_len, channels])
  # Preprocess image.
  x = prepare_image(targets, hparams, name="dec_channels")
  x_shape = common_layers.shape_list(x)
  if (hparams.dec_attention_type == AttentionType.LOCAL_2D or
      hparams.dec_attention_type == AttentionType.LOCAL_BLOCK):
    x = common_attention.right_shift_blockwise(x, hparams.query_shape)
    x = add_pos_signals(x, hparams, "dec_pos")
  else:
    # Add position signals.
    x = tf.reshape(x, [targets_shape[0], x_shape[1]*x_shape[2],
                       hparams.hidden_size])
    x = common_layers.shift_right_3d(x)
    x = tf.reshape(x, [targets_shape[0], x_shape[1], x_shape[2],
                       hparams.hidden_size])
    x = add_pos_signals(x, hparams, "dec_pos")
  x = common_layers.cast_like(x, targets)
  return x, x_shape[1], x_shape[2]
def create_output(decoder_output, rows, cols, targets, hparams):
  """Creates output from decoder output and vars.

  Args:
    decoder_output: Tensor of shape [batch, ...], where ... can be any rank
      such that the number of elements is batch * rows * cols *
      hparams.hidden_size.
    rows: Integer representing number of rows in a 2-D data point.
    cols: Integer representing number of columns in a 2-D data point.
    targets: Tensor of shape [batch, hparams.img_len, hparams.img_len,
      hparams.num_channels].
    hparams: HParams set.

  Returns:
    Tensor of shape [batch, hparams.img_len, hparams.img_len,
    hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise
    [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256].
    In the special case of predict mode, it is a Tensor of rank 5.
  """
  del targets  # unused arg
  decoded_image = postprocess_image(decoder_output, rows, cols, hparams)
  batch = common_layers.shape_list(decoded_image)[0]
  depth = common_layers.shape_list(decoded_image)[-1]
  likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
  if hparams.mode == tf.estimator.ModeKeys.PREDICT:
    y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth])
    output = y[:, :rows, :, :, :]
  elif likelihood == DistributionType.CAT:
    # Unpack the cols dimension of the Categorical.
    channels = hparams.num_channels
    output = tf.reshape(decoded_image,
                        [batch, rows, cols // channels, channels, depth])
  else:
    output = decoded_image
  return output
def get_channel_embeddings(io_depth, targets, hidden_size, name="channel"):
  """Get separate embedding for each of the channels."""
  targets_split = tf.split(targets, io_depth, axis=3)
  rgb_embedding_var = tf.get_variable("rgb_target_emb_%s" % name,
                                      [256 * io_depth, hidden_size])
  rgb_embedding_var = tf.identity(rgb_embedding_var)
  rgb_embedding_var *= float(hidden_size)**0.5
  channel_target_embs = []
  for i in range(io_depth):
    # Add the channel offsets to get the right embedding, since the
    # embedding tensor has shape [256 * io_depth, hidden_size].
    target_ids = tf.squeeze(targets_split[i], axis=3) + i * 256
    target_embs = common_layers.gather(rgb_embedding_var, target_ids)
    channel_target_embs.append(target_embs)
  return tf.concat(channel_target_embs, axis=-1)
def simulate(self, action):
  """Step the batch of environments.

  The results of the step can be accessed from the variables defined below.

  Args:
    action: Tensor holding the batch of actions to apply.

  Returns:
    Operation.
  """
  with tf.name_scope("environment/simulate"):
    if action.dtype in (tf.float16, tf.float32, tf.float64):
      action = tf.check_numerics(action, "action")

    def step(action):
      step_response = self._batch_env.step(action)
      # Current env doesn't return `info`, but EnvProblem does.
      # TODO(afrozm): The proper way to do this is to make T2TGymEnv return
      # an empty info return value.
      if len(step_response) == 3:
        (observ, reward, done) = step_response
      else:
        (observ, reward, done, _) = step_response
      return (observ, reward.astype(np.float32), done)

    observ, reward, done = tf.py_func(
        step, [action],
        [self.observ_dtype, tf.float32, tf.bool], name="step")
    reward = tf.check_numerics(reward, "reward")
    reward.set_shape((len(self),))
    done.set_shape((len(self),))
    with tf.control_dependencies([self._observ.assign(observ)]):
      return tf.identity(reward), tf.identity(done)
def _reset_non_empty(self, indices):
  """Reset the batch of environments.

  Args:
    indices: The batch indices of the environments to reset; defaults to all.

  Returns:
    Batch tensor of the new observations.
  """
  observ = tf.py_func(
      self._batch_env.reset, [indices], self.observ_dtype, name="reset")
  observ.set_shape(indices.get_shape().concatenate(self.observ_shape))
  with tf.control_dependencies([
      tf.scatter_update(self._observ, indices, observ)]):
    return tf.identity(observ)
def include_revision(revision_num, skip_factor=1.1):
  """Decide whether to include a revision.

  If the number of revisions is large, we exclude some revisions to avoid
  a quadratic blowup in runtime, since the article is likely also large.

  We make the ratio between consecutive included revision numbers
  approximately equal to "skip_factor".

  Args:
    revision_num: an integer
    skip_factor: a floating point number >= 1.0

  Returns:
    a boolean
  """
  if skip_factor <= 1.0:
    return True
  return (int(math.log1p(revision_num) / math.log(skip_factor)) != int(
      math.log(revision_num + 2.0) / math.log(skip_factor)))
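A quick check of the thinning behavior, assuming the function above is in scope: early revision numbers are kept densely, while later ones are spaced roughly geometrically by skip_factor.

import math

kept = [n for n in range(1000) if include_revision(n)]
print(kept[:10], "...", kept[-3:])
# low revision numbers are mostly kept; high ones are ~1.1x apart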
def file_page_generator(my_file, max_page_size=2**28):
  """Read wikipedia pages from a history dump.

  Since some pages can be terabytes in size (with all the revisions),
  we limit page size to max_page_size bytes.

  Args:
    my_file: an open file object.
    max_page_size: an integer

  Yields:
    strings
  """
  page_start = " <page>\n"
  page_end = " </page>\n"
  chunk_size = max_page_size
  leftovers = ""
  while True:
    chunk = my_file.read(chunk_size)
    if not chunk:
      break
    chunk = leftovers + chunk
    current_pos = 0
    while True:
      start_pos = chunk.find(page_start, current_pos)
      if start_pos == -1:
        break
      end_pos = chunk.find(page_end, start_pos)
      if end_pos == -1:
        if len(chunk) - start_pos > max_page_size:
          leftovers = ""
        else:
          leftovers = chunk[start_pos:]
        break
      raw_page = chunk[start_pos + len(page_start):end_pos]
      if len(raw_page) < max_page_size:
        ret = parse_page(raw_page)
        if ret:
          yield ret
      current_pos = end_pos + len(page_end)
def get_title(page):
  """Extract the title from a page.

  Args:
    page: a string

  Returns:
    a string
  """
  start_pos = page.find("<title>")
  end_pos = page.find("</title>")
  assert start_pos != -1
  assert end_pos != -1
  start_pos += len("<title>")
  return text_encoder.to_unicode_utf8(page[start_pos:end_pos])
def get_id(page):
  """Extract the id from a page.

  Args:
    page: a string

  Returns:
    an integer
  """
  start_pos = page.find("<id>")
  end_pos = page.find("</id>")
  assert start_pos != -1
  assert end_pos != -1
  start_pos += len("<id>")
  return int(page[start_pos:end_pos])
def get_revisions(page):
  """Extract the revisions of a page.

  Args:
    page: a string

  Returns:
    a list of strings
  """
  start_string = " <revision>\n"
  end_string = " </revision>\n"
  ret = []
  current_pos = 0
  while True:
    start_pos = page.find(start_string, current_pos)
    if start_pos == -1:
      break
    end_pos = page.find(end_string, start_pos)
    assert end_pos != -1
    ret.append(page[start_pos + len(start_string):end_pos])
    current_pos = end_pos + len(end_string)
  return ret
def parse_page(raw_page):
  """Create a dictionary with title, id, and list of revisions.

  The dictionary contains:
    "title": a string
    "id": an integer
    "revisions": a list of strings

  Args:
    raw_page: a string

  Returns:
    a dictionary, or None in the case of an error.
  """
  ret = {"title": get_title(raw_page), "id": get_id(raw_page)}
  if ":" in ret["title"]:
    return None
  ret["revisions"] = get_revisions(raw_page)
  return ret
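A small usage sketch of the parsers above on a synthetic page body. It assumes the helpers above are in scope (get_title routes through tensor2tensor's text_encoder); the sample markup is hypothetical but matches the tags the parsers look for.

sample = ("<title>Sample page</title>\n"
          "<id>12345</id>\n"
          " <revision>\n"
          "  <text>First revision text</text>\n"
          " </revision>\n")
page = parse_page(sample)
print(page["title"], page["id"], len(page["revisions"]))
# -> Sample page 12345 1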
def maybe_copy_file_to_directory(source_filepath, target_directory):
  """Copy a file to a directory if it is not already there.

  Returns the target filepath.

  Args:
    source_filepath: a string
    target_directory: a string

  Returns:
    a string
  """
  if not tf.gfile.Exists(target_directory):
    tf.logging.info("Creating directory %s" % target_directory)
    os.mkdir(target_directory)
  target_filepath = os.path.join(target_directory,
                                 os.path.basename(source_filepath))
  if not tf.gfile.Exists(target_filepath):
    tf.logging.info("Copying %s to %s" % (source_filepath, target_filepath))
    tf.gfile.Copy(source_filepath, target_filepath)
    statinfo = os.stat(target_filepath)
    tf.logging.info("Successfully copied %s, %s bytes." %
                    (target_filepath, statinfo.st_size))
  else:
    tf.logging.info("Not copying, file already found: %s" % target_filepath)
  return target_filepath
def corpus_page_generator(corpus_files, tmp_dir, max_page_size_exp):
  """Generate pages from a list of .7z encoded history dumps.

  Args:
    corpus_files: a list of strings
    tmp_dir: a string
    max_page_size_exp: an integer

  Yields:
    strings
  """
  for remote_filepath in corpus_files:
    filepath = maybe_copy_file_to_directory(remote_filepath, tmp_dir)
    tf.logging.info("Reading from " + filepath)
    command = ["7z", "x", "-so", filepath]
    tf.logging.info("Running command: %s", command)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=-1)
    for page in file_page_generator(p.stdout, 2**max_page_size_exp):
      yield page
def get_text(revision, strip=True):
  """Extract the text from a revision.

  Args:
    revision: a string
    strip: a boolean

  Returns:
    a string
  """
  # The text start tag looks like "<text ..otherstuff>".
  start_pos = revision.find("<text")
  assert start_pos != -1
  end_tag_pos = revision.find(">", start_pos)
  assert end_tag_pos != -1
  end_tag_pos += len(">")
  end_pos = revision.find("</text>")
  if end_pos == -1:
    ret = ""
  else:
    ret = revision[end_tag_pos:end_pos]
  if strip:
    ret = strip_text(ret)
  ret = text_encoder.to_unicode_utf8(ret)
  return ret
def _remove_curly_braces(text):
  """Remove everything in curly braces.

  Curly braces may be nested, so we keep track of depth.

  Args:
    text: a string

  Returns:
    a string
  """
  current_pos = 0
  depth = 0
  ret = ""
  for match in re.finditer("[{}]", text):
    if depth == 0:
      ret += text[current_pos:match.start()]
    depth += 1 if text[match.start()] == "{" else -1
    current_pos = match.end()
  if depth == 0:
    ret += text[current_pos:]
  # If depth != 0, the article has mismatched braces; it still seems better
  # to drop the trailing text than to keep it.
  return ret
def _remove_double_brackets(text):
  """Remove double brackets, but leave the viewable text.

  Args:
    text: a string

  Returns:
    a string
  """

  def replacement_fn(s):
    if ":" in s:
      # This is probably a category or something like that.
      return ""
    # Keep the part after the bar.
    bar_pos = s.find("|")
    if bar_pos == -1:
      return s
    return s[bar_pos + 1:]

  return _find_and_replace(text, "[[", "]]", replacement_fn)
def _remove_boring_lines(text):
  """Remove lines that do not start with a letter or a quote.

  From inspecting the data, this seems to leave in most prose and remove
  most weird stuff.

  Args:
    text: a string

  Returns:
    a string
  """
  lines = text.split("\n")
  filtered = [line for line in lines if re.match("[a-zA-Z\"\']", line)]
  return "\n".join(filtered)
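A self-contained illustration of the filtering criterion: lines beginning with a letter or a quote survive, markup-like lines do not.

import re

lines = ["The quick brown fox.", "== Section ==", "'Quoted' prose.",
         "| table row"]
kept = [line for line in lines if re.match("[a-zA-Z\"']", line)]
print(kept)  # ['The quick brown fox.', "'Quoted' prose."]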
def get_or_generate_vocabulary(data_dir, tmp_dir, data_prefix,
                               max_page_size_exp,
                               approx_vocab_size=32768, strip=True):
  """Get or generate the vocabulary.

  Args:
    data_dir: a string
    tmp_dir: a string
    data_prefix: a string
    max_page_size_exp: an integer
    approx_vocab_size: an integer
    strip: a boolean

  Returns:
    a TextEncoder
  """
  num_pages_for_vocab_generation = approx_vocab_size // 3
  vocab_file = vocab_filename(approx_vocab_size, strip)

  def my_generator(data_prefix):
    """Line generator for vocab."""
    count = 0
    for page in corpus_page_generator(
        all_corpus_files(data_prefix)[::-1], tmp_dir, max_page_size_exp):
      revisions = page["revisions"]
      if revisions:
        text = get_text(revisions[-1], strip=strip)
        yield text
        count += 1
        if count % 100 == 0:
          tf.logging.info("reading pages for vocab %d" % count)
        if count > num_pages_for_vocab_generation:
          break

  return generator_utils.get_or_generate_vocab_inner(data_dir, vocab_file,
                                                     approx_vocab_size,
                                                     my_generator(data_prefix))
def get_encoder_from_vocab(vocab_filepath):
  """Get encoder from vocab file.

  If vocab is not found in output dir, it will be copied there by
  copy_vocab_to_output_dir to clarify the vocab used to generate the data.

  Args:
    vocab_filepath: path to vocab, either local or cns

  Returns:
    A SubwordTextEncoder vocabulary object.
  """
  if not tf.gfile.Exists(vocab_filepath):
    raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath))

  tf.logging.info("Found vocab file: %s", vocab_filepath)
  encoder = text_encoder.SubwordTextEncoder(vocab_filepath)
  return encoder
def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0):
  """Filter out [source, target] pairs whose edit distance is too large.

  Args:
    source_target_input: a list of [source, target] pairs
    max_equal_to_diff_ratio: cutoff for the ratio of diff chars to equal
      chars between source and target

  Returns:
    source_target_output: filtered subset of [source, target] input pairs
    thrown_out_count: number of examples filtered out
  """
  thrown_out_count = 0
  source_target_output = []
  if not max_equal_to_diff_ratio:
    return source_target_input, thrown_out_count
  for src_tgt in source_target_input:
    opcodes = fast_match_sequences(*src_tgt)
    diff_char_count = 0
    equal_char_count = 0
    for tag, i1, i2, j1, j2 in opcodes:
      if tag == "diff":
        # max() prevents double-counting substitutions.
        diff_char_count += max(i2 - i1, j2 - j1)
      else:
        equal_char_count += i2 - i1
    if diff_char_count <= max_equal_to_diff_ratio * equal_char_count:
      source_target_output.append(src_tgt)
    else:
      thrown_out_count += 1
  return source_target_output, thrown_out_count
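The filtering criterion can be sanity-checked with the standard library. This hedged sketch uses difflib.SequenceMatcher instead of fast_match_sequences, so the opcodes (and therefore the exact ratios) can differ slightly from the code above.

import difflib

def diff_ratio(src, tgt):
  # Ratio of differing chars to equal chars, in the spirit of the filter.
  opcodes = difflib.SequenceMatcher(None, src, tgt).get_opcodes()
  equal = sum(i2 - i1 for tag, i1, i2, j1, j2 in opcodes if tag == "equal")
  diff = sum(max(i2 - i1, j2 - j1)
             for tag, i1, i2, j1, j2 in opcodes if tag != "equal")
  return diff / max(equal, 1)

print(diff_ratio("the cat sat", "the cat sat."))   # small: mostly equal
print(diff_ratio("the cat sat", "completely new"))  # large: mostly diff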
def introduce_errors(s, corruption_rate=3e-3, infill_marker="|?|",
                     max_infill_len=8):
  """Artificially add spelling errors and infill markers.

  This function should be applied to the inputs of a correction model.
  The artificial errors are particularly useful to train a network to
  correct spelling when the training data does not contain many natural
  errors.

  Also replaces some substrings with an "infill" marker.  e.g.
  "the fat cat sat on the mat" -> "the fat ca??? the mat"

  This causes the trained model to learn infilling (predicting what text
  to insert at the current cursor position).

  Args:
    s: a string (the uncorrupted text)
    corruption_rate: a floating point value.  Probability of introducing an
      error/infill at each character.
    infill_marker: a string
    max_infill_len: an optional integer - maximum number of characters to
      remove and replace by an infill marker.  None means no infilling.

  Returns:
    a tuple (corrupted string, number of errors introduced)
  """
  num_errors = 0
  ret = []
  operations = [
      "delete",     # delete a character
      "insert",     # insert a random character from the input string
      "replace",    # replace a character with a random character from
                    #   the input string
      "transpose",  # transpose two adjacent characters
  ]
  if max_infill_len:
    operations.append("infill")
  pos = 0
  while pos < len(s):
    if random.random() >= corruption_rate:
      ret.append(s[pos])
      pos += 1
      continue
    num_errors += 1
    operation = operations[random.randint(0, len(operations) - 1)]
    if operation == "delete":
      pos += 1
    elif operation == "insert":
      ret.append(s[random.randint(0, len(s) - 1)])
    elif operation == "replace":
      ret.append(s[random.randint(0, len(s) - 1)])
      pos += 1
    elif operation == "transpose":
      ret.append(s[pos + 1] if pos + 1 < len(s) else "")
      ret.append(s[pos])
      pos += 2
    else:
      assert operation == "infill"
      ret.append(infill_marker)
      pos += random.randint(0, max_infill_len)
  return "".join(ret), num_errors
def fast_match_sequences(a, b, a_start=0, a_end=None, b_start=0, b_end=None,
                         min_match_length=3, max_recursion_depth=128):
  """Compute diffs between two sequences.

  This function is similar in functionality and spirit to
  difflib.SequenceMatcher.get_opcodes, but it seems to run faster.

  If a_start, a_end, b_start, b_end are specified, then we compute diffs of
  the segments a[a_start:a_end] and b[b_start:b_end]. Returned indices are
  relative to the full sequence.

  We try to match the longest matching segments first, but due to heuristics
  in finding the matches, this is not guaranteed.

  Matching segments shorter than min_match_length are counted as part of the
  surrounding differing segments, unless they are at the beginning or end of
  both sequences. This helps eliminate junk matches.

  Args:
    a: a sequence
    b: a sequence
    a_start: an optional integer
    a_end: an optional integer
    b_start: an optional integer
    b_end: an optional integer
    min_match_length: an integer
    max_recursion_depth: an integer - avoids crashes in weird corner cases
      involving pairs of long repetitive sequences.

  Returns:
    a list of 5-tuples (tag, i1, i2, j1, j2).  Each tuple represents the
    alignment of segment a[i1:i2] with b[j1:j2].  tag is either "equal" or
    "diff".  Note that the tags differ from those returned by
    difflib.SequenceMatcher.get_opcodes.
  """
  if a_end is None:
    a_end = len(a)
  if b_end is None:
    b_end = len(b)
  if a_start == a_end and b_start == b_end:
    return []
  if a_start == a_end or b_start == b_end:
    return [("diff", a_start, a_end, b_start, b_end)]
  # Compute an index from value to first occurrence in the b segment.
  # Technically, we should index and explore all occurrences of a value,
  # but that might be much slower.
  b_index = {}
  for j in range(b_end - 1, b_start - 1, -1):
    b_index[b[j]] = j
  # We will look for the longest match we can find.
  max_match_length = 0
  a_pos = a_start
  while a_pos < a_end:
    val = a[a_pos]
    b_pos = b_index.get(val)
    if b_pos is None:
      a_pos += 1
      continue
    else:
      a_match_start = a_pos
      a_match_end = a_pos + 1
      b_match_start = b_pos
      b_match_end = b_pos + 1
      while (a_match_start > a_start and b_match_start > b_start and
             a[a_match_start - 1] == b[b_match_start - 1]):
        a_match_start -= 1
        b_match_start -= 1
      while (a_match_end < a_end and b_match_end < b_end and
             a[a_match_end] == b[b_match_end]):
        a_match_end += 1
        b_match_end += 1
      # Compute the length of the matching segment. We prefer the longest.
      match_length = a_match_end - a_match_start
      # Extra credit for matching at the beginning or end of the sequence.
      if a_match_start == 0 and b_match_start == 0:
        match_length += min_match_length
      if a_match_end == len(a) and b_match_end == len(b):
        match_length += min_match_length
      if match_length > max_match_length:
        max_match_length = match_length
        best_match = (a_match_start, a_match_end, b_match_start, b_match_end)
      # Advance a_pos to the end of this match to avoid wasting time
      # rediscovering this match.
      a_pos = a_match_end
  if max_match_length < min_match_length or max_recursion_depth == 0:
    return [("diff", a_start, a_end, b_start, b_end)]
  a_match_start, a_match_end, b_match_start, b_match_end = best_match
  return (fast_match_sequences(
      a, b, a_start, a_match_start, b_start, b_match_start, min_match_length,
      max_recursion_depth - 1) + [
          ("equal", a_match_start, a_match_end, b_match_start, b_match_end)
      ] + fast_match_sequences(a, b, a_match_end, a_end, b_match_end, b_end,
                               min_match_length, max_recursion_depth - 1))
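A usage sketch, assuming fast_match_sequences as defined above is in scope:

src = "the quick brown fox jumps"
tgt = "the quick red fox leaps"
for tag, i1, i2, j1, j2 in fast_match_sequences(src, tgt):
  print(tag, repr(src[i1:i2]), "->", repr(tgt[j1:j2]))
# "equal" spans cover shared text such as "the quick " and " fox ";
# "diff" spans pair up segments like "brown"/"red".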
def begin(self):
  """Load variables from checkpoint.

  New model variables have the following name format:
    new_model_scope/old_model_scope/xxx/xxx:0
  To find the map of name to variable, need to strip the new_model_scope
  and then match the old_model_scope and remove the suffix :0.
  """
  variables_to_restore = tf.contrib.framework.get_variables_to_restore(
      include=self._include, exclude=self._exclude)
  # Remove new_model_scope from variable name prefix.
  assignment_map = {variable.name[len(self._new_model_scope):]: variable
                    for variable in variables_to_restore
                    if variable.name.startswith(self._new_model_scope)}
  # Remove :0 from variable name suffix.
  assignment_map = {name.split(":")[0]: variable
                    for name, variable in six.iteritems(assignment_map)
                    if name.startswith(self._old_model_scope)}
  self._assignment_map = assignment_map
  tf.logging.info("restoring %d variables from checkpoint %s" % (
      len(assignment_map), self._checkpoint_path))
  tf.train.init_from_checkpoint(self._checkpoint_path, self._assignment_map)
@classmethod
def create_time_step(cls,
                     observation=None,
                     done=False,
                     raw_reward=None,
                     processed_reward=None,
                     action=None):
  """Creates a TimeStep with both rewards and actions as optional."""
  return cls(observation, done, raw_reward, processed_reward, action)
def attention(targets_shifted, inputs_encoded, norm_fn, hparams, bias=None):
  """Complete attention layer with preprocessing."""
  separabilities = [hparams.separability, hparams.separability]
  if hparams.separability < 0:
    separabilities = [hparams.separability - 1, hparams.separability]
  targets_timed = common_layers.subseparable_conv_block(
      common_layers.add_timing_signal(targets_shifted),
      hparams.hidden_size, [((1, 1), (5, 1)), ((4, 1), (5, 1))],
      normalizer_fn=norm_fn,
      padding="LEFT",
      separabilities=separabilities,
      name="targets_time")
  if hparams.attention_type == "transformer":
    targets_timed = tf.squeeze(targets_timed, 2)
    target_shape = tf.shape(targets_timed)
    targets_segment = tf.zeros([target_shape[0], target_shape[1]])
    target_attention_bias = common_attention.attention_bias(
        targets_segment, targets_segment, lower_triangular=True)
    inputs_attention_bias = tf.zeros([
        tf.shape(inputs_encoded)[0], hparams.num_heads,
        tf.shape(targets_segment)[1],
        tf.shape(inputs_encoded)[1]
    ])
    qv = common_attention.multihead_attention(
        targets_timed, None, target_attention_bias,
        hparams.hidden_size, hparams.hidden_size, hparams.hidden_size,
        hparams.num_heads, hparams.attention_dropout,
        name="self_attention")
    qv = common_attention.multihead_attention(
        qv, inputs_encoded, inputs_attention_bias,
        hparams.hidden_size, hparams.hidden_size, hparams.hidden_size,
        hparams.num_heads, hparams.attention_dropout,
        name="encdec_attention")
    return tf.expand_dims(qv, 2)
  elif hparams.attention_type == "simple":
    targets_with_attention = common_layers.simple_attention(
        targets_timed, inputs_encoded, bias=bias)
    return norm_fn(targets_shifted + targets_with_attention,
                   name="attn_norm")
def multi_conv_res(x, padding, name, layers, hparams, mask=None, source=None):
  """A stack of separable convolution blocks with residual connections."""
  with tf.variable_scope(name):
    padding_bias = None
    if mask is not None:
      padding_bias = (1.0 - mask) * -1e9  # Bias to not attend to padding.
      if padding == "LEFT":  # Do not mask anything when left-padding.
        mask = None
    if (hparams.kernel_scheme in _KERNEL_SCHEMES and
        hparams.dilation_scheme in _DILATION_SCHEMES):
      kernels = _KERNEL_SCHEMES[hparams.kernel_scheme]
      dilations = _DILATION_SCHEMES[hparams.dilation_scheme]
      dilations_and_kernels = list(zip(dilations, kernels))
      dilations_and_kernels1 = dilations_and_kernels[:2]
      dilations_and_kernels2 = dilations_and_kernels[2:]
    else:
      k = (hparams.kernel_height, hparams.kernel_width)
      k2 = (hparams.large_kernel_size, 1)
      dilations_and_kernels1 = [((1, 1), k), ((1, 1), k)]
      dilations_and_kernels2 = [((1, 1), k2), ((4, 4), k2)]
    separabilities1 = [hparams.separability, hparams.separability]
    separabilities2 = [hparams.separability] * len(dilations_and_kernels2)
    if hparams.separability < 0:
      separabilities1 = [hparams.separability - 1, hparams.separability]
      separabilities2 = [
          hparams.separability - i
          for i in reversed(range(len(dilations_and_kernels2)))
      ]

    def norm_fn(x, name):
      with tf.variable_scope(name, default_name="norm"):
        return common_layers.apply_norm(
            x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon)

    for layer in range(layers):
      with tf.variable_scope("layer_%d" % layer):
        y = common_layers.subseparable_conv_block(
            x,
            hparams.hidden_size,
            dilations_and_kernels1,
            normalizer_fn=norm_fn,
            padding=padding,
            mask=mask,
            separabilities=separabilities1,
            name="residual1")
        x += common_layers.subseparable_conv_block(
            x + y,
            hparams.hidden_size,
            dilations_and_kernels2,
            normalizer_fn=norm_fn,
            padding=padding,
            mask=mask,
            separabilities=separabilities2,
            name="residual2") + y
        if source is not None and hparams.attention_type != "none":
          x += attention(x, source, norm_fn, hparams, bias=padding_bias)
        if mask is not None:
          x *= mask
    return tf.nn.dropout(x, 1.0 - hparams.dropout)
def rank_loss(sentence_emb, image_emb, margin=0.2):
  """Experimental rank loss, thanks to kkurach@ for the code."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Both sentence_emb and image_emb have size [batch, depth].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
    diagonal = tf.diag_part(scores)  # [batch]
    cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
    cost_im = tf.maximum(
        0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
    # Clear diagonals.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
    cost_s *= empty_diagonal_mat
    cost_im *= empty_diagonal_mat
    return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im)
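For reference, the same margin-based bidirectional ranking objective in plain numpy: a sketch mirroring the TF code above, not a drop-in replacement.

import numpy as np

def rank_loss_np(sentence_emb, image_emb, margin=0.2):
  # L2-normalize, score all pairs, then hinge on margin vs. the diagonal.
  s = sentence_emb / np.linalg.norm(sentence_emb, axis=1, keepdims=True)
  im = image_emb / np.linalg.norm(image_emb, axis=1, keepdims=True)
  scores = im @ s.T  # [batch, batch]
  diag = np.diag(scores)
  cost_s = np.maximum(0.0, margin - diag + scores)            # rank sentences
  cost_im = np.maximum(0.0, margin - diag[:, None] + scores)  # rank images
  off_diag = 1.0 - np.eye(len(scores))  # clear the matching pairs
  return (cost_s * off_diag).mean() + (cost_im * off_diag).mean()

print(rank_loss_np(np.random.randn(4, 8), np.random.randn(4, 8)))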
def similarity_cost(inputs_encoded, targets_encoded):
  """Loss encouraging encodings to be more similar to their own targets than to others."""
  # This is a first very simple version: handle variable-length by padding
  # to same length and putting everything into batch. In need of a better way.
  x, y = common_layers.pad_to_same_length(inputs_encoded, targets_encoded)
  depth = tf.shape(inputs_encoded)[3]
  x, y = tf.reshape(x, [-1, depth]), tf.reshape(y, [-1, depth])
  return rank_loss(x, y)
def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
  """Middle part of slicenet, connecting encoder and decoder."""

  def norm_fn(x, name):
    with tf.variable_scope(name, default_name="norm"):
      return common_layers.apply_norm(x, hparams.norm_type,
                                      hparams.hidden_size,
                                      hparams.norm_epsilon)

  # Flatten targets and embed target_space_id.
  targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2)
  target_space_emb = tf.tile(target_space_emb,
                             [tf.shape(targets_flat)[0], 1, 1, 1])

  # Use attention from each target to look at input and retrieve.
  targets_shifted = common_layers.shift_right(
      targets_flat, pad_value=target_space_emb)
  if hparams.attention_type == "none":
    targets_with_attention = tf.zeros_like(targets_shifted)
  else:
    inputs_padding_bias = (1.0 - mask) * -1e9  # Bias to not attend to padding.
    targets_with_attention = attention(
        targets_shifted, inputs_encoded, norm_fn, hparams,
        bias=inputs_padding_bias)

  # Positional targets: merge attention and raw.
  kernel = (hparams.kernel_height, hparams.kernel_width)
  targets_merged = common_layers.subseparable_conv_block(
      tf.concat([targets_with_attention, targets_shifted], axis=3),
      hparams.hidden_size, [((1, 1), kernel)],
      normalizer_fn=norm_fn,
      padding="LEFT",
      separability=4,
      name="targets_merge")
  return targets_merged, 0.0
def embedding_to_padding(emb):
  """Input embeddings -> is_padding."""
  emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1, keep_dims=True)
  return tf.to_float(tf.equal(emb_sum, 0.0))
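A tiny numpy illustration of the padding test: a timestep whose embedding is exactly all zeros is flagged as padding.

import numpy as np

emb = np.array([[[0.5, -0.2], [0.0, 0.0], [0.1, 0.3]]])  # [1, 3, depth]
is_padding = (np.abs(emb).sum(axis=-1, keepdims=True) == 0.0).astype(
    np.float32)
print(is_padding[0, :, 0])  # [0. 1. 0.] -- the all-zero timestep is padding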
def slicenet_internal(inputs, targets, target_space, hparams,
                      run_decoder=True):
  """The slicenet model, main step used for training."""
  with tf.variable_scope("slicenet"):
    # Project to hidden size if necessary.
    if inputs.get_shape().as_list()[-1] != hparams.hidden_size:
      inputs = common_layers.conv_block(
          inputs, hparams.hidden_size, [((1, 1), (3, 3))],
          first_relu=False, padding="SAME", force2d=True)

    # Flatten inputs and encode.
    inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
    inputs_mask = 1.0 - embedding_to_padding(inputs)
    inputs = common_layers.add_timing_signal(inputs)  # Add position info.
    target_space_emb = embed_target_space(target_space, hparams.hidden_size)
    extra_layers = int(hparams.num_hidden_layers * 1.5)
    inputs_encoded = multi_conv_res(
        inputs, "SAME", "encoder", extra_layers, hparams, mask=inputs_mask)
    if not run_decoder:
      return inputs_encoded
    # Do the middle part.
    decoder_start, similarity_loss = slicenet_middle(
        inputs_encoded, targets, target_space_emb, inputs_mask, hparams)
    # Decode.
    decoder_final = multi_conv_res(
        decoder_start, "LEFT", "decoder", hparams.num_hidden_layers,
        hparams, mask=inputs_mask, source=inputs_encoded)
    return decoder_final, tf.reduce_mean(similarity_loss)
def slicenet_params1():
  """Set of hyperparameters."""
  hparams = common_hparams.basic_params1()
  hparams.batch_size = 1024
  hparams.hidden_size = 768
  hparams.dropout = 0.5
  hparams.symbol_dropout = 0.2
  hparams.label_smoothing = 0.1
  hparams.clip_grad_norm = 2.0
  hparams.num_hidden_layers = 4
  hparams.kernel_height = 3
  hparams.kernel_width = 1
  hparams.norm_type = "layer"
  hparams.learning_rate_decay_scheme = "exp"
  hparams.learning_rate = 0.05
  hparams.learning_rate_warmup_steps = 3000
  hparams.initializer_gain = 1.0
  hparams.weight_decay = 3.0
  hparams.num_sampled_classes = 0
  hparams.sampling_method = "argmax"
  hparams.optimizer_adam_epsilon = 1e-6
  hparams.optimizer_adam_beta1 = 0.85
  hparams.optimizer_adam_beta2 = 0.997
  hparams.add_hparam("large_kernel_size", 15)  # New ones are added like this.
  hparams.add_hparam("separability", -2)
  # A dilation scheme, one of _DILATION_SCHEMES.
  hparams.add_hparam("dilation_scheme", "1.1.1.1")
  # A kernel scheme, one of _KERNEL_SCHEMES; overrides large_kernel_size.
  hparams.add_hparam("kernel_scheme", "3.7.15.31")
  hparams.add_hparam("audio_compression", 8)
  # attention-related flags
  hparams.add_hparam("attention_type", "simple")
  hparams.add_hparam("num_heads", 8)
  hparams.add_hparam("attention_key_channels", 0)
  hparams.add_hparam("attention_value_channels", 0)
  hparams.add_hparam("sim_loss_mult", 0.0)  # Try 10.0 for experiments.
  hparams.add_hparam("attention_dropout", 0.2)
  hparams.shared_embedding_and_softmax_weights = True
  return hparams
def slicenet_params1_noam():
  """Version with Noam's decay scheme."""
  hparams = slicenet_params1()
  hparams.learning_rate_decay_scheme = "noam"
  hparams.learning_rate = 1.0
  hparams.learning_rate_warmup_steps = 4000
  hparams.initializer = "uniform_unit_scaling"
  hparams.optimizer_adam_epsilon = 1e-9
  hparams.optimizer_adam_beta1 = 0.9
  hparams.optimizer_adam_beta2 = 0.98
  return hparams
def slicenet_params1_tiny():
  """Version for fast local runs."""
  hparams = slicenet_params1()
  hparams.attention_type = "simple"
  hparams.separability = 0
  hparams.hidden_size = 128
  hparams.num_hidden_layers = 2
  hparams.batch_size = 512
  hparams.learning_rate_warmup_steps = 200
  return hparams
def slicenet_range1(ranged_hparams):
  """Small range of hyperparameters."""
  rhp = ranged_hparams
  rhp.set_float("clip_grad_norm", 1.0, 10.0, scale=rhp.LOG_SCALE)
  rhp.set_float("learning_rate", 0.02, 1.0, scale=rhp.LOG_SCALE)
  rhp.set_float("optimizer_adam_beta2", 0.995, 0.998)
  rhp.set_float("weight_decay", 1.0, 5.0)
def encode(self, s):
  """Converts a space-separated string of tokens to lists of ids.

  Also store temporary vocabulary IDs for source OOV tokens. OOVs are
  represented by their temporary OOV number. E.g., if the vocabulary size
  is 50k and the source has 3 OOVs, then these temporary OOV numbers will
  be 50000, 50001, 50002.

  Args:
    s: human-readable string to be converted.

  Returns:
    ids: list of integers
    ids_extend: list of integers including extended temporary vocab IDs for
      source OOVs.
    oovs: A dict storing source OOV words, used for the decoder to copy. The
      key is OOV word, and the value is the order they appear in the source,
      starting from 0.
    source_oov_id_to_token: a list of source OOV tokens, in the same order
      as they appear in the source.
  """
  sentence = s
  tokens = sentence.strip().split()
  ids = []
  ids_extend = []
  oovs = {}
  for t in tokens:
    if t in self._token_to_id:
      ids.append(self._token_to_id[t])
      ids_extend.append(self._token_to_id[t])
    else:
      next_oov_id = len(oovs)
      oov_num = oovs.get(t, next_oov_id)
      if oov_num == next_oov_id:
        oovs[t] = oov_num
      ids_extend.append(self.vocab_size + oov_num)
      ids.append(self._token_to_id[self._replace_oov])
  source_oov_id_to_token = [""] * len(oovs)
  for oov in oovs:
    source_oov_id_to_token[oovs[oov]] = oov
  if self._reverse:
    return ids[::-1], ids_extend[::-1], oovs, source_oov_id_to_token
  else:
    return ids, ids_extend, oovs, source_oov_id_to_token
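The temporary-OOV numbering can be illustrated with a toy vocabulary. This is a self-contained sketch of the same bookkeeping, using a hypothetical four-word vocab rather than the encoder class above.

vocab = {"the": 0, "cat": 1, "sat": 2, "<UNK>": 3}  # hypothetical toy vocab
vocab_size = len(vocab)
ids, ids_extend, oovs = [], [], {}
for tok in "the zorp sat on the mat".split():
  if tok in vocab:
    ids.append(vocab[tok])
    ids_extend.append(vocab[tok])
  else:
    oov_num = oovs.setdefault(tok, len(oovs))
    ids.append(vocab["<UNK>"])
    ids_extend.append(vocab_size + oov_num)
print(ids)         # [0, 3, 2, 3, 0, 3] -- OOVs collapse to <UNK>
print(ids_extend)  # [0, 4, 2, 5, 0, 6] -- OOVs get temporary ids
print(oovs)        # {'zorp': 0, 'on': 1, 'mat': 2}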
def encode_target(self, target, source_oovs):
  """Converts a space-separated string of tokens to lists of ids.

  Also store a version of extended vocabulary IDs. For target OOVs that are
  in the source, encode them using the temporary vocab IDs. For target OOVs
  not in the source, encode them as <UNK>.

  Args:
    target: target string
    source_oovs: source OOV words stored in dict, key is the word, value is
      the order in which they appear in the source starting from 0

  Returns:
    ids: list of integers
    ids_extend: list of integers including extended vocabulary IDs.
  """
  tokens = target.strip().split()
  ids = []
  ids_extend = []
  for t in tokens:
    if t in self._token_to_id:
      i = self._token_to_id[t]
      ids.append(i)
      ids_extend.append(i)
    else:
      ids.append(self._token_to_id[self._replace_oov])
      if t in source_oovs:
        vocab_idx = self.vocab_size + source_oovs[t]
        ids_extend.append(vocab_idx)
      else:
        ids_extend.append(self._token_to_id[self._replace_oov])
  if self._reverse:
    return ids[::-1], ids_extend[::-1]
  else:
    return ids, ids_extend
def decode_list_oov(self, ids, source_oov_id_to_token):
  """Decode ids back to tokens, considering OOVs' temporary IDs.

  Args:
    ids: vocab ids. Could possibly include source temporary OOV IDs starting
      from vocab_size.
    source_oov_id_to_token: a list of source OOV tokens, in the same order
      as they appear in the source.

  Returns:
    decoded tokens, possibly including source OOV tokens.
  """
  seq = reversed(ids) if self._reverse else ids
  tokens = []
  for cur_id in seq:
    if cur_id in self._id_to_token:
      tokens.append(self._id_to_token[cur_id])
    else:
      tokens.append(source_oov_id_to_token[cur_id - self.vocab_size])
  return tokens
def _smallest_size_at_least(height, width, smallest_side):
  """Computes new shape with the smallest side equal to `smallest_side`.

  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.

  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size
      of the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  height = tf.to_float(height)
  width = tf.to_float(width)
  smallest_side = tf.to_float(smallest_side)

  scale = tf.cond(
      tf.greater(height, width),
      lambda: smallest_side / width,
      lambda: smallest_side / height)
  new_height = tf.to_int32(height * scale)
  new_width = tf.to_int32(width * scale)
  return new_height, new_width
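The arithmetic is easy to check in plain Python; this is a sketch of the same computation without tensors.

def smallest_size_at_least(height, width, smallest_side):
  # Scale so that the smaller of the two sides lands on smallest_side.
  scale = smallest_side / (width if height > width else height)
  return int(height * scale), int(width * scale)

print(smallest_size_at_least(400, 600, 256))  # (256, 384)
print(smallest_size_at_least(600, 400, 256))  # (384, 256)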
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.

  Args:
    image: A 3-D image `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size
      of the smallest side after resize.

  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]
  new_height, new_width = _smallest_size_at_least(height, width,
                                                  smallest_side)
  image = tf.expand_dims(image, 0)
  resized_image = tf.image.resize_images(
      image, size=[new_height, new_width],
      method=tf.image.ResizeMethod.BICUBIC)
  resized_image = tf.squeeze(resized_image)
  resized_image.set_shape([None, None, 3])
  return resized_image
Distort the color of a Tensor image. Each color distortion is non-commutative and thus ordering of the color ops matters. Ideally we would randomly permute the ordering of the color ops. Rather than adding that level of complication, we select a distinct ordering of color ops for each preprocessing thread. Args: image: 3-D Tensor containing single image in [0, 1]. color_ordering: Python int, a type of distortion (valid values: 0-3). scope: Optional scope for name_scope. Returns: 3-D Tensor color-distorted image in range [0, 1] Raises: ValueError: if color_ordering not in [0, 3] def _distort_color(image, color_ordering=0, scope=None): """Distort the color of a Tensor image. Each color distortion is non-commutative and thus ordering of the color ops matters. Ideally we would randomly permute the ordering of the color ops. Rather than adding that level of complication, we select a distinct ordering of color ops for each preprocessing thread. Args: image: 3-D Tensor containing single image in [0, 1]. color_ordering: Python int, a type of distortion (valid values: 0-3). scope: Optional scope for name_scope. Returns: 3-D Tensor color-distorted image in range [0, 1] Raises: ValueError: if color_ordering not in [0, 3] """ with tf.name_scope(scope, "distort_color", [image]): if color_ordering == 0: image = tf.image.random_brightness(image, max_delta=32. / 255.) image = tf.image.random_saturation(image, lower=0.5, upper=1.5) image = tf.image.random_hue(image, max_delta=0.2) image = tf.image.random_contrast(image, lower=0.5, upper=1.5) elif color_ordering == 1: image = tf.image.random_saturation(image, lower=0.5, upper=1.5) image = tf.image.random_brightness(image, max_delta=32. / 255.) image = tf.image.random_contrast(image, lower=0.5, upper=1.5) image = tf.image.random_hue(image, max_delta=0.2) elif color_ordering == 2: image = tf.image.random_contrast(image, lower=0.5, upper=1.5) image = tf.image.random_hue(image, max_delta=0.2) image = tf.image.random_brightness(image, max_delta=32. / 255.) image = tf.image.random_saturation(image, lower=0.5, upper=1.5) elif color_ordering == 3: image = tf.image.random_hue(image, max_delta=0.2) image = tf.image.random_saturation(image, lower=0.5, upper=1.5) image = tf.image.random_contrast(image, lower=0.5, upper=1.5) image = tf.image.random_brightness(image, max_delta=32. / 255.) else: raise ValueError("color_ordering must be in [0, 3]") # The random_* ops do not necessarily clamp. return tf.clip_by_value(image, 0.0, 1.0)
Computes func(x, sel), with sel sampled from [0...num_cases-1]. Args: x: input Tensor. func: Python function to apply. num_cases: Python int32, number of cases to sample sel from. Returns: The result of func(x, sel), where func receives the value of the selector as a python integer, but sel is sampled dynamically. def _apply_with_random_selector(x, func, num_cases): """Computes func(x, sel), with sel sampled from [0...num_cases-1]. Args: x: input Tensor. func: Python function to apply. num_cases: Python int32, number of cases to sample sel from. Returns: The result of func(x, sel), where func receives the value of the selector as a python integer, but sel is sampled dynamically. """ sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32) # Pass the real x only to one of the func calls. return control_flow_ops.merge([ func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case) for case in range(num_cases) ])[0]
Subtracts the given means from each image channel. For example: means = [123.68, 116.779, 103.939] image = _mean_image_subtraction(image, means) Note that the rank of `image` must be known. Args: image: a tensor of size [height, width, C]. means: a C-vector of values to subtract from each channel. Returns: the centered image. Raises: ValueError: If the rank of `image` is unknown, if `image` has a rank other than three or if the number of channels in `image` doesn't match the number of values in `means`. def _mean_image_subtraction(image, means): """Subtracts the given means from each image channel. For example: means = [123.68, 116.779, 103.939] image = _mean_image_subtraction(image, means) Note that the rank of `image` must be known. Args: image: a tensor of size [height, width, C]. means: a C-vector of values to subtract from each channel. Returns: the centered image. Raises: ValueError: If the rank of `image` is unknown, if `image` has a rank other than three or if the number of channels in `image` doesn't match the number of values in `means`. """ if image.get_shape().ndims != 3: raise ValueError("Input must be of size [height, width, C>0]") num_channels = image.get_shape().as_list()[-1] if len(means) != num_channels: raise ValueError("len(means) must match the number of channels") channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image) for i in range(num_channels): channels[i] -= means[i] return tf.concat(axis=2, values=channels)
VQA v2 image preprocessing. def vqa_v2_preprocess_image( image, height, width, mode, resize_side=512, distort=True, image_model_fn="resnet_v1_152", ): """VQA v2 image preprocessing.""" image = tf.image.convert_image_dtype(image, dtype=tf.float32) assert resize_side > 0 if resize_side: image = _aspect_preserving_resize(image, resize_side) if mode == tf.estimator.ModeKeys.TRAIN: image = tf.random_crop(image, [height, width, 3]) else: # Central crop, assuming resize_height > height, resize_width > width. image = tf.image.resize_image_with_crop_or_pad(image, height, width) image = tf.clip_by_value(image, 0.0, 1.0) if mode == tf.estimator.ModeKeys.TRAIN and distort: image = _flip(image) num_distort_cases = 4 # pylint: disable=unnecessary-lambda image = _apply_with_random_selector( image, lambda x, ordering: _distort_color(x, ordering), num_cases=num_distort_cases) if image_model_fn.startswith("resnet_v1"): # resnet_v1 uses vgg preprocessing image = image * 255. image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) elif image_model_fn.startswith("resnet_v2"): # resnet v2 uses inception preprocessing image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return image
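A sketch of how vqa_v2_preprocess_image might sit in an input pipeline; encoded_jpeg and the 448x448 crop size are assumptions for illustration, not values fixed by this file.

import tensorflow as tf

encoded_jpeg = tf.placeholder(tf.string)  # hypothetical serialized image bytes
image = tf.image.decode_jpeg(encoded_jpeg, channels=3)
image = vqa_v2_preprocess_image(
    image, height=448, width=448,
    mode=tf.estimator.ModeKeys.TRAIN,
    resize_side=512,
    image_model_fn="resnet_v1_152")  # selects the VGG-style mean subtraction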
Prepare one shard of the model for the encoder. Args: inputs: a Tensor. target_space: a Tensor. hparams: run hyperparameters features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. Returns: encoder_input: a Tensor, bottom of encoder stack encoder_self_attention_bias: a bias tensor for use in encoder self-attention encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder attention def transformer_prepare_encoder(inputs, target_space, hparams, features=None): """Prepare one shard of the model for the encoder. Args: inputs: a Tensor. target_space: a Tensor. hparams: run hyperparameters features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. Returns: encoder_input: a Tensor, bottom of encoder stack encoder_self_attention_bias: a bias tensor for use in encoder self-attention encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder attention """ ishape_static = inputs.shape.as_list() encoder_input = inputs if features and "inputs_segmentation" in features: # Packed dataset. Keep the examples from seeing each other. inputs_segmentation = features["inputs_segmentation"] inputs_position = features["inputs_position"] targets_segmentation = features["targets_segmentation"] if (hasattr(hparams, "unidirectional_encoder") and hparams.unidirectional_encoder): tf.logging.info("Using unidirectional encoder") encoder_self_attention_bias = ( common_attention.attention_bias_lower_triangle( common_layers.shape_list(inputs)[1])) else: encoder_self_attention_bias = ( common_attention.attention_bias_same_segment( inputs_segmentation, inputs_segmentation)) encoder_decoder_attention_bias = ( common_attention.attention_bias_same_segment(targets_segmentation, inputs_segmentation)) else: encoder_padding = common_attention.embedding_to_padding(encoder_input) ignore_padding = common_attention.attention_bias_ignore_padding( encoder_padding) if (hasattr(hparams, "unidirectional_encoder") and hparams.unidirectional_encoder): tf.logging.info("Using unidirectional encoder") encoder_self_attention_bias = ( common_attention.attention_bias_lower_triangle( common_layers.shape_list(inputs)[1])) else: # Usual case - not a packed dataset. encoder_self_attention_bias = ignore_padding encoder_decoder_attention_bias = ignore_padding inputs_position = None if hparams.proximity_bias: encoder_self_attention_bias += common_attention.attention_bias_proximal( common_layers.shape_list(inputs)[1]) if target_space is not None and hparams.get("use_target_space_embedding", True): # Append target_space_id embedding to inputs. 
emb_target_space = common_layers.embedding( target_space, 32, ishape_static[-1], name="target_space_embedding", dtype=hparams.get("activation_dtype", "float32")) emb_target_space = tf.reshape(emb_target_space, [1, 1, -1]) encoder_input += emb_target_space if hparams.pos == "timing": if inputs_position is not None: encoder_input = common_attention.add_timing_signal_1d_given_position( encoder_input, inputs_position) else: encoder_input = common_attention.add_timing_signal_1d(encoder_input) elif hparams.pos == "emb": encoder_input = common_attention.add_positional_embedding( encoder_input, hparams.max_length, "inputs_positional_embedding", inputs_position) encoder_self_attention_bias = common_layers.cast_like( encoder_self_attention_bias, encoder_input) encoder_decoder_attention_bias = common_layers.cast_like( encoder_decoder_attention_bias, encoder_input) return (encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias)
A stack of transformer layers. Args: encoder_input: a Tensor encoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string nonpadding: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This must either be passed in, which we do for "packed" datasets, or inferred from encoder_self_attention_bias. The knowledge about padding is used for pad_remover (efficiency) and to mask out padding in convolutional layers. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. losses: optional list onto which to append extra training losses attn_bias_for_padding: Padded attention bias in case a unidirectional encoder is being used where future attention is masked. Returns: y: a Tensor def transformer_encoder(encoder_input, encoder_self_attention_bias, hparams, name="encoder", nonpadding=None, save_weights_to=None, make_image_summary=True, losses=None, attn_bias_for_padding=None): """A stack of transformer layers. Args: encoder_input: a Tensor encoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string nonpadding: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This must either be passed in, which we do for "packed" datasets, or inferred from encoder_self_attention_bias. The knowledge about padding is used for pad_remover (efficiency) and to mask out padding in convolutional layers. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. losses: optional list onto which to append extra training losses attn_bias_for_padding: Padded attention bias in case a unidirectional encoder is being used where future attention is masked.
Returns: y: a Tensor """ x = encoder_input attention_dropout_broadcast_dims = ( common_layers.comma_separated_string_to_integer_list( getattr(hparams, "attention_dropout_broadcast_dims", ""))) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS, value=hparams.num_encoder_layers or hparams.num_hidden_layers) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT, value=hparams.attention_dropout) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_ATTENTION_DENSE, value={ "use_bias": "false", "num_heads": hparams.num_heads, "hidden_size": hparams.hidden_size }) with tf.variable_scope(name): if nonpadding is not None: padding = 1.0 - nonpadding else: attention_bias = encoder_self_attention_bias if attn_bias_for_padding is not None: attention_bias = attn_bias_for_padding padding = common_attention.attention_bias_to_padding(attention_bias) nonpadding = 1.0 - padding pad_remover = None if hparams.use_pad_remover and not common_layers.is_xla_compiled(): pad_remover = expert_utils.PadRemover(padding) for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): if layer < hparams.get("num_area_layers", 0): max_area_width = hparams.get("max_area_width", 1) max_area_height = hparams.get("max_area_height", 1) memory_height = hparams.get("memory_height", 1) else: max_area_width = 1 max_area_height = 1 memory_height = 1 y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), None, encoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, max_relative_position=hparams.max_relative_position, heads_share_relative_embedding=( hparams.heads_share_relative_embedding), add_relative_to_values=hparams.add_relative_to_values, save_weights_to=save_weights_to, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, max_length=hparams.get("max_length"), vars_3d=hparams.get("attention_variables_3d"), activation_dtype=hparams.get("activation_dtype", "float32"), weight_dtype=hparams.get("weight_dtype", "float32"), hard_attention_k=hparams.get("hard_attention_k", 0), max_area_width=max_area_width, max_area_height=max_area_height, memory_height=memory_height, area_key_mode=hparams.get("area_key_mode", "none"), area_value_mode=hparams.get("area_value_mode", "none"), training=(hparams.get("mode", tf.estimator.ModeKeys.TRAIN) == tf.estimator.ModeKeys.TRAIN)) x = common_layers.layer_postprocess(x, y, hparams) with tf.variable_scope("ffn"): y = transformer_ffn_layer( common_layers.layer_preprocess(x, hparams), hparams, pad_remover, conv_padding="SAME", nonpadding_mask=nonpadding, losses=losses) x = common_layers.layer_postprocess(x, y, hparams) # if normalization is done in layer_preprocess, then it should also be done # on the output, since the output can grow very large, being the sum of # a whole stack of unnormalized layer outputs. mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_NORM, value={"hidden_size": hparams.hidden_size}) return common_layers.layer_preprocess(x, hparams)
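A sketch of how the two functions above compose into an encoder pass; inputs, target_space, and hparams are assumed to come from the surrounding model code.

(encoder_input,
 encoder_self_attention_bias,
 encoder_decoder_attention_bias) = transformer_prepare_encoder(
     inputs, target_space, hparams)
encoder_input = tf.nn.dropout(
    encoder_input, 1.0 - hparams.layer_prepostprocess_dropout)
encoder_output = transformer_encoder(
    encoder_input, encoder_self_attention_bias, hparams)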
Feed-forward layer in the transformer. Args: x: a Tensor of shape [batch_size, length, hparams.hidden_size] hparams: hyperparameters for model pad_remover: an expert_utils.PadRemover object tracking the padding positions. If provided, when using convolutional settings, the padding is removed before applying the convolution, and restored afterward. This can give a significant speedup. conv_padding: a string - either "LEFT" or "SAME". nonpadding_mask: an optional Tensor with shape [batch_size, length]. needed for convolutional layers with "SAME" padding. Contains 1.0 in positions corresponding to nonpadding. losses: optional list onto which to append extra training losses cache: dict, containing tensors which are the results of previous attentions, used for fast decoding. decode_loop_step: An integer, step number of the decoding loop. Only used for inference on TPU. readout_filter_size: if it's greater than 0, then it will be used instead of filter_size layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. Returns: a Tensor of shape [batch_size, length, hparams.hidden_size] Raises: ValueError: If losses arg is None, but layer generates extra losses. def transformer_ffn_layer(x, hparams, pad_remover=None, conv_padding="LEFT", nonpadding_mask=None, losses=None, cache=None, decode_loop_step=None, readout_filter_size=0, layer_collection=None): """Feed-forward layer in the transformer. Args: x: a Tensor of shape [batch_size, length, hparams.hidden_size] hparams: hyperparameters for model pad_remover: an expert_utils.PadRemover object tracking the padding positions. If provided, when using convolutional settings, the padding is removed before applying the convolution, and restored afterward. This can give a significant speedup. conv_padding: a string - either "LEFT" or "SAME". nonpadding_mask: an optional Tensor with shape [batch_size, length]. needed for convolutional layers with "SAME" padding. Contains 1.0 in positions corresponding to nonpadding. losses: optional list onto which to append extra training losses cache: dict, containing tensors which are the results of previous attentions, used for fast decoding. decode_loop_step: An integer, step number of the decoding loop. Only used for inference on TPU. readout_filter_size: if it's greater than 0, then it will be used instead of filter_size layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. Returns: a Tensor of shape [batch_size, length, hparams.hidden_size] Raises: ValueError: If losses arg is None, but layer generates extra losses. """ ffn_layer = hparams.ffn_layer relu_dropout_broadcast_dims = ( common_layers.comma_separated_string_to_integer_list( getattr(hparams, "relu_dropout_broadcast_dims", ""))) if ffn_layer == "conv_hidden_relu": # Backwards compatibility ffn_layer = "dense_relu_dense" if ffn_layer == "dense_relu_dense": # In simple convolution mode, use `pad_remover` to speed up processing. mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_FFN_FILTER_DENSE, value={ "filter_size": hparams.filter_size, "use_bias": "True", "activation": mlperf_log.RELU }) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_FFN_OUTPUT_DENSE, value={ "hidden_size": hparams.hidden_size, "use_bias": "True", }) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_RELU_DROPOUT, value=hparams.relu_dropout) if pad_remover: original_shape = common_layers.shape_list(x) # Collapse `x` across examples, and remove padding positions. 
x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0)) x = tf.expand_dims(pad_remover.remove(x), axis=0) conv_output = common_layers.dense_relu_dense( x, hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, dropout_broadcast_dims=relu_dropout_broadcast_dims, layer_collection=layer_collection) if pad_remover: # Restore `conv_output` to the original shape of `x`, including padding. conv_output = tf.reshape( pad_remover.restore(tf.squeeze(conv_output, axis=0)), original_shape) return conv_output elif ffn_layer == "conv_relu_conv": return common_layers.conv_relu_conv( x, readout_filter_size or hparams.filter_size, hparams.hidden_size, first_kernel_size=hparams.conv_first_kernel, second_kernel_size=1, padding=conv_padding, nonpadding_mask=nonpadding_mask, dropout=hparams.relu_dropout, cache=cache, decode_loop_step=decode_loop_step) elif ffn_layer == "parameter_attention": return common_attention.parameter_attention( x, hparams.parameter_attention_key_channels or hparams.hidden_size, hparams.parameter_attention_value_channels or hparams.hidden_size, hparams.hidden_size, readout_filter_size or hparams.filter_size, hparams.num_heads, hparams.attention_dropout) elif ffn_layer == "conv_hidden_relu_with_sepconv": return common_layers.conv_hidden_relu( x, readout_filter_size or hparams.filter_size, hparams.hidden_size, kernel_size=(3, 1), second_kernel_size=(31, 1), padding="LEFT", dropout=hparams.relu_dropout) elif ffn_layer == "sru": return common_layers.sru(x) elif ffn_layer == "local_moe_tpu": overhead = hparams.moe_overhead_eval if hparams.mode == tf.estimator.ModeKeys.TRAIN: overhead = hparams.moe_overhead_train ret, loss = expert_utils.local_moe_tpu( x, hparams.filter_size // 2, hparams.hidden_size, hparams.moe_num_experts, overhead=overhead, loss_coef=hparams.moe_loss_coef) elif ffn_layer == "local_moe": overhead = hparams.moe_overhead_eval if hparams.mode == tf.estimator.ModeKeys.TRAIN: overhead = hparams.moe_overhead_train ret, loss = expert_utils.local_moe( x, True, expert_utils.ffn_expert_fn(hparams.hidden_size, [hparams.filter_size], hparams.hidden_size), hparams.moe_num_experts, k=hparams.moe_k, hparams=hparams) losses.append(loss) return ret else: assert ffn_layer == "none" return x
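The branch taken above is selected purely by hparams.ffn_layer; a minimal sketch of invoking the default dense-ReLU-dense path, with x and hparams assumed from context.

hparams.ffn_layer = "dense_relu_dense"  # the default two-layer ReLU feed-forward
y = transformer_ffn_layer(
    common_layers.layer_preprocess(x, hparams), hparams)
x = common_layers.layer_postprocess(x, y, hparams)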
Transformer on languagemodel_lm1b32k_packed. 50M Params. def lmx_base(): """Transformer on languagemodel_lm1b32k_packed. 50M Params.""" hparams = transformer.transformer_tpu() # sharing is counterproductive when underparameterized hparams.shared_embedding_and_softmax_weights = False # we judge by log-ppl, so label smoothing hurts. hparams.label_smoothing = 0.0 # This makes the batch size on GPU the same as on TPU for a packed problem # with sequence length 256. # TODO(noam): fix the mess that is the data reading pipeline. hparams.max_length = 256 # larger batch since we only have a decoder hparams.batch_size = 4096 # save some memory so we can have a larger model hparams.activation_dtype = "bfloat16" return hparams
HParams for training languagemodel_lm1b32k_packed. 880M Params. def lmx_h3k_f12k(): """HParams for training languagemodel_lm1b32k_packed. 880M Params.""" hparams = lmx_base() hparams.hidden_size = 3072 hparams.filter_size = 12288 hparams.batch_size = 2048 hparams.weight_dtype = "bfloat16" return hparams
HParams for training languagemodel_lm1b32k_packed. 1470M Params. def lmx_h4k_f16k(): """HParams for training languagemodel_lm1b32k_packed. 1470M Params.""" hparams = lmx_base() hparams.hidden_size = 4096 hparams.filter_size = 16384 hparams.batch_size = 1024 hparams.weight_dtype = "bfloat16" return hparams
Language model using relative attention. def lmx_relative(): """Language model using relative attention.""" hparams = lmx_base() hparams.self_attention_type = "dot_product_relative_v2" hparams.activation_dtype = "float32" hparams.weight_dtype = "float32" return hparams
Transformer with mixture of experts. 890M Params. def lmx_moe_h1k_f4k_x32(): """Transformer with mixture of experts. 890M Params.""" hparams = lmx_h1k_f4k() hparams.ffn_layer = "local_moe_tpu" hparams.moe_num_experts = 32 hparams.weight_dtype = "bfloat16" hparams.batch_size = 8192 return hparams
Transformer with mixture of experts. 890M Params. def lmx_moe_h1k_f8k_x16(): """Transformer with mixture of experts. 890M Params.""" hparams = lmx_h1k_f4k() hparams.filter_size = 8192 hparams.ffn_layer = "local_moe_tpu" hparams.moe_num_experts = 16 hparams.weight_dtype = "bfloat16" hparams.batch_size = 8192 return hparams
HParams for training languagemodel_lm1b32k_packed. 880M Params. def lmx_h1k_f64k(): """HParams for training languagemodel_lm1b32k_packed. 880M Params.""" hparams = lmx_base() hparams.hidden_size = 1024 hparams.filter_size = 65536 hparams.batch_size = 2048 return hparams
Uncertainty reward based on logits. def compute_uncertainty_reward(logits, predictions): """Uncertainty reward based on logits.""" # TODO(rsepassi): Add support for L1/L2 loss models. Current code only # works for softmax models. vocab_size = logits.shape[-1] assert vocab_size > 1 log_probs = common_layers.log_prob_from_logits(logits) max_log_probs = common_layers.index_last_dim_with_indices(log_probs, predictions) # Threshold neg_log_prob = tf.nn.relu(-max_log_probs - 0.02) # Sum across all but the batch dimension reduce_dims = list(range(len(neg_log_prob.shape)))[1:] summed = tf.reduce_sum(neg_log_prob, axis=reduce_dims) return summed / 10
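A NumPy sketch of the same thresholded-confidence computation for a single softmax prediction; the probabilities are invented.

import numpy as np

probs = np.array([0.7, 0.2, 0.1])        # toy softmax output, vocab_size=3
prediction = 0                           # the predicted class
max_log_prob = np.log(probs[prediction])
# relu(-log p - 0.02): reward stays zero while the model is confident enough.
reward = max(-max_log_prob - 0.02, 0.0) / 10
print(reward)                            # ~0.034 for p=0.7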
Reset the batch of environments. Args: indices: The batch indices of the environments to reset; defaults to all. Returns: Batch tensor of the new observations. def _reset_non_empty(self, indices): """Reset the batch of environments. Args: indices: The batch indices of the environments to reset; defaults to all. Returns: Batch tensor of the new observations. """ reset_video_op = tf.cond( self._video_condition, lambda: tf.py_func(self._video_reset_writer, [], []), tf.no_op) with tf.control_dependencies([reset_video_op]): inc_op = tf.assign_add(self._episode_counter, 1) with tf.control_dependencies([self.history_buffer.reset(indices), inc_op]): initial_frame_dump_op = tf.cond( self._video_condition, lambda: tf.py_func(self._video_dump_frames, # pylint: disable=g-long-lambda [self.history_buffer.get_all_elements()], []), tf.no_op) observ_assign_op = self._observ.assign( self.history_buffer.get_all_elements()[:, -1, ...]) with tf.control_dependencies([observ_assign_op, initial_frame_dump_op]): reset_model_op = tf.assign(self._reset_model, tf.constant(1.0)) with tf.control_dependencies([reset_model_op]): return tf.gather(self._observ.read_value(), indices)
Set the random seed from flag everywhere. def set_random_seed(): """Set the random seed from flag everywhere.""" tf.set_random_seed(FLAGS.random_seed) random.seed(FLAGS.random_seed) np.random.seed(FLAGS.random_seed)
Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS. def generate_data_for_problem(problem): """Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS.""" training_gen, dev_gen, test_gen = _SUPPORTED_PROBLEM_GENERATORS[problem] num_train_shards = FLAGS.num_shards or 10 tf.logging.info("Generating training data for %s.", problem) train_output_files = generator_utils.train_data_filenames( problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir, num_train_shards) generator_utils.generate_files(training_gen(), train_output_files, FLAGS.max_cases) num_dev_shards = int(num_train_shards * 0.1) tf.logging.info("Generating development data for %s.", problem) dev_output_files = generator_utils.dev_data_filenames( problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir, num_dev_shards) generator_utils.generate_files(dev_gen(), dev_output_files) num_test_shards = int(num_train_shards * 0.1) test_output_files = [] test_gen_data = test_gen() if test_gen_data is not None: tf.logging.info("Generating test data for %s.", problem) test_output_files = generator_utils.test_data_filenames( problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir, num_test_shards) generator_utils.generate_files(test_gen_data, test_output_files) all_output_files = train_output_files + dev_output_files + test_output_files generator_utils.shuffle_dataset(all_output_files)
Generate data for `EnvProblem`s. def generate_data_for_env_problem(problem_name): """Generate data for `EnvProblem`s.""" assert FLAGS.env_problem_max_env_steps > 0, ("--env_problem_max_env_steps " "should be greater than zero") assert FLAGS.env_problem_batch_size > 0, ("--env_problem_batch_size should be" " greater than zero") problem = registry.env_problem(problem_name) task_id = None if FLAGS.task_id < 0 else FLAGS.task_id data_dir = os.path.expanduser(FLAGS.data_dir) tmp_dir = os.path.expanduser(FLAGS.tmp_dir) # TODO(msaffar): Handle large values for env_problem_batch_size where we # cannot create that many environments within the same process. problem.initialize(batch_size=FLAGS.env_problem_batch_size) env_problem_utils.play_env_problem_randomly( problem, num_steps=FLAGS.env_problem_max_env_steps) problem.generate_data(data_dir=data_dir, tmp_dir=tmp_dir, task_id=task_id)
Generate data for a registered problem. def generate_data_for_registered_problem(problem_name): """Generate data for a registered problem.""" tf.logging.info("Generating data for %s.", problem_name) if FLAGS.num_shards: raise ValueError("--num_shards should not be set for registered Problem.") problem = registry.problem(problem_name) task_id = None if FLAGS.task_id < 0 else FLAGS.task_id data_dir = os.path.expanduser(FLAGS.data_dir) tmp_dir = os.path.expanduser(FLAGS.tmp_dir) if task_id is None and problem.multiprocess_generate: if FLAGS.task_id_start != -1: assert FLAGS.task_id_end != -1 task_id_start = FLAGS.task_id_start task_id_end = FLAGS.task_id_end else: task_id_start = 0 task_id_end = problem.num_generate_tasks pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes) problem.prepare_to_generate(data_dir, tmp_dir) args = [(problem_name, data_dir, tmp_dir, task_id) for task_id in range(task_id_start, task_id_end)] pool.map(generate_data_in_process, args) else: problem.generate_data(data_dir, tmp_dir, task_id)
Traverses directory collecting input and target files. Args: directory: base path to extracted audio and transcripts. Returns: list of (media_base, media_filepath, label) tuples def _collect_data(directory): """Traverses directory collecting input and target files. Args: directory: base path to extracted audio and transcripts. Returns: list of (media_base, media_filepath, label) tuples """ data_files = [] transcripts = [ filename for filename in os.listdir(directory) if filename.endswith(".csv") ] for transcript in transcripts: transcript_path = os.path.join(directory, transcript) with open(transcript_path, "r") as transcript_file: transcript_reader = csv.reader(transcript_file) # skip header _ = next(transcript_reader) for transcript_line in transcript_reader: media_name, label = transcript_line[0:2] filename = os.path.join(directory, media_name) data_files.append((media_name, filename, label)) return data_files
Checks if the filename exists under the path. def _file_exists(path, filename): """Checks if the filename exists under the path.""" return os.path.isfile(os.path.join(path, filename))
Checks if the filename is relative, not absolute. def _is_relative(path, filename): """Checks if the filename is relative, not absolute.""" return os.path.abspath(os.path.join(path, filename)).startswith(path)
Define ppo step. def define_ppo_step(data_points, hparams, action_space, lr): """Define ppo step.""" observation, action, discounted_reward, norm_advantage, old_pdf = data_points obs_shape = common_layers.shape_list(observation) observation = tf.reshape( observation, [obs_shape[0] * obs_shape[1]] + obs_shape[2:] ) (logits, new_value) = get_policy(observation, hparams, action_space) logits = tf.reshape(logits, obs_shape[:2] + [action_space.n]) new_value = tf.reshape(new_value, obs_shape[:2]) new_policy_dist = tfp.distributions.Categorical(logits=logits) new_pdf = new_policy_dist.prob(action) ratio = new_pdf / old_pdf clipped_ratio = tf.clip_by_value(ratio, 1 - hparams.clipping_coef, 1 + hparams.clipping_coef) surrogate_objective = tf.minimum(clipped_ratio * norm_advantage, ratio * norm_advantage) policy_loss = -tf.reduce_mean(surrogate_objective) value_error = new_value - discounted_reward value_loss = hparams.value_loss_coef * tf.reduce_mean(value_error ** 2) entropy = new_policy_dist.entropy() entropy_loss = -hparams.entropy_loss_coef * tf.reduce_mean(entropy) losses = [policy_loss, value_loss, entropy_loss] loss = sum(losses) variables = tf.global_variables(hparams.policy_network + "/.*") train_op = optimize.optimize(loss, lr, hparams, variables=variables) with tf.control_dependencies([train_op]): return [tf.identity(x) for x in losses]
PPO epoch. def define_ppo_epoch(memory, hparams, action_space, batch_size): """PPO epoch.""" observation, reward, done, action, old_pdf, value = memory # This is to avoid propagating gradients through simulated environment. observation = tf.stop_gradient(observation) action = tf.stop_gradient(action) reward = tf.stop_gradient(reward) if hasattr(hparams, "rewards_preprocessing_fun"): reward = hparams.rewards_preprocessing_fun(reward) done = tf.stop_gradient(done) value = tf.stop_gradient(value) old_pdf = tf.stop_gradient(old_pdf) advantage = calculate_generalized_advantage_estimator( reward, value, done, hparams.gae_gamma, hparams.gae_lambda) discounted_reward = tf.stop_gradient(advantage + value[:-1]) advantage_mean, advantage_variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True) advantage_normalized = tf.stop_gradient( (advantage - advantage_mean)/(tf.sqrt(advantage_variance) + 1e-8)) add_lists_elementwise = lambda l1, l2: [x + y for x, y in zip(l1, l2)] number_of_batches = ((hparams.epoch_length-1) * hparams.optimization_epochs // hparams.optimization_batch_size) epoch_length = hparams.epoch_length if hparams.effective_num_agents is not None: number_of_batches *= batch_size number_of_batches //= hparams.effective_num_agents epoch_length //= hparams.effective_num_agents assert number_of_batches > 0, "Set the parameters so that number_of_batches > 0" lr = learning_rate.learning_rate_schedule(hparams) shuffled_indices = [tf.random.shuffle(tf.range(epoch_length - 1)) for _ in range(hparams.optimization_epochs)] shuffled_indices = tf.concat(shuffled_indices, axis=0) shuffled_indices = shuffled_indices[:number_of_batches * hparams.optimization_batch_size] indices_of_batches = tf.reshape(shuffled_indices, shape=(-1, hparams.optimization_batch_size)) input_tensors = [observation, action, discounted_reward, advantage_normalized, old_pdf] ppo_step_rets = tf.scan( lambda a, i: add_lists_elementwise( # pylint: disable=g-long-lambda a, define_ppo_step([tf.gather(t, indices_of_batches[i, :]) for t in input_tensors], hparams, action_space, lr )), tf.range(number_of_batches), [0., 0., 0.], parallel_iterations=1) ppo_summaries = [tf.reduce_mean(ret) / number_of_batches for ret in ppo_step_rets] ppo_summaries.append(lr) summaries_names = [ "policy_loss", "value_loss", "entropy_loss", "learning_rate" ] summaries = [tf.summary.scalar(summary_name, summary) for summary_name, summary in zip(summaries_names, ppo_summaries)] losses_summary = tf.summary.merge(summaries) for summary_name, summary in zip(summaries_names, ppo_summaries): losses_summary = tf.Print(losses_summary, [summary], summary_name + ": ") return losses_summary
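For concreteness, a worked check of the minibatch arithmetic above, with invented hyperparameter values.

# 200 steps per epoch, 3 optimization epochs, minibatches of 50 timesteps:
epoch_length, optimization_epochs, optimization_batch_size = 200, 3, 50
number_of_batches = ((epoch_length - 1) * optimization_epochs
                     // optimization_batch_size)
assert number_of_batches == 11  # (199 * 3) // 50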
Generalized advantage estimator. Returns: GAE estimator. It will be one element shorter than the input; this is because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N]. def calculate_generalized_advantage_estimator( reward, value, done, gae_gamma, gae_lambda): # pylint: disable=g-doc-args """Generalized advantage estimator. Returns: GAE estimator. It will be one element shorter than the input; this is because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N]. """ # pylint: enable=g-doc-args next_value = value[1:, :] next_not_done = 1 - tf.cast(done[1:, :], tf.float32) delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done - value[:-1, :]) return_ = tf.reverse(tf.scan( lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg, [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])], tf.zeros_like(delta[0, :]), parallel_iterations=1), [0]) return tf.check_numerics(return_, "return")
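A plain-NumPy reference for the same recurrence over a single environment (1-D time arrays), handy for sanity-checking the tf.scan above; the function name is mine.

import numpy as np

def gae_reference(reward, value, done, gamma, lam):
  """A_t = delta_t + gamma * lam * (1 - done_{t+1}) * A_{t+1}."""
  not_done = 1.0 - done[1:].astype(np.float32)
  delta = reward[:-1] + gamma * value[1:] * not_done - value[:-1]
  advantage = np.zeros_like(delta)
  running = 0.0
  for t in reversed(range(len(delta))):
    running = delta[t] + gamma * lam * not_done[t] * running
    advantage[t] = running
  return advantage  # one element shorter than the inputs, as documented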
Returns a reading spec of a gym space. NOTE: Only implemented currently for Box and Discrete. Args: gym_space: instance of gym.spaces whose spec we want. Returns: Reading spec for that space. Raises: NotImplementedError: For spaces whose reading spec we haven't implemented. def gym_space_spec(gym_space): """Returns a reading spec of a gym space. NOTE: Only implemented currently for Box and Discrete. Args: gym_space: instance of gym.spaces whose spec we want. Returns: Reading spec for that space. Raises: NotImplementedError: For spaces whose reading spec we haven't implemented. """ # First try to determine the type. try: tf_dtype = tf.as_dtype(gym_space.dtype) except TypeError as e: tf.logging.error("Cannot convert space's type [%s] to tf.dtype", gym_space.dtype) raise e # Now hand it over to the specialized functions. if isinstance(gym_space, Box): return box_space_spec(gym_space, tf_dtype) elif isinstance(gym_space, Discrete): return discrete_space_spec(gym_space, tf_dtype) else: raise NotImplementedError
Number of elements that can be represented by the space. Makes the most sense for Discrete or Box type with integral dtype, ex: number of actions in an action space. Args: gym_space: The gym space. Returns: np.int64 number of observations that can be represented by this space, or returns None when this doesn't make sense, i.e. float boxes etc. Raises: NotImplementedError when a space's cardinality makes sense but we haven't implemented it. def cardinality(gym_space): """Number of elements that can be represented by the space. Makes the most sense for Discrete or Box type with integral dtype, ex: number of actions in an action space. Args: gym_space: The gym space. Returns: np.int64 number of observations that can be represented by this space, or returns None when this doesn't make sense, i.e. float boxes etc. Raises: NotImplementedError when a space's cardinality makes sense but we haven't implemented it. """ if (gym_space.dtype == np.float32) or (gym_space.dtype == np.float64): tf.logging.error("Returning None for a float gym space's cardinality: %s", gym_space) return None if isinstance(gym_space, Discrete): return gym_space.n if isinstance(gym_space, Box): # Construct a box with all possible values in this box and take a product. return np.prod(gym_space.high - gym_space.low + 1) raise NotImplementedError
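Quick checks of the two supported cases, assuming gym is importable; the spaces are invented.

import numpy as np
from gym.spaces import Box, Discrete

assert cardinality(Discrete(6)) == 6
# Integral Box: 3 possible values per element, 4 elements -> 3**4 = 81.
box = Box(low=0, high=2, shape=(2, 2), dtype=np.int64)
assert cardinality(box) == 81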
RMSE but will argmax if last dim is not 1. def image_rmse(predictions, labels, weights_fn=common_layers.weights_all): """RMSE but will argmax if last dim is not 1.""" if common_layers.shape_list(predictions)[-1] == 1: predictions = tf.squeeze(predictions, axis=[-1]) else: predictions = tf.argmax(predictions, axis=-1) return padded_rmse(predictions, labels, weights_fn)
Computes mean(abs(preds-target)). def abs_error(predictions, labels, weights_fn=None): """Computes mean(abs(preds-target)).""" del weights_fn # Unused targets = tf.squeeze(labels, axis=[2, 3]) batch_abs_error = tf.abs(predictions - targets) den = tf.ones(tf.shape(batch_abs_error), dtype=tf.float32) return (batch_abs_error, den)
Explained variance, also known as R^2. def padded_variance_explained(predictions, labels, weights_fn=common_layers.weights_all): """Explained variance, also known as R^2.""" predictions, labels = common_layers.pad_with_zeros(predictions, labels) targets = labels weights = weights_fn(targets) y_bar = tf.reduce_mean(weights * targets) tot_ss = tf.reduce_sum(weights * tf.pow(targets - y_bar, 2)) res_ss = tf.reduce_sum(weights * tf.pow(targets - predictions, 2)) r2 = 1. - res_ss / tot_ss return r2, tf.reduce_sum(weights)
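The quantity computed is the usual R^2 = 1 - SS_res / SS_tot; a NumPy check with all-ones weights and invented data.

import numpy as np

targets = np.array([1.0, 2.0, 3.0, 4.0])
predictions = np.array([1.1, 1.9, 3.2, 3.8])
y_bar = targets.mean()
tot_ss = ((targets - y_bar) ** 2).sum()        # 5.0
res_ss = ((targets - predictions) ** 2).sum()  # 0.1
r2 = 1.0 - res_ss / tot_ss
print(r2)  # 0.98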
Percentage of times that top-k predictions matches labels on non-0s. def padded_accuracy_topk(predictions, labels, k, weights_fn=common_layers.weights_nonzero): """Percentage of times that top-k predictions matches labels on non-0s.""" with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]): padded_predictions, padded_labels = common_layers.pad_with_zeros( predictions, labels) weights = weights_fn(padded_labels) effective_k = tf.minimum(k, common_layers.shape_list(padded_predictions)[-1]) _, outputs = tf.nn.top_k(padded_predictions, k=effective_k) outputs = tf.to_int32(outputs) padded_labels = tf.to_int32(padded_labels) padded_labels = tf.expand_dims(padded_labels, axis=-1) padded_labels += tf.zeros_like(outputs) # Pad to same shape. same = tf.to_float(tf.equal(outputs, padded_labels)) same_topk = tf.reduce_sum(same, axis=-1) return same_topk, weights
Sequence accuracy for L1/L2 losses: round down the predictions to ints. def rounding_sequence_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Sequence accuracy for L1/L2 losses: round down the predictions to ints.""" outputs = tf.squeeze(tf.to_int32(predictions), axis=-1) weights = weights_fn(labels) labels = tf.to_int32(labels) not_correct = tf.to_float(tf.not_equal(outputs, labels)) * weights axis = list(range(1, len(outputs.get_shape()))) correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis)) return correct_seq, tf.constant(1.0)
Percentage of times that predictions matches labels everywhere (non-0). def padded_sequence_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Percentage of times that predictions matches labels everywhere (non-0).""" # If the last dimension is 1 then we're using L1/L2 loss. if common_layers.shape_list(predictions)[-1] == 1: return rounding_sequence_accuracy( predictions, labels, weights_fn=weights_fn) with tf.variable_scope( "padded_sequence_accuracy", values=[predictions, labels]): padded_predictions, padded_labels = common_layers.pad_with_zeros( predictions, labels) weights = weights_fn(padded_labels) # Flatten, keeping batch dim (and num_classes dim for predictions) # TPU argmax can only deal with a limited number of dimensions predictions_shape = common_layers.shape_list(padded_predictions) batch_size = predictions_shape[0] num_classes = predictions_shape[-1] flat_size = common_layers.list_product( common_layers.shape_list(padded_labels)[1:]) padded_predictions = tf.reshape( padded_predictions, [batch_size, common_layers.list_product(predictions_shape[1:-1]), num_classes]) padded_labels = tf.reshape(padded_labels, [batch_size, flat_size]) weights = tf.reshape(weights, [batch_size, flat_size]) outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1)) padded_labels = tf.to_int32(padded_labels) not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights axis = list(range(1, len(outputs.get_shape()))) correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis)) return correct_seq, tf.constant(1.0)
Average edit distance, ignoring padding 0s. The score returned is the edit distance divided by the total length of reference truth and the weight returned is the total length of the truth. Args: predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and type tf.float32 representing the logits, 0-padded. labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32 representing the labels of same length as logits and 0-padded. weights_fn: ignored. The weights returned are the total length of the ground truth labels, excluding 0-paddings. Returns: (edit distance / reference length, reference length) Raises: ValueError: if weights_fn is not common_layers.weights_nonzero. def sequence_edit_distance(predictions, labels, weights_fn=common_layers.weights_nonzero): """Average edit distance, ignoring padding 0s. The score returned is the edit distance divided by the total length of reference truth and the weight returned is the total length of the truth. Args: predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and type tf.float32 representing the logits, 0-padded. labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32 representing the labels of same length as logits and 0-padded. weights_fn: ignored. The weights returned are the total length of the ground truth labels, excluding 0-paddings. Returns: (edit distance / reference length, reference length) Raises: ValueError: if weights_fn is not common_layers.weights_nonzero. """ if weights_fn is not common_layers.weights_nonzero: raise ValueError("Only weights_nonzero can be used for this metric.") with tf.variable_scope("edit_distance", values=[predictions, labels]): # Transform logits into sequence classes by taking max at every step. predictions = tf.to_int32( tf.squeeze(tf.argmax(predictions, axis=-1), axis=(2, 3))) nonzero_idx = tf.where(tf.not_equal(predictions, 0)) sparse_outputs = tf.SparseTensor(nonzero_idx, tf.gather_nd(predictions, nonzero_idx), tf.shape(predictions, out_type=tf.int64)) labels = tf.squeeze(labels, axis=(2, 3)) nonzero_idx = tf.where(tf.not_equal(labels, 0)) label_sparse_outputs = tf.SparseTensor(nonzero_idx, tf.gather_nd(labels, nonzero_idx), tf.shape(labels, out_type=tf.int64)) distance = tf.reduce_sum( tf.edit_distance(sparse_outputs, label_sparse_outputs, normalize=False)) reference_length = tf.to_float(common_layers.shape_list(nonzero_idx)[0]) return distance / reference_length, reference_length
Average log-perplexity excluding padding 0s. No smoothing. def padded_neg_log_perplexity(predictions, labels, weights_fn=common_layers.weights_nonzero): """Average log-perplexity excluding padding 0s. No smoothing.""" num, den = common_layers.padded_cross_entropy( predictions, labels, 0.0, weights_fn=weights_fn, reduce_sum=False) return (-num, den)
Average log-perplexity with custom targets_mask. def padded_neg_log_perplexity_with_masking( predictions, labels, features, weights_fn=None): """Average log-perplexity with custom targets_mask.""" del weights_fn if "targets_mask" not in features: raise ValueError("padded_neg_log_perplexity_with_masking requires the targets_mask feature") # Features are 4 dimensional, so we need to reshape the targets_mask to match # the shape of the labels. A lot of models rely on these features being 4D, # so it's best to update the shape of the mask. extended_targets_mask_shape = common_layers.shape_list( features["targets_mask"]) extended_targets_mask_shape.extend([1, 1]) features["targets_mask"] = tf.reshape(features["targets_mask"], shape=extended_targets_mask_shape) mask_fn = lambda labels: features["targets_mask"] return padded_neg_log_perplexity(predictions, labels, mask_fn)
Average log-perplexity excluding padding 0s. No smoothing. def dmol_neg_log_perplexity(predictions, labels, weights_fn=None): """Average log-perplexity excluding padding 0s. No smoothing.""" del weights_fn # Unused num, den = common_layers.dml_loss( predictions, labels, reduce_sum=False) return (-num, den)
Rounding accuracy for L1/L2 losses: round down the predictions to ints. def rounding_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Rounding accuracy for L1/L2 losses: round down the predictions to ints.""" outputs = tf.squeeze(tf.to_int32(predictions)) labels = tf.squeeze(labels) weights = weights_fn(labels) labels = tf.to_int32(labels) return tf.to_float(tf.equal(outputs, labels)), weights
Percentage of times that predictions matches labels on non-0s. def padded_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Percentage of times that predictions matches labels on non-0s.""" # If the last dimension is 1 then we're using L1/L2 loss. if common_layers.shape_list(predictions)[-1] == 1: return rounding_accuracy(predictions, labels, weights_fn=weights_fn) with tf.variable_scope("padded_accuracy", values=[predictions, labels]): padded_predictions, padded_labels = common_layers.pad_with_zeros( predictions, labels) weights = weights_fn(padded_labels) outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1)) padded_labels = tf.to_int32(padded_labels) return tf.to_float(tf.equal(outputs, padded_labels)), weights
Used to evaluate the VQA accuracy. Let n be the number of times the prediction appears in the labels; the final score is min(n/k, 1). Refer to https://arxiv.org/pdf/1505.00468.pdf. Args: predictions: A tensor with shape [batch_size, 1, 1, 1, vocab_size]. labels: A tensor with shape [batch_size, length, 1, 1]. k: A tensor constant. weights_fn: weight function. Returns: scores: min(n/k, 1). weights: returns all ones. def multilabel_accuracy_matchk(predictions, labels, k, weights_fn=common_layers.weights_nonzero): """Used to evaluate the VQA accuracy. Let n be the number of times the prediction appears in the labels; the final score is min(n/k, 1). Refer to https://arxiv.org/pdf/1505.00468.pdf. Args: predictions: A tensor with shape [batch_size, 1, 1, 1, vocab_size]. labels: A tensor with shape [batch_size, length, 1, 1]. k: A tensor constant. weights_fn: weight function. Returns: scores: min(n/k, 1). weights: returns all ones. """ predictions = tf.to_int32(tf.argmax(predictions, axis=-1)) scores = tf.to_float(tf.equal(predictions, labels)) # labels equal to 0 do not count weights = weights_fn(labels) scores *= weights scores = tf.reduce_sum(scores, axis=[1, 2, 3]) scores = tf.minimum(scores / tf.to_float(k), 1) # every sample counts weights = tf.ones(tf.shape(scores), dtype=tf.float32) return scores, weights
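A NumPy sketch of the min(n/k, 1) scoring rule for a single example; the answer ids are invented (0-labels would additionally be masked out by weights_fn).

import numpy as np

predictions = np.array([7])     # predicted answer id, batch of 1
labels = np.array([[7, 7, 4]])  # k=3 reference answers for this example
n = (predictions[:, None] == labels).sum(axis=1)  # [2]
score = np.minimum(n / 3.0, 1.0)
print(score)                    # [0.6667]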
Precision of set predictions. Args: predictions : A Tensor of scores of shape [batch, nlabels]. labels: A Tensor of int32s giving true set elements, of shape [batch, seq_length]. weights_fn: A function to weight the elements. Returns: hits: A Tensor of shape [batch, nlabels]. weights: A Tensor of shape [batch, nlabels]. def set_precision(predictions, labels, weights_fn=common_layers.weights_nonzero): """Precision of set predictions. Args: predictions : A Tensor of scores of shape [batch, nlabels]. labels: A Tensor of int32s giving true set elements, of shape [batch, seq_length]. weights_fn: A function to weight the elements. Returns: hits: A Tensor of shape [batch, nlabels]. weights: A Tensor of shape [batch, nlabels]. """ with tf.variable_scope("set_precision", values=[predictions, labels]): labels = tf.squeeze(labels, [2, 3]) weights = weights_fn(labels) labels = tf.one_hot(labels, predictions.shape[-1]) labels = tf.reduce_max(labels, axis=1) labels = tf.cast(labels, tf.bool) return tf.to_float(tf.equal(labels, predictions)), weights
Reshapes predictions and passes it to tensorboard. Args: predictions : The predicted image (logits). targets : The ground truth. hparams: model hparams. Returns: summary_proto: containing the summary images. weights: A Tensor of zeros of the same shape as predictions. def image_summary(predictions, targets, hparams): """Reshapes predictions and passes it to tensorboard. Args: predictions : The predicted image (logits). targets : The ground truth. hparams: model hparams. Returns: summary_proto: containing the summary images. weights: A Tensor of zeros of the same shape as predictions. """ del hparams results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8) gold = tf.cast(targets, tf.uint8) summary1 = tf.summary.image("prediction", results, max_outputs=2) summary2 = tf.summary.image("data", gold, max_outputs=2) summary = tf.summary.merge([summary1, summary2]) return summary, tf.zeros_like(predictions)
Calculate softmax cross entropy given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross-entropy (scalar), weights def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None): """Calculate softmax cross entropy given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross-entropy (scalar), weights """ with tf.variable_scope("softmax_cross_entropy_one_hot", values=[logits, labels]): del weights_fn cross_entropy = tf.losses.softmax_cross_entropy( onehot_labels=labels, logits=logits) return cross_entropy, tf.constant(1.0)
Calculate accuracy for a set, given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: accuracy (scalar), weights def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None): """Calculate accuracy for a set, given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: accuracy (scalar), weights """ with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]): del weights_fn predictions = tf.nn.sigmoid(logits) labels = tf.argmax(labels, -1) predictions = tf.argmax(predictions, -1) _, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions) return accuracy, tf.constant(1.0)
Calculate recall for a set, given one-hot labels and logits. Predictions are converted to one-hot, as predictions[example][arg-max(example)] = 1 Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: recall (scalar), weights def sigmoid_recall_one_hot(logits, labels, weights_fn=None): """Calculate recall for a set, given one-hot labels and logits. Predictions are converted to one-hot, as predictions[example][arg-max(example)] = 1 Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: recall (scalar), weights """ with tf.variable_scope("sigmoid_recall_one_hot", values=[logits, labels]): del weights_fn num_classes = logits.shape[-1] predictions = tf.nn.sigmoid(logits) predictions = tf.argmax(predictions, -1) predictions = tf.one_hot(predictions, num_classes) _, recall = tf.metrics.recall(labels=labels, predictions=predictions) return recall, tf.constant(1.0)
Calculate sigmoid cross entropy for one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross_entropy (scalar), weights def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None): """Calculate sigmoid cross entropy for one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross_entropy (scalar), weights """ with tf.variable_scope("sigmoid_cross_entropy_one_hot", values=[logits, labels]): del weights_fn cross_entropy = tf.losses.sigmoid_cross_entropy( multi_class_labels=labels, logits=logits) return cross_entropy, tf.constant(1.0)
Calculate ROC AUC. Requires binary classes. Args: logits: Tensor of size [batch_size, 1, 1, num_classes] labels: Tensor of size [batch_size, 1, 1, num_classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: ROC AUC (scalar), weights def roc_auc(logits, labels, weights_fn=None): """Calculate ROC AUC. Requires binary classes. Args: logits: Tensor of size [batch_size, 1, 1, num_classes] labels: Tensor of size [batch_size, 1, 1, num_classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: ROC AUC (scalar), weights """ del weights_fn with tf.variable_scope("roc_auc", values=[logits, labels]): predictions = tf.argmax(logits, axis=-1) _, auc = tf.metrics.auc(labels, predictions, curve="ROC") return auc, tf.constant(1.0)
Creates the evaluation metrics for the model. Args: problems: List of Problem instances. model_hparams: a set of hparams. Returns: dict<metric name, metric function>. The metric functions have signature (Tensor predictions, features) -> (metric Tensor, update op), where features is a dict with keys {targets}. Raises: ValueError: if the metrics specified by a problem are not recognized (i.e. are not defined in the Metrics enum). def create_evaluation_metrics(problems, model_hparams): """Creates the evaluation metrics for the model. Args: problems: List of Problem instances. model_hparams: a set of hparams. Returns: dict<metric name, metric function>. The metric functions have signature (Tensor predictions, features) -> (metric Tensor, update op), where features is a dict with keys {targets}. Raises: ValueError: if the metrics specified by a problem are not recognized (i.e. are not defined in the Metrics enum). """ def reduce_dimensions(predictions, labels): """Reduce dimensions for high-dimensional predictions and labels.""" # We will treat first dimensions as batch. One example is video frames. if len(predictions.get_shape()) > 5: predictions_shape = common_layers.shape_list(predictions) predictions = tf.reshape( predictions, [predictions_shape[0], predictions_shape[1], -1, predictions_shape[-1]]) labels_shape = common_layers.shape_list(labels) labels = tf.reshape( labels, [labels_shape[0], labels_shape[1], -1]) return predictions, labels def make_problem_specific_metric_fn(metric_fn, weights_fn): """Create a metric fn.""" def problem_metric_fn(predictions, features, labels): """Metric fn.""" # Send along the entire features dict if the metric fn has the kwarg # "features". kwargs = {} args, _, keywords, _ = inspect.getargspec(metric_fn) if ("features" in args) or keywords: kwargs["features"] = features predictions, labels = reduce_dimensions(predictions, labels) scores, weights = metric_fn(predictions, labels, weights_fn=weights_fn, **kwargs) return tf.metrics.mean(scores, weights) return problem_metric_fn def make_image_wrapped_metric_fn(metric_fn): """Metric fn without tf.metrics.mean.""" def image_wrapped_metric_fn(predictions, features, labels, weights_fn=common_layers.weights_all): del weights_fn del features predictions, labels = reduce_dimensions(predictions, labels) return metric_fn(predictions, labels, model_hparams) return image_wrapped_metric_fn def weights_fn_for_mp(problem_task_id): return lambda x: common_layers.weights_multi_problem(x, problem_task_id) eval_metrics = {} for problem_instance in problems: problem_name = problem_instance.name if problem_instance.was_reversed: problem_name += "_rev" metrics = problem_instance.eval_metric_fns(model_hparams) if hasattr(model_hparams.problem, "task_list"): metrics = model_hparams.problem.eval_metric_fns(model_hparams) tm = problem_instance.get_hparams(model_hparams).modality["targets"] if not isinstance(tm, dict): tm = {"targets": tm} for target_name, modality in six.iteritems(tm): weights_fn = model_hparams.weights_fn.get( "targets", modalities.get_weights_fn(modality)) if hasattr(model_hparams.problem, "task_list"): ptid = problem_instance.task_id # pylint: disable=cell-var-from-loop weights_fn = weights_fn_for_mp(ptid) for metric, metric_fn in six.iteritems(metrics): overload_eval_metric_name = getattr( model_hparams, "overload_eval_metric_name", None) if len(problems) == 1 and overload_eval_metric_name: metric_name = "metrics-%s/%s/%s" % ( overload_eval_metric_name, target_name, metric) else: metric_name = "metrics-%s/%s/%s" %
(problem_name, target_name, metric) if metric == Metrics.IMAGE_SUMMARY: eval_metrics[metric_name] = make_image_wrapped_metric_fn(metric_fn) else: eval_metrics[metric_name] = make_problem_specific_metric_fn( metric_fn, weights_fn) return eval_metrics
See create_eager_metrics. def create_eager_metrics_for_problem(problem, model_hparams): """See create_eager_metrics.""" metric_fns = problem.eval_metric_fns(model_hparams) problem_hparams = problem.get_hparams(model_hparams) target_modality = problem_hparams.modality["targets"] weights_fn = model_hparams.weights_fn.get( "targets", modalities.get_weights_fn(target_modality)) return create_eager_metrics_internal(metric_fns, weights_fn=weights_fn)
Create metrics accumulators and averager for Eager mode. Args: metric_names: list<str> from Metrics enum weights_fn: function that takes labels and returns a weights mask. Defaults to weights of all 1, i.e. common_layers.weights_all. Use common_layers.weights_nonzero if labels have 0-padding. Returns: (accum_fn(predictions, targets) => None, result_fn() => dict<str metric_name, float avg_val>) def create_eager_metrics(metric_names, weights_fn=common_layers.weights_all): """Create metrics accumulators and averager for Eager mode. Args: metric_names: list<str> from Metrics enum weights_fn: function that takes labels and returns a weights mask. Defaults to weights of all 1, i.e. common_layers.weights_all. Use common_layers.weights_nonzero if labels have 0-padding. Returns: (accum_fn(predictions, targets) => None, result_fn() => dict<str metric_name, float avg_val>) """ metric_fns = dict( [(name, METRICS_FNS[name]) for name in metric_names]) return create_eager_metrics_internal(metric_fns, weights_fn)
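A sketch of the eager accumulation loop; "accuracy" assumes the corresponding Metrics enum name, and eval_batches stands in for a real iterable of (predictions, targets) tensors.

accum_fn, result_fn = create_eager_metrics(["accuracy"])
for predictions, targets in eval_batches:  # hypothetical eval data
  accum_fn(predictions, targets)
print(result_fn())  # e.g. {"accuracy": 0.91}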