INSTRUCTION (string, lengths 1 to 46.3k)
RESPONSE (string, lengths 75 to 80.2k)
Download and extract MSCOCO datasets to a directory unless they are already there.
def _get_mscoco(directory): """Download and extract MSCOCO datasets to directory unless it is there.""" for url in _MSCOCO_URLS: filename = os.path.basename(url) download_url = os.path.join(_MSCOCO_ROOT_URL, url) path = generator_utils.maybe_download(directory, filename, download_url) unzip_dir = os...
Image generator for MSCOCO captioning problem with token-wise captions. Args: data_dir: path to the data directory. tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many images and labels to generate. start_fro...
def mscoco_generator(data_dir, tmp_dir, training, how_many, start_from=0, eos_list=None, vocab_filename=None): """Image generator for MSCOCO captioning problem with token-wise captions. Arg...
Convert FLAGS to list of args suitable for passing on cmd line.
def flags_as_args(): """Convert FLAGS to list of args suitable for passing on cmd line.""" if hasattr(FLAGS, "flag_values_dict"): args_dict = FLAGS.flag_values_dict() else: args_dict = dict(FLAGS.__dict__["__flags"]) del args_dict["cloud_mlengine"] # Configured later del args_dict["t2t_usr_dir"] a...
Returns master_type for trainingInput.
def get_default_master_type(num_gpus=1): """Returns master_type for trainingInput.""" gpus_to_master_map = { 0: "standard", 1: "standard_p100", 4: "complex_model_m_p100", 8: "complex_model_l_gpu", } if num_gpus not in gpus_to_master_map: raise ValueError("Num gpus must be in %s" % ...
Construct jobSpec for ML Engine job.
def configure_job(): """Construct jobSpec for ML Engine job.""" # See documentation: # https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#traininginput training_input = { "pythonModule": "tensor2tensor.bin.t2t_trainer", "args": flags_as_args(), "region": text_encoder.native_to_...
Launch job on ML Engine.
def launch_job(job_spec): """Launch job on ML Engine.""" project_id = "projects/{}".format( text_encoder.native_to_unicode(default_project())) credentials = GoogleCredentials.get_application_default() cloudml = discovery.build("ml", "v1", credentials=credentials, cache_discover...
Tar and gzip src_dir and copy to GCS target_dir.
def _tar_and_copy(src_dir, target_dir): """Tar and gzip src_dir and copy to GCS target_dir.""" src_dir = src_dir.rstrip("/") target_dir = target_dir.rstrip("/") tmp_dir = tempfile.gettempdir().rstrip("/") src_base = os.path.basename(src_dir) shell_run( "tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar...
Tar Tensor2Tensor and cp to train_dir.
def tar_and_copy_t2t(train_dir): """Tar Tensor2Tensor and cp to train_dir.""" tf.logging.info("Tarring and pushing local Tensor2Tensor package.") output = text_encoder.native_to_unicode(shell_output( "pip show tensor2tensor")).split("\n") assert output[1].startswith("Version") assert output[7].startswi...
Package, tar, and copy usr_dir to GCS train_dir.
def tar_and_copy_usr_dir(usr_dir, train_dir): """Package, tar, and copy usr_dir to GCS train_dir.""" tf.logging.info("Tarring and pushing t2t_usr_dir.") usr_dir = os.path.abspath(os.path.expanduser(usr_dir)) # Copy usr dir to a temp location top_dir = os.path.join(tempfile.gettempdir(), "t2t_usr_container") ...
Validates flags are set to acceptable values for CloudML Engine runs.
def validate_flags(): """Validates flags are set to acceptable values for CloudML Engine runs.""" assert not job_dir() assert FLAGS.output_dir.startswith("gs://") assert FLAGS.data_dir.startswith("gs://") assert FLAGS.worker_replicas <= 1 assert FLAGS.ps_replicas <= 0 if FLAGS.hparams_range: assert FL...
Launch t2t_trainer on Cloud ML Engine.
def launch(): """Launch t2t_trainer on Cloud ML Engine.""" validate_flags() job_spec = configure_job() job_name = job_spec["jobId"] tf.logging.info("Launching job %s with ML Engine spec:\n%s", job_name, pprint.pformat(job_spec)) assert confirm() train_dir = FLAGS.output_dir t2t_tar = t...
Decorator for Layers, overriding add_weight for trainable initializers.
def add_weight(cls): """Decorator for Layers, overriding add_weight for trainable initializers.""" @functools.wraps(cls.add_weight) def _add_weight(self, name=None, shape=None, dtype=None, initializer=None, regularizer=None,...
Get the KL multiplier, either dynamically or schedule-based. If hparams.latent_loss_multiplier_dynamic is set to true, beta is adjusted to keep the KL divergence under hparams.latent_loss_multiplier_epsilon. To do so, beta is updated at each iteration by taking steps of size hparams.late...
def get_beta(self, kl_loss=0.0): """Get the KL multiplier, either dynamically or schedule based. if hparams.latent_loss_multiplier_dynamic is set to true, then beta is being adjusted to keep KL under hparams.latent_loss_multiplier_epsilon. In order to do so, the beta is being updated at each iteration ...
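A minimal sketch of the dynamic adjustment described above, assuming the step size is a scalar hparam; the names update_beta, alpha, and epsilon here are illustrative, not the truncated implementation's exact names:

import tensorflow as tf

def update_beta(beta, kl_loss, epsilon, alpha):
  """One dynamic-beta step: raise beta when KL exceeds epsilon, else lower it."""
  # +alpha if the KL term is above the target epsilon, -alpha otherwise.
  step = alpha * (2.0 * tf.cast(kl_loss > epsilon, tf.float32) - 1.0)
  # Keep the multiplier non-negative.
  return tf.maximum(beta + step, 0.0)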
Get KL loss for all the predicted Gaussians.
def get_kl_loss(self, means, log_vars, means_p=None, log_vars_p=None): """Get KL loss for all the predicted Gaussians.""" kl_loss = 0.0 if means_p is None: means_p = tf.unstack(tf.zeros_like(means)) if log_vars_p is None: log_vars_p = tf.unstack(tf.zeros_like(log_vars)) enumerated_inputs...
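For reference, the closed form of the KL divergence between two diagonal Gaussians given means and log-variances, which is presumably what the truncated loop accumulates for each predicted Gaussian (a sketch; the actual helper may reduce over different axes):

import tensorflow as tf

def diagonal_gaussian_kl(mean, log_var, mean_p, log_var_p):
  """KL(N(mean, exp(log_var)) || N(mean_p, exp(log_var_p))), summed over the last axis."""
  kl = 0.5 * (log_var_p - log_var
              + (tf.exp(log_var) + tf.square(mean - mean_p)) / tf.exp(log_var_p)
              - 1.0)
  return tf.reduce_sum(kl, axis=-1)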
Create the latent tower.
def construct_latent_tower(self, images, time_axis): """Create the latent tower.""" # No latent in the first phase first_phase = tf.less( self.get_iteration_num(), self.hparams.num_iterations_1st_stage) # use all frames by default but this allows more # predicted frames at inference time ...
Encode transformer inputs. Args: encoder_function: the encoder function inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which will be flattened along the two spatial dimensions. target_space: scalar, target space ID. hparams: hyperparameters for model. attention_weights...
def transformer_encode(encoder_function, inputs, target_space, hparams, attention_weights=None, features=None, losses=None, **kwargs): """Encode transformer inputs. Args: encoder_function: the encoder function inputs: Transformer inputs [batch_size, input_lengt...
Decode Transformer outputs from encoder representation. Args: decoder_function: the decoder function decoder_input: inputs to bottom of the model. [batch_size, decoder_length, hidden_dim] encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] encoder_decoder_attent...
def transformer_decode(decoder_function, decoder_input, encoder_output, encoder_decoder_attention_bias, decoder_self_attention_bias, hparams, attention_weights=None, ...
Create the initial cache for Transformer fast decoding.
def _init_transformer_cache(cache, hparams, batch_size, attention_init_length, encoder_output, encoder_decoder_attention_bias, scope_prefix): """Create the initial cache for Transformer fast decoding.""" key_channels = hparams.attention_key_channels or hparams...
Given encoder output and a symbols-to-logits function, does fast decoding on TPU. Implements both greedy and beam search decoding; beam search is used iff beam_size > 1, otherwise beam-search-related arguments are ignored. Args: encoder_output: A tensor, output from encoder. encoder_decoder_attention_bias...
def fast_decode_tpu(encoder_output, encoder_decoder_attention_bias, symbols_to_logits_fn, hparams, decode_length, vocab_size, init_cache_fn=_init_transformer_cache, beam_size=1, ...
Given encoder output and a symbols-to-logits function, does fast decoding. Implements both greedy and beam search decoding; beam search is used iff beam_size > 1, otherwise beam-search-related arguments are ignored. Args: encoder_output: Output from encoder. encoder_decoder_attention_bias: a bias tensor fo...
def fast_decode(encoder_output, encoder_decoder_attention_bias, symbols_to_logits_fn, hparams, decode_length, vocab_size, init_cache_fn=_init_transformer_cache, beam_size=1, top_beams=1, ...
Prepare one shard of the model for the decoder. Args: targets: a Tensor. hparams: run hyperparameters features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. Returns: decoder_input: a Tensor, bottom of decoder stack decoder_self_attenti...
def transformer_prepare_decoder(targets, hparams, features=None): """Prepare one shard of the model for the decoder. Args: targets: a Tensor. hparams: run hyperparameters features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. Returns: de...
A stack of transformer layers. Args: decoder_input: a Tensor encoder_output: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention (see common_attention.attenti...
def transformer_decoder(decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, cache=None, decode_loop_step=None, ...
Set of hyperparameters.
def transformer_base_v1(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.norm_type = "layer" hparams.hidden_size = 512 hparams.batch_size = 4096 hparams.max_length = 256 hparams.clip_grad_norm = 0. # i.e. no gradient clipping hparams.optimizer_adam_epsilon = 1e-9 hpar...
Set of hyperparameters.
def transformer_base_v2(): """Set of hyperparameters.""" hparams = transformer_base_v1() hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.layer_prepostprocess_dropout = 0.1 hparams.attention_dropout = 0.1 hparams.relu_dropout = 0.1 hparams.learning_rate_warmup_st...
Set of hyperparameters for lm1b packed following tpu params.
def transformer_base_vq_ada_32ex_packed(): """Set of hyperparameters for lm1b packed following tpu params.""" hparams = transformer_base_v2() expert_utils.update_hparams_for_vq_gating(hparams) hparams.moe_num_experts = 32 hparams.gating_type = "vq" # this gives us a batch size of 16 because each seq is len ...
Set of hyperparameters.
def transformer_base_vq1_16_nb1_packed_nda_b01_scales(): """Set of hyperparameters.""" hparams = transformer_base_vq_ada_32ex_packed() hparams.use_scales = int(True) hparams.moe_num_experts = 16 hparams.moe_k = 1 hparams.beta = 0.1 hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequen...
Set of hyperparameters.
def transformer_base_vq1_16_nb1_packed_dan_b01_scales():
  """Set of hyperparameters."""
  hparams = transformer_base_vq_ada_32ex_packed()
  hparams.use_scales = int(True)
  hparams.moe_num_experts = 16
  hparams.moe_k = 1
  hparams.beta = 0.1
  hparams.ema = False
  return hparams
Set of hyperparameters.
def transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog():
  """Set of hyperparameters."""
  hparams = transformer_base_vq1_16_nb1_packed_nda_b01_scales()
  hparams.batch_size = 2048
  hparams.max_length = 1024
  hparams.filter_size = 3072
  return hparams
Set of hyperparameters.
def transformer_ada_lmpackedbase_dialog():
  """Set of hyperparameters."""
  hparams = transformer_base_vq_ada_32ex_packed()
  hparams.max_length = 1024
  hparams.ffn_layer = "dense_relu_dense"
  hparams.batch_size = 4096
  return hparams
Base parameters for Transformer model.
def transformer_base_v3(): """Base parameters for Transformer model.""" # Update parameters here, then occasionally cut a versioned set, e.g. # transformer_base_v2. hparams = transformer_base_v2() hparams.optimizer_adam_beta2 = 0.997 # New way of specifying learning rate schedule. # Equivalent to previous...
HParams for transformer big model on WMT.
def transformer_big(): """HParams for transformer big model on WMT.""" hparams = transformer_base() hparams.hidden_size = 1024 hparams.filter_size = 4096 # Reduce batch size to 2048 from 4096 to be able to train the model on a GPU # with 12 GB memory. For example, NVIDIA TITAN V GPU. hparams.batch_size = ...
Hparams for transformer on LM for pretraining/finetuning/mixing.
def transformer_tall(): """Hparams for transformer on LM for pretraining/finetuning/mixing.""" hparams = transformer_base() hparams.batch_size = 2048 hparams.hidden_size = 768 hparams.filter_size = 3072 hparams.num_hidden_layers = 12 hparams.num_heads = 12 hparams.label_smoothing = 0.0 hparams.max_len...
Tied means fine-tune CNN/DM summarization as LM.
def transformer_tall_finetune_tied(): """Tied means fine-tune CNN/DM summarization as LM.""" hparams = transformer_tall() hparams.multiproblem_max_input_length = 750 hparams.multiproblem_max_target_length = 100 hparams.multiproblem_schedule_max_examples = 0 hparams.learning_rate_schedule = ("linear_warmup*c...
Fine-tune CNN/DM with a unidirectional encoder and decoder.
def transformer_tall_finetune_uniencdec(): """Fine-tune CNN/DM with a unidirectional encoder and decoder.""" hparams = transformer_tall() hparams.max_input_seq_length = 750 hparams.max_target_seq_length = 100 hparams.optimizer = "true_adam" hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay"...
Train CNN/DM with a unidirectional encoder and decoder.
def transformer_tall_train_uniencdec(): """Train CNN/DM with a unidirectional encoder and decoder.""" hparams = transformer_tall() hparams.max_input_seq_length = 750 hparams.max_target_seq_length = 100 hparams.optimizer = "true_adam" hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") hpa...
Hparams for transformer on LM for finetuning on text class problems.
def transformer_tall_finetune_textclass(): """Hparams for transformer on LM for finetuning on text class problems.""" hparams = transformer_tall() hparams.learning_rate_constant = 6.25e-5 hparams.learning_rate_schedule = ("linear_warmup*constant*linear_decay") hparams.multiproblem_schedule_max_examples = 0 ...
Hparams for transformer on LM pretraining (with 64k vocab).
def transformer_tall_pretrain_lm(): """Hparams for transformer on LM pretraining (with 64k vocab).""" hparams = transformer_tall() hparams.learning_rate_constant = 2e-4 hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") hparams.optimizer = "adam_w" hparams.optimizer_adam_beta1 = 0.9 hpar...
Hparams for transformer on LM pretraining (with 64k vocab) on TPU.
def transformer_tall_pretrain_lm_tpu_adafactor(): """Hparams for transformer on LM pretraining (with 64k vocab) on TPU.""" hparams = transformer_tall_pretrain_lm() update_hparams_for_tpu(hparams) hparams.max_length = 1024 # For multi-problem on TPU we need it in absolute examples. hparams.batch_size = 8 h...
Hparams for transformer on LM pretraining on TPU, large model.
def transformer_tall_pretrain_lm_tpu_adafactor_large(): """Hparams for transformer on LM pretraining on TPU, large model.""" hparams = transformer_tall_pretrain_lm_tpu_adafactor() hparams.hidden_size = 1024 hparams.num_heads = 16 hparams.filter_size = 32768 # max fitting in 16G memory is 49152, batch 2 hpa...
Hparams for transformer on LM pretraining on TPU with AdamW.
def transformer_tall_pretrain_lm_tpu(): """Hparams for transformer on LM pretraining on TPU with AdamW.""" hparams = transformer_tall_pretrain_lm_tpu_adafactor() # Optimizer gets reset in update_hparams_for_tpu so we set it again here. hparams.learning_rate_constant = 2e-4 hparams.learning_rate_schedule = ("l...
HParams for transformer base model for single GPU.
def transformer_base_single_gpu(): """HParams for transformer base model for single GPU.""" hparams = transformer_base() hparams.batch_size = 1024 hparams.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay" hparams.learning_rate_constant = 0.1 hparams.learning_rate_warmup_steps = 16000 return hp...
HParams for parsing on WSJ only.
def transformer_parsing_base(): """HParams for parsing on WSJ only.""" hparams = transformer_base() hparams.attention_dropout = 0.2 hparams.layer_prepostprocess_dropout = 0.2 hparams.max_length = 512 hparams.learning_rate_warmup_steps = 16000 hparams.hidden_size = 1024 hparams.learning_rate = 0.05 hpa...
HParams for parsing on WSJ semi-supervised.
def transformer_parsing_big(): """HParams for parsing on WSJ semi-supervised.""" hparams = transformer_big() hparams.max_length = 512 hparams.shared_source_target_embedding = False hparams.learning_rate_warmup_steps = 4000 hparams.layer_prepostprocess_dropout = 0.1 hparams.batch_size = 2048 hparams.lear...
Small range of hyperparameters.
def transformer_base_range(rhp): """Small range of hyperparameters.""" # After starting from base, set intervals for some parameters. rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) rhp.set_discrete("learning_rate_warmup_steps", [1000, 2000, 4000, 8000, 16000]) rhp.set_float("...
Use relative position embeddings instead of absolute position encodings.
def transformer_relative():
  """Use relative position embeddings instead of absolute position encodings."""
  hparams = transformer_base()
  hparams.pos = None
  hparams.self_attention_type = "dot_product_relative"
  hparams.max_relative_position = 20
  return hparams
HParams for Transformer model on TPU for MLPerf on TPU 2x2.
def transformer_mlperf_tpu(): """HParams for Transformer model on TPU for MLPerf on TPU 2x2.""" hparams = transformer_base_v3() hparams.mlperf_mode = True hparams.symbol_modality_num_shards = 1 hparams.max_length = 256 # ignored when using "_packed" problems hparams.batch_size = 2048 # per-chip batch size...
Change hparams to be compatible with TPU training.
def update_hparams_for_tpu(hparams): """Change hparams to be compatible with TPU training.""" # Adafactor uses less memory than Adam. # switch to Adafactor with its recommended learning rate scheme. hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_...
Small range of hyperparameters.
def transformer_tpu_range(rhp): """Small range of hyperparameters.""" # After starting from base, set intervals for some parameters. rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) rhp.set_discrete("learning_rate_warmup_steps", [1000, 2000, 4000, 8000, 16000]) rhp.set_float("i...
No dropout, no label smoothing, no max_length limit.
def transformer_clean():
  """No dropout, no label smoothing, no max_length limit."""
  hparams = transformer_base_v2()
  hparams.label_smoothing = 0.0
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.max_length = 0
  return hparams
HParams for training languagemodel_lm1b8k on tpu. 92M Params.
def transformer_lm_tpu_0(): """HParams for training languagemodel_lm1b8k on tpu. 92M Params.""" hparams = transformer_clean_big() update_hparams_for_tpu(hparams) hparams.num_heads = 4 # Heads are expensive on TPUs. hparams.batch_size = 4096 hparams.shared_embedding_and_softmax_weights = False hparams.la...
HParams for training ASR model on LibriSpeech V1.
def transformer_librispeech_v1(): """HParams for training ASR model on LibriSpeech V1.""" hparams = transformer_base() hparams.num_heads = 4 hparams.filter_size = 1024 hparams.hidden_size = 256 hparams.num_encoder_layers = 5 hparams.num_decoder_layers = 3 hparams.learning_rate = 0.15 hparams.batch_si...
HParams for training ASR model on Librispeech on TPU v1.
def transformer_librispeech_tpu_v1():
  """HParams for training ASR model on Librispeech on TPU v1."""
  hparams = transformer_librispeech_v1()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 16
  librispeech.set_librispeech_length_hparams(hparams)
  return hparams
HParams for training ASR model on LibriSpeech V2.
def transformer_librispeech_v2(): """HParams for training ASR model on LibriSpeech V2.""" hparams = transformer_base() hparams.max_length = 1240000 hparams.max_input_seq_length = 1550 hparams.max_target_seq_length = 350 hparams.batch_size = 16 hparams.num_decoder_layers = 4 hparams.num_encoder_layers =...
HParams for training ASR model on Librispeech on TPU v2.
def transformer_librispeech_tpu_v2():
  """HParams for training ASR model on Librispeech on TPU v2."""
  hparams = transformer_librispeech_v2()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 16
  librispeech.set_librispeech_length_hparams(hparams)
  return hparams
Hparams for machine translation with ~1.1B parameters.
def transformer_tpu_1b(): """Hparams for machine translation with ~1.1B parameters.""" hparams = transformer_tpu() hparams.hidden_size = 2048 hparams.filter_size = 8192 hparams.num_hidden_layers = 8 # smaller batch size to avoid OOM hparams.batch_size = 1024 hparams.activation_dtype = "bfloat16" hpara...
HParams for training languagemodel_wikitext103_l4k.
def transformer_wikitext103_l4k_v0(): """HParams for training languagemodel_wikitext103_l4k.""" hparams = transformer_big() # Adafactor uses less memory than Adam. # switch to Adafactor with its recommended learning rate scheme. hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay"...
HParams for training languagemodel_wikitext103_l4k with memory.
def transformer_wikitext103_l4k_memory_v0(): """HParams for training languagemodel_wikitext103_l4k with memory.""" hparams = transformer_wikitext103_l4k_v0() hparams.split_targets_chunk_length = 64 hparams.split_targets_max_chunks = 64 hparams.split_targets_strided_training = True hparams.add_hparam("memor...
HParams for training languagemodel_wikitext103_l16k with memory.
def transformer_wikitext103_l16k_memory_v0(): """HParams for training languagemodel_wikitext103_l16k with memory.""" hparams = transformer_wikitext103_l4k_memory_v0() hparams.max_length = 16384 hparams.split_targets_chunk_length = 64 hparams.split_targets_max_chunks = int( hparams.max_length / hparams....
HParams for training image_cifar10_plain_gen_flat_rev with memory.
def transformer_cifar10_memory_v0(): """HParams for training image_cifar10_plain_gen_flat_rev with memory.""" hparams = transformer_wikitext103_l4k_memory_v0() hparams.num_hidden_layers = 6 hparams.max_length = 32 * 32 * 3 hparams.split_targets_chunk_length = 64 * 3 hparams.split_targets_max_chunks = int(...
HParams for training image_imagenet64_gen_flat_rev with memory.
def transformer_imagenet64_memory_v0(): """HParams for training image_imagenet64_gen_flat_rev with memory.""" hparams = transformer_cifar10_memory_v0() hparams.max_length = 64 * 64 * 3 hparams.split_targets_chunk_length = 64 * 3 hparams.split_targets_max_chunks = int( hparams.max_length / hparams.split...
Reshape input from 4D to 3D if necessary.
def maybe_reshape_4d_to_3d(x):
  """Reshape input from 4D to 3D if necessary."""
  x_shape = common_layers.shape_list(x)
  is_4d = False
  if len(x_shape) == 4:
    x = tf.reshape(x, [x_shape[0], x_shape[1] * x_shape[2], x_shape[3]])
    is_4d = True
  return x, x_shape, is_4d
Local 2d self-attention layer.
def local_attention_2d(x, hparams, attention_type="local_attention_2d"): """Local 2d, self attention layer.""" # self-attention with tf.variable_scope("local_2d_self_att"): y = common_attention.multihead_attention_2d( x, None, hparams.attention_key_channels or hparams.hidden_size, ...
Local within-block self-attention.
def local_within_block_attention(x, self_attention_bias, hparams, attention_type="local_within_block_mask_right", q_padding="VALID", kv_padding="VALID"): ...
Local 1d self attention.
def local_attention_1d(x, hparams, attention_type="local_unmasked", q_padding="VALID", kv_padding="VALID"): """Local 1d self attention.""" # self-attention x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) with tf.variable_s...
Build the attention mask for dilated 1d attention.
def get_dilated_1d_attention_mask( num_heads, block_size, num_blocks, memory_size, gap_size, name="dilated_mask"): """Dilated attention with a masking strategy.""" mask = np.ones((num_heads, block_size, 2*block_size), np.bool) # now going over every row to do the right assignment of # memory blocks...
Dilated 1d self attention.
def dilated_attention_1d(x, hparams, attention_type="masked_dilated_1d", q_padding="VALID", kv_padding="VALID", gap_size=2): """Dilated 1d self attention.""" # self-attention x, x_shape, is...
Local and global 1d self attention.
def local_global_attention(x, self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT"): """Local and global 1d self attention.""" with tf.variable_scope("self_local_global_att"): [x_global, x_lo...
Full self-attention layer.
def full_self_attention(x, self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT"): """Full self-attention layer.""" x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) if self_attention_bias is not None: self...
Encoder-decoder attention (1d).
def encdec_attention_1d(x, encoder_output, encoder_decoder_attention_bias, hparams): """Encoder-decoder attention (1d).""" x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) encoder_output, _, _ = maybe_reshape_4d_to_3d(encoder_output) with tf.variable...
Multi-layer transformer decoder.
def transformer_decoder_layers(inputs, encoder_output, num_layers, hparams, self_attention_bias=None, encoder_decoder_attention_bias=None, ...
Multi-layer transformer encoder.
def transformer_encoder_layers(inputs, num_layers, hparams, attention_type=AttentionType.GLOBAL, self_attention_bias=None, q_padding="VALID", ...
Creates masked self attention bias. Args: x: A tensor of shape [batch, length, depth] Returns: self_attention_bias: A tensor of shape [length, length, 1]
def get_self_attention_bias(x): """Creates masked self attention bias. Args: x: A tensor of shape [batch, length, depth] Returns: self_attention_bias: A tensor of shape [length, length, 1] """ x_shape = common_layers.shape_list(x) self_attention_bias = common_attention.attention_bias_lower_triang...
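As an illustration of what the truncated common_attention helper presumably returns, a causal (lower-triangular) bias can be built directly like this; the shape convention here is an assumption, since tensor2tensor typically broadcasts extra batch/head dimensions:

import tensorflow as tf

def lower_triangle_bias(length):
  """Causal bias: 0 where position i may attend to j <= i, a large negative value elsewhere."""
  allowed = tf.linalg.band_part(tf.ones([length, length]), -1, 0)
  return (1.0 - allowed) * -1e9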
Feed-forward (ffn) layer for the transformer.
def ffn_layer(x, hparams, losses=None): """ffn layer transformer.""" with tf.variable_scope("ffn"): if hparams.ffn_layer == "none": return x if hparams.ffn_layer == "conv_hidden_relu": y = common_layers.dense_relu_dense( x, hparams.filter_size, hparams.hidden_size, ...
Postprocessing after decoding. Args: x: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements in x is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D da...
def postprocess_image(x, rows, cols, hparams): """Postprocessing after decoding. Args: x: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements in x is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols...
Prepare encoder for images.
def prepare_encoder(inputs, hparams, attention_type="local_1d"): """Prepare encoder for images.""" x = prepare_image(inputs, hparams, name="enc_channels") # Add position signals. x = add_pos_signals(x, hparams, "enc_pos") x_shape = common_layers.shape_list(x) if attention_type == "local_1d": x = tf.resh...
Prepare decoder for images.
def prepare_decoder(targets, hparams): """Prepare decoder for images.""" targets_shape = common_layers.shape_list(targets) channels = hparams.num_channels curr_infer_length = None # during training, images are [batch, IMG_LEN, IMG_LEN, 3]. # At inference, they are [batch, curr_infer_length, 1, 1] if hpar...
Creates output from decoder output and vars. Args: decoder_output: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number ...
def create_output(decoder_output, rows, cols, targets, hparams): """Creates output from decoder output and vars. Args: decoder_output: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements is batch * rows * cols * hparams.hidden_size. rows: Integer representing numb...
Get separate embedding for each of the channels.
def get_channel_embeddings(io_depth, targets, hidden_size, name="channel"): """Get separate embedding for each of the channels.""" targets_split = tf.split(targets, io_depth, axis=3) rgb_embedding_var = tf.get_variable("rgb_target_emb_%s" % name, [256 * io_depth, hidden_size]...
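A hedged sketch of the usual completion of this pattern: shift each channel's byte values into its own 256-row slice of the shared table, look them up, and concatenate along the depth axis (the truncated original may also rescale the embeddings):

import tensorflow as tf

def channel_embeddings_sketch(targets, io_depth, hidden_size):
  """Embed each of io_depth channels with its own 256-entry slice of one table."""
  table = tf.get_variable("rgb_target_emb_sketch", [256 * io_depth, hidden_size])
  channels = tf.split(targets, io_depth, axis=3)
  embedded = []
  for i, channel in enumerate(channels):
    # Rows [256*i, 256*(i+1)) of the table belong to channel i.
    ids = tf.squeeze(tf.to_int32(channel), axis=3) + 256 * i
    embedded.append(tf.gather(table, ids))
  return tf.concat(embedded, axis=-1)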
Step the batch of environments. The results of the step can be accessed from the variables defined below. Args: action: Tensor holding the batch of actions to apply. Returns: Operation.
def simulate(self, action): """Step the batch of environments. The results of the step can be accessed from the variables defined below. Args: action: Tensor holding the batch of actions to apply. Returns: Operation. """ with tf.name_scope("environment/simulate"): if action....
Reset the batch of environments. Args: indices: The batch indices of the environments to reset; defaults to all. Returns: Batch tensor of the new observations.
def _reset_non_empty(self, indices): """Reset the batch of environments. Args: indices: The batch indices of the environments to reset; defaults to all. Returns: Batch tensor of the new observations. """ observ = tf.py_func( self._batch_env.reset, [indices], self.observ_dtype, ...
Decide whether to include a revision. If the number of revisions is large, we exclude some revisions to avoid a quadratic blowup in runtime, since the article is likely also large. We make the ratio between consecutive included revision numbers approximately equal to "factor". Args: revision_num: an i...
def include_revision(revision_num, skip_factor=1.1): """Decide whether to include a revision. If the number of revisions is large, we exclude some revisions to avoid a quadratic blowup in runtime, since the article is likely also large. We make the ratio between consecutive included revision numbers approx...
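One way to get consecutive included revision numbers whose ratio is roughly skip_factor is to bucket revisions on a log scale and keep one per bucket; a self-contained sketch (the truncated original may handle edge cases differently):

import math

def include_revision_sketch(revision_num, skip_factor=1.1):
  """Keep revision_num iff log(revision_num) crosses a multiple of log(skip_factor)."""
  if skip_factor <= 1.0:
    return True  # no skipping
  return (int(math.log1p(revision_num) / math.log(skip_factor)) !=
          int(math.log1p(revision_num + 1) / math.log(skip_factor)))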
Read wikipedia pages from a history dump. Since some pages can be terabytes in size (with all the revisions), we limit page size to max_page_size bytes. Args: my_file: an open file object. max_page_size: an integer Yields: strings
def file_page_generator(my_file, max_page_size=2**28): """Read wikipedia pages from a history dump. Since some pages can be terabytes in size (with all the revisions), we limit page size to max_page_size bytes. Args: my_file: an open file object. max_page_size: an integer Yields: strings """ ...
Extract the title from a page. Args: page: a string Returns: a string
def get_title(page): """Extract the title from a page. Args: page: a string Returns: a string """ start_pos = page.find("<title>") end_pos = page.find("</title>") assert start_pos != -1 assert end_pos != -1 start_pos += len("<title>") return text_encoder.to_unicode_utf8(page[start_pos:end_p...
Extract the id from a page. Args: page: a string Returns: an integer
def get_id(page):
  """Extract the id from a page.

  Args:
    page: a string
  Returns:
    an integer
  """
  start_pos = page.find("<id>")
  end_pos = page.find("</id>")
  assert start_pos != -1
  assert end_pos != -1
  start_pos += len("<id>")
  return int(page[start_pos:end_pos])
Extract the revisions of a page. Args: page: a string Returns: a list of strings
def get_revisions(page): """Extract the revisions of a page. Args: page: a string Returns: a list of strings """ start_string = " <revision>\n" end_string = " </revision>\n" ret = [] current_pos = 0 while True: start_pos = page.find(start_string, current_pos) if start_pos == -1:...
Create a dictionary with title, id, and list of revisions. The dictionary contains: "title": a string "id": an integer "revisions": a list of strings Args: raw_page: a string Returns: a dictionary, or None in the case of an error.
def parse_page(raw_page): """Create a dictionary with title, id, and list of revisions. The dictionary contains: "title": a string "id": an integer "revisions": a list of strings Args: raw_page: a string Returns: a dictionary, or None in the case of an error. """ ret = {"title": get_title(r...
Copy a file to a directory if it is not already there. Returns the target filepath. Args: source_filepath: a string target_directory: a string Returns: a string
def maybe_copy_file_to_directory(source_filepath, target_directory): """Copy a file to a directory if it is not already there. Returns the target filepath. Args: source_filepath: a string target_directory: a string Returns: a string """ if not tf.gfile.Exists(target_directory): tf.logging...
Generate pages from a list of .7z encoded history dumps. Args: corpus_files: a list of strings tmp_dir: a string max_page_size_exp: an integer Yields: strings
def corpus_page_generator(corpus_files, tmp_dir, max_page_size_exp): """Generate pages from a list of .7z encoded history dumps. Args: corpus_files: a list of strings tmp_dir: a string max_page_size_exp: an integer Yields: strings """ for remote_filepath in corpus_files: filepath = mayb...
Extract the text from a revision. Args: revision: a string strip: a boolean Returns: a string
def get_text(revision, strip=True): """Extract the text from a revision. Args: revision: a string strip: a boolean Returns: a string """ # text start tag looks like "<text ..otherstuff>" start_pos = revision.find("<text") assert start_pos != -1 end_tag_pos = revision.find(">", start_pos) ...
Remove everything in curly braces. Curly braces may be nested, so we keep track of depth. Args: text: a string Returns: a string
def _remove_curly_braces(text): """Remove everything in curly braces. Curly braces may be nested, so we keep track of depth. Args: text: a string Returns: a string """ current_pos = 0 depth = 0 ret = "" for match in re.finditer("[{}]", text): if depth == 0: ret += text[current_pos:...
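The depth-tracking idea the docstring describes, reconstructed as a self-contained sketch (the truncated original may treat unbalanced braces differently):

import re

def remove_curly_braces_sketch(text):
  """Drop everything inside {...}, handling nesting with a depth counter."""
  current_pos, depth, ret = 0, 0, ""
  for match in re.finditer("[{}]", text):
    if depth == 0:
      # Outside any braces: keep the text up to this brace.
      ret += text[current_pos:match.start()]
    depth += 1 if match.group(0) == "{" else -1
    current_pos = match.end()
  if depth == 0:
    ret += text[current_pos:]
  return ret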
Remove double brackets, but leave the viewable text. Args: text: a string Returns: a string
def _remove_double_brackets(text): """Remove double brackets, but leave the viewable text. Args: text: a string Returns: a string """ def replacement_fn(s): if ":" in s: # this is probably a category or something like that. return "" # keep the part after the bar. bar_pos = s...
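A sketch of how the replacement can be wired up with re.sub, assuming [[target|visible]]-style links where the viewable text follows the last bar (the truncated original may use a different matching helper):

import re

def remove_double_brackets_sketch(text):
  """Replace [[target|visible]] links with their visible text; drop category-like links."""
  def replacement_fn(match):
    s = match.group(1)
    if ":" in s:
      # Probably a category, file, or interwiki link; drop it entirely.
      return ""
    # Keep the part after the last bar, or the whole string if there is none.
    return s[s.rfind("|") + 1:]
  return re.sub(r"\[\[(.*?)\]\]", replacement_fn, text)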
Remove lines that do not start with a letter or a quote. From inspecting the data, this seems to leave in most prose and remove most weird stuff. Args: text: a string Returns: a string
def _remove_boring_lines(text): """Remove lines that do not start with a letter or a quote. From inspecting the data, this seems to leave in most prose and remove most weird stuff. Args: text: a string Returns: a string """ lines = text.split("\n") filtered = [line for line in lines if re.matc...
Get or generate the vocabulary. Args: data_dir: a string tmp_dir: a string data_prefix: a string max_page_size_exp: an integer approx_vocab_size: an integer strip: a boolean Returns: a TextEncoder
def get_or_generate_vocabulary(data_dir, tmp_dir, data_prefix, max_page_size_exp, approx_vocab_size=32768, strip=True): """Get or generate the vocabulary. Args:...
Get encoder from vocab file. If vocab is not found in output dir, it will be copied there by copy_vocab_to_output_dir to clarify the vocab used to generate the data. Args: vocab_filepath: path to vocab, either local or cns Returns: A SubwordTextEncoder vocabulary object. None if the output_parallel_t...
def get_encoder_from_vocab(vocab_filepath): """Get encoder from vocab file. If vocab is not found in output dir, it will be copied there by copy_vocab_to_output_dir to clarify the vocab used to generate the data. Args: vocab_filepath: path to vocab, either local or cns Returns: A SubwordTextEncoder...
Filter out examples that exceed max_edit_ratio between source and target. Args: source_target_input: a list of [source, target] pairs max_equal_to_diff_ratio: cutoff for ratio of equal chars / diff chars between source and target Returns: source_target_output: filtered subset of [source, ...
def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0): """Filter out examples that exceed max_edit_ratio between source and target. Args: source_target_input: a list of [source, target] pairs max_equal_to_diff_ratio: cutoff for ratio of equal chars / diff chars between source a...
Artificially add spelling errors and infill markers. This function should be applied to the inputs of a correction model. The artificial errors are particularly useful to train a network to correct spelling when the training data does not contain many natural errors. Also replaces some substrings with an "...
def introduce_errors(s, corruption_rate=3e-3, infill_marker="|?|", max_infill_len=8): """Artificially add spelling errors and infill markers. This function should be applied to the inputs of a correction model. The artificial errors are particularly...
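A minimal sketch of the kind of corruption described: with small per-character probability, delete a character, insert a stray letter, or replace a short span with the infill marker. The real (truncated) function is more careful about what it corrupts:

import random

def introduce_errors_sketch(s, corruption_rate=3e-3, infill_marker="|?|",
                            max_infill_len=8):
  """Randomly corrupt a string: character-level typos plus occasional infill spans."""
  out, i = [], 0
  while i < len(s):
    r = random.random()
    if r < corruption_rate / 3:
      i += 1  # delete the current character
    elif r < 2 * corruption_rate / 3:
      out.append(random.choice("abcdefghijklmnopqrstuvwxyz"))  # insert a stray letter
      out.append(s[i])
      i += 1
    elif r < corruption_rate:
      out.append(infill_marker)  # replace a short span with the infill marker
      i += random.randint(1, max_infill_len)
    else:
      out.append(s[i])
      i += 1
  return "".join(out)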
Compute diffs between two sequences. This function is similar in functionality and spirit to difflib.SequenceMatcher.get_opcodes, but it seems to run faster. If a_start, a_end, b_start, b_end are specified, then we compute diffs of the segments a[a_start:a_end] and b[b_start:b_end]. Returned indices are re...
def fast_match_sequences(a, b, a_start=0, a_end=None, b_start=0, b_end=None, min_match_length=3, max_recursion_depth=128): """Compute diffs bet...
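Since the docstring positions this as a faster analogue of difflib.SequenceMatcher.get_opcodes, the standard-library counterpart below shows the kind of segment-level diff being computed (this is not the return format of fast_match_sequences itself):

import difflib

a = "the quick brown fox"
b = "the quick red fox"
for tag, a0, a1, b0, b1 in difflib.SequenceMatcher(None, a, b).get_opcodes():
  # tag is one of "equal", "replace", "delete", "insert".
  print(tag, repr(a[a0:a1]), "->", repr(b[b0:b1]))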
Load variables from checkpoint. New model variables have the following name format: new_model_scope/old_model_scope/xxx/xxx:0 To find the map of name to variable, we need to strip the new_model_scope and then match the old_model_scope and remove the suffix :0.
def begin(self): """Load variables from checkpoint. New model variables have the following name format: new_model_scope/old_model_scope/xxx/xxx:0 To find the map of name to variable, we need to strip the new_model_scope and then match the old_model_scope and remove the suffix :0. """ variable...
Creates a TimeStep with both rewards and actions as optional.
def create_time_step(cls, observation=None, done=False, raw_reward=None, processed_reward=None, action=None): """Creates a TimeStep with both rewards and actions as optional.""" return cls(observa...
Complete attention layer with preprocessing.
def attention(targets_shifted, inputs_encoded, norm_fn, hparams, bias=None): """Complete attention layer with preprocessing.""" separabilities = [hparams.separability, hparams.separability] if hparams.separability < 0: separabilities = [hparams.separability - 1, hparams.separability] targets_timed = common_...