Dataset columns (name : type : value-length range or class count):

repo              : stringlengths   7 – 55
path              : stringlengths   4 – 223
func_name         : stringlengths   1 – 134
original_string   : stringlengths   75 – 104k
language          : stringclasses   1 value
code              : stringlengths   75 – 104k
code_tokens       : listlengths     19 – 28.4k
docstring         : stringlengths   1 – 46.9k
docstring_tokens  : listlengths     1 – 1.97k
sha               : stringlengths   40 – 40
url               : stringlengths   87 – 315
partition         : stringclasses   1 value
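These columns match the CodeSearchNet-style function/docstring pair layout. A minimal sketch of loading and inspecting one row with the Hugging Face datasets library follows; the dataset name "code_x_glue_ct_code_to_text" is an assumption, since this dump does not state where the rows come from, and any dataset exposing the columns above would work the same way.

# Sketch: load a dataset with this column layout and inspect one row.
# The dataset name below is an assumption, not confirmed by this dump.
from datasets import load_dataset

ds = load_dataset("code_x_glue_ct_code_to_text", "python", split="train")

row = ds[0]
print(row["repo"], row["path"], row["func_name"])
print(len(row["code_tokens"]), "code tokens,",
      len(row["docstring_tokens"]), "docstring tokens")
print(row["docstring"])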
repo: tensorflow/tensor2tensor
path: tensor2tensor/utils/usr_dir.py
func_name: import_usr_dir
original_string:
def import_usr_dir(usr_dir):
  """Import module at usr_dir, if provided."""
  if not usr_dir:
    return
  if usr_dir == INTERNAL_USR_DIR_PACKAGE:
    # The package has been installed with pip under this name for Cloud ML
    # Engine so just import it.
    importlib.import_module(INTERNAL_USR_DIR_PACKAGE)
    return
  dir_path = os.path.abspath(os.path.expanduser(usr_dir).rstrip("/"))
  containing_dir, module_name = os.path.split(dir_path)
  tf.logging.info("Importing user module %s from path %s", module_name,
                  containing_dir)
  sys.path.insert(0, containing_dir)
  importlib.import_module(module_name)
  sys.path.pop(0)
language: python
code:
def import_usr_dir(usr_dir):
  """Import module at usr_dir, if provided."""
  if not usr_dir:
    return
  if usr_dir == INTERNAL_USR_DIR_PACKAGE:
    # The package has been installed with pip under this name for Cloud ML
    # Engine so just import it.
    importlib.import_module(INTERNAL_USR_DIR_PACKAGE)
    return
  dir_path = os.path.abspath(os.path.expanduser(usr_dir).rstrip("/"))
  containing_dir, module_name = os.path.split(dir_path)
  tf.logging.info("Importing user module %s from path %s", module_name,
                  containing_dir)
  sys.path.insert(0, containing_dir)
  importlib.import_module(module_name)
  sys.path.pop(0)
[ "def", "import_usr_dir", "(", "usr_dir", ")", ":", "if", "not", "usr_dir", ":", "return", "if", "usr_dir", "==", "INTERNAL_USR_DIR_PACKAGE", ":", "# The package has been installed with pip under this name for Cloud ML", "# Engine so just import it.", "importlib", ".", "import_module", "(", "INTERNAL_USR_DIR_PACKAGE", ")", "return", "dir_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "usr_dir", ")", ".", "rstrip", "(", "\"/\"", ")", ")", "containing_dir", ",", "module_name", "=", "os", ".", "path", ".", "split", "(", "dir_path", ")", "tf", ".", "logging", ".", "info", "(", "\"Importing user module %s from path %s\"", ",", "module_name", ",", "containing_dir", ")", "sys", ".", "path", ".", "insert", "(", "0", ",", "containing_dir", ")", "importlib", ".", "import_module", "(", "module_name", ")", "sys", ".", "path", ".", "pop", "(", "0", ")" ]
docstring: Import module at usr_dir, if provided.
[ "Import", "module", "at", "usr_dir", "if", "provided", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/usr_dir.py#L30-L46
partition: train
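import_usr_dir temporarily prepends the parent of usr_dir to sys.path, imports the directory as a package by name, then removes the path entry again. A hedged sketch of how such a user directory is typically laid out and imported; the paths and file names are illustrative assumptions, not taken from this row.

# Hypothetical layout for a user directory passed to import_usr_dir:
#
#   /home/me/my_t2t/        <- the usr_dir argument
#     __init__.py           <- imports submodules so registrations run
#     my_problem.py         <- e.g. registers a custom problem or hparams set
#
# import_usr_dir("/home/me/my_t2t") inserts "/home/me" at the front of
# sys.path, imports the package "my_t2t" (running its __init__.py), and then
# pops "/home/me" off sys.path again.
from tensor2tensor.utils import usr_dir

usr_dir.import_usr_dir("/home/me/my_t2t")  # path is an illustrative assumption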
repo: tensorflow/tensor2tensor
path: tensor2tensor/layers/common_hparams.py
func_name: basic_params1
def basic_params1(): """A set of basic hyperparameters.""" return hparam.HParams( # If the problem consists of variable-length sequences # (see problem.batch_size_means_tokens()), then this is the number # of tokens per batch per GPU or per TPU core. Otherwise, this is # the number of examples per GPU or per TPU core. batch_size=4096, batch_shuffle_size=512, # If True, then if the features are of variable length, the batch_size is # used as the actual batch size (and not tokens per batch). use_fixed_batch_size=False, num_hidden_layers=4, kernel_height=3, kernel_width=1, hidden_size=64, compress_steps=0, # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. dropout=0.2, clip_grad_norm=2.0, grad_noise_scale=0.0, summarize_grads=False, # Flag for whether mlperf mode is on mlperf_mode=False, # Whether to log the name and size of every variable summarize_vars=False, initializer="orthogonal", initializer_gain=1.5, label_smoothing=0.1, optimizer="adam", optimizer_adam_epsilon=1e-6, optimizer_adam_beta1=0.85, optimizer_adam_beta2=0.997, optimizer_momentum_momentum=0.9, optimizer_momentum_nesterov=False, optimizer_adafactor_beta1=0.0, optimizer_adafactor_beta2=0.999, optimizer_adafactor_factored=True, optimizer_adafactor_decay_type="pow", optimizer_adafactor_memory_exponent=0.8, optimizer_adafactor_clipping_threshold=1.0, optimizer_adafactor_multiply_by_parameter_scale=True, # Number of accumulating steps for multi step optimizers. optimizer_multistep_accumulate_steps=0, # Loss scaling used. # Generally only necessary with mixed precision training. # Mixed precision training only supports exponential scaling currently # To disable the scaler, see to 0/False mixed_precision_optimizer_loss_scaler="exponential", # Determines the initial loss scaling value for mixed precision mixed_precision_optimizer_init_loss_scale=2**15, # Whether to zero gradients that were not computed, so that the # appropriate slots are created. Useful for sharing checkpoints between # models with different sets of heads. optimizer_zero_grads=False, weight_decay=1e-6, weight_noise=0.0, # Defines the learning rate as a product of named functions. # Available functions are listed in learning_rate._LEARNING_RATE_FUNCTIONS # e.g. "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size" learning_rate_schedule="legacy", learning_rate_constant=1.0, # If learning_rate_schedule=="legacy", # then we specify decay scheme here. Warmup is always exponential, # except with "noam" learning rate decay scheme. # see optimize.legacy_learning_rate_schedule() # TODO(noam): migrate everyone away from this. learning_rate_decay_scheme="none", # decay_steps and decay_staircase for learning_rate_decay_scheme=="exp" learning_rate_decay_steps=5000, learning_rate_decay_staircase=False, learning_rate_minimum=None, learning_rate_decay_rate=1.0, learning_rate_warmup_steps=100, learning_rate_cosine_cycle_steps=250000, learning_rate=0.1, sampling_method="argmax", # "argmax" or "random" sampling_temp=1.0, # temperature for sampling sampling_keep_top_k=-1, # If >0, ignore all but the top k logits # expand the logits a piece at a time - saves memory. factored_logits=False, multiply_embedding_mode="sqrt_depth", # Parameters related to mixtures of experts. moe_hidden_sizes="2048", # hidden layer sizes (comma-separated) moe_num_experts=64, # number of experts per layer moe_k=2, # how many experts to use for each batch element moe_loss_coef=1e-2, # Sequences of operations to perform on layer input and layer output. 
# Used by common_layers.layer_preprocess, common_layers.layer_postprocess # Each character represents an operation: # none: no preprocessing # d: apply dropout # n: apply normalization (see norm_type and norm_epsilon) # a: add layer input (residual connection - only during postprocess) # The special string "none" is used instead of the empty string # to indicate no pre/postprocessing, since the empty string causes # trouble for hyperparameter tuning. # TODO(noam): The current settings ("", "dan") are the published version # of the transformer. ("n", "da") seems better for harder-to-learn # models, so it should probably be the default. layer_preprocess_sequence="none", layer_postprocess_sequence="dan", # dropout rate to use during layer_preprocess and layer_postprocess layer_prepostprocess_dropout=0.1, # broadcast dimensions for layer_prepostprocess_dropout # a comma-separated list of integers. # see common_layers.dropout_with_broadcast_dims() # Change this to "1" to save memory. layer_prepostprocess_dropout_broadcast_dims="", # dropout some symbols (set them to 0) before embedding. symbol_dropout=0.0, # What type of normalization to use norm_type="layer", # "batch", layer", "noam", "none". # epsilon parameter to normalization function norm_epsilon=1e-6, # pad vocabularies so that this value divides the vocabulary size. vocab_divisor=1, # During training, we drop sequences whose inputs and targets are shorter # than min_length min_length=0, # During training, we drop sequences whose inputs or targets are longer # than max_length. # If max_length==0, we use hparams.batch_size instead. max_length=0, # Pack examples on the fly. pack_dataset=False, # Use custom ops not included in standard tensorflow. use_custom_ops=True, # Split targets on the first axis into chunks of this length. split_targets_chunk_length=0, split_targets_max_chunks=100, split_targets_strided_training=False, # Maximum length in the smallest length bucket. Setting this # flag too high will result in wasteful padding of short # sequences. Due to some (hopefully) temporary hacks in the # data reading and batching code, setting this flag too low # results in a very long batch-shuffling queue. # TODO(noam): change this once the Datasets API changes. min_length_bucket=8, # This flag controls the number of length buckets in the data # reader. The buckets have maximum lengths from # min_bucket_length to (max_length or batch_size), increasing # (approximately) by factors of length_bucket_step. length_bucket_step=1.1, # If set to True, drop sequences longer than max_length during eval. # This affects the validity of the evaluation metrics. eval_drop_long_sequences=False, # If True, run the model autoregressively instead of teacher-forcing # during eval eval_run_autoregressive=False, # (For features with symbol modality) If True, share all of the # input embeddings, target embeddings, and softmax weights. shared_embedding_and_softmax_weights=False, # (For features with symbol modality) If True, share the input embeddings # and target embeddings. shared_embedding=False, # (For features with symbol modality) Number to shard embeddings by. symbol_modality_num_shards=1, # Feature transformations are optional dictionaries comprising key-value # pairs of a feature name (str) and its transformation (function). If not # specified, T2TModel applies a default transformation according to the # feature's modality. Bottom is applicable to all features; loss, top, and # weights_fn are only applicable to target features. 
# TODO(trandustin): `name` is an optional hparam for legacy reasons, # defining variable scope names. Remove this hparam in the future. bottom={}, loss={}, name={}, top={}, weights_fn={}, # The maximum length of "input" sequence. # Sequences longer than this value will be truncated. 0 or negative values # mean there is no maximum or truncation. # You can change this behavior by overriding preprocess_example() method # in your problem class. max_input_seq_length=0, # The maximum length of "target" sequence. # Sequences longer than this value will be truncated. 0 or negative values # mean there is no maximum or truncation. # You can change this behavior by overriding preprocess_example() method # in your problem class. max_target_seq_length=0, # if nonzero, we split the target sequences on example read. # This is for use with language modeling problems with fixed length # examples. e.g. The examples may be written with length 65536, but we # want to split each example into 64 examples of length 1024. split_to_length=0, # Video settings: how many frames to batch on input and targets. video_num_input_frames=1, video_num_target_frames=1, # This flag allows us to optionally treat a seq-to-seq problem # as a language model. Legal values are: # # "none" - Do not prepend the inputs to the targets. # "prepend_inputs_masked_attention" # replace "targets" in preprocessing with # tf.concat([inputs, [0], targets], axis=1) # i.e. we prepend the inputs to the targets with a single # padding token in between. Use masked self-attention on the # entire resulting sequence. During training, we compute losses on # the combined sequence. During eval, we compute the metrics # on only the targets portion. # "prepend_inputs_full_attention" # similar to the previous option except that each # position in the inputs portion can see the # entire inputs portion. This removes the challenge of # autoregressively predicting the inputs portion. prepend_mode="none", # Scheduled sampling is interesting for auto-regressive models. # It runs an additional step using the generated output as autoregressive # targets, which can improve the models inference results later. The # parameter scheduled_sampling_prob determines with what probability # will such additional step be run. It's turned off (0.0) by default. # This probability will exponentially warm up for the number of # steps determined by scheduled_sampling_warmup_steps. # The tensor used for the n-th pass will consist of outputs from # the (n-1)-th pass mixed with gold truth, with the proportion of gold # determined by scheduled_sampling_gold_mixin_prob. Control the number # of passes with scheduled_sampling_num_passes. scheduled_sampling_prob=0.0, scheduled_sampling_warmup_steps=50000, scheduled_sampling_gold_mixin_prob=0.5, # TODO(duckworthd): Uncomment when we can ascertain why adding an # extra field to HParam causes test failures. # scheduled_sampling_num_passes=1, # This setting controls whether to copy variables around in a daisy chain # (if true) or leave their placement to TensorFlow. It only affects multi # device training and mostly should be turned on for performance. One # exception are recurrent models: with dynamic loops it must be off. daisy_chain_variables=True, # If True in PREDICT mode, then last-position-only optimizations are not # used. force_full_predict=False, # Set this for pure model parallelism. There is only one data shard. no_data_parallelism=False, # dtype used for activations. 
- "float32" or "bfloat16" # activation_dtype="bfloat16" currently only works on TPU. # It lowers activation-memory usage # and does not appear to affect quality. # You can train on TPU with activation_dtype="bfloat16" and evaluate # on CPU/GPU with activation_dtype="float32" activation_dtype="float32", # dtype used for parameters: "float32" or "bfloat16" # bfloat16 currently only works with optimizer="adafactor". # The savings in memory allow for training larger models. # Weights are encoded as (w*128)^8, using pseudostochastic # roundoff. Initial experiments show that model quality is similar # to baseline for about 3M training steps, but worse thereafter. weight_dtype="float32", # Directory containing a checkpoint for a pretrained model. This will only # be used if a new run is being started. Parameters not found in the # pretrained model will be randomly initialized. Superfluous parameters in # the pretrained model will be ignored. pretrained_model_dir="", # Threshold used for two cases: the primary task probability for the # constant mixing schedule, and the exponential schedule limit for when # mixing should stop (eg: 0.5 means stop at 50-50 mixing, 0.8 means stop # at 20-80 mixing for the primary-others mixing case.) multiproblem_schedule_threshold=0.5, # For more than 2 tasks, we may want to specify per-task thresholds here. # In that case, this needs to be a string with as many floating point # numbers as the number of tasks in the multi-problem. These numbers # are later normalized to add up to 1 and taken as probabilities for # each task. This enforces a constant mixing schedule and if this is # empty then the threshold from above is used for the first task and # the other tasks get the remaining probability split uniformly. multiproblem_per_task_threshold="", # The number of examples at which the proportion of the mixed in datasets # is multiproblem_schedule_threshold multiproblem_schedule_max_examples=1e7, # When training multiproblems, we can mix the data according to different # schedules. Example: a constant schedule mixing 20-80 between the primary # and other tasks. # A list of supported schedules can be found in # `data_generators.multi_problem.py`. multiproblem_mixing_schedule="constant", # A boolean that decides whether input sequence losses and target label # losses in classification problems should be reweighted. multiproblem_reweight_label_loss=False, # How much weight the targets in classification problems receive. Inputs # receive 1 minus this weight. multiproblem_label_weight=0.5, # Hyperparameters for relative attention. # The maximum relative positional distance to learn an embedding for. max_relative_position=0, # If heads share the same relative embedding. heads_share_relative_embedding=False, # If relative embedding terms are added to values too. add_relative_to_values=False, # If enable the host_call which is executed every training step. # There could be a performance drop if host_call function is slow and # cannot keep up with the TPU-side computation. tpu_enable_host_call=False, # Pad batch dim of inputs to nearest multiple of batch multiple. pad_batch=False, # When true, do not evaluate on the language model data when running the # multiproblem since it can take a while. If False, set eval_steps to # something large like 6000 or 10000. multiproblem_target_eval_only=False, # Max out the vocab size to a power of 2 for efficiency and to reserve # extra space in the vocabulary for new task ids and label classes. 
multiproblem_vocab_size=-1, # When using multiproblem with generation tasks, need to truncate the # inputs and targets manually before concatenating them. multiproblem_max_input_length=-1, multiproblem_max_target_length=-1, # If positive, makes training targets fixed-length in MultiProblem. multiproblem_fixed_train_length=-1, # Load weights from a second model. For instance, when using # pre-trained weights, you might want to initialize the encoder # and decoder by loading different models. warm_start_from_second="", # Area attention hyper parameters area_value_mode="none", area_key_mode="none", # Using area attention for the number of layers from the bottom num_area_layers=0, max_area_width=1, max_area_height=1, memory_height=1 )
language: python
def basic_params1(): """A set of basic hyperparameters.""" return hparam.HParams( # If the problem consists of variable-length sequences # (see problem.batch_size_means_tokens()), then this is the number # of tokens per batch per GPU or per TPU core. Otherwise, this is # the number of examples per GPU or per TPU core. batch_size=4096, batch_shuffle_size=512, # If True, then if the features are of variable length, the batch_size is # used as the actual batch size (and not tokens per batch). use_fixed_batch_size=False, num_hidden_layers=4, kernel_height=3, kernel_width=1, hidden_size=64, compress_steps=0, # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. dropout=0.2, clip_grad_norm=2.0, grad_noise_scale=0.0, summarize_grads=False, # Flag for whether mlperf mode is on mlperf_mode=False, # Whether to log the name and size of every variable summarize_vars=False, initializer="orthogonal", initializer_gain=1.5, label_smoothing=0.1, optimizer="adam", optimizer_adam_epsilon=1e-6, optimizer_adam_beta1=0.85, optimizer_adam_beta2=0.997, optimizer_momentum_momentum=0.9, optimizer_momentum_nesterov=False, optimizer_adafactor_beta1=0.0, optimizer_adafactor_beta2=0.999, optimizer_adafactor_factored=True, optimizer_adafactor_decay_type="pow", optimizer_adafactor_memory_exponent=0.8, optimizer_adafactor_clipping_threshold=1.0, optimizer_adafactor_multiply_by_parameter_scale=True, # Number of accumulating steps for multi step optimizers. optimizer_multistep_accumulate_steps=0, # Loss scaling used. # Generally only necessary with mixed precision training. # Mixed precision training only supports exponential scaling currently # To disable the scaler, see to 0/False mixed_precision_optimizer_loss_scaler="exponential", # Determines the initial loss scaling value for mixed precision mixed_precision_optimizer_init_loss_scale=2**15, # Whether to zero gradients that were not computed, so that the # appropriate slots are created. Useful for sharing checkpoints between # models with different sets of heads. optimizer_zero_grads=False, weight_decay=1e-6, weight_noise=0.0, # Defines the learning rate as a product of named functions. # Available functions are listed in learning_rate._LEARNING_RATE_FUNCTIONS # e.g. "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size" learning_rate_schedule="legacy", learning_rate_constant=1.0, # If learning_rate_schedule=="legacy", # then we specify decay scheme here. Warmup is always exponential, # except with "noam" learning rate decay scheme. # see optimize.legacy_learning_rate_schedule() # TODO(noam): migrate everyone away from this. learning_rate_decay_scheme="none", # decay_steps and decay_staircase for learning_rate_decay_scheme=="exp" learning_rate_decay_steps=5000, learning_rate_decay_staircase=False, learning_rate_minimum=None, learning_rate_decay_rate=1.0, learning_rate_warmup_steps=100, learning_rate_cosine_cycle_steps=250000, learning_rate=0.1, sampling_method="argmax", # "argmax" or "random" sampling_temp=1.0, # temperature for sampling sampling_keep_top_k=-1, # If >0, ignore all but the top k logits # expand the logits a piece at a time - saves memory. factored_logits=False, multiply_embedding_mode="sqrt_depth", # Parameters related to mixtures of experts. moe_hidden_sizes="2048", # hidden layer sizes (comma-separated) moe_num_experts=64, # number of experts per layer moe_k=2, # how many experts to use for each batch element moe_loss_coef=1e-2, # Sequences of operations to perform on layer input and layer output. 
# Used by common_layers.layer_preprocess, common_layers.layer_postprocess # Each character represents an operation: # none: no preprocessing # d: apply dropout # n: apply normalization (see norm_type and norm_epsilon) # a: add layer input (residual connection - only during postprocess) # The special string "none" is used instead of the empty string # to indicate no pre/postprocessing, since the empty string causes # trouble for hyperparameter tuning. # TODO(noam): The current settings ("", "dan") are the published version # of the transformer. ("n", "da") seems better for harder-to-learn # models, so it should probably be the default. layer_preprocess_sequence="none", layer_postprocess_sequence="dan", # dropout rate to use during layer_preprocess and layer_postprocess layer_prepostprocess_dropout=0.1, # broadcast dimensions for layer_prepostprocess_dropout # a comma-separated list of integers. # see common_layers.dropout_with_broadcast_dims() # Change this to "1" to save memory. layer_prepostprocess_dropout_broadcast_dims="", # dropout some symbols (set them to 0) before embedding. symbol_dropout=0.0, # What type of normalization to use norm_type="layer", # "batch", layer", "noam", "none". # epsilon parameter to normalization function norm_epsilon=1e-6, # pad vocabularies so that this value divides the vocabulary size. vocab_divisor=1, # During training, we drop sequences whose inputs and targets are shorter # than min_length min_length=0, # During training, we drop sequences whose inputs or targets are longer # than max_length. # If max_length==0, we use hparams.batch_size instead. max_length=0, # Pack examples on the fly. pack_dataset=False, # Use custom ops not included in standard tensorflow. use_custom_ops=True, # Split targets on the first axis into chunks of this length. split_targets_chunk_length=0, split_targets_max_chunks=100, split_targets_strided_training=False, # Maximum length in the smallest length bucket. Setting this # flag too high will result in wasteful padding of short # sequences. Due to some (hopefully) temporary hacks in the # data reading and batching code, setting this flag too low # results in a very long batch-shuffling queue. # TODO(noam): change this once the Datasets API changes. min_length_bucket=8, # This flag controls the number of length buckets in the data # reader. The buckets have maximum lengths from # min_bucket_length to (max_length or batch_size), increasing # (approximately) by factors of length_bucket_step. length_bucket_step=1.1, # If set to True, drop sequences longer than max_length during eval. # This affects the validity of the evaluation metrics. eval_drop_long_sequences=False, # If True, run the model autoregressively instead of teacher-forcing # during eval eval_run_autoregressive=False, # (For features with symbol modality) If True, share all of the # input embeddings, target embeddings, and softmax weights. shared_embedding_and_softmax_weights=False, # (For features with symbol modality) If True, share the input embeddings # and target embeddings. shared_embedding=False, # (For features with symbol modality) Number to shard embeddings by. symbol_modality_num_shards=1, # Feature transformations are optional dictionaries comprising key-value # pairs of a feature name (str) and its transformation (function). If not # specified, T2TModel applies a default transformation according to the # feature's modality. Bottom is applicable to all features; loss, top, and # weights_fn are only applicable to target features. 
# TODO(trandustin): `name` is an optional hparam for legacy reasons, # defining variable scope names. Remove this hparam in the future. bottom={}, loss={}, name={}, top={}, weights_fn={}, # The maximum length of "input" sequence. # Sequences longer than this value will be truncated. 0 or negative values # mean there is no maximum or truncation. # You can change this behavior by overriding preprocess_example() method # in your problem class. max_input_seq_length=0, # The maximum length of "target" sequence. # Sequences longer than this value will be truncated. 0 or negative values # mean there is no maximum or truncation. # You can change this behavior by overriding preprocess_example() method # in your problem class. max_target_seq_length=0, # if nonzero, we split the target sequences on example read. # This is for use with language modeling problems with fixed length # examples. e.g. The examples may be written with length 65536, but we # want to split each example into 64 examples of length 1024. split_to_length=0, # Video settings: how many frames to batch on input and targets. video_num_input_frames=1, video_num_target_frames=1, # This flag allows us to optionally treat a seq-to-seq problem # as a language model. Legal values are: # # "none" - Do not prepend the inputs to the targets. # "prepend_inputs_masked_attention" # replace "targets" in preprocessing with # tf.concat([inputs, [0], targets], axis=1) # i.e. we prepend the inputs to the targets with a single # padding token in between. Use masked self-attention on the # entire resulting sequence. During training, we compute losses on # the combined sequence. During eval, we compute the metrics # on only the targets portion. # "prepend_inputs_full_attention" # similar to the previous option except that each # position in the inputs portion can see the # entire inputs portion. This removes the challenge of # autoregressively predicting the inputs portion. prepend_mode="none", # Scheduled sampling is interesting for auto-regressive models. # It runs an additional step using the generated output as autoregressive # targets, which can improve the models inference results later. The # parameter scheduled_sampling_prob determines with what probability # will such additional step be run. It's turned off (0.0) by default. # This probability will exponentially warm up for the number of # steps determined by scheduled_sampling_warmup_steps. # The tensor used for the n-th pass will consist of outputs from # the (n-1)-th pass mixed with gold truth, with the proportion of gold # determined by scheduled_sampling_gold_mixin_prob. Control the number # of passes with scheduled_sampling_num_passes. scheduled_sampling_prob=0.0, scheduled_sampling_warmup_steps=50000, scheduled_sampling_gold_mixin_prob=0.5, # TODO(duckworthd): Uncomment when we can ascertain why adding an # extra field to HParam causes test failures. # scheduled_sampling_num_passes=1, # This setting controls whether to copy variables around in a daisy chain # (if true) or leave their placement to TensorFlow. It only affects multi # device training and mostly should be turned on for performance. One # exception are recurrent models: with dynamic loops it must be off. daisy_chain_variables=True, # If True in PREDICT mode, then last-position-only optimizations are not # used. force_full_predict=False, # Set this for pure model parallelism. There is only one data shard. no_data_parallelism=False, # dtype used for activations. 
- "float32" or "bfloat16" # activation_dtype="bfloat16" currently only works on TPU. # It lowers activation-memory usage # and does not appear to affect quality. # You can train on TPU with activation_dtype="bfloat16" and evaluate # on CPU/GPU with activation_dtype="float32" activation_dtype="float32", # dtype used for parameters: "float32" or "bfloat16" # bfloat16 currently only works with optimizer="adafactor". # The savings in memory allow for training larger models. # Weights are encoded as (w*128)^8, using pseudostochastic # roundoff. Initial experiments show that model quality is similar # to baseline for about 3M training steps, but worse thereafter. weight_dtype="float32", # Directory containing a checkpoint for a pretrained model. This will only # be used if a new run is being started. Parameters not found in the # pretrained model will be randomly initialized. Superfluous parameters in # the pretrained model will be ignored. pretrained_model_dir="", # Threshold used for two cases: the primary task probability for the # constant mixing schedule, and the exponential schedule limit for when # mixing should stop (eg: 0.5 means stop at 50-50 mixing, 0.8 means stop # at 20-80 mixing for the primary-others mixing case.) multiproblem_schedule_threshold=0.5, # For more than 2 tasks, we may want to specify per-task thresholds here. # In that case, this needs to be a string with as many floating point # numbers as the number of tasks in the multi-problem. These numbers # are later normalized to add up to 1 and taken as probabilities for # each task. This enforces a constant mixing schedule and if this is # empty then the threshold from above is used for the first task and # the other tasks get the remaining probability split uniformly. multiproblem_per_task_threshold="", # The number of examples at which the proportion of the mixed in datasets # is multiproblem_schedule_threshold multiproblem_schedule_max_examples=1e7, # When training multiproblems, we can mix the data according to different # schedules. Example: a constant schedule mixing 20-80 between the primary # and other tasks. # A list of supported schedules can be found in # `data_generators.multi_problem.py`. multiproblem_mixing_schedule="constant", # A boolean that decides whether input sequence losses and target label # losses in classification problems should be reweighted. multiproblem_reweight_label_loss=False, # How much weight the targets in classification problems receive. Inputs # receive 1 minus this weight. multiproblem_label_weight=0.5, # Hyperparameters for relative attention. # The maximum relative positional distance to learn an embedding for. max_relative_position=0, # If heads share the same relative embedding. heads_share_relative_embedding=False, # If relative embedding terms are added to values too. add_relative_to_values=False, # If enable the host_call which is executed every training step. # There could be a performance drop if host_call function is slow and # cannot keep up with the TPU-side computation. tpu_enable_host_call=False, # Pad batch dim of inputs to nearest multiple of batch multiple. pad_batch=False, # When true, do not evaluate on the language model data when running the # multiproblem since it can take a while. If False, set eval_steps to # something large like 6000 or 10000. multiproblem_target_eval_only=False, # Max out the vocab size to a power of 2 for efficiency and to reserve # extra space in the vocabulary for new task ids and label classes. 
multiproblem_vocab_size=-1, # When using multiproblem with generation tasks, need to truncate the # inputs and targets manually before concatenating them. multiproblem_max_input_length=-1, multiproblem_max_target_length=-1, # If positive, makes training targets fixed-length in MultiProblem. multiproblem_fixed_train_length=-1, # Load weights from a second model. For instance, when using # pre-trained weights, you might want to initialize the encoder # and decoder by loading different models. warm_start_from_second="", # Area attention hyper parameters area_value_mode="none", area_key_mode="none", # Using area attention for the number of layers from the bottom num_area_layers=0, max_area_width=1, max_area_height=1, memory_height=1 )
[ "def", "basic_params1", "(", ")", ":", "return", "hparam", ".", "HParams", "(", "# If the problem consists of variable-length sequences", "# (see problem.batch_size_means_tokens()), then this is the number", "# of tokens per batch per GPU or per TPU core. Otherwise, this is", "# the number of examples per GPU or per TPU core.", "batch_size", "=", "4096", ",", "batch_shuffle_size", "=", "512", ",", "# If True, then if the features are of variable length, the batch_size is", "# used as the actual batch size (and not tokens per batch).", "use_fixed_batch_size", "=", "False", ",", "num_hidden_layers", "=", "4", ",", "kernel_height", "=", "3", ",", "kernel_width", "=", "1", ",", "hidden_size", "=", "64", ",", "compress_steps", "=", "0", ",", "# All hyperparameters ending in \"dropout\" are automatically set to 0.0", "# when not in training mode.", "dropout", "=", "0.2", ",", "clip_grad_norm", "=", "2.0", ",", "grad_noise_scale", "=", "0.0", ",", "summarize_grads", "=", "False", ",", "# Flag for whether mlperf mode is on", "mlperf_mode", "=", "False", ",", "# Whether to log the name and size of every variable", "summarize_vars", "=", "False", ",", "initializer", "=", "\"orthogonal\"", ",", "initializer_gain", "=", "1.5", ",", "label_smoothing", "=", "0.1", ",", "optimizer", "=", "\"adam\"", ",", "optimizer_adam_epsilon", "=", "1e-6", ",", "optimizer_adam_beta1", "=", "0.85", ",", "optimizer_adam_beta2", "=", "0.997", ",", "optimizer_momentum_momentum", "=", "0.9", ",", "optimizer_momentum_nesterov", "=", "False", ",", "optimizer_adafactor_beta1", "=", "0.0", ",", "optimizer_adafactor_beta2", "=", "0.999", ",", "optimizer_adafactor_factored", "=", "True", ",", "optimizer_adafactor_decay_type", "=", "\"pow\"", ",", "optimizer_adafactor_memory_exponent", "=", "0.8", ",", "optimizer_adafactor_clipping_threshold", "=", "1.0", ",", "optimizer_adafactor_multiply_by_parameter_scale", "=", "True", ",", "# Number of accumulating steps for multi step optimizers.", "optimizer_multistep_accumulate_steps", "=", "0", ",", "# Loss scaling used.", "# Generally only necessary with mixed precision training.", "# Mixed precision training only supports exponential scaling currently", "# To disable the scaler, see to 0/False", "mixed_precision_optimizer_loss_scaler", "=", "\"exponential\"", ",", "# Determines the initial loss scaling value for mixed precision", "mixed_precision_optimizer_init_loss_scale", "=", "2", "**", "15", ",", "# Whether to zero gradients that were not computed, so that the", "# appropriate slots are created. Useful for sharing checkpoints between", "# models with different sets of heads.", "optimizer_zero_grads", "=", "False", ",", "weight_decay", "=", "1e-6", ",", "weight_noise", "=", "0.0", ",", "# Defines the learning rate as a product of named functions.", "# Available functions are listed in learning_rate._LEARNING_RATE_FUNCTIONS", "# e.g. \"constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size\"", "learning_rate_schedule", "=", "\"legacy\"", ",", "learning_rate_constant", "=", "1.0", ",", "# If learning_rate_schedule==\"legacy\",", "# then we specify decay scheme here. 
Warmup is always exponential,", "# except with \"noam\" learning rate decay scheme.", "# see optimize.legacy_learning_rate_schedule()", "# TODO(noam): migrate everyone away from this.", "learning_rate_decay_scheme", "=", "\"none\"", ",", "# decay_steps and decay_staircase for learning_rate_decay_scheme==\"exp\"", "learning_rate_decay_steps", "=", "5000", ",", "learning_rate_decay_staircase", "=", "False", ",", "learning_rate_minimum", "=", "None", ",", "learning_rate_decay_rate", "=", "1.0", ",", "learning_rate_warmup_steps", "=", "100", ",", "learning_rate_cosine_cycle_steps", "=", "250000", ",", "learning_rate", "=", "0.1", ",", "sampling_method", "=", "\"argmax\"", ",", "# \"argmax\" or \"random\"", "sampling_temp", "=", "1.0", ",", "# temperature for sampling", "sampling_keep_top_k", "=", "-", "1", ",", "# If >0, ignore all but the top k logits", "# expand the logits a piece at a time - saves memory.", "factored_logits", "=", "False", ",", "multiply_embedding_mode", "=", "\"sqrt_depth\"", ",", "# Parameters related to mixtures of experts.", "moe_hidden_sizes", "=", "\"2048\"", ",", "# hidden layer sizes (comma-separated)", "moe_num_experts", "=", "64", ",", "# number of experts per layer", "moe_k", "=", "2", ",", "# how many experts to use for each batch element", "moe_loss_coef", "=", "1e-2", ",", "# Sequences of operations to perform on layer input and layer output.", "# Used by common_layers.layer_preprocess, common_layers.layer_postprocess", "# Each character represents an operation:", "# none: no preprocessing", "# d: apply dropout", "# n: apply normalization (see norm_type and norm_epsilon)", "# a: add layer input (residual connection - only during postprocess)", "# The special string \"none\" is used instead of the empty string", "# to indicate no pre/postprocessing, since the empty string causes", "# trouble for hyperparameter tuning.", "# TODO(noam): The current settings (\"\", \"dan\") are the published version", "# of the transformer. 
(\"n\", \"da\") seems better for harder-to-learn", "# models, so it should probably be the default.", "layer_preprocess_sequence", "=", "\"none\"", ",", "layer_postprocess_sequence", "=", "\"dan\"", ",", "# dropout rate to use during layer_preprocess and layer_postprocess", "layer_prepostprocess_dropout", "=", "0.1", ",", "# broadcast dimensions for layer_prepostprocess_dropout", "# a comma-separated list of integers.", "# see common_layers.dropout_with_broadcast_dims()", "# Change this to \"1\" to save memory.", "layer_prepostprocess_dropout_broadcast_dims", "=", "\"\"", ",", "# dropout some symbols (set them to 0) before embedding.", "symbol_dropout", "=", "0.0", ",", "# What type of normalization to use", "norm_type", "=", "\"layer\"", ",", "# \"batch\", layer\", \"noam\", \"none\".", "# epsilon parameter to normalization function", "norm_epsilon", "=", "1e-6", ",", "# pad vocabularies so that this value divides the vocabulary size.", "vocab_divisor", "=", "1", ",", "# During training, we drop sequences whose inputs and targets are shorter", "# than min_length", "min_length", "=", "0", ",", "# During training, we drop sequences whose inputs or targets are longer", "# than max_length.", "# If max_length==0, we use hparams.batch_size instead.", "max_length", "=", "0", ",", "# Pack examples on the fly.", "pack_dataset", "=", "False", ",", "# Use custom ops not included in standard tensorflow.", "use_custom_ops", "=", "True", ",", "# Split targets on the first axis into chunks of this length.", "split_targets_chunk_length", "=", "0", ",", "split_targets_max_chunks", "=", "100", ",", "split_targets_strided_training", "=", "False", ",", "# Maximum length in the smallest length bucket. Setting this", "# flag too high will result in wasteful padding of short", "# sequences. Due to some (hopefully) temporary hacks in the", "# data reading and batching code, setting this flag too low", "# results in a very long batch-shuffling queue.", "# TODO(noam): change this once the Datasets API changes.", "min_length_bucket", "=", "8", ",", "# This flag controls the number of length buckets in the data", "# reader. The buckets have maximum lengths from", "# min_bucket_length to (max_length or batch_size), increasing", "# (approximately) by factors of length_bucket_step.", "length_bucket_step", "=", "1.1", ",", "# If set to True, drop sequences longer than max_length during eval.", "# This affects the validity of the evaluation metrics.", "eval_drop_long_sequences", "=", "False", ",", "# If True, run the model autoregressively instead of teacher-forcing", "# during eval", "eval_run_autoregressive", "=", "False", ",", "# (For features with symbol modality) If True, share all of the", "# input embeddings, target embeddings, and softmax weights.", "shared_embedding_and_softmax_weights", "=", "False", ",", "# (For features with symbol modality) If True, share the input embeddings", "# and target embeddings.", "shared_embedding", "=", "False", ",", "# (For features with symbol modality) Number to shard embeddings by.", "symbol_modality_num_shards", "=", "1", ",", "# Feature transformations are optional dictionaries comprising key-value", "# pairs of a feature name (str) and its transformation (function). If not", "# specified, T2TModel applies a default transformation according to the", "# feature's modality. 
Bottom is applicable to all features; loss, top, and", "# weights_fn are only applicable to target features.", "# TODO(trandustin): `name` is an optional hparam for legacy reasons,", "# defining variable scope names. Remove this hparam in the future.", "bottom", "=", "{", "}", ",", "loss", "=", "{", "}", ",", "name", "=", "{", "}", ",", "top", "=", "{", "}", ",", "weights_fn", "=", "{", "}", ",", "# The maximum length of \"input\" sequence.", "# Sequences longer than this value will be truncated. 0 or negative values", "# mean there is no maximum or truncation.", "# You can change this behavior by overriding preprocess_example() method", "# in your problem class.", "max_input_seq_length", "=", "0", ",", "# The maximum length of \"target\" sequence.", "# Sequences longer than this value will be truncated. 0 or negative values", "# mean there is no maximum or truncation.", "# You can change this behavior by overriding preprocess_example() method", "# in your problem class.", "max_target_seq_length", "=", "0", ",", "# if nonzero, we split the target sequences on example read.", "# This is for use with language modeling problems with fixed length", "# examples. e.g. The examples may be written with length 65536, but we", "# want to split each example into 64 examples of length 1024.", "split_to_length", "=", "0", ",", "# Video settings: how many frames to batch on input and targets.", "video_num_input_frames", "=", "1", ",", "video_num_target_frames", "=", "1", ",", "# This flag allows us to optionally treat a seq-to-seq problem", "# as a language model. Legal values are:", "#", "# \"none\" - Do not prepend the inputs to the targets.", "# \"prepend_inputs_masked_attention\"", "# replace \"targets\" in preprocessing with", "# tf.concat([inputs, [0], targets], axis=1)", "# i.e. we prepend the inputs to the targets with a single", "# padding token in between. Use masked self-attention on the", "# entire resulting sequence. During training, we compute losses on", "# the combined sequence. During eval, we compute the metrics", "# on only the targets portion.", "# \"prepend_inputs_full_attention\"", "# similar to the previous option except that each", "# position in the inputs portion can see the", "# entire inputs portion. This removes the challenge of", "# autoregressively predicting the inputs portion.", "prepend_mode", "=", "\"none\"", ",", "# Scheduled sampling is interesting for auto-regressive models.", "# It runs an additional step using the generated output as autoregressive", "# targets, which can improve the models inference results later. The", "# parameter scheduled_sampling_prob determines with what probability", "# will such additional step be run. It's turned off (0.0) by default.", "# This probability will exponentially warm up for the number of", "# steps determined by scheduled_sampling_warmup_steps.", "# The tensor used for the n-th pass will consist of outputs from", "# the (n-1)-th pass mixed with gold truth, with the proportion of gold", "# determined by scheduled_sampling_gold_mixin_prob. 
Control the number", "# of passes with scheduled_sampling_num_passes.", "scheduled_sampling_prob", "=", "0.0", ",", "scheduled_sampling_warmup_steps", "=", "50000", ",", "scheduled_sampling_gold_mixin_prob", "=", "0.5", ",", "# TODO(duckworthd): Uncomment when we can ascertain why adding an", "# extra field to HParam causes test failures.", "# scheduled_sampling_num_passes=1,", "# This setting controls whether to copy variables around in a daisy chain", "# (if true) or leave their placement to TensorFlow. It only affects multi", "# device training and mostly should be turned on for performance. One", "# exception are recurrent models: with dynamic loops it must be off.", "daisy_chain_variables", "=", "True", ",", "# If True in PREDICT mode, then last-position-only optimizations are not", "# used.", "force_full_predict", "=", "False", ",", "# Set this for pure model parallelism. There is only one data shard.", "no_data_parallelism", "=", "False", ",", "# dtype used for activations. - \"float32\" or \"bfloat16\"", "# activation_dtype=\"bfloat16\" currently only works on TPU.", "# It lowers activation-memory usage", "# and does not appear to affect quality.", "# You can train on TPU with activation_dtype=\"bfloat16\" and evaluate", "# on CPU/GPU with activation_dtype=\"float32\"", "activation_dtype", "=", "\"float32\"", ",", "# dtype used for parameters: \"float32\" or \"bfloat16\"", "# bfloat16 currently only works with optimizer=\"adafactor\".", "# The savings in memory allow for training larger models.", "# Weights are encoded as (w*128)^8, using pseudostochastic", "# roundoff. Initial experiments show that model quality is similar", "# to baseline for about 3M training steps, but worse thereafter.", "weight_dtype", "=", "\"float32\"", ",", "# Directory containing a checkpoint for a pretrained model. This will only", "# be used if a new run is being started. Parameters not found in the", "# pretrained model will be randomly initialized. Superfluous parameters in", "# the pretrained model will be ignored.", "pretrained_model_dir", "=", "\"\"", ",", "# Threshold used for two cases: the primary task probability for the", "# constant mixing schedule, and the exponential schedule limit for when", "# mixing should stop (eg: 0.5 means stop at 50-50 mixing, 0.8 means stop", "# at 20-80 mixing for the primary-others mixing case.)", "multiproblem_schedule_threshold", "=", "0.5", ",", "# For more than 2 tasks, we may want to specify per-task thresholds here.", "# In that case, this needs to be a string with as many floating point", "# numbers as the number of tasks in the multi-problem. These numbers", "# are later normalized to add up to 1 and taken as probabilities for", "# each task. This enforces a constant mixing schedule and if this is", "# empty then the threshold from above is used for the first task and", "# the other tasks get the remaining probability split uniformly.", "multiproblem_per_task_threshold", "=", "\"\"", ",", "# The number of examples at which the proportion of the mixed in datasets", "# is multiproblem_schedule_threshold", "multiproblem_schedule_max_examples", "=", "1e7", ",", "# When training multiproblems, we can mix the data according to different", "# schedules. 
Example: a constant schedule mixing 20-80 between the primary", "# and other tasks.", "# A list of supported schedules can be found in", "# `data_generators.multi_problem.py`.", "multiproblem_mixing_schedule", "=", "\"constant\"", ",", "# A boolean that decides whether input sequence losses and target label", "# losses in classification problems should be reweighted.", "multiproblem_reweight_label_loss", "=", "False", ",", "# How much weight the targets in classification problems receive. Inputs", "# receive 1 minus this weight.", "multiproblem_label_weight", "=", "0.5", ",", "# Hyperparameters for relative attention.", "# The maximum relative positional distance to learn an embedding for.", "max_relative_position", "=", "0", ",", "# If heads share the same relative embedding.", "heads_share_relative_embedding", "=", "False", ",", "# If relative embedding terms are added to values too.", "add_relative_to_values", "=", "False", ",", "# If enable the host_call which is executed every training step.", "# There could be a performance drop if host_call function is slow and", "# cannot keep up with the TPU-side computation.", "tpu_enable_host_call", "=", "False", ",", "# Pad batch dim of inputs to nearest multiple of batch multiple.", "pad_batch", "=", "False", ",", "# When true, do not evaluate on the language model data when running the", "# multiproblem since it can take a while. If False, set eval_steps to", "# something large like 6000 or 10000.", "multiproblem_target_eval_only", "=", "False", ",", "# Max out the vocab size to a power of 2 for efficiency and to reserve", "# extra space in the vocabulary for new task ids and label classes.", "multiproblem_vocab_size", "=", "-", "1", ",", "# When using multiproblem with generation tasks, need to truncate the", "# inputs and targets manually before concatenating them.", "multiproblem_max_input_length", "=", "-", "1", ",", "multiproblem_max_target_length", "=", "-", "1", ",", "# If positive, makes training targets fixed-length in MultiProblem.", "multiproblem_fixed_train_length", "=", "-", "1", ",", "# Load weights from a second model. For instance, when using", "# pre-trained weights, you might want to initialize the encoder", "# and decoder by loading different models.", "warm_start_from_second", "=", "\"\"", ",", "# Area attention hyper parameters", "area_value_mode", "=", "\"none\"", ",", "area_key_mode", "=", "\"none\"", ",", "# Using area attention for the number of layers from the bottom", "num_area_layers", "=", "0", ",", "max_area_width", "=", "1", ",", "max_area_height", "=", "1", ",", "memory_height", "=", "1", ")" ]
docstring: A set of basic hyperparameters.
[ "A", "set", "of", "basic", "hyperparameters", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_hparams.py#L29-L351
partition: train
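basic_params1 returns an HParams object whose fields downstream models override. A minimal sketch of deriving a new hyperparameter set from it, assuming the hparam.HParams API used in the code above (attribute assignment plus set_hparam/add_hparam); the function name my_params is an illustrative assumption.

# Sketch: build on basic_params1 and override a few fields.
from tensor2tensor.layers import common_hparams


def my_params():
  hparams = common_hparams.basic_params1()
  hparams.hidden_size = 128                # override a scalar hyperparameter
  hparams.num_hidden_layers = 6
  hparams.set_hparam("dropout", 0.1)       # equivalent explicit setter
  hparams.add_hparam("my_new_flag", True)  # register a brand-new field
  return hparams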
repo: tensorflow/tensor2tensor
path: tensor2tensor/layers/common_hparams.py
func_name: basic_range1
original_string:
def basic_range1(ranged_hparams):
  """A basic range of hyperparameters."""
  rhp = ranged_hparams
  rhp.set_discrete("batch_size", [1024, 2048, 4096])
  rhp.set_discrete("num_hidden_layers", [1, 2, 3, 4, 5, 6])
  rhp.set_discrete("hidden_size", [32, 64, 128, 256, 512], scale=rhp.LOG_SCALE)
  rhp.set_discrete("kernel_height", [1, 3, 5, 7])
  rhp.set_discrete("kernel_width", [1, 3, 5, 7])
  rhp.set_discrete("compress_steps", [0, 1, 2])
  rhp.set_float("dropout", 0.0, 0.5)
  rhp.set_float("weight_decay", 1e-4, 10.0, scale=rhp.LOG_SCALE)
  rhp.set_float("label_smoothing", 0.0, 0.2)
  rhp.set_float("clip_grad_norm", 0.01, 50.0, scale=rhp.LOG_SCALE)
  rhp.set_float("learning_rate", 0.005, 2.0, scale=rhp.LOG_SCALE)
  rhp.set_categorical("initializer",
                      ["uniform", "orthogonal", "uniform_unit_scaling"])
  rhp.set_float("initializer_gain", 0.5, 3.5)
  rhp.set_categorical("learning_rate_decay_scheme",
                      ["none", "sqrt", "noam", "exp"])
  rhp.set_float("optimizer_adam_epsilon", 1e-7, 1e-2, scale=rhp.LOG_SCALE)
  rhp.set_float("optimizer_adam_beta1", 0.8, 0.9)
  rhp.set_float("optimizer_adam_beta2", 0.995, 0.999)
  rhp.set_categorical(
      "optimizer",
      ["adam", "adagrad", "momentum", "rms_prop", "sgd", "yellow_fin"])
language: python
code:
def basic_range1(ranged_hparams):
  """A basic range of hyperparameters."""
  rhp = ranged_hparams
  rhp.set_discrete("batch_size", [1024, 2048, 4096])
  rhp.set_discrete("num_hidden_layers", [1, 2, 3, 4, 5, 6])
  rhp.set_discrete("hidden_size", [32, 64, 128, 256, 512], scale=rhp.LOG_SCALE)
  rhp.set_discrete("kernel_height", [1, 3, 5, 7])
  rhp.set_discrete("kernel_width", [1, 3, 5, 7])
  rhp.set_discrete("compress_steps", [0, 1, 2])
  rhp.set_float("dropout", 0.0, 0.5)
  rhp.set_float("weight_decay", 1e-4, 10.0, scale=rhp.LOG_SCALE)
  rhp.set_float("label_smoothing", 0.0, 0.2)
  rhp.set_float("clip_grad_norm", 0.01, 50.0, scale=rhp.LOG_SCALE)
  rhp.set_float("learning_rate", 0.005, 2.0, scale=rhp.LOG_SCALE)
  rhp.set_categorical("initializer",
                      ["uniform", "orthogonal", "uniform_unit_scaling"])
  rhp.set_float("initializer_gain", 0.5, 3.5)
  rhp.set_categorical("learning_rate_decay_scheme",
                      ["none", "sqrt", "noam", "exp"])
  rhp.set_float("optimizer_adam_epsilon", 1e-7, 1e-2, scale=rhp.LOG_SCALE)
  rhp.set_float("optimizer_adam_beta1", 0.8, 0.9)
  rhp.set_float("optimizer_adam_beta2", 0.995, 0.999)
  rhp.set_categorical(
      "optimizer",
      ["adam", "adagrad", "momentum", "rms_prop", "sgd", "yellow_fin"])
[ "def", "basic_range1", "(", "ranged_hparams", ")", ":", "rhp", "=", "ranged_hparams", "rhp", ".", "set_discrete", "(", "\"batch_size\"", ",", "[", "1024", ",", "2048", ",", "4096", "]", ")", "rhp", ".", "set_discrete", "(", "\"num_hidden_layers\"", ",", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", "]", ")", "rhp", ".", "set_discrete", "(", "\"hidden_size\"", ",", "[", "32", ",", "64", ",", "128", ",", "256", ",", "512", "]", ",", "scale", "=", "rhp", ".", "LOG_SCALE", ")", "rhp", ".", "set_discrete", "(", "\"kernel_height\"", ",", "[", "1", ",", "3", ",", "5", ",", "7", "]", ")", "rhp", ".", "set_discrete", "(", "\"kernel_width\"", ",", "[", "1", ",", "3", ",", "5", ",", "7", "]", ")", "rhp", ".", "set_discrete", "(", "\"compress_steps\"", ",", "[", "0", ",", "1", ",", "2", "]", ")", "rhp", ".", "set_float", "(", "\"dropout\"", ",", "0.0", ",", "0.5", ")", "rhp", ".", "set_float", "(", "\"weight_decay\"", ",", "1e-4", ",", "10.0", ",", "scale", "=", "rhp", ".", "LOG_SCALE", ")", "rhp", ".", "set_float", "(", "\"label_smoothing\"", ",", "0.0", ",", "0.2", ")", "rhp", ".", "set_float", "(", "\"clip_grad_norm\"", ",", "0.01", ",", "50.0", ",", "scale", "=", "rhp", ".", "LOG_SCALE", ")", "rhp", ".", "set_float", "(", "\"learning_rate\"", ",", "0.005", ",", "2.0", ",", "scale", "=", "rhp", ".", "LOG_SCALE", ")", "rhp", ".", "set_categorical", "(", "\"initializer\"", ",", "[", "\"uniform\"", ",", "\"orthogonal\"", ",", "\"uniform_unit_scaling\"", "]", ")", "rhp", ".", "set_float", "(", "\"initializer_gain\"", ",", "0.5", ",", "3.5", ")", "rhp", ".", "set_categorical", "(", "\"learning_rate_decay_scheme\"", ",", "[", "\"none\"", ",", "\"sqrt\"", ",", "\"noam\"", ",", "\"exp\"", "]", ")", "rhp", ".", "set_float", "(", "\"optimizer_adam_epsilon\"", ",", "1e-7", ",", "1e-2", ",", "scale", "=", "rhp", ".", "LOG_SCALE", ")", "rhp", ".", "set_float", "(", "\"optimizer_adam_beta1\"", ",", "0.8", ",", "0.9", ")", "rhp", ".", "set_float", "(", "\"optimizer_adam_beta2\"", ",", "0.995", ",", "0.999", ")", "rhp", ".", "set_categorical", "(", "\"optimizer\"", ",", "[", "\"adam\"", ",", "\"adagrad\"", ",", "\"momentum\"", ",", "\"rms_prop\"", ",", "\"sgd\"", ",", "\"yellow_fin\"", "]", ")" ]
docstring: A basic range of hyperparameters.
[ "A", "basic", "range", "of", "hyperparameters", "." ]
sha: 272500b6efe353aeb638d2745ed56e519462ca31
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_hparams.py#L473-L497
partition: train
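basic_range1 only declares a search space on a RangedHParams container; a tuner later samples from it. A rough sketch of the call pattern, assuming RangedHParams lives in the same module and can be constructed without arguments (this dump shows only its methods, not its constructor):

# Sketch: declare a search space and export it for a tuner.
from tensor2tensor.layers import common_hparams

rhp = common_hparams.RangedHParams()  # constructor signature is an assumption
common_hparams.basic_range1(rhp)

# The same object can then be serialized for Cloud ML Engine hyperparameter
# tuning (see to_parameter_specs in a later row).
specs = rhp.to_parameter_specs(name_prefix="hp_")
print(specs[0])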
repo: tensorflow/tensor2tensor
path: tensor2tensor/layers/common_hparams.py
func_name: RangedHParams._check_reset_and_type_change
original_string:
def _check_reset_and_type_change(self, name, orig_ctr):
  """Check if name is in orig_ctr or in one of the other type containers."""
  # Resetting a hyperparameter
  if name in orig_ctr:
    tf.logging.warning("Overwriting hparam %s", name)

  ctr_names = [
      (self._categorical_params, "categorical"),
      (self._discrete_params, "discrete"),
      (self._float_params, "float"),
      (self._int_params, "int"),
  ]
  ctrs, names = list(zip(*ctr_names))
  orig_name = names[ctrs.index(orig_ctr)]

  for ctr, ctr_name in ctr_names:
    if ctr is orig_ctr:
      continue

    # Using a different type for the same hyperparameter name
    if name in ctr:
      raise ValueError("Setting hyperparameter %s as type %s, but a "
                       "hyperparemeter of the same name was originally "
                       "registered as type %s" % (name, ctr_name, orig_name))
language: python
code:
def _check_reset_and_type_change(self, name, orig_ctr):
  """Check if name is in orig_ctr or in one of the other type containers."""
  # Resetting a hyperparameter
  if name in orig_ctr:
    tf.logging.warning("Overwriting hparam %s", name)

  ctr_names = [
      (self._categorical_params, "categorical"),
      (self._discrete_params, "discrete"),
      (self._float_params, "float"),
      (self._int_params, "int"),
  ]
  ctrs, names = list(zip(*ctr_names))
  orig_name = names[ctrs.index(orig_ctr)]

  for ctr, ctr_name in ctr_names:
    if ctr is orig_ctr:
      continue

    # Using a different type for the same hyperparameter name
    if name in ctr:
      raise ValueError("Setting hyperparameter %s as type %s, but a "
                       "hyperparemeter of the same name was originally "
                       "registered as type %s" % (name, ctr_name, orig_name))
[ "def", "_check_reset_and_type_change", "(", "self", ",", "name", ",", "orig_ctr", ")", ":", "# Resetting a hyperparameter", "if", "name", "in", "orig_ctr", ":", "tf", ".", "logging", ".", "warning", "(", "\"Overwriting hparam %s\"", ",", "name", ")", "ctr_names", "=", "[", "(", "self", ".", "_categorical_params", ",", "\"categorical\"", ")", ",", "(", "self", ".", "_discrete_params", ",", "\"discrete\"", ")", ",", "(", "self", ".", "_float_params", ",", "\"float\"", ")", ",", "(", "self", ".", "_int_params", ",", "\"int\"", ")", ",", "]", "ctrs", ",", "names", "=", "list", "(", "zip", "(", "*", "ctr_names", ")", ")", "orig_name", "=", "names", "[", "ctrs", ".", "index", "(", "orig_ctr", ")", "]", "for", "ctr", ",", "ctr_name", "in", "ctr_names", ":", "if", "ctr", "is", "orig_ctr", ":", "continue", "# Using a different type for the same hyperparameter name", "if", "name", "in", "ctr", ":", "raise", "ValueError", "(", "\"Setting hyperparameter %s as type %s, but a \"", "\"hyperparemeter of the same name was originally \"", "\"registered as type %s\"", "%", "(", "name", ",", "ctr_name", ",", "orig_name", ")", ")" ]
Check if name is in orig_ctr or in one of the other type containers.
[ "Check", "if", "name", "is", "in", "orig_ctr", "or", "in", "one", "of", "the", "other", "type", "containers", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_hparams.py#L374-L397
train
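A self-contained sketch, assumed rather than taken from the library, of the same guard as _check_reset_and_type_change: re-registering a name with its original type only warns, while re-registering it under a different type raises.

import warnings

# Hypothetical registries mirroring the four type containers.
_CONTAINERS = {"categorical": {}, "discrete": {}, "float": {}, "int": {}}


def check_reset_and_type_change(name, new_type):
  # Warn on re-registration with the same type, raise on a type change.
  if name in _CONTAINERS[new_type]:
    warnings.warn("Overwriting hparam %s" % name)
  for type_name, container in _CONTAINERS.items():
    if type_name != new_type and name in container:
      raise ValueError(
          "Setting hyperparameter %s as type %s, but it was originally "
          "registered as type %s" % (name, new_type, type_name))


_CONTAINERS["float"]["dropout"] = (0.0, 0.5)
check_reset_and_type_change("dropout", "float")   # warns only
# check_reset_and_type_change("dropout", "int")   # would raise ValueError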
tensorflow/tensor2tensor
tensor2tensor/layers/common_hparams.py
RangedHParams.to_parameter_specs
def to_parameter_specs(self, name_prefix=""): """To list of dicts suitable for Cloud ML Engine hyperparameter tuning.""" specs = [] for name, categories, _ in self._categorical_params.values(): spec = { "parameterName": name_prefix + name, "type": "CATEGORICAL", "categoricalValues": categories, } specs.append(spec) for name, feasible_points, scale, _ in self._discrete_params.values(): spec = { "parameterName": name_prefix + name, "type": "DISCRETE", "discreteValues": feasible_points, } if scale: spec["scaleType"] = self.SCALES_STR[scale] specs.append(spec) for name, min_val, max_val, scale, _ in self._float_params.values(): spec = { "parameterName": name_prefix + name, "type": "DOUBLE", "minValue": min_val, "maxValue": max_val, } if scale: spec["scaleType"] = self.SCALES_STR[scale] specs.append(spec) for name, min_val, max_val, scale, _ in self._int_params.values(): spec = { "parameterName": name_prefix + name, "type": "INTEGER", "minValue": min_val, "maxValue": max_val, } if scale: spec["scaleType"] = self.SCALES_STR[scale] specs.append(spec) return specs
python
def to_parameter_specs(self, name_prefix=""): """To list of dicts suitable for Cloud ML Engine hyperparameter tuning.""" specs = [] for name, categories, _ in self._categorical_params.values(): spec = { "parameterName": name_prefix + name, "type": "CATEGORICAL", "categoricalValues": categories, } specs.append(spec) for name, feasible_points, scale, _ in self._discrete_params.values(): spec = { "parameterName": name_prefix + name, "type": "DISCRETE", "discreteValues": feasible_points, } if scale: spec["scaleType"] = self.SCALES_STR[scale] specs.append(spec) for name, min_val, max_val, scale, _ in self._float_params.values(): spec = { "parameterName": name_prefix + name, "type": "DOUBLE", "minValue": min_val, "maxValue": max_val, } if scale: spec["scaleType"] = self.SCALES_STR[scale] specs.append(spec) for name, min_val, max_val, scale, _ in self._int_params.values(): spec = { "parameterName": name_prefix + name, "type": "INTEGER", "minValue": min_val, "maxValue": max_val, } if scale: spec["scaleType"] = self.SCALES_STR[scale] specs.append(spec) return specs
[ "def", "to_parameter_specs", "(", "self", ",", "name_prefix", "=", "\"\"", ")", ":", "specs", "=", "[", "]", "for", "name", ",", "categories", ",", "_", "in", "self", ".", "_categorical_params", ".", "values", "(", ")", ":", "spec", "=", "{", "\"parameterName\"", ":", "name_prefix", "+", "name", ",", "\"type\"", ":", "\"CATEGORICAL\"", ",", "\"categoricalValues\"", ":", "categories", ",", "}", "specs", ".", "append", "(", "spec", ")", "for", "name", ",", "feasible_points", ",", "scale", ",", "_", "in", "self", ".", "_discrete_params", ".", "values", "(", ")", ":", "spec", "=", "{", "\"parameterName\"", ":", "name_prefix", "+", "name", ",", "\"type\"", ":", "\"DISCRETE\"", ",", "\"discreteValues\"", ":", "feasible_points", ",", "}", "if", "scale", ":", "spec", "[", "\"scaleType\"", "]", "=", "self", ".", "SCALES_STR", "[", "scale", "]", "specs", ".", "append", "(", "spec", ")", "for", "name", ",", "min_val", ",", "max_val", ",", "scale", ",", "_", "in", "self", ".", "_float_params", ".", "values", "(", ")", ":", "spec", "=", "{", "\"parameterName\"", ":", "name_prefix", "+", "name", ",", "\"type\"", ":", "\"DOUBLE\"", ",", "\"minValue\"", ":", "min_val", ",", "\"maxValue\"", ":", "max_val", ",", "}", "if", "scale", ":", "spec", "[", "\"scaleType\"", "]", "=", "self", ".", "SCALES_STR", "[", "scale", "]", "specs", ".", "append", "(", "spec", ")", "for", "name", ",", "min_val", ",", "max_val", ",", "scale", ",", "_", "in", "self", ".", "_int_params", ".", "values", "(", ")", ":", "spec", "=", "{", "\"parameterName\"", ":", "name_prefix", "+", "name", ",", "\"type\"", ":", "\"INTEGER\"", ",", "\"minValue\"", ":", "min_val", ",", "\"maxValue\"", ":", "max_val", ",", "}", "if", "scale", ":", "spec", "[", "\"scaleType\"", "]", "=", "self", ".", "SCALES_STR", "[", "scale", "]", "specs", ".", "append", "(", "spec", ")", "return", "specs" ]
To list of dicts suitable for Cloud ML Engine hyperparameter tuning.
[ "To", "list", "of", "dicts", "suitable", "for", "Cloud", "ML", "Engine", "hyperparameter", "tuning", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_hparams.py#L426-L469
train
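To show the output format only: a hedged example of the Cloud ML Engine parameter specs that to_parameter_specs emits. The parameter names and values are illustrative, and the UNIT_LOG_SCALE string is an assumption about the scaleType mapping, not read from the code above.

# Illustrative specs in the shape produced by to_parameter_specs.
specs = [
    {
        "parameterName": "hp_learning_rate",
        "type": "DOUBLE",
        "minValue": 0.005,
        "maxValue": 2.0,
        "scaleType": "UNIT_LOG_SCALE",  # assumed Cloud ML Engine scale name
    },
    {
        "parameterName": "hp_batch_size",
        "type": "DISCRETE",
        "discreteValues": [1024, 2048, 4096],
    },
    {
        "parameterName": "hp_initializer",
        "type": "CATEGORICAL",
        "categoricalValues": ["uniform", "orthogonal"],
    },
]

for spec in specs:
  print(spec["parameterName"], spec["type"])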
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
register_game
def register_game(game_name, game_mode="NoFrameskip-v4"): """Create and register problems for the game. Args: game_name: str, one of the games in ATARI_GAMES, e.g. "bank_heist". game_mode: the frame skip and sticky keys config. Raises: ValueError: if game_name or game_mode are wrong. """ if game_name not in ATARI_GAMES: raise ValueError("Game %s not in ATARI_GAMES" % game_name) if game_mode not in ATARI_GAME_MODES: raise ValueError("Unknown ATARI game mode: %s." % game_mode) camel_game_name = misc_utils.snakecase_to_camelcase(game_name) + game_mode # Create and register the Problem cls = type("Gym%sRandom" % camel_game_name, (T2TGymEnv,), {"base_env_name": camel_game_name}) registry.register_problem(cls)
python
def register_game(game_name, game_mode="NoFrameskip-v4"): """Create and register problems for the game. Args: game_name: str, one of the games in ATARI_GAMES, e.g. "bank_heist". game_mode: the frame skip and sticky keys config. Raises: ValueError: if game_name or game_mode are wrong. """ if game_name not in ATARI_GAMES: raise ValueError("Game %s not in ATARI_GAMES" % game_name) if game_mode not in ATARI_GAME_MODES: raise ValueError("Unknown ATARI game mode: %s." % game_mode) camel_game_name = misc_utils.snakecase_to_camelcase(game_name) + game_mode # Create and register the Problem cls = type("Gym%sRandom" % camel_game_name, (T2TGymEnv,), {"base_env_name": camel_game_name}) registry.register_problem(cls)
[ "def", "register_game", "(", "game_name", ",", "game_mode", "=", "\"NoFrameskip-v4\"", ")", ":", "if", "game_name", "not", "in", "ATARI_GAMES", ":", "raise", "ValueError", "(", "\"Game %s not in ATARI_GAMES\"", "%", "game_name", ")", "if", "game_mode", "not", "in", "ATARI_GAME_MODES", ":", "raise", "ValueError", "(", "\"Unknown ATARI game mode: %s.\"", "%", "game_mode", ")", "camel_game_name", "=", "misc_utils", ".", "snakecase_to_camelcase", "(", "game_name", ")", "+", "game_mode", "# Create and register the Problem", "cls", "=", "type", "(", "\"Gym%sRandom\"", "%", "camel_game_name", ",", "(", "T2TGymEnv", ",", ")", ",", "{", "\"base_env_name\"", ":", "camel_game_name", "}", ")", "registry", ".", "register_problem", "(", "cls", ")" ]
Create and register problems for the game. Args: game_name: str, one of the games in ATARI_GAMES, e.g. "bank_heist". game_mode: the frame skip and sticky keys config. Raises: ValueError: if game_name or game_mode are wrong.
[ "Create", "and", "register", "problems", "for", "the", "game", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L884-L902
train
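A minimal sketch of the dynamic class-creation-and-registration pattern used by register_game. DummyEnv, REGISTRY and register_problem are made-up stand-ins for T2TGymEnv and the tensor2tensor registry.

REGISTRY = {}


class DummyEnv(object):
  base_env_name = None


def register_problem(cls):
  REGISTRY[cls.__name__] = cls
  return cls


def snakecase_to_camelcase(name):
  return "".join(part.capitalize() for part in name.split("_"))


def register_game(game_name, game_mode="NoFrameskip-v4"):
  camel_game_name = snakecase_to_camelcase(game_name) + game_mode
  # type() builds a new subclass on the fly, mirroring the helper above.
  cls = type("Gym%sRandom" % camel_game_name, (DummyEnv,),
             {"base_env_name": camel_game_name})
  register_problem(cls)


register_game("bank_heist")
print(sorted(REGISTRY))  # ['GymBankHeistNoFrameskip-v4Random']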
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
T2TEnv._decode_png
def _decode_png(self, encoded_observation): """Decodes a single observation from PNG.""" return self._session.obj.run( self._decoded_image_t.obj, feed_dict={self._encoded_image_p.obj: encoded_observation} )
python
def _decode_png(self, encoded_observation): """Decodes a single observation from PNG.""" return self._session.obj.run( self._decoded_image_t.obj, feed_dict={self._encoded_image_p.obj: encoded_observation} )
[ "def", "_decode_png", "(", "self", ",", "encoded_observation", ")", ":", "return", "self", ".", "_session", ".", "obj", ".", "run", "(", "self", ".", "_decoded_image_t", ".", "obj", ",", "feed_dict", "=", "{", "self", ".", "_encoded_image_p", ".", "obj", ":", "encoded_observation", "}", ")" ]
Decodes a single observation from PNG.
[ "Decodes", "a", "single", "observation", "from", "PNG", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L227-L232
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
T2TEnv._encode_observations
def _encode_observations(self, observations): """Encodes observations as PNG.""" return [ Observation( self._session.obj.run( self._encoded_image_t.obj, feed_dict={self._decoded_image_p.obj: observation} ), self._decode_png ) for observation in observations ]
python
def _encode_observations(self, observations): """Encodes observations as PNG.""" return [ Observation( self._session.obj.run( self._encoded_image_t.obj, feed_dict={self._decoded_image_p.obj: observation} ), self._decode_png ) for observation in observations ]
[ "def", "_encode_observations", "(", "self", ",", "observations", ")", ":", "return", "[", "Observation", "(", "self", ".", "_session", ".", "obj", ".", "run", "(", "self", ".", "_encoded_image_t", ".", "obj", ",", "feed_dict", "=", "{", "self", ".", "_decoded_image_p", ".", "obj", ":", "observation", "}", ")", ",", "self", ".", "_decode_png", ")", "for", "observation", "in", "observations", "]" ]
Encodes observations as PNG.
[ "Encodes", "observations", "as", "PNG", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L234-L245
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
T2TEnv.step
def step(self, actions): """Makes a step in all environments. Does any preprocessing and records frames. Args: actions: Batch of actions. Returns: (obs, rewards, dones) - batches of observations, rewards and done flags respectively. Raises: ValueError: when the data for current epoch has already been loaded. """ if self._store_rollouts and \ self._rollouts_by_epoch_and_split[self.current_epoch]: raise ValueError( "Data for current epoch has already been loaded from disk." ) (obs, unclipped_rewards, dones) = self._step(actions) obs = self._preprocess_observations(obs) (min_reward, max_reward) = self.reward_range rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward)) if self._store_rollouts: unclipped_rewards = unclipped_rewards.astype(np.float64) encoded_obs = self._encode_observations(obs) for (rollout, frame, action) in zip( self._current_batch_rollouts, self._current_batch_frames, actions ): rollout.append(frame._replace(action=action)) # orud = (observation, reward, unclipped_reward, done) self._current_batch_frames = [ Frame(*orud, action=None) for orud in zip(encoded_obs, rewards, unclipped_rewards, dones) ] return (obs, rewards, dones)
python
def step(self, actions): """Makes a step in all environments. Does any preprocessing and records frames. Args: actions: Batch of actions. Returns: (obs, rewards, dones) - batches of observations, rewards and done flags respectively. Raises: ValueError: when the data for current epoch has already been loaded. """ if self._store_rollouts and \ self._rollouts_by_epoch_and_split[self.current_epoch]: raise ValueError( "Data for current epoch has already been loaded from disk." ) (obs, unclipped_rewards, dones) = self._step(actions) obs = self._preprocess_observations(obs) (min_reward, max_reward) = self.reward_range rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward)) if self._store_rollouts: unclipped_rewards = unclipped_rewards.astype(np.float64) encoded_obs = self._encode_observations(obs) for (rollout, frame, action) in zip( self._current_batch_rollouts, self._current_batch_frames, actions ): rollout.append(frame._replace(action=action)) # orud = (observation, reward, unclipped_reward, done) self._current_batch_frames = [ Frame(*orud, action=None) for orud in zip(encoded_obs, rewards, unclipped_rewards, dones) ] return (obs, rewards, dones)
[ "def", "step", "(", "self", ",", "actions", ")", ":", "if", "self", ".", "_store_rollouts", "and", "self", ".", "_rollouts_by_epoch_and_split", "[", "self", ".", "current_epoch", "]", ":", "raise", "ValueError", "(", "\"Data for current epoch has already been loaded from disk.\"", ")", "(", "obs", ",", "unclipped_rewards", ",", "dones", ")", "=", "self", ".", "_step", "(", "actions", ")", "obs", "=", "self", ".", "_preprocess_observations", "(", "obs", ")", "(", "min_reward", ",", "max_reward", ")", "=", "self", ".", "reward_range", "rewards", "=", "np", ".", "around", "(", "np", ".", "clip", "(", "unclipped_rewards", ",", "min_reward", ",", "max_reward", ")", ")", "if", "self", ".", "_store_rollouts", ":", "unclipped_rewards", "=", "unclipped_rewards", ".", "astype", "(", "np", ".", "float64", ")", "encoded_obs", "=", "self", ".", "_encode_observations", "(", "obs", ")", "for", "(", "rollout", ",", "frame", ",", "action", ")", "in", "zip", "(", "self", ".", "_current_batch_rollouts", ",", "self", ".", "_current_batch_frames", ",", "actions", ")", ":", "rollout", ".", "append", "(", "frame", ".", "_replace", "(", "action", "=", "action", ")", ")", "# orud = (observation, reward, unclipped_reward, done)", "self", ".", "_current_batch_frames", "=", "[", "Frame", "(", "*", "orud", ",", "action", "=", "None", ")", "for", "orud", "in", "zip", "(", "encoded_obs", ",", "rewards", ",", "unclipped_rewards", ",", "dones", ")", "]", "return", "(", "obs", ",", "rewards", ",", "dones", ")" ]
Makes a step in all environments. Does any preprocessing and records frames. Args: actions: Batch of actions. Returns: (obs, rewards, dones) - batches of observations, rewards and done flags respectively. Raises: ValueError: when the data for current epoch has already been loaded.
[ "Makes", "a", "step", "in", "all", "environments", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L264-L301
train
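The reward handling inside step() — clip to the environment's reward range, then round — is easy to see in isolation with a small NumPy sketch (illustrative values, assumed reward_range of [-1, 1]):

import numpy as np

unclipped_rewards = np.array([2.7, -0.2, 0.0])  # fake batch of three rewards
min_reward, max_reward = -1, 1                  # assumed reward_range

rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward))
print(rewards)  # [ 1. -0.  0.]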
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
T2TEnv.reset
def reset(self, indices=None): """Resets environments at given indices. Does any preprocessing and adds rollouts to history. Args: indices: Indices of environments to reset. Returns: Batch of initial observations of reset environments. Raises: ValueError: when there's no current epoch. """ if self._store_rollouts and self.current_epoch is None: raise ValueError( "No current epoch. start_new_epoch() should first be called." ) if indices is None: indices = np.arange(self.batch_size) new_obs = self._reset(indices) if self._should_preprocess_on_reset: new_obs = self._preprocess_observations(new_obs) if self._store_rollouts: encoded_obs = self._encode_observations(new_obs) for (index, ob) in zip(indices, encoded_obs): frame = self._current_batch_frames[index] if frame is not None: rollout = self._current_batch_rollouts[index] rollout.append(frame._replace(action=0)) self._current_epoch_rollouts.append(rollout) self._current_batch_rollouts[index] = [] self._current_batch_frames[index] = Frame( observation=ob, reward=0, unclipped_reward=0, done=False, action=None ) return new_obs
python
def reset(self, indices=None): """Resets environments at given indices. Does any preprocessing and adds rollouts to history. Args: indices: Indices of environments to reset. Returns: Batch of initial observations of reset environments. Raises: ValueError: when there's no current epoch. """ if self._store_rollouts and self.current_epoch is None: raise ValueError( "No current epoch. start_new_epoch() should first be called." ) if indices is None: indices = np.arange(self.batch_size) new_obs = self._reset(indices) if self._should_preprocess_on_reset: new_obs = self._preprocess_observations(new_obs) if self._store_rollouts: encoded_obs = self._encode_observations(new_obs) for (index, ob) in zip(indices, encoded_obs): frame = self._current_batch_frames[index] if frame is not None: rollout = self._current_batch_rollouts[index] rollout.append(frame._replace(action=0)) self._current_epoch_rollouts.append(rollout) self._current_batch_rollouts[index] = [] self._current_batch_frames[index] = Frame( observation=ob, reward=0, unclipped_reward=0, done=False, action=None ) return new_obs
[ "def", "reset", "(", "self", ",", "indices", "=", "None", ")", ":", "if", "self", ".", "_store_rollouts", "and", "self", ".", "current_epoch", "is", "None", ":", "raise", "ValueError", "(", "\"No current epoch. start_new_epoch() should first be called.\"", ")", "if", "indices", "is", "None", ":", "indices", "=", "np", ".", "arange", "(", "self", ".", "batch_size", ")", "new_obs", "=", "self", ".", "_reset", "(", "indices", ")", "if", "self", ".", "_should_preprocess_on_reset", ":", "new_obs", "=", "self", ".", "_preprocess_observations", "(", "new_obs", ")", "if", "self", ".", "_store_rollouts", ":", "encoded_obs", "=", "self", ".", "_encode_observations", "(", "new_obs", ")", "for", "(", "index", ",", "ob", ")", "in", "zip", "(", "indices", ",", "encoded_obs", ")", ":", "frame", "=", "self", ".", "_current_batch_frames", "[", "index", "]", "if", "frame", "is", "not", "None", ":", "rollout", "=", "self", ".", "_current_batch_rollouts", "[", "index", "]", "rollout", ".", "append", "(", "frame", ".", "_replace", "(", "action", "=", "0", ")", ")", "self", ".", "_current_epoch_rollouts", ".", "append", "(", "rollout", ")", "self", ".", "_current_batch_rollouts", "[", "index", "]", "=", "[", "]", "self", ".", "_current_batch_frames", "[", "index", "]", "=", "Frame", "(", "observation", "=", "ob", ",", "reward", "=", "0", ",", "unclipped_reward", "=", "0", ",", "done", "=", "False", ",", "action", "=", "None", ")", "return", "new_obs" ]
Resets environments at given indices. Does any preprocessing and adds rollouts to history. Args: indices: Indices of environments to reset. Returns: Batch of initial observations of reset environments. Raises: ValueError: when there's no current epoch.
[ "Resets", "environments", "at", "given", "indices", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L314-L351
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
T2TEnv.extra_reading_spec
def extra_reading_spec(self): """Additional data fields to store on disk and their decoders.""" field_names = ("frame_number", "action", "reward", "done") data_fields = { name: tf.FixedLenFeature([1], tf.int64) for name in field_names } decoders = { name: tf.contrib.slim.tfexample_decoder.Tensor(tensor_key=name) for name in field_names } return (data_fields, decoders)
python
def extra_reading_spec(self): """Additional data fields to store on disk and their decoders.""" field_names = ("frame_number", "action", "reward", "done") data_fields = { name: tf.FixedLenFeature([1], tf.int64) for name in field_names } decoders = { name: tf.contrib.slim.tfexample_decoder.Tensor(tensor_key=name) for name in field_names } return (data_fields, decoders)
[ "def", "extra_reading_spec", "(", "self", ")", ":", "field_names", "=", "(", "\"frame_number\"", ",", "\"action\"", ",", "\"reward\"", ",", "\"done\"", ")", "data_fields", "=", "{", "name", ":", "tf", ".", "FixedLenFeature", "(", "[", "1", "]", ",", "tf", ".", "int64", ")", "for", "name", "in", "field_names", "}", "decoders", "=", "{", "name", ":", "tf", ".", "contrib", ".", "slim", ".", "tfexample_decoder", ".", "Tensor", "(", "tensor_key", "=", "name", ")", "for", "name", "in", "field_names", "}", "return", "(", "data_fields", ",", "decoders", ")" ]
Additional data fields to store on disk and their decoders.
[ "Additional", "data", "fields", "to", "store", "on", "disk", "and", "their", "decoders", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L373-L383
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
T2TEnv._split_current_epoch
def _split_current_epoch(self): """Splits frames in the current epoch according to self.dataset_splits. Rollouts can be broken on shard boundary. This is desirable when we have few long rollouts and we want to make sure we have data in the dev set. """ num_frames = self._calc_num_frames(self._current_epoch_rollouts) num_shards = sum(split["shards"] for split in self.dataset_splits) shard_size = num_frames // num_shards splits = self.dataset_splits num_saved_frames = 0 split_index = 0 split_begin_index = 0 rollouts_by_split = collections.defaultdict(list) def split_size(split_index): return splits[split_index]["shards"] * shard_size for rollout in self._current_epoch_rollouts: num_saved_frames_current_rollout = 0 # Split the rollout into chunks corresponding to dataset splits. In most # cases there should be only one chunk. On dataset split boundary there # will be two. If a rollout is longer then the size of a dataset split, # there might be more. while num_saved_frames_current_rollout < len(rollout): max_chunk_length = ( split_begin_index + split_size(split_index) - num_saved_frames ) if split_index == len(splits) - 1: # Put the remainder in the last split to preserve the ordering. max_chunk_length = len(rollout) rollout_chunk = rollout[ num_saved_frames_current_rollout: (num_saved_frames_current_rollout + max_chunk_length) ] rollouts_by_split[splits[split_index]["split"]].append(rollout_chunk) num_saved_frames_current_rollout += len(rollout_chunk) num_saved_frames += len(rollout_chunk) if num_saved_frames == split_begin_index + split_size(split_index): split_begin_index += split_size(split_index) split_index = min(split_index + 1, len(splits) - 1) self._rollouts_by_epoch_and_split[self.current_epoch] = rollouts_by_split self._current_epoch_rollouts = []
python
def _split_current_epoch(self): """Splits frames in the current epoch according to self.dataset_splits. Rollouts can be broken on shard boundary. This is desirable when we have few long rollouts and we want to make sure we have data in the dev set. """ num_frames = self._calc_num_frames(self._current_epoch_rollouts) num_shards = sum(split["shards"] for split in self.dataset_splits) shard_size = num_frames // num_shards splits = self.dataset_splits num_saved_frames = 0 split_index = 0 split_begin_index = 0 rollouts_by_split = collections.defaultdict(list) def split_size(split_index): return splits[split_index]["shards"] * shard_size for rollout in self._current_epoch_rollouts: num_saved_frames_current_rollout = 0 # Split the rollout into chunks corresponding to dataset splits. In most # cases there should be only one chunk. On dataset split boundary there # will be two. If a rollout is longer then the size of a dataset split, # there might be more. while num_saved_frames_current_rollout < len(rollout): max_chunk_length = ( split_begin_index + split_size(split_index) - num_saved_frames ) if split_index == len(splits) - 1: # Put the remainder in the last split to preserve the ordering. max_chunk_length = len(rollout) rollout_chunk = rollout[ num_saved_frames_current_rollout: (num_saved_frames_current_rollout + max_chunk_length) ] rollouts_by_split[splits[split_index]["split"]].append(rollout_chunk) num_saved_frames_current_rollout += len(rollout_chunk) num_saved_frames += len(rollout_chunk) if num_saved_frames == split_begin_index + split_size(split_index): split_begin_index += split_size(split_index) split_index = min(split_index + 1, len(splits) - 1) self._rollouts_by_epoch_and_split[self.current_epoch] = rollouts_by_split self._current_epoch_rollouts = []
[ "def", "_split_current_epoch", "(", "self", ")", ":", "num_frames", "=", "self", ".", "_calc_num_frames", "(", "self", ".", "_current_epoch_rollouts", ")", "num_shards", "=", "sum", "(", "split", "[", "\"shards\"", "]", "for", "split", "in", "self", ".", "dataset_splits", ")", "shard_size", "=", "num_frames", "//", "num_shards", "splits", "=", "self", ".", "dataset_splits", "num_saved_frames", "=", "0", "split_index", "=", "0", "split_begin_index", "=", "0", "rollouts_by_split", "=", "collections", ".", "defaultdict", "(", "list", ")", "def", "split_size", "(", "split_index", ")", ":", "return", "splits", "[", "split_index", "]", "[", "\"shards\"", "]", "*", "shard_size", "for", "rollout", "in", "self", ".", "_current_epoch_rollouts", ":", "num_saved_frames_current_rollout", "=", "0", "# Split the rollout into chunks corresponding to dataset splits. In most", "# cases there should be only one chunk. On dataset split boundary there", "# will be two. If a rollout is longer then the size of a dataset split,", "# there might be more.", "while", "num_saved_frames_current_rollout", "<", "len", "(", "rollout", ")", ":", "max_chunk_length", "=", "(", "split_begin_index", "+", "split_size", "(", "split_index", ")", "-", "num_saved_frames", ")", "if", "split_index", "==", "len", "(", "splits", ")", "-", "1", ":", "# Put the remainder in the last split to preserve the ordering.", "max_chunk_length", "=", "len", "(", "rollout", ")", "rollout_chunk", "=", "rollout", "[", "num_saved_frames_current_rollout", ":", "(", "num_saved_frames_current_rollout", "+", "max_chunk_length", ")", "]", "rollouts_by_split", "[", "splits", "[", "split_index", "]", "[", "\"split\"", "]", "]", ".", "append", "(", "rollout_chunk", ")", "num_saved_frames_current_rollout", "+=", "len", "(", "rollout_chunk", ")", "num_saved_frames", "+=", "len", "(", "rollout_chunk", ")", "if", "num_saved_frames", "==", "split_begin_index", "+", "split_size", "(", "split_index", ")", ":", "split_begin_index", "+=", "split_size", "(", "split_index", ")", "split_index", "=", "min", "(", "split_index", "+", "1", ",", "len", "(", "splits", ")", "-", "1", ")", "self", ".", "_rollouts_by_epoch_and_split", "[", "self", ".", "current_epoch", "]", "=", "rollouts_by_split", "self", ".", "_current_epoch_rollouts", "=", "[", "]" ]
Splits frames in the current epoch according to self.dataset_splits. Rollouts can be broken on shard boundary. This is desirable when we have few long rollouts and we want to make sure we have data in the dev set.
[ "Splits", "frames", "in", "the", "current", "epoch", "according", "to", "self", ".", "dataset_splits", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L417-L462
train
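A pure-Python sketch, simplified and assumed rather than copied from _split_current_epoch, of the proportional splitting idea: total frames are divided into equal shards, each split gets a budget of shards * shard_size frames, rollouts are cut on the boundary, and the remainder stays in the last split.

import collections

splits = [{"split": "train", "shards": 9}, {"split": "dev", "shards": 1}]
rollouts = [list(range(37)), list(range(25))]  # two fake rollouts of frames

num_frames = sum(len(r) for r in rollouts)
shard_size = num_frames // sum(s["shards"] for s in splits)
budgets = [s["shards"] * shard_size for s in splits]

rollouts_by_split = collections.defaultdict(list)
split_index = 0
for rollout in rollouts:
  start = 0
  while start < len(rollout):
    if split_index == len(splits) - 1:
      chunk = rollout[start:]  # remainder goes to the last split
    else:
      chunk = rollout[start:start + budgets[split_index]]
    rollouts_by_split[splits[split_index]["split"]].append(chunk)
    budgets[split_index] -= len(chunk)
    start += len(chunk)
    if budgets[split_index] <= 0 and split_index < len(splits) - 1:
      split_index += 1

print({k: sum(map(len, v)) for k, v in rollouts_by_split.items()})
# {'train': 54, 'dev': 8}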
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
T2TEnv.splits_and_paths
def splits_and_paths(self, data_dir): """List of pairs (split, paths) for the current epoch.""" filepath_fns = { problem.DatasetSplit.TRAIN: self.training_filepaths, problem.DatasetSplit.EVAL: self.dev_filepaths, problem.DatasetSplit.TEST: self.test_filepaths, } def append_epoch(paths): return [ "{}.{}".format(path, self.current_epoch) for path in paths ] # We set shuffled=True as we don't want to shuffle on disk later. return [ (split["split"], append_epoch(filepath_fns[split["split"]]( data_dir, split["shards"], shuffled=True ))) for split in self.dataset_splits ]
python
def splits_and_paths(self, data_dir): """List of pairs (split, paths) for the current epoch.""" filepath_fns = { problem.DatasetSplit.TRAIN: self.training_filepaths, problem.DatasetSplit.EVAL: self.dev_filepaths, problem.DatasetSplit.TEST: self.test_filepaths, } def append_epoch(paths): return [ "{}.{}".format(path, self.current_epoch) for path in paths ] # We set shuffled=True as we don't want to shuffle on disk later. return [ (split["split"], append_epoch(filepath_fns[split["split"]]( data_dir, split["shards"], shuffled=True ))) for split in self.dataset_splits ]
[ "def", "splits_and_paths", "(", "self", ",", "data_dir", ")", ":", "filepath_fns", "=", "{", "problem", ".", "DatasetSplit", ".", "TRAIN", ":", "self", ".", "training_filepaths", ",", "problem", ".", "DatasetSplit", ".", "EVAL", ":", "self", ".", "dev_filepaths", ",", "problem", ".", "DatasetSplit", ".", "TEST", ":", "self", ".", "test_filepaths", ",", "}", "def", "append_epoch", "(", "paths", ")", ":", "return", "[", "\"{}.{}\"", ".", "format", "(", "path", ",", "self", ".", "current_epoch", ")", "for", "path", "in", "paths", "]", "# We set shuffled=True as we don't want to shuffle on disk later.", "return", "[", "(", "split", "[", "\"split\"", "]", ",", "append_epoch", "(", "filepath_fns", "[", "split", "[", "\"split\"", "]", "]", "(", "data_dir", ",", "split", "[", "\"shards\"", "]", ",", "shuffled", "=", "True", ")", ")", ")", "for", "split", "in", "self", ".", "dataset_splits", "]" ]
List of pairs (split, paths) for the current epoch.
[ "List", "of", "pairs", "(", "split", "paths", ")", "for", "the", "current", "epoch", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L464-L484
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
T2TEnv.generate_data
def generate_data(self, data_dir, tmp_dir=None, task_id=-1): """Saves the current epoch rollouts to disk, split into train/dev sets.""" if not self._rollouts_by_epoch_and_split[self.current_epoch]: # Data not loaded from disk. self._split_current_epoch() rollouts_by_split = self._rollouts_by_epoch_and_split[self.current_epoch] splits_and_paths = self.splits_and_paths(data_dir) for (split, paths) in splits_and_paths: rollouts = rollouts_by_split[split] num_frames = self._calc_num_frames(rollouts) shard_size = num_frames // len(paths) frame_gen = self._generate_frames(rollouts) for (path_index, path) in enumerate(paths): limit = shard_size # Put the remainder in the last shard to preserve the ordering. if path_index == len(paths) - 1: limit = None generator_utils.generate_files( itertools.islice(frame_gen, limit), [path], cycle_every_n=float("inf") )
python
def generate_data(self, data_dir, tmp_dir=None, task_id=-1): """Saves the current epoch rollouts to disk, split into train/dev sets.""" if not self._rollouts_by_epoch_and_split[self.current_epoch]: # Data not loaded from disk. self._split_current_epoch() rollouts_by_split = self._rollouts_by_epoch_and_split[self.current_epoch] splits_and_paths = self.splits_and_paths(data_dir) for (split, paths) in splits_and_paths: rollouts = rollouts_by_split[split] num_frames = self._calc_num_frames(rollouts) shard_size = num_frames // len(paths) frame_gen = self._generate_frames(rollouts) for (path_index, path) in enumerate(paths): limit = shard_size # Put the remainder in the last shard to preserve the ordering. if path_index == len(paths) - 1: limit = None generator_utils.generate_files( itertools.islice(frame_gen, limit), [path], cycle_every_n=float("inf") )
[ "def", "generate_data", "(", "self", ",", "data_dir", ",", "tmp_dir", "=", "None", ",", "task_id", "=", "-", "1", ")", ":", "if", "not", "self", ".", "_rollouts_by_epoch_and_split", "[", "self", ".", "current_epoch", "]", ":", "# Data not loaded from disk.", "self", ".", "_split_current_epoch", "(", ")", "rollouts_by_split", "=", "self", ".", "_rollouts_by_epoch_and_split", "[", "self", ".", "current_epoch", "]", "splits_and_paths", "=", "self", ".", "splits_and_paths", "(", "data_dir", ")", "for", "(", "split", ",", "paths", ")", "in", "splits_and_paths", ":", "rollouts", "=", "rollouts_by_split", "[", "split", "]", "num_frames", "=", "self", ".", "_calc_num_frames", "(", "rollouts", ")", "shard_size", "=", "num_frames", "//", "len", "(", "paths", ")", "frame_gen", "=", "self", ".", "_generate_frames", "(", "rollouts", ")", "for", "(", "path_index", ",", "path", ")", "in", "enumerate", "(", "paths", ")", ":", "limit", "=", "shard_size", "# Put the remainder in the last shard to preserve the ordering.", "if", "path_index", "==", "len", "(", "paths", ")", "-", "1", ":", "limit", "=", "None", "generator_utils", ".", "generate_files", "(", "itertools", ".", "islice", "(", "frame_gen", ",", "limit", ")", ",", "[", "path", "]", ",", "cycle_every_n", "=", "float", "(", "\"inf\"", ")", ")" ]
Saves the current epoch rollouts to disk, split into train/dev sets.
[ "Saves", "the", "current", "epoch", "rollouts", "to", "disk", "split", "into", "train", "/", "dev", "sets", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L494-L517
train
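The sharding in generate_data — slicing one frame generator into fixed-size shards with the remainder in the last shard — can be sketched with made-up data and no TFRecord writing:

import itertools

frames = iter(range(10))        # stand-in for the frame generator
paths = ["shard-0", "shard-1", "shard-2"]
shard_size = 10 // len(paths)   # 3 frames per shard

for path_index, path in enumerate(paths):
  limit = shard_size
  if path_index == len(paths) - 1:
    limit = None                # last shard takes whatever is left
  print(path, list(itertools.islice(frames, limit)))
# shard-0 [0, 1, 2]
# shard-1 [3, 4, 5]
# shard-2 [6, 7, 8, 9]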
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
T2TGymEnv.set_initial_state
def set_initial_state(self, initial_state, initial_frames): """Sets the state that will be used on next reset.""" self._initial_state = initial_state self._initial_frames = initial_frames[:, -1, ...] self._should_preprocess_on_reset = False
python
def set_initial_state(self, initial_state, initial_frames): """Sets the state that will be used on next reset.""" self._initial_state = initial_state self._initial_frames = initial_frames[:, -1, ...] self._should_preprocess_on_reset = False
[ "def", "set_initial_state", "(", "self", ",", "initial_state", ",", "initial_frames", ")", ":", "self", ".", "_initial_state", "=", "initial_state", "self", ".", "_initial_frames", "=", "initial_frames", "[", ":", ",", "-", "1", ",", "...", "]", "self", ".", "_should_preprocess_on_reset", "=", "False" ]
Sets the state that will be used on next reset.
[ "Sets", "the", "state", "that", "will", "be", "used", "on", "next", "reset", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L723-L727
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/image_utils.py
image_to_tf_summary_value
def image_to_tf_summary_value(image, tag): """Converts a NumPy image to a tf.Summary.Value object. Args: image: 3-D NumPy array. tag: name for tf.Summary.Value for display in tensorboard. Returns: image_summary: A tf.Summary.Value object. """ curr_image = np.asarray(image, dtype=np.uint8) height, width, n_channels = curr_image.shape # If monochrome image, then reshape to [height, width] if n_channels == 1: curr_image = np.reshape(curr_image, [height, width]) s = io.BytesIO() matplotlib_pyplot().imsave(s, curr_image, format="png") img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=height, width=width, colorspace=n_channels) return tf.Summary.Value(tag=tag, image=img_sum)
python
def image_to_tf_summary_value(image, tag): """Converts a NumPy image to a tf.Summary.Value object. Args: image: 3-D NumPy array. tag: name for tf.Summary.Value for display in tensorboard. Returns: image_summary: A tf.Summary.Value object. """ curr_image = np.asarray(image, dtype=np.uint8) height, width, n_channels = curr_image.shape # If monochrome image, then reshape to [height, width] if n_channels == 1: curr_image = np.reshape(curr_image, [height, width]) s = io.BytesIO() matplotlib_pyplot().imsave(s, curr_image, format="png") img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=height, width=width, colorspace=n_channels) return tf.Summary.Value(tag=tag, image=img_sum)
[ "def", "image_to_tf_summary_value", "(", "image", ",", "tag", ")", ":", "curr_image", "=", "np", ".", "asarray", "(", "image", ",", "dtype", "=", "np", ".", "uint8", ")", "height", ",", "width", ",", "n_channels", "=", "curr_image", ".", "shape", "# If monochrome image, then reshape to [height, width]", "if", "n_channels", "==", "1", ":", "curr_image", "=", "np", ".", "reshape", "(", "curr_image", ",", "[", "height", ",", "width", "]", ")", "s", "=", "io", ".", "BytesIO", "(", ")", "matplotlib_pyplot", "(", ")", ".", "imsave", "(", "s", ",", "curr_image", ",", "format", "=", "\"png\"", ")", "img_sum", "=", "tf", ".", "Summary", ".", "Image", "(", "encoded_image_string", "=", "s", ".", "getvalue", "(", ")", ",", "height", "=", "height", ",", "width", "=", "width", ",", "colorspace", "=", "n_channels", ")", "return", "tf", ".", "Summary", ".", "Value", "(", "tag", "=", "tag", ",", "image", "=", "img_sum", ")" ]
Converts a NumPy image to a tf.Summary.Value object. Args: image: 3-D NumPy array. tag: name for tf.Summary.Value for display in tensorboard. Returns: image_summary: A tf.Summary.Value object.
[ "Converts", "a", "NumPy", "image", "to", "a", "tf", ".", "Summary", ".", "Value", "object", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_utils.py#L43-L62
train
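The PNG-encoding step used by image_to_tf_summary_value can be tried without TensorFlow, since matplotlib's imsave writes PNG bytes into an in-memory buffer. A small sketch with a random image (illustrative only):

import io

import matplotlib
matplotlib.use("Agg")  # headless backend, in case no display is available
import matplotlib.pyplot as plt
import numpy as np

image = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)

buf = io.BytesIO()
plt.imsave(buf, image, format="png")
png_bytes = buf.getvalue()
print(len(png_bytes), png_bytes[:4])  # PNG payloads start with b'\x89PNG'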
tensorflow/tensor2tensor
tensor2tensor/data_generators/image_utils.py
convert_predictions_to_image_summaries
def convert_predictions_to_image_summaries(hook_args): """Optionally converts images from hooks_args to image summaries. Args: hook_args: DecodeHookArgs namedtuple Returns: summaries: list of tf.Summary values if hook_args.decode_hpara """ decode_hparams = hook_args.decode_hparams if not decode_hparams.display_decoded_images: return [] predictions = hook_args.predictions[0] # Display ten random inputs and outputs so that tensorboard does not hang. all_summaries = [] rand_predictions = np.random.choice(predictions, size=10) for ind, prediction in enumerate(rand_predictions): output_summary = image_to_tf_summary_value( prediction["outputs"], tag="%d_output" % ind) input_summary = image_to_tf_summary_value( prediction["inputs"], tag="%d_input" % ind) all_summaries.append(input_summary) all_summaries.append(output_summary) return all_summaries
python
def convert_predictions_to_image_summaries(hook_args): """Optionally converts images from hooks_args to image summaries. Args: hook_args: DecodeHookArgs namedtuple Returns: summaries: list of tf.Summary values if hook_args.decode_hpara """ decode_hparams = hook_args.decode_hparams if not decode_hparams.display_decoded_images: return [] predictions = hook_args.predictions[0] # Display ten random inputs and outputs so that tensorboard does not hang. all_summaries = [] rand_predictions = np.random.choice(predictions, size=10) for ind, prediction in enumerate(rand_predictions): output_summary = image_to_tf_summary_value( prediction["outputs"], tag="%d_output" % ind) input_summary = image_to_tf_summary_value( prediction["inputs"], tag="%d_input" % ind) all_summaries.append(input_summary) all_summaries.append(output_summary) return all_summaries
[ "def", "convert_predictions_to_image_summaries", "(", "hook_args", ")", ":", "decode_hparams", "=", "hook_args", ".", "decode_hparams", "if", "not", "decode_hparams", ".", "display_decoded_images", ":", "return", "[", "]", "predictions", "=", "hook_args", ".", "predictions", "[", "0", "]", "# Display ten random inputs and outputs so that tensorboard does not hang.", "all_summaries", "=", "[", "]", "rand_predictions", "=", "np", ".", "random", ".", "choice", "(", "predictions", ",", "size", "=", "10", ")", "for", "ind", ",", "prediction", "in", "enumerate", "(", "rand_predictions", ")", ":", "output_summary", "=", "image_to_tf_summary_value", "(", "prediction", "[", "\"outputs\"", "]", ",", "tag", "=", "\"%d_output\"", "%", "ind", ")", "input_summary", "=", "image_to_tf_summary_value", "(", "prediction", "[", "\"inputs\"", "]", ",", "tag", "=", "\"%d_input\"", "%", "ind", ")", "all_summaries", ".", "append", "(", "input_summary", ")", "all_summaries", ".", "append", "(", "output_summary", ")", "return", "all_summaries" ]
Optionally converts images from hooks_args to image summaries. Args: hook_args: DecodeHookArgs namedtuple Returns: summaries: list of tf.Summary values if hook_args.decode_hparams.display_decoded_images is set, otherwise an empty list.
[ "Optionally", "converts", "images", "from", "hooks_args", "to", "image", "summaries", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_utils.py#L65-L88
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/image_utils.py
resize_by_area
def resize_by_area(img, size): """image resize function used by quite a few image problems.""" return tf.to_int64( tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA))
python
def resize_by_area(img, size): """image resize function used by quite a few image problems.""" return tf.to_int64( tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA))
[ "def", "resize_by_area", "(", "img", ",", "size", ")", ":", "return", "tf", ".", "to_int64", "(", "tf", ".", "image", ".", "resize_images", "(", "img", ",", "[", "size", ",", "size", "]", ",", "tf", ".", "image", ".", "ResizeMethod", ".", "AREA", ")", ")" ]
image resize function used by quite a few image problems.
[ "image", "resize", "function", "used", "by", "quite", "a", "few", "image", "problems", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_utils.py#L91-L94
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/image_utils.py
make_multiscale
def make_multiscale(image, resolutions, resize_method=tf.image.ResizeMethod.BICUBIC, num_channels=3): """Returns list of scaled images, one for each resolution. Args: image: Tensor of shape [height, height, num_channels]. resolutions: List of heights that image's height is resized to. resize_method: tf.image.ResizeMethod. num_channels: Number of channels in image. Returns: List of Tensors, one for each resolution with shape given by [resolutions[i], resolutions[i], num_channels]. """ scaled_images = [] for height in resolutions: scaled_image = tf.image.resize_images( image, size=[height, height], # assuming that height = width method=resize_method) scaled_image = tf.to_int64(scaled_image) scaled_image.set_shape([height, height, num_channels]) scaled_images.append(scaled_image) return scaled_images
python
def make_multiscale(image, resolutions, resize_method=tf.image.ResizeMethod.BICUBIC, num_channels=3): """Returns list of scaled images, one for each resolution. Args: image: Tensor of shape [height, height, num_channels]. resolutions: List of heights that image's height is resized to. resize_method: tf.image.ResizeMethod. num_channels: Number of channels in image. Returns: List of Tensors, one for each resolution with shape given by [resolutions[i], resolutions[i], num_channels]. """ scaled_images = [] for height in resolutions: scaled_image = tf.image.resize_images( image, size=[height, height], # assuming that height = width method=resize_method) scaled_image = tf.to_int64(scaled_image) scaled_image.set_shape([height, height, num_channels]) scaled_images.append(scaled_image) return scaled_images
[ "def", "make_multiscale", "(", "image", ",", "resolutions", ",", "resize_method", "=", "tf", ".", "image", ".", "ResizeMethod", ".", "BICUBIC", ",", "num_channels", "=", "3", ")", ":", "scaled_images", "=", "[", "]", "for", "height", "in", "resolutions", ":", "scaled_image", "=", "tf", ".", "image", ".", "resize_images", "(", "image", ",", "size", "=", "[", "height", ",", "height", "]", ",", "# assuming that height = width", "method", "=", "resize_method", ")", "scaled_image", "=", "tf", ".", "to_int64", "(", "scaled_image", ")", "scaled_image", ".", "set_shape", "(", "[", "height", ",", "height", ",", "num_channels", "]", ")", "scaled_images", ".", "append", "(", "scaled_image", ")", "return", "scaled_images" ]
Returns list of scaled images, one for each resolution. Args: image: Tensor of shape [height, height, num_channels]. resolutions: List of heights that image's height is resized to. resize_method: tf.image.ResizeMethod. num_channels: Number of channels in image. Returns: List of Tensors, one for each resolution with shape given by [resolutions[i], resolutions[i], num_channels].
[ "Returns", "list", "of", "scaled", "images", "one", "for", "each", "resolution", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_utils.py#L97-L122
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/image_utils.py
make_multiscale_dilated
def make_multiscale_dilated(image, resolutions, num_channels=3): """Returns list of scaled images, one for each resolution. Resizes by skipping every nth pixel. Args: image: Tensor of shape [height, height, num_channels]. resolutions: List of heights that image's height is resized to. The function assumes VALID padding, so the original image's height must be divisible by each resolution's height to return the exact resolution size. num_channels: Number of channels in image. Returns: List of Tensors, one for each resolution with shape given by [resolutions[i], resolutions[i], num_channels] if resolutions properly divide the original image's height; otherwise shape height and width is up to valid skips. """ image_height = common_layers.shape_list(image)[0] scaled_images = [] for height in resolutions: dilation_rate = image_height // height # assuming height = width scaled_image = image[::dilation_rate, ::dilation_rate] scaled_image = tf.to_int64(scaled_image) scaled_image.set_shape([None, None, num_channels]) scaled_images.append(scaled_image) return scaled_images
python
def make_multiscale_dilated(image, resolutions, num_channels=3): """Returns list of scaled images, one for each resolution. Resizes by skipping every nth pixel. Args: image: Tensor of shape [height, height, num_channels]. resolutions: List of heights that image's height is resized to. The function assumes VALID padding, so the original image's height must be divisible by each resolution's height to return the exact resolution size. num_channels: Number of channels in image. Returns: List of Tensors, one for each resolution with shape given by [resolutions[i], resolutions[i], num_channels] if resolutions properly divide the original image's height; otherwise shape height and width is up to valid skips. """ image_height = common_layers.shape_list(image)[0] scaled_images = [] for height in resolutions: dilation_rate = image_height // height # assuming height = width scaled_image = image[::dilation_rate, ::dilation_rate] scaled_image = tf.to_int64(scaled_image) scaled_image.set_shape([None, None, num_channels]) scaled_images.append(scaled_image) return scaled_images
[ "def", "make_multiscale_dilated", "(", "image", ",", "resolutions", ",", "num_channels", "=", "3", ")", ":", "image_height", "=", "common_layers", ".", "shape_list", "(", "image", ")", "[", "0", "]", "scaled_images", "=", "[", "]", "for", "height", "in", "resolutions", ":", "dilation_rate", "=", "image_height", "//", "height", "# assuming height = width", "scaled_image", "=", "image", "[", ":", ":", "dilation_rate", ",", ":", ":", "dilation_rate", "]", "scaled_image", "=", "tf", ".", "to_int64", "(", "scaled_image", ")", "scaled_image", ".", "set_shape", "(", "[", "None", ",", "None", ",", "num_channels", "]", ")", "scaled_images", ".", "append", "(", "scaled_image", ")", "return", "scaled_images" ]
Returns list of scaled images, one for each resolution. Resizes by skipping every nth pixel. Args: image: Tensor of shape [height, height, num_channels]. resolutions: List of heights that image's height is resized to. The function assumes VALID padding, so the original image's height must be divisible by each resolution's height to return the exact resolution size. num_channels: Number of channels in image. Returns: List of Tensors, one for each resolution with shape given by [resolutions[i], resolutions[i], num_channels] if resolutions properly divide the original image's height; otherwise shape height and width is up to valid skips.
[ "Returns", "list", "of", "scaled", "images", "one", "for", "each", "resolution", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_utils.py#L125-L151
train
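The "resize by skipping every nth pixel" idea in make_multiscale_dilated is plain array striding; a NumPy sketch with illustrative sizes:

import numpy as np

image = np.arange(64 * 64 * 3).reshape(64, 64, 3)  # fake 64x64 RGB image
resolutions = [32, 16, 8]

scaled_images = []
for height in resolutions:
  dilation_rate = image.shape[0] // height  # assumes height == width
  scaled_images.append(image[::dilation_rate, ::dilation_rate])

print([img.shape for img in scaled_images])
# [(32, 32, 3), (16, 16, 3), (8, 8, 3)]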
tensorflow/tensor2tensor
tensor2tensor/data_generators/image_utils.py
encode_images_as_png
def encode_images_as_png(images): """Yield images encoded as pngs.""" if tf.executing_eagerly(): for image in images: yield tf.image.encode_png(image).numpy() else: (height, width, channels) = images[0].shape with tf.Graph().as_default(): image_t = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels)) encoded_image_t = tf.image.encode_png(image_t) with tf.Session() as sess: for image in images: enc_string = sess.run(encoded_image_t, feed_dict={image_t: image}) yield enc_string
python
def encode_images_as_png(images): """Yield images encoded as pngs.""" if tf.executing_eagerly(): for image in images: yield tf.image.encode_png(image).numpy() else: (height, width, channels) = images[0].shape with tf.Graph().as_default(): image_t = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels)) encoded_image_t = tf.image.encode_png(image_t) with tf.Session() as sess: for image in images: enc_string = sess.run(encoded_image_t, feed_dict={image_t: image}) yield enc_string
[ "def", "encode_images_as_png", "(", "images", ")", ":", "if", "tf", ".", "executing_eagerly", "(", ")", ":", "for", "image", "in", "images", ":", "yield", "tf", ".", "image", ".", "encode_png", "(", "image", ")", ".", "numpy", "(", ")", "else", ":", "(", "height", ",", "width", ",", "channels", ")", "=", "images", "[", "0", "]", ".", "shape", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "image_t", "=", "tf", ".", "placeholder", "(", "dtype", "=", "tf", ".", "uint8", ",", "shape", "=", "(", "height", ",", "width", ",", "channels", ")", ")", "encoded_image_t", "=", "tf", ".", "image", ".", "encode_png", "(", "image_t", ")", "with", "tf", ".", "Session", "(", ")", "as", "sess", ":", "for", "image", "in", "images", ":", "enc_string", "=", "sess", ".", "run", "(", "encoded_image_t", ",", "feed_dict", "=", "{", "image_t", ":", "image", "}", ")", "yield", "enc_string" ]
Yield images encoded as pngs.
[ "Yield", "images", "encoded", "as", "pngs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_utils.py#L266-L279
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/image_utils.py
image_generator
def image_generator(images, labels): """Generator for images that takes image and labels lists and creates pngs. Args: images: list of images given as [width x height x channels] numpy arrays. labels: list of ints, same length as images. Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as PNG, * image/format: the string "png" representing image format, * image/class/label: an integer representing the label, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a singleton list of the corresponding type. Raises: ValueError: if images is an empty list. """ if not images: raise ValueError("Must provide some images for the generator.") width, height, _ = images[0].shape for (enc_image, label) in zip(encode_images_as_png(images), labels): yield { "image/encoded": [enc_image], "image/format": ["png"], "image/class/label": [int(label)], "image/height": [height], "image/width": [width] }
python
def image_generator(images, labels): """Generator for images that takes image and labels lists and creates pngs. Args: images: list of images given as [width x height x channels] numpy arrays. labels: list of ints, same length as images. Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as PNG, * image/format: the string "png" representing image format, * image/class/label: an integer representing the label, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a singleton list of the corresponding type. Raises: ValueError: if images is an empty list. """ if not images: raise ValueError("Must provide some images for the generator.") width, height, _ = images[0].shape for (enc_image, label) in zip(encode_images_as_png(images), labels): yield { "image/encoded": [enc_image], "image/format": ["png"], "image/class/label": [int(label)], "image/height": [height], "image/width": [width] }
[ "def", "image_generator", "(", "images", ",", "labels", ")", ":", "if", "not", "images", ":", "raise", "ValueError", "(", "\"Must provide some images for the generator.\"", ")", "width", ",", "height", ",", "_", "=", "images", "[", "0", "]", ".", "shape", "for", "(", "enc_image", ",", "label", ")", "in", "zip", "(", "encode_images_as_png", "(", "images", ")", ",", "labels", ")", ":", "yield", "{", "\"image/encoded\"", ":", "[", "enc_image", "]", ",", "\"image/format\"", ":", "[", "\"png\"", "]", ",", "\"image/class/label\"", ":", "[", "int", "(", "label", ")", "]", ",", "\"image/height\"", ":", "[", "height", "]", ",", "\"image/width\"", ":", "[", "width", "]", "}" ]
Generator for images that takes image and labels lists and creates pngs. Args: images: list of images given as [width x height x channels] numpy arrays. labels: list of ints, same length as images. Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as PNG, * image/format: the string "png" representing image format, * image/class/label: an integer representing the label, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a singleton list of the corresponding type. Raises: ValueError: if images is an empty list.
[ "Generator", "for", "images", "that", "takes", "image", "and", "labels", "lists", "and", "creates", "pngs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_utils.py#L282-L311
train
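The dictionaries yielded by image_generator have a fixed shape; the hedged sketch below builds one example by hand, faking the encoded bytes so the snippet stays free of TensorFlow:

import numpy as np

image = np.zeros((28, 28, 1), dtype=np.uint8)
fake_png_bytes = b"\x89PNG..."  # placeholder; the real generator encodes with TF

width, height, _ = image.shape
example = {
    "image/encoded": [fake_png_bytes],
    "image/format": ["png"],
    "image/class/label": [int(7)],
    "image/height": [height],
    "image/width": [width],
}
print(sorted(example))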
tensorflow/tensor2tensor
tensor2tensor/data_generators/image_utils.py
image_augmentation
def image_augmentation(images, do_colors=False, crop_size=None): """Image augmentation: cropping, flipping, and color transforms.""" if crop_size is None: crop_size = [299, 299] images = tf.random_crop(images, crop_size + [3]) images = tf.image.random_flip_left_right(images) if do_colors: # More augmentation, but might be slow. images = tf.image.random_brightness(images, max_delta=32. / 255.) images = tf.image.random_saturation(images, lower=0.5, upper=1.5) images = tf.image.random_hue(images, max_delta=0.2) images = tf.image.random_contrast(images, lower=0.5, upper=1.5) return images
python
def image_augmentation(images, do_colors=False, crop_size=None): """Image augmentation: cropping, flipping, and color transforms.""" if crop_size is None: crop_size = [299, 299] images = tf.random_crop(images, crop_size + [3]) images = tf.image.random_flip_left_right(images) if do_colors: # More augmentation, but might be slow. images = tf.image.random_brightness(images, max_delta=32. / 255.) images = tf.image.random_saturation(images, lower=0.5, upper=1.5) images = tf.image.random_hue(images, max_delta=0.2) images = tf.image.random_contrast(images, lower=0.5, upper=1.5) return images
[ "def", "image_augmentation", "(", "images", ",", "do_colors", "=", "False", ",", "crop_size", "=", "None", ")", ":", "if", "crop_size", "is", "None", ":", "crop_size", "=", "[", "299", ",", "299", "]", "images", "=", "tf", ".", "random_crop", "(", "images", ",", "crop_size", "+", "[", "3", "]", ")", "images", "=", "tf", ".", "image", ".", "random_flip_left_right", "(", "images", ")", "if", "do_colors", ":", "# More augmentation, but might be slow.", "images", "=", "tf", ".", "image", ".", "random_brightness", "(", "images", ",", "max_delta", "=", "32.", "/", "255.", ")", "images", "=", "tf", ".", "image", ".", "random_saturation", "(", "images", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "images", "=", "tf", ".", "image", ".", "random_hue", "(", "images", ",", "max_delta", "=", "0.2", ")", "images", "=", "tf", ".", "image", ".", "random_contrast", "(", "images", ",", "lower", "=", "0.5", ",", "upper", "=", "1.5", ")", "return", "images" ]
Image augmentation: cropping, flipping, and color transforms.
[ "Image", "augmentation", ":", "cropping", "flipping", "and", "color", "transforms", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_utils.py#L378-L389
train
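A hedged sketch of calling image_augmentation on a single decoded image tensor; the input shape and import path are assumptions for illustration.

# Sketch (TF 1.x graph mode assumed): crop, flip, and color-jitter one 320x320x3 image.
import tensorflow as tf
from tensor2tensor.data_generators import image_utils  # assumed import path

image = tf.random_uniform([320, 320, 3])  # stand-in for a decoded image
augmented = image_utils.image_augmentation(image, do_colors=True, crop_size=[299, 299])
# augmented has shape [299, 299, 3]; the color transforms run because do_colors=True.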
tensorflow/tensor2tensor
tensor2tensor/data_generators/image_utils.py
cifar_image_augmentation
def cifar_image_augmentation(images): """Image augmentation suitable for CIFAR-10/100. As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5). Args: images: a Tensor. Returns: Tensor of the same shape as images. """ images = tf.image.resize_image_with_crop_or_pad(images, 40, 40) images = tf.random_crop(images, [32, 32, 3]) images = tf.image.random_flip_left_right(images) return images
python
def cifar_image_augmentation(images): """Image augmentation suitable for CIFAR-10/100. As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5). Args: images: a Tensor. Returns: Tensor of the same shape as images. """ images = tf.image.resize_image_with_crop_or_pad(images, 40, 40) images = tf.random_crop(images, [32, 32, 3]) images = tf.image.random_flip_left_right(images) return images
[ "def", "cifar_image_augmentation", "(", "images", ")", ":", "images", "=", "tf", ".", "image", ".", "resize_image_with_crop_or_pad", "(", "images", ",", "40", ",", "40", ")", "images", "=", "tf", ".", "random_crop", "(", "images", ",", "[", "32", ",", "32", ",", "3", "]", ")", "images", "=", "tf", ".", "image", ".", "random_flip_left_right", "(", "images", ")", "return", "images" ]
Image augmentation suitable for CIFAR-10/100. As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5). Args: images: a Tensor. Returns: Tensor of the same shape as images.
[ "Image", "augmentation", "suitable", "for", "CIFAR", "-", "10", "/", "100", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_utils.py#L392-L405
train
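The pad-to-40, crop-to-32, flip recipe is typically applied per example; a sketch with a tf.data pipeline, assuming a TF 1.x runtime and the import path below:

# Sketch: CIFAR-style augmentation mapped over a toy dataset of 32x32x3 images.
import tensorflow as tf
from tensor2tensor.data_generators import image_utils  # assumed import path

dataset = tf.data.Dataset.from_tensor_slices(tf.zeros([4, 32, 32, 3]))
dataset = dataset.map(image_utils.cifar_image_augmentation)
# Each element is padded to 40x40, randomly cropped back to 32x32, then randomly flipped.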
tensorflow/tensor2tensor
tensor2tensor/data_generators/image_utils.py
random_shift
def random_shift(image, wsr=0.1, hsr=0.1): """Apply random horizontal and vertical shift to images. This is the default data-augmentation strategy used on CIFAR in Glow. Args: image: a 3-D Tensor wsr: Width shift range, as a float fraction of the width. hsr: Height shift range, as a float fraction of the height. Returns: images: images translated by the provided wsr and hsr. """ height, width, _ = common_layers.shape_list(image) width_range, height_range = wsr*width, hsr*height height_translations = tf.random_uniform((1,), -height_range, height_range) width_translations = tf.random_uniform((1,), -width_range, width_range) translations = tf.concat((height_translations, width_translations), axis=0) return tf.contrib.image.translate(image, translations=translations)
python
def random_shift(image, wsr=0.1, hsr=0.1): """Apply random horizontal and vertical shift to images. This is the default data-augmentation strategy used on CIFAR in Glow. Args: image: a 3-D Tensor wsr: Width shift range, as a float fraction of the width. hsr: Height shift range, as a float fraction of the height. Returns: images: images translated by the provided wsr and hsr. """ height, width, _ = common_layers.shape_list(image) width_range, height_range = wsr*width, hsr*height height_translations = tf.random_uniform((1,), -height_range, height_range) width_translations = tf.random_uniform((1,), -width_range, width_range) translations = tf.concat((height_translations, width_translations), axis=0) return tf.contrib.image.translate(image, translations=translations)
[ "def", "random_shift", "(", "image", ",", "wsr", "=", "0.1", ",", "hsr", "=", "0.1", ")", ":", "height", ",", "width", ",", "_", "=", "common_layers", ".", "shape_list", "(", "image", ")", "width_range", ",", "height_range", "=", "wsr", "*", "width", ",", "hsr", "*", "height", "height_translations", "=", "tf", ".", "random_uniform", "(", "(", "1", ",", ")", ",", "-", "height_range", ",", "height_range", ")", "width_translations", "=", "tf", ".", "random_uniform", "(", "(", "1", ",", ")", ",", "-", "width_range", ",", "width_range", ")", "translations", "=", "tf", ".", "concat", "(", "(", "height_translations", ",", "width_translations", ")", ",", "axis", "=", "0", ")", "return", "tf", ".", "contrib", ".", "image", ".", "translate", "(", "image", ",", "translations", "=", "translations", ")" ]
Apply random horizontal and vertical shift to images. This is the default data-augmentation strategy used on CIFAR in Glow. Args: image: a 3-D Tensor wsr: Width shift range, as a float fraction of the width. hsr: Height shift range, as a float fraction of the height. Returns: images: images translated by the provided wsr and hsr.
[ "Apply", "random", "horizontal", "and", "vertical", "shift", "to", "images", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/image_utils.py#L408-L425
train
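A sketch of random_shift on a CIFAR-sized image; it relies on tf.contrib.image.translate, so a TF 1.x build with contrib is assumed.

# Sketch: translate an image by up to 10% of its width and height.
import tensorflow as tf
from tensor2tensor.data_generators import image_utils  # assumed import path

image = tf.zeros([32, 32, 3])
shifted = image_utils.random_shift(image, wsr=0.1, hsr=0.1)
# Same shape as the input; the offsets are re-drawn uniformly every time the graph runs.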
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
get_standardized_layers
def get_standardized_layers(hparams, dp=None): """Get the common attention and feed-forward layers. The returned layer functions will have the following signature: y, extra_loss = fct(x) extra_loss is set to 0.0 if the layer doesn't have extra loss. If dp is provided, the layers will be distributed within the devices. If moe wants to be used, both dp and model need to be set. Args: hparams (tf.HParams): the model hparameters dp (expert_utils.Parallelism): A data parallelism object. If not given, the dp calls are simply ignored. Returns: dict[str:fct]: A dictionary containing the standardized functions """ def partial(fct, *args, **kwargs): """Same as functools.partial but with functools.wraps.""" return functools.wraps(fct)(functools.partial(fct, *args, **kwargs)) def register_layer( fct_in, default_args=None, default_kwargs=None, use_dp=True, recompute_grad=False, ): """Turn a function into its standardized version. Args: fct_in (fct): The function to register default_args (list): The default parameters to add to the function. default_kwargs (dict): The default parameters to add to the function. Those arguments can be overwritten when calling the function. use_dp (bool): Wrap the function call within a dataparallelism object if dp is available. Some layers (like MOE) must be called without dp. recompute_grad (bool): If True, recompute the function during the backward pass to save memory Returns: fct: the standardized layer function. """ # The kwargs given when calling the function overwrite the default ones fct_in = partial(fct_in, *(default_args or []), **(default_kwargs or {})) @functools.wraps(fct_in) def decorator(x, *args, **kwargs): """Call the layer function.""" fct = fct_in # For closure. Could use nonlocal with Python 3 # Eventually create the memory optimized version of the function if recompute_grad: fct = partial(fct, **kwargs) # recompute_grad only accept args fct = common_layers.recompute_grad(fct) kwargs = {} # Eventually use dp (if given and not MoE) if use_dp and dp is not None: y = dp(fct, x, *args, **kwargs) else: y = fct(x, *args, **kwargs) # Eventually capture the extra loss extra_loss = 0.0 if isinstance(y, tuple): y, extra_loss = y return y, extra_loss return decorator total_key_depth = hparams.attention_key_channels or hparams.hidden_size total_value_depth = hparams.attention_value_channels or hparams.hidden_size # Attention layers: # === Multi-head full attention layer === multihead_attention_fn = register_layer( multihead_attention, default_kwargs=dict( memory_antecedent=None, # Self-attention by default bias=None, total_key_depth=total_key_depth, total_value_depth=total_value_depth, output_depth=hparams.hidden_size, num_heads=hparams.num_heads, dropout_rate=hparams.attention_dropout, )) # === Memory efficient full-attention layer === # Save memory by not storing the activations and # recomputing them during the backward pass memeff_attention_base_fn = register_layer( multihead_attention, default_kwargs=dict( total_key_depth=total_key_depth, total_value_depth=total_value_depth, output_depth=hparams.hidden_size, num_heads=hparams.num_heads, dropout_rate=hparams.attention_dropout, ), recompute_grad=True, ) def memeff_attention_fn(*args, **kwargs): """Modify args/kwargs for compatibility with recompute_grad.""" kwargs = kwargs.copy() assert len(args) == 1 x = args[0] memory_antecedent = kwargs.pop("memory_antecedent", x) # Same as x if None if kwargs.get("bias", None) is not None: # Case where bias has been set args = (x, memory_antecedent, kwargs.pop("bias")) else: 
# Otherwise, only 2 args. This is necessary as recompute_grad does not # support None values. args = (x, memory_antecedent) return memeff_attention_base_fn(*args, **kwargs) # === Local attention (unmasked) layer === # Reuse same parameters as multihead_attention # Don't mask the future local_attention_fn = partial( multihead_attention_fn, block_length=hparams.attention_loc_block_length, block_width=hparams.attention_loc_block_width, attention_type="local_unmasked", ) # === Local attention (masked) layer === # Reuse same parameters as multihead_attention # Only works for self attention. Always mask the future. local_attention_masked_fn = partial( multihead_attention_fn, block_length=hparams.attention_loc_block_length, attention_type="local_mask_right", ) # === Masked memory-compressed multihead self attention layer === # Only works for self attention. Always mask the future. compressed_attention_masked_fn = register_layer( multihead_self_attention_reduced, default_kwargs=dict( factor=hparams.attention_red_factor, nonlinearity=hparams.attention_red_nonlinearity, reduction_type=hparams.attention_red_type, multihead_params=dict( total_key_depth=total_key_depth, total_value_depth=total_value_depth, num_heads=hparams.num_heads, dropout_rate=hparams.attention_dropout, ), ), ) # === Unmasked memory-compressed multihead self attention layer === # Only works for self attention. Never mask the future. Bias never added compressed_attention_fn = partial( compressed_attention_masked_fn, add_mask=False, ) # Feed-forwards layers: # === FC layer === conv_hidden_relu = register_layer( common_layers.conv_hidden_relu, default_kwargs=dict( hidden_size=hparams.filter_size, output_size=hparams.hidden_size, dropout=hparams.relu_dropout, ), ) # === Separable convolution layer === # No mask applied sep_conv_relu = partial( conv_hidden_relu, padding="SAME", # Parameters copied from the transformer model, could add hparams kernel_size=(3, 1), second_kernel_size=(31, 1), ) # === Separable convolution layer (masked version) === # Mask the future sep_conv_relu_masked = partial( sep_conv_relu, padding="LEFT", # Mask future for decoder ) # Define all available layers cur_layers = dict( # Attention layers: a=multihead_attention_fn, # Multihead full attention loc=local_attention_fn, # Local attention locm=local_attention_masked_fn, # Local attention (masked) red=compressed_attention_fn, # Memory-compressed attention redm=compressed_attention_masked_fn, # Memory-compressed att (masked) mem=memeff_attention_fn, # Memory efficient # Feed-forward layers: fc=conv_hidden_relu, # Fully connected sep=sep_conv_relu, # Separable convolution (unmasked) sepm=sep_conv_relu_masked, # Separable convolution (masked) ) return cur_layers
python
def get_standardized_layers(hparams, dp=None): """Get the common attention and feed-forward layers. The returned layer functions will have the following signature: y, extra_loss = fct(x) extra_loss is set to 0.0 if the layer doesn't have extra loss. If dp is provided, the layers will be distributed within the devices. If moe wants to be used, both dp and model need to be set. Args: hparams (tf.HParams): the model hparameters dp (expert_utils.Parallelism): A data parallelism object. If not given, the dp calls are simply ignored. Returns: dict[str:fct]: A dictionary containing the standardized functions """ def partial(fct, *args, **kwargs): """Same as functools.partial but with functools.wraps.""" return functools.wraps(fct)(functools.partial(fct, *args, **kwargs)) def register_layer( fct_in, default_args=None, default_kwargs=None, use_dp=True, recompute_grad=False, ): """Turn a function into its standardized version. Args: fct_in (fct): The function to register default_args (list): The default parameters to add to the function. default_kwargs (dict): The default parameters to add to the function. Those arguments can be overwritten when calling the function. use_dp (bool): Wrap the function call within a dataparallelism object if dp is available. Some layers (like MOE) must be called without dp. recompute_grad (bool): If True, recompute the function during the backward pass to save memory Returns: fct: the standardized layer function. """ # The kwargs given when calling the function overwrite the default ones fct_in = partial(fct_in, *(default_args or []), **(default_kwargs or {})) @functools.wraps(fct_in) def decorator(x, *args, **kwargs): """Call the layer function.""" fct = fct_in # For closure. Could use nonlocal with Python 3 # Eventually create the memory optimized version of the function if recompute_grad: fct = partial(fct, **kwargs) # recompute_grad only accept args fct = common_layers.recompute_grad(fct) kwargs = {} # Eventually use dp (if given and not MoE) if use_dp and dp is not None: y = dp(fct, x, *args, **kwargs) else: y = fct(x, *args, **kwargs) # Eventually capture the extra loss extra_loss = 0.0 if isinstance(y, tuple): y, extra_loss = y return y, extra_loss return decorator total_key_depth = hparams.attention_key_channels or hparams.hidden_size total_value_depth = hparams.attention_value_channels or hparams.hidden_size # Attention layers: # === Multi-head full attention layer === multihead_attention_fn = register_layer( multihead_attention, default_kwargs=dict( memory_antecedent=None, # Self-attention by default bias=None, total_key_depth=total_key_depth, total_value_depth=total_value_depth, output_depth=hparams.hidden_size, num_heads=hparams.num_heads, dropout_rate=hparams.attention_dropout, )) # === Memory efficient full-attention layer === # Save memory by not storing the activations and # recomputing them during the backward pass memeff_attention_base_fn = register_layer( multihead_attention, default_kwargs=dict( total_key_depth=total_key_depth, total_value_depth=total_value_depth, output_depth=hparams.hidden_size, num_heads=hparams.num_heads, dropout_rate=hparams.attention_dropout, ), recompute_grad=True, ) def memeff_attention_fn(*args, **kwargs): """Modify args/kwargs for compatibility with recompute_grad.""" kwargs = kwargs.copy() assert len(args) == 1 x = args[0] memory_antecedent = kwargs.pop("memory_antecedent", x) # Same as x if None if kwargs.get("bias", None) is not None: # Case where bias has been set args = (x, memory_antecedent, kwargs.pop("bias")) else: 
# Otherwise, only 2 args. This is necessary as recompute_grad does not # support None values. args = (x, memory_antecedent) return memeff_attention_base_fn(*args, **kwargs) # === Local attention (unmasked) layer === # Reuse same parameters as multihead_attention # Don't mask the future local_attention_fn = partial( multihead_attention_fn, block_length=hparams.attention_loc_block_length, block_width=hparams.attention_loc_block_width, attention_type="local_unmasked", ) # === Local attention (masked) layer === # Reuse same parameters as multihead_attention # Only works for self attention. Always mask the future. local_attention_masked_fn = partial( multihead_attention_fn, block_length=hparams.attention_loc_block_length, attention_type="local_mask_right", ) # === Masked memory-compressed multihead self attention layer === # Only works for self attention. Always mask the future. compressed_attention_masked_fn = register_layer( multihead_self_attention_reduced, default_kwargs=dict( factor=hparams.attention_red_factor, nonlinearity=hparams.attention_red_nonlinearity, reduction_type=hparams.attention_red_type, multihead_params=dict( total_key_depth=total_key_depth, total_value_depth=total_value_depth, num_heads=hparams.num_heads, dropout_rate=hparams.attention_dropout, ), ), ) # === Unmasked memory-compressed multihead self attention layer === # Only works for self attention. Never mask the future. Bias never added compressed_attention_fn = partial( compressed_attention_masked_fn, add_mask=False, ) # Feed-forwards layers: # === FC layer === conv_hidden_relu = register_layer( common_layers.conv_hidden_relu, default_kwargs=dict( hidden_size=hparams.filter_size, output_size=hparams.hidden_size, dropout=hparams.relu_dropout, ), ) # === Separable convolution layer === # No mask applied sep_conv_relu = partial( conv_hidden_relu, padding="SAME", # Parameters copied from the transformer model, could add hparams kernel_size=(3, 1), second_kernel_size=(31, 1), ) # === Separable convolution layer (masked version) === # Mask the future sep_conv_relu_masked = partial( sep_conv_relu, padding="LEFT", # Mask future for decoder ) # Define all available layers cur_layers = dict( # Attention layers: a=multihead_attention_fn, # Multihead full attention loc=local_attention_fn, # Local attention locm=local_attention_masked_fn, # Local attention (masked) red=compressed_attention_fn, # Memory-compressed attention redm=compressed_attention_masked_fn, # Memory-compressed att (masked) mem=memeff_attention_fn, # Memory efficient # Feed-forward layers: fc=conv_hidden_relu, # Fully connected sep=sep_conv_relu, # Separable convolution (unmasked) sepm=sep_conv_relu_masked, # Separable convolution (masked) ) return cur_layers
[ "def", "get_standardized_layers", "(", "hparams", ",", "dp", "=", "None", ")", ":", "def", "partial", "(", "fct", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Same as functools.partial but with functools.wraps.\"\"\"", "return", "functools", ".", "wraps", "(", "fct", ")", "(", "functools", ".", "partial", "(", "fct", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", "def", "register_layer", "(", "fct_in", ",", "default_args", "=", "None", ",", "default_kwargs", "=", "None", ",", "use_dp", "=", "True", ",", "recompute_grad", "=", "False", ",", ")", ":", "\"\"\"Turn a function into its standardized version.\n\n Args:\n fct_in (fct): The function to register\n default_args (list): The default parameters to add to the function.\n default_kwargs (dict): The default parameters to add to the function.\n Those arguments can be overwritten when calling the function.\n use_dp (bool): Wrap the function call within a dataparallelism object if\n dp is available. Some layers (like MOE) must be called without dp.\n recompute_grad (bool): If True, recompute the function during the\n backward pass to save memory\n\n Returns:\n fct: the standardized layer function.\n \"\"\"", "# The kwargs given when calling the function overwrite the default ones", "fct_in", "=", "partial", "(", "fct_in", ",", "*", "(", "default_args", "or", "[", "]", ")", ",", "*", "*", "(", "default_kwargs", "or", "{", "}", ")", ")", "@", "functools", ".", "wraps", "(", "fct_in", ")", "def", "decorator", "(", "x", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Call the layer function.\"\"\"", "fct", "=", "fct_in", "# For closure. Could use nonlocal with Python 3", "# Eventually create the memory optimized version of the function", "if", "recompute_grad", ":", "fct", "=", "partial", "(", "fct", ",", "*", "*", "kwargs", ")", "# recompute_grad only accept args", "fct", "=", "common_layers", ".", "recompute_grad", "(", "fct", ")", "kwargs", "=", "{", "}", "# Eventually use dp (if given and not MoE)", "if", "use_dp", "and", "dp", "is", "not", "None", ":", "y", "=", "dp", "(", "fct", ",", "x", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "y", "=", "fct", "(", "x", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# Eventually capture the extra loss", "extra_loss", "=", "0.0", "if", "isinstance", "(", "y", ",", "tuple", ")", ":", "y", ",", "extra_loss", "=", "y", "return", "y", ",", "extra_loss", "return", "decorator", "total_key_depth", "=", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", "total_value_depth", "=", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", "# Attention layers:", "# === Multi-head full attention layer ===", "multihead_attention_fn", "=", "register_layer", "(", "multihead_attention", ",", "default_kwargs", "=", "dict", "(", "memory_antecedent", "=", "None", ",", "# Self-attention by default", "bias", "=", "None", ",", "total_key_depth", "=", "total_key_depth", ",", "total_value_depth", "=", "total_value_depth", ",", "output_depth", "=", "hparams", ".", "hidden_size", ",", "num_heads", "=", "hparams", ".", "num_heads", ",", "dropout_rate", "=", "hparams", ".", "attention_dropout", ",", ")", ")", "# === Memory efficient full-attention layer ===", "# Save memory by not storing the activations and", "# recomputing them during the backward pass", "memeff_attention_base_fn", "=", "register_layer", "(", "multihead_attention", ",", "default_kwargs", "=", "dict", "(", "total_key_depth", "=", "total_key_depth", ",", 
"total_value_depth", "=", "total_value_depth", ",", "output_depth", "=", "hparams", ".", "hidden_size", ",", "num_heads", "=", "hparams", ".", "num_heads", ",", "dropout_rate", "=", "hparams", ".", "attention_dropout", ",", ")", ",", "recompute_grad", "=", "True", ",", ")", "def", "memeff_attention_fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Modify args/kwargs for compatibility with recompute_grad.\"\"\"", "kwargs", "=", "kwargs", ".", "copy", "(", ")", "assert", "len", "(", "args", ")", "==", "1", "x", "=", "args", "[", "0", "]", "memory_antecedent", "=", "kwargs", ".", "pop", "(", "\"memory_antecedent\"", ",", "x", ")", "# Same as x if None", "if", "kwargs", ".", "get", "(", "\"bias\"", ",", "None", ")", "is", "not", "None", ":", "# Case where bias has been set", "args", "=", "(", "x", ",", "memory_antecedent", ",", "kwargs", ".", "pop", "(", "\"bias\"", ")", ")", "else", ":", "# Otherwise, only 2 args. This is necessary as recompute_grad does not", "# support None values.", "args", "=", "(", "x", ",", "memory_antecedent", ")", "return", "memeff_attention_base_fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# === Local attention (unmasked) layer ===", "# Reuse same parameters as multihead_attention", "# Don't mask the future", "local_attention_fn", "=", "partial", "(", "multihead_attention_fn", ",", "block_length", "=", "hparams", ".", "attention_loc_block_length", ",", "block_width", "=", "hparams", ".", "attention_loc_block_width", ",", "attention_type", "=", "\"local_unmasked\"", ",", ")", "# === Local attention (masked) layer ===", "# Reuse same parameters as multihead_attention", "# Only works for self attention. Always mask the future.", "local_attention_masked_fn", "=", "partial", "(", "multihead_attention_fn", ",", "block_length", "=", "hparams", ".", "attention_loc_block_length", ",", "attention_type", "=", "\"local_mask_right\"", ",", ")", "# === Masked memory-compressed multihead self attention layer ===", "# Only works for self attention. Always mask the future.", "compressed_attention_masked_fn", "=", "register_layer", "(", "multihead_self_attention_reduced", ",", "default_kwargs", "=", "dict", "(", "factor", "=", "hparams", ".", "attention_red_factor", ",", "nonlinearity", "=", "hparams", ".", "attention_red_nonlinearity", ",", "reduction_type", "=", "hparams", ".", "attention_red_type", ",", "multihead_params", "=", "dict", "(", "total_key_depth", "=", "total_key_depth", ",", "total_value_depth", "=", "total_value_depth", ",", "num_heads", "=", "hparams", ".", "num_heads", ",", "dropout_rate", "=", "hparams", ".", "attention_dropout", ",", ")", ",", ")", ",", ")", "# === Unmasked memory-compressed multihead self attention layer ===", "# Only works for self attention. Never mask the future. 
Bias never added", "compressed_attention_fn", "=", "partial", "(", "compressed_attention_masked_fn", ",", "add_mask", "=", "False", ",", ")", "# Feed-forwards layers:", "# === FC layer ===", "conv_hidden_relu", "=", "register_layer", "(", "common_layers", ".", "conv_hidden_relu", ",", "default_kwargs", "=", "dict", "(", "hidden_size", "=", "hparams", ".", "filter_size", ",", "output_size", "=", "hparams", ".", "hidden_size", ",", "dropout", "=", "hparams", ".", "relu_dropout", ",", ")", ",", ")", "# === Separable convolution layer ===", "# No mask applied", "sep_conv_relu", "=", "partial", "(", "conv_hidden_relu", ",", "padding", "=", "\"SAME\"", ",", "# Parameters copied from the transformer model, could add hparams", "kernel_size", "=", "(", "3", ",", "1", ")", ",", "second_kernel_size", "=", "(", "31", ",", "1", ")", ",", ")", "# === Separable convolution layer (masked version) ===", "# Mask the future", "sep_conv_relu_masked", "=", "partial", "(", "sep_conv_relu", ",", "padding", "=", "\"LEFT\"", ",", "# Mask future for decoder", ")", "# Define all available layers", "cur_layers", "=", "dict", "(", "# Attention layers:", "a", "=", "multihead_attention_fn", ",", "# Multihead full attention", "loc", "=", "local_attention_fn", ",", "# Local attention", "locm", "=", "local_attention_masked_fn", ",", "# Local attention (masked)", "red", "=", "compressed_attention_fn", ",", "# Memory-compressed attention", "redm", "=", "compressed_attention_masked_fn", ",", "# Memory-compressed att (masked)", "mem", "=", "memeff_attention_fn", ",", "# Memory efficient", "# Feed-forward layers:", "fc", "=", "conv_hidden_relu", ",", "# Fully connected", "sep", "=", "sep_conv_relu", ",", "# Separable convolution (unmasked)", "sepm", "=", "sep_conv_relu_masked", ",", "# Separable convolution (masked)", ")", "return", "cur_layers" ]
Get the common attention and feed-forward layers. The returned layer functions will have the following signature: y, extra_loss = fct(x) extra_loss is set to 0.0 if the layer doesn't have an extra loss. If dp is provided, the layers will be distributed across the devices. To use MoE, both dp and model need to be set. Args: hparams (tf.HParams): the model hyperparameters dp (expert_utils.Parallelism): A data parallelism object. If not given, the dp calls are simply ignored. Returns: dict[str:fct]: A dictionary containing the standardized functions
[ "Get", "the", "common", "attention", "and", "feed", "-", "forward", "layers", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L91-L299
train
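A sketch of how the returned dictionary might be used; building hparams from basic_params1 plus add_standard_attention_hparams is an assumption about a typical setup, not part of the original record.

# Sketch: build the layer dict without data parallelism and apply full self-attention.
import tensorflow as tf
from tensor2tensor.layers import common_attention, common_hparams  # assumed imports

hparams = common_attention.add_standard_attention_hparams(common_hparams.basic_params1())
layers = common_attention.get_standardized_layers(hparams)  # dp=None: plain local calls
x = tf.zeros([2, 16, hparams.hidden_size])  # [batch, length, hidden_size]
y, extra_loss = layers["a"](x)  # multihead self-attention; extra_loss is 0.0 here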
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
add_standard_attention_hparams
def add_standard_attention_hparams(hparams): """Adds the hparams used by get_standardized_layers.""" # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. # hparams used and which should have been defined outside (in # common_hparams): # Global flags # hparams.mode # hparams.hidden_size # Pre-post processing flags # hparams.layer_preprocess_sequence # hparams.layer_postprocess_sequence # hparams.layer_prepostprocess_dropout # hparams.norm_type # hparams.norm_epsilon # Mixture-of-Expert flags # hparams.moe_hidden_sizes # hparams.moe_num_experts # hparams.moe_k # hparams.moe_loss_coef # Attention layers flags hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("attention_dropout", 0.0) # Attention: Local hparams.add_hparam("attention_loc_block_length", 256) # Attention: Local (unmasked only): How much to look left. hparams.add_hparam("attention_loc_block_width", 128) # Attention: Memory-compressed hparams.add_hparam("attention_red_factor", 3) hparams.add_hparam("attention_red_type", "conv") hparams.add_hparam("attention_red_nonlinearity", "none") # Fully connected layers flags # To be more consistent, should use filter_size to also control the MOE # size if moe_hidden_sizes not set. hparams.add_hparam("filter_size", 2048) hparams.add_hparam("relu_dropout", 0.0) return hparams
python
def add_standard_attention_hparams(hparams): """Adds the hparams used by get_standardized_layers.""" # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. # hparams used and which should have been defined outside (in # common_hparams): # Global flags # hparams.mode # hparams.hidden_size # Pre-post processing flags # hparams.layer_preprocess_sequence # hparams.layer_postprocess_sequence # hparams.layer_prepostprocess_dropout # hparams.norm_type # hparams.norm_epsilon # Mixture-of-Expert flags # hparams.moe_hidden_sizes # hparams.moe_num_experts # hparams.moe_k # hparams.moe_loss_coef # Attention layers flags hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("attention_dropout", 0.0) # Attention: Local hparams.add_hparam("attention_loc_block_length", 256) # Attention: Local (unmasked only): How much to look left. hparams.add_hparam("attention_loc_block_width", 128) # Attention: Memory-compressed hparams.add_hparam("attention_red_factor", 3) hparams.add_hparam("attention_red_type", "conv") hparams.add_hparam("attention_red_nonlinearity", "none") # Fully connected layers flags # To be more consistent, should use filter_size to also control the MOE # size if moe_hidden_sizes not set. hparams.add_hparam("filter_size", 2048) hparams.add_hparam("relu_dropout", 0.0) return hparams
[ "def", "add_standard_attention_hparams", "(", "hparams", ")", ":", "# All hyperparameters ending in \"dropout\" are automatically set to 0.0", "# when not in training mode.", "# hparams used and which should have been defined outside (in", "# common_hparams):", "# Global flags", "# hparams.mode", "# hparams.hidden_size", "# Pre-post processing flags", "# hparams.layer_preprocess_sequence", "# hparams.layer_postprocess_sequence", "# hparams.layer_prepostprocess_dropout", "# hparams.norm_type", "# hparams.norm_epsilon", "# Mixture-of-Expert flags", "# hparams.moe_hidden_sizes", "# hparams.moe_num_experts", "# hparams.moe_k", "# hparams.moe_loss_coef", "# Attention layers flags", "hparams", ".", "add_hparam", "(", "\"num_heads\"", ",", "8", ")", "hparams", ".", "add_hparam", "(", "\"attention_key_channels\"", ",", "0", ")", "hparams", ".", "add_hparam", "(", "\"attention_value_channels\"", ",", "0", ")", "hparams", ".", "add_hparam", "(", "\"attention_dropout\"", ",", "0.0", ")", "# Attention: Local", "hparams", ".", "add_hparam", "(", "\"attention_loc_block_length\"", ",", "256", ")", "# Attention: Local (unmasked only): How much to look left.", "hparams", ".", "add_hparam", "(", "\"attention_loc_block_width\"", ",", "128", ")", "# Attention: Memory-compressed", "hparams", ".", "add_hparam", "(", "\"attention_red_factor\"", ",", "3", ")", "hparams", ".", "add_hparam", "(", "\"attention_red_type\"", ",", "\"conv\"", ")", "hparams", ".", "add_hparam", "(", "\"attention_red_nonlinearity\"", ",", "\"none\"", ")", "# Fully connected layers flags", "# To be more consistent, should use filter_size to also control the MOE", "# size if moe_hidden_sizes not set.", "hparams", ".", "add_hparam", "(", "\"filter_size\"", ",", "2048", ")", "hparams", ".", "add_hparam", "(", "\"relu_dropout\"", ",", "0.0", ")", "return", "hparams" ]
Adds the hparams used by get_standardized_layers.
[ "Adds", "the", "hparams", "used", "by", "get_standardized_layers", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L302-L344
train
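A short sketch of extending a base hparams object with these attention fields; using basic_params1 as the base is an assumption.

# Sketch: the standard attention hyperparameters layered on top of a base config.
from tensor2tensor.layers import common_attention, common_hparams  # assumed imports

hparams = common_hparams.basic_params1()
hparams = common_attention.add_standard_attention_hparams(hparams)
print(hparams.num_heads, hparams.filter_size)  # 8 2048 with the defaults listed above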
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
encoder_decoder_attention_loss
def encoder_decoder_attention_loss(expected_attention_logits, actual_attentions, loss_type="kl_divergence", loss_multiplier=1.0): """Computes encdec attention loss between expected and actual attentions. Args: expected_attention_logits: Tensor storing the expected encoder-decoder attention logits with shape [batch_size, target_length, input_length]. actual_attentions: Dictionary with actual attention logits for different attention types and hidden layers. loss_type: type of the loss function. loss_multiplier: multiplier for the attention loss. Returns: KL_divergence loss between the actual and expected attention logits. """ def combine_attentions(attention_list): """Combine different layer attentions and then average over layers/heads.""" # Stack all hidden layer attention tensors to get a tensor with shape # [num_hidden_layers, batch_size, num_heads, target_length, input_length]. attentions = tf.stack(attention_list) # Reduce mean across all layers (axis=0) and all heads (axis=2) to get a # tensor with shape [batch_size, target_length, input_length]. return tf.reduce_mean(attentions, [0, 2]) def kl_divergence_loss(expected_logits, actual_logits): p = tfp.distributions.Categorical(logits=expected_logits) q = tfp.distributions.Categorical(logits=actual_logits) return tfp.distributions.kl_divergence(p, q) def mse_loss(expected_logits, actual_weights): expected_weights = tf.nn.softmax(expected_logits) return tf.losses.mean_squared_error(expected_weights, actual_weights) # For each hidden layer, we have attention-logit and attention-weight tensors # with shape [batch_size, num_heads, target_length, input_length]. loss = 0.0 if loss_type == "mse": actual_encdec_attention_weights = [ t for layer_key, t in actual_attentions.items() if "encdec_attention" in layer_key and not layer_key.endswith("/logits") ] actual_attention_weights = combine_attentions( actual_encdec_attention_weights) loss = mse_loss(expected_attention_logits, actual_attention_weights) else: actual_encdec_attention_logits = [ t for layer_key, t in actual_attentions.items() if "encdec_attention" in layer_key and layer_key.endswith("/logits") ] actual_attention_logits = combine_attentions(actual_encdec_attention_logits) loss = kl_divergence_loss(expected_attention_logits, actual_attention_logits) return loss * loss_multiplier
python
def encoder_decoder_attention_loss(expected_attention_logits, actual_attentions, loss_type="kl_divergence", loss_multiplier=1.0): """Computes encdec attention loss between expected and actual attentions. Args: expected_attention_logits: Tensor storing the expected encoder-decoder attention logits with shape [batch_size, target_length, input_length]. actual_attentions: Dictionary with actual attention logits for different attention types and hidden layers. loss_type: type of the loss function. loss_multiplier: multiplier for the attention loss. Returns: KL_divergence loss between the actual and expected attention logits. """ def combine_attentions(attention_list): """Combine different layer attentions and then average over layers/heads.""" # Stack all hidden layer attention tensors to get a tensor with shape # [num_hidden_layers, batch_size, num_heads, target_length, input_length]. attentions = tf.stack(attention_list) # Reduce mean across all layers (axis=0) and all heads (axis=2) to get a # tensor with shape [batch_size, target_length, input_length]. return tf.reduce_mean(attentions, [0, 2]) def kl_divergence_loss(expected_logits, actual_logits): p = tfp.distributions.Categorical(logits=expected_logits) q = tfp.distributions.Categorical(logits=actual_logits) return tfp.distributions.kl_divergence(p, q) def mse_loss(expected_logits, actual_weights): expected_weights = tf.nn.softmax(expected_logits) return tf.losses.mean_squared_error(expected_weights, actual_weights) # For each hidden layer, we have attention-logit and attention-weight tensors # with shape [batch_size, num_heads, target_length, input_length]. loss = 0.0 if loss_type == "mse": actual_encdec_attention_weights = [ t for layer_key, t in actual_attentions.items() if "encdec_attention" in layer_key and not layer_key.endswith("/logits") ] actual_attention_weights = combine_attentions( actual_encdec_attention_weights) loss = mse_loss(expected_attention_logits, actual_attention_weights) else: actual_encdec_attention_logits = [ t for layer_key, t in actual_attentions.items() if "encdec_attention" in layer_key and layer_key.endswith("/logits") ] actual_attention_logits = combine_attentions(actual_encdec_attention_logits) loss = kl_divergence_loss(expected_attention_logits, actual_attention_logits) return loss * loss_multiplier
[ "def", "encoder_decoder_attention_loss", "(", "expected_attention_logits", ",", "actual_attentions", ",", "loss_type", "=", "\"kl_divergence\"", ",", "loss_multiplier", "=", "1.0", ")", ":", "def", "combine_attentions", "(", "attention_list", ")", ":", "\"\"\"Combine different layer attentions and then average over layers/heads.\"\"\"", "# Stack all hidden layer attention tensors to get a tensor with shape", "# [num_hidden_layers, batch_size, num_heads, target_length, input_length].", "attentions", "=", "tf", ".", "stack", "(", "attention_list", ")", "# Reduce mean across all layers (axis=0) and all heads (axis=2) to get a", "# tensor with shape [batch_size, target_length, input_length].", "return", "tf", ".", "reduce_mean", "(", "attentions", ",", "[", "0", ",", "2", "]", ")", "def", "kl_divergence_loss", "(", "expected_logits", ",", "actual_logits", ")", ":", "p", "=", "tfp", ".", "distributions", ".", "Categorical", "(", "logits", "=", "expected_logits", ")", "q", "=", "tfp", ".", "distributions", ".", "Categorical", "(", "logits", "=", "actual_logits", ")", "return", "tfp", ".", "distributions", ".", "kl_divergence", "(", "p", ",", "q", ")", "def", "mse_loss", "(", "expected_logits", ",", "actual_weights", ")", ":", "expected_weights", "=", "tf", ".", "nn", ".", "softmax", "(", "expected_logits", ")", "return", "tf", ".", "losses", ".", "mean_squared_error", "(", "expected_weights", ",", "actual_weights", ")", "# For each hidden layer, we have attention-logit and attention-weight tensors", "# with shape [batch_size, num_heads, target_length, input_length].", "loss", "=", "0.0", "if", "loss_type", "==", "\"mse\"", ":", "actual_encdec_attention_weights", "=", "[", "t", "for", "layer_key", ",", "t", "in", "actual_attentions", ".", "items", "(", ")", "if", "\"encdec_attention\"", "in", "layer_key", "and", "not", "layer_key", ".", "endswith", "(", "\"/logits\"", ")", "]", "actual_attention_weights", "=", "combine_attentions", "(", "actual_encdec_attention_weights", ")", "loss", "=", "mse_loss", "(", "expected_attention_logits", ",", "actual_attention_weights", ")", "else", ":", "actual_encdec_attention_logits", "=", "[", "t", "for", "layer_key", ",", "t", "in", "actual_attentions", ".", "items", "(", ")", "if", "\"encdec_attention\"", "in", "layer_key", "and", "layer_key", ".", "endswith", "(", "\"/logits\"", ")", "]", "actual_attention_logits", "=", "combine_attentions", "(", "actual_encdec_attention_logits", ")", "loss", "=", "kl_divergence_loss", "(", "expected_attention_logits", ",", "actual_attention_logits", ")", "return", "loss", "*", "loss_multiplier" ]
Computes encdec attention loss between expected and actual attentions. Args: expected_attention_logits: Tensor storing the expected encoder-decoder attention logits with shape [batch_size, target_length, input_length]. actual_attentions: Dictionary with actual attention logits for different attention types and hidden layers. loss_type: type of the loss function. loss_multiplier: multiplier for the attention loss. Returns: KL_divergence loss between the actual and expected attention logits.
[ "Computes", "encdec", "attention", "loss", "between", "expected", "and", "actual", "attentions", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L347-L402
train
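A toy-shaped sketch of the expected inputs; the attention-dictionary key names below are illustrative assumptions about the "encdec_attention" / "/logits" naming that the filter expects.

# Sketch: KL-based attention supervision with fabricated zero tensors (TF 1.x + tfp assumed).
import tensorflow as tf
from tensor2tensor.layers import common_attention  # assumed import path

expected = tf.zeros([2, 5, 7])  # [batch, target_length, input_length]
actual = {
    "decoder/layer_0/encdec_attention/logits": tf.zeros([2, 4, 5, 7]),  # [b, heads, t, i]
    "decoder/layer_0/encdec_attention": tf.zeros([2, 4, 5, 7]),  # weights, used by "mse"
}
loss = common_attention.encoder_decoder_attention_loss(expected, actual)
# With the default loss_type, only the "/logits" entries feed the KL term.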
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
get_timing_signal_1d
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4, start_index=0): """Gets a bunch of sinusoids of different frequencies. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels / 2. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: length: scalar, length of timing signal sequence. channels: scalar, size of timing embeddings to create. The number of different timescales is equal to channels / 2. min_timescale: a float max_timescale: a float start_index: index of first position Returns: a Tensor of timing signals [1, length, channels] """ position = tf.to_float(tf.range(length) + start_index) num_timescales = channels // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / tf.maximum(tf.to_float(num_timescales) - 1, 1)) inv_timescales = min_timescale * tf.exp( tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]]) signal = tf.reshape(signal, [1, length, channels]) return signal
python
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4, start_index=0): """Gets a bunch of sinusoids of different frequencies. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels / 2. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: length: scalar, length of timing signal sequence. channels: scalar, size of timing embeddings to create. The number of different timescales is equal to channels / 2. min_timescale: a float max_timescale: a float start_index: index of first position Returns: a Tensor of timing signals [1, length, channels] """ position = tf.to_float(tf.range(length) + start_index) num_timescales = channels // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / tf.maximum(tf.to_float(num_timescales) - 1, 1)) inv_timescales = min_timescale * tf.exp( tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]]) signal = tf.reshape(signal, [1, length, channels]) return signal
[ "def", "get_timing_signal_1d", "(", "length", ",", "channels", ",", "min_timescale", "=", "1.0", ",", "max_timescale", "=", "1.0e4", ",", "start_index", "=", "0", ")", ":", "position", "=", "tf", ".", "to_float", "(", "tf", ".", "range", "(", "length", ")", "+", "start_index", ")", "num_timescales", "=", "channels", "//", "2", "log_timescale_increment", "=", "(", "math", ".", "log", "(", "float", "(", "max_timescale", ")", "/", "float", "(", "min_timescale", ")", ")", "/", "tf", ".", "maximum", "(", "tf", ".", "to_float", "(", "num_timescales", ")", "-", "1", ",", "1", ")", ")", "inv_timescales", "=", "min_timescale", "*", "tf", ".", "exp", "(", "tf", ".", "to_float", "(", "tf", ".", "range", "(", "num_timescales", ")", ")", "*", "-", "log_timescale_increment", ")", "scaled_time", "=", "tf", ".", "expand_dims", "(", "position", ",", "1", ")", "*", "tf", ".", "expand_dims", "(", "inv_timescales", ",", "0", ")", "signal", "=", "tf", ".", "concat", "(", "[", "tf", ".", "sin", "(", "scaled_time", ")", ",", "tf", ".", "cos", "(", "scaled_time", ")", "]", ",", "axis", "=", "1", ")", "signal", "=", "tf", ".", "pad", "(", "signal", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "tf", ".", "mod", "(", "channels", ",", "2", ")", "]", "]", ")", "signal", "=", "tf", ".", "reshape", "(", "signal", ",", "[", "1", ",", "length", ",", "channels", "]", ")", "return", "signal" ]
Gets a bunch of sinusoids of different frequencies. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels / 2. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: length: scalar, length of timing signal sequence. channels: scalar, size of timing embeddings to create. The number of different timescales is equal to channels / 2. min_timescale: a float max_timescale: a float start_index: index of first position Returns: a Tensor of timing signals [1, length, channels]
[ "Gets", "a", "bunch", "of", "sinusoids", "of", "different", "frequencies", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L406-L452
train
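A sketch of the signal itself; the channel layout and timescale spacing described in the comments are read off the code above.

# Sketch: a [1, length, channels] table of sinusoids with geometrically spaced timescales,
# i.e. timescale_i = min_timescale * (max_timescale / min_timescale) ** (i / (n - 1)).
import tensorflow as tf
from tensor2tensor.layers import common_attention  # assumed import path

signal = common_attention.get_timing_signal_1d(length=10, channels=8)
# shape [1, 10, 8]: the first 4 channels are sines, the next 4 the matching cosines.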
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
add_timing_signal_1d
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, start_index=0): """Adds a bunch of sinusoids of different frequencies to a Tensor. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels / 2. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: x: a Tensor with shape [batch, length, channels] min_timescale: a float max_timescale: a float start_index: index of first position Returns: a Tensor the same shape as x. """ length = common_layers.shape_list(x)[1] channels = common_layers.shape_list(x)[2] signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale, start_index) return x + common_layers.cast_like(signal, x)
python
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, start_index=0): """Adds a bunch of sinusoids of different frequencies to a Tensor. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels / 2. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: x: a Tensor with shape [batch, length, channels] min_timescale: a float max_timescale: a float start_index: index of first position Returns: a Tensor the same shape as x. """ length = common_layers.shape_list(x)[1] channels = common_layers.shape_list(x)[2] signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale, start_index) return x + common_layers.cast_like(signal, x)
[ "def", "add_timing_signal_1d", "(", "x", ",", "min_timescale", "=", "1.0", ",", "max_timescale", "=", "1.0e4", ",", "start_index", "=", "0", ")", ":", "length", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "1", "]", "channels", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "2", "]", "signal", "=", "get_timing_signal_1d", "(", "length", ",", "channels", ",", "min_timescale", ",", "max_timescale", ",", "start_index", ")", "return", "x", "+", "common_layers", ".", "cast_like", "(", "signal", ",", "x", ")" ]
Adds a bunch of sinusoids of different frequencies to a Tensor. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels / 2. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: x: a Tensor with shape [batch, length, channels] min_timescale: a float max_timescale: a float start_index: index of first position Returns: a Tensor the same shape as x.
[ "Adds", "a", "bunch", "of", "sinusoids", "of", "different", "frequencies", "to", "a", "Tensor", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L456-L492
train
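A one-line sketch of stamping positions onto an embedded batch before the first attention layer (TF 1.x assumed).

# Sketch: add the sinusoidal position signal to a [batch, length, channels] tensor.
import tensorflow as tf
from tensor2tensor.layers import common_attention  # assumed import path

x = tf.zeros([2, 10, 8])
x = common_attention.add_timing_signal_1d(x)  # same shape, positions now encoded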
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
get_layer_timing_signal_learned_1d
def get_layer_timing_signal_learned_1d(channels, layer, num_layers): """get n-dimensional embedding as the layer (vertical) timing signal. Adds embeddings to represent the position of the layer in the tower. Args: channels: dimension of the timing signal layer: layer num num_layers: total number of layers Returns: a Tensor of timing signals [1, 1, channels]. """ shape = [num_layers, 1, 1, channels] layer_embedding = ( tf.get_variable( "layer_embedding", shape, initializer=tf.random_normal_initializer(0, channels**-0.5)) * (channels**0.5)) return layer_embedding[layer, :, :, :]
python
def get_layer_timing_signal_learned_1d(channels, layer, num_layers): """get n-dimensional embedding as the layer (vertical) timing signal. Adds embeddings to represent the position of the layer in the tower. Args: channels: dimension of the timing signal layer: layer num num_layers: total number of layers Returns: a Tensor of timing signals [1, 1, channels]. """ shape = [num_layers, 1, 1, channels] layer_embedding = ( tf.get_variable( "layer_embedding", shape, initializer=tf.random_normal_initializer(0, channels**-0.5)) * (channels**0.5)) return layer_embedding[layer, :, :, :]
[ "def", "get_layer_timing_signal_learned_1d", "(", "channels", ",", "layer", ",", "num_layers", ")", ":", "shape", "=", "[", "num_layers", ",", "1", ",", "1", ",", "channels", "]", "layer_embedding", "=", "(", "tf", ".", "get_variable", "(", "\"layer_embedding\"", ",", "shape", ",", "initializer", "=", "tf", ".", "random_normal_initializer", "(", "0", ",", "channels", "**", "-", "0.5", ")", ")", "*", "(", "channels", "**", "0.5", ")", ")", "return", "layer_embedding", "[", "layer", ",", ":", ",", ":", ",", ":", "]" ]
Get n-dimensional embedding as the layer (vertical) timing signal. Adds embeddings to represent the position of the layer in the tower. Args: channels: dimension of the timing signal layer: layer num num_layers: total number of layers Returns: a Tensor of timing signals [1, 1, channels].
[ "get", "n", "-", "dimensional", "embedding", "as", "the", "layer", "(", "vertical", ")", "timing", "signal", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L496-L516
train
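A sketch of the learned variant; it creates a [num_layers, 1, 1, channels] variable, so a TF 1.x variable scope is assumed.

# Sketch: fetch the learned embedding for layer 2 of a 6-layer tower.
import tensorflow as tf
from tensor2tensor.layers import common_attention  # assumed import path

with tf.variable_scope("layer_signal"):
  signal = common_attention.get_layer_timing_signal_learned_1d(
      channels=8, layer=2, num_layers=6)  # shape [1, 1, 8]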
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
add_layer_timing_signal_learned_1d
def add_layer_timing_signal_learned_1d(x, layer, num_layers): """Add n-dimensional embedding as the layer (vertical) timing signal. Adds embeddings to represent the position of the layer in the tower. Args: x: a tensor with shape [batch, length, depth] layer: layer num num_layers: total number of layers Returns: a Tensor the same shape as x. """ channels = common_layers.shape_list(x)[-1] signal = get_layer_timing_signal_learned_1d(channels, layer, num_layers) x += signal return x
python
def add_layer_timing_signal_learned_1d(x, layer, num_layers): """Add n-dimensional embedding as the layer (vertical) timing signal. Adds embeddings to represent the position of the layer in the tower. Args: x: a tensor with shape [batch, length, depth] layer: layer num num_layers: total number of layers Returns: a Tensor the same shape as x. """ channels = common_layers.shape_list(x)[-1] signal = get_layer_timing_signal_learned_1d(channels, layer, num_layers) x += signal return x
[ "def", "add_layer_timing_signal_learned_1d", "(", "x", ",", "layer", ",", "num_layers", ")", ":", "channels", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "-", "1", "]", "signal", "=", "get_layer_timing_signal_learned_1d", "(", "channels", ",", "layer", ",", "num_layers", ")", "x", "+=", "signal", "return", "x" ]
Add n-dimensional embedding as the layer (vertical) timing signal. Adds embeddings to represent the position of the layer in the tower. Args: x: a tensor with shape [batch, length, depth] layer: layer num num_layers: total number of layers Returns: a Tensor the same shape as x.
[ "Add", "n", "-", "dimensional", "embedding", "as", "the", "layer", "(", "vertical", ")", "timing", "signal", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L520-L536
train
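A sketch of applying the learned layer signal inside a tower loop; the reuse handling is an assumption about how a surrounding model would share the embedding table across layers.

# Sketch: tag activations with their layer index while iterating over a 6-layer tower.
import tensorflow as tf
from tensor2tensor.layers import common_attention  # assumed import path

x = tf.zeros([2, 10, 8])
with tf.variable_scope("body", reuse=tf.AUTO_REUSE):
  for layer in range(6):
    x = common_attention.add_layer_timing_signal_learned_1d(x, layer, num_layers=6)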
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
get_layer_timing_signal_sinusoid_1d
def get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers): """Add sinusoids of different frequencies as layer (vertical) timing signal. Args: channels: dimension of the timing signal layer: layer num num_layers: total number of layers Returns: a Tensor of timing signals [1, 1, channels]. """ signal = get_timing_signal_1d(num_layers, channels) layer_signal = tf.expand_dims(signal[:, layer, :], axis=1) return layer_signal
python
def get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers): """Add sinusoids of different frequencies as layer (vertical) timing signal. Args: channels: dimension of the timing signal layer: layer num num_layers: total number of layers Returns: a Tensor of timing signals [1, 1, channels]. """ signal = get_timing_signal_1d(num_layers, channels) layer_signal = tf.expand_dims(signal[:, layer, :], axis=1) return layer_signal
[ "def", "get_layer_timing_signal_sinusoid_1d", "(", "channels", ",", "layer", ",", "num_layers", ")", ":", "signal", "=", "get_timing_signal_1d", "(", "num_layers", ",", "channels", ")", "layer_signal", "=", "tf", ".", "expand_dims", "(", "signal", "[", ":", ",", "layer", ",", ":", "]", ",", "axis", "=", "1", ")", "return", "layer_signal" ]
Add sinusoids of different frequencies as layer (vertical) timing signal. Args: channels: dimension of the timing signal layer: layer num num_layers: total number of layers Returns: a Tensor of timing signals [1, 1, channels].
[ "Add", "sinusoids", "of", "different", "frequencies", "as", "layer", "(", "vertical", ")", "timing", "signal", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L540-L555
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
add_layer_timing_signal_sinusoid_1d
def add_layer_timing_signal_sinusoid_1d(x, layer, num_layers): """Add sinusoids of different frequencies as layer (vertical) timing signal. Args: x: a Tensor with shape [batch, length, channels] layer: layer num num_layers: total number of layers Returns: a Tensor the same shape as x. """ channels = common_layers.shape_list(x)[-1] signal = get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers) return x + signal
python
def add_layer_timing_signal_sinusoid_1d(x, layer, num_layers): """Add sinusoids of different frequencies as layer (vertical) timing signal. Args: x: a Tensor with shape [batch, length, channels] layer: layer num num_layers: total number of layers Returns: a Tensor the same shape as x. """ channels = common_layers.shape_list(x)[-1] signal = get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers) return x + signal
[ "def", "add_layer_timing_signal_sinusoid_1d", "(", "x", ",", "layer", ",", "num_layers", ")", ":", "channels", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "-", "1", "]", "signal", "=", "get_layer_timing_signal_sinusoid_1d", "(", "channels", ",", "layer", ",", "num_layers", ")", "return", "x", "+", "signal" ]
Add sinusoids of different frequencies as layer (vertical) timing signal. Args: x: a Tensor with shape [batch, length, channels] layer: layer num num_layers: total number of layers Returns: a Tensor the same shape as x.
[ "Add", "sinusoids", "of", "different", "frequencies", "as", "layer", "(", "vertical", ")", "timing", "signal", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L559-L574
train
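A short sketch of how the sinusoidal layer signal might be applied across a tower, assuming TensorFlow 1.x and tensor2tensor on the path; the loop body is a stand-in for a real layer.

import tensorflow as tf
from tensor2tensor.layers import common_attention

h = tf.random_normal([2, 10, 32])  # [batch, length, channels]
num_layers = 4
for layer in range(num_layers):
    # Each layer adds a signal encoding its own depth in the stack.
    h = common_attention.add_layer_timing_signal_sinusoid_1d(h, layer, num_layers)
    # ... the real layer body (attention / feed-forward) would go here ...

with tf.Session() as sess:
    print(sess.run(h).shape)  # (2, 10, 32)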
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
add_timing_signal_1d_given_position
def add_timing_signal_1d_given_position(x, position, min_timescale=1.0, max_timescale=1.0e4): """Adds sinusoids of diff frequencies to a Tensor, with timing position given. Args: x: a Tensor with shape [batch, length, channels] position: a Tensor with shape [batch, length] min_timescale: a float max_timescale: a float Returns: a Tensor the same shape as x. """ channels = common_layers.shape_list(x)[2] num_timescales = channels // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.to_float(num_timescales) - 1)) inv_timescales = min_timescale * tf.exp( tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) scaled_time = ( tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims( tf.expand_dims(inv_timescales, 0), 0)) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2) signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]]) signal = common_layers.cast_like(signal, x) return x + signal
python
def add_timing_signal_1d_given_position(x, position, min_timescale=1.0, max_timescale=1.0e4): """Adds sinusoids of diff frequencies to a Tensor, with timing position given. Args: x: a Tensor with shape [batch, length, channels] position: a Tensor with shape [batch, length] min_timescale: a float max_timescale: a float Returns: a Tensor the same shape as x. """ channels = common_layers.shape_list(x)[2] num_timescales = channels // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.to_float(num_timescales) - 1)) inv_timescales = min_timescale * tf.exp( tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) scaled_time = ( tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims( tf.expand_dims(inv_timescales, 0), 0)) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2) signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]]) signal = common_layers.cast_like(signal, x) return x + signal
[ "def", "add_timing_signal_1d_given_position", "(", "x", ",", "position", ",", "min_timescale", "=", "1.0", ",", "max_timescale", "=", "1.0e4", ")", ":", "channels", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "2", "]", "num_timescales", "=", "channels", "//", "2", "log_timescale_increment", "=", "(", "math", ".", "log", "(", "float", "(", "max_timescale", ")", "/", "float", "(", "min_timescale", ")", ")", "/", "(", "tf", ".", "to_float", "(", "num_timescales", ")", "-", "1", ")", ")", "inv_timescales", "=", "min_timescale", "*", "tf", ".", "exp", "(", "tf", ".", "to_float", "(", "tf", ".", "range", "(", "num_timescales", ")", ")", "*", "-", "log_timescale_increment", ")", "scaled_time", "=", "(", "tf", ".", "expand_dims", "(", "tf", ".", "to_float", "(", "position", ")", ",", "2", ")", "*", "tf", ".", "expand_dims", "(", "tf", ".", "expand_dims", "(", "inv_timescales", ",", "0", ")", ",", "0", ")", ")", "signal", "=", "tf", ".", "concat", "(", "[", "tf", ".", "sin", "(", "scaled_time", ")", ",", "tf", ".", "cos", "(", "scaled_time", ")", "]", ",", "axis", "=", "2", ")", "signal", "=", "tf", ".", "pad", "(", "signal", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "tf", ".", "mod", "(", "channels", ",", "2", ")", "]", "]", ")", "signal", "=", "common_layers", ".", "cast_like", "(", "signal", ",", "x", ")", "return", "x", "+", "signal" ]
Adds sinusoids of diff frequencies to a Tensor, with timing position given. Args: x: a Tensor with shape [batch, length, channels] position: a Tensor with shape [batch, length] min_timescale: a float max_timescale: a float Returns: a Tensor the same shape as x.
[ "Adds", "sinusoids", "of", "diff", "frequencies", "to", "a", "Tensor", "with", "timing", "position", "given", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L578-L606
train
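A small sketch for add_timing_signal_1d_given_position, assuming TensorFlow 1.x; the explicit position tensor is what makes this variant useful for packed or shifted sequences. The values are illustrative.

import tensorflow as tf
from tensor2tensor.layers import common_attention

x = tf.zeros([1, 4, 8])                 # [batch, length, channels]
# Two packed sequences of length 2 each, so positions restart at 0.
position = tf.constant([[0, 1, 0, 1]])
y = common_attention.add_timing_signal_1d_given_position(x, position)

with tf.Session() as sess:
    print(sess.run(y).shape)  # (1, 4, 8)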
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
add_timing_signal_nd
def add_timing_signal_nd(x, min_timescale=1.0, max_timescale=1.0e4): """Adds a bunch of sinusoids of different frequencies to a Tensor. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase in one of the positional dimensions. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(a+b) and cos(a+b) can be experessed in terms of b, sin(a) and cos(a). x is a Tensor with n "positional" dimensions, e.g. one dimension for a sequence or two dimensions for an image We use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels // (n * 2). For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: x: a Tensor with shape [batch, d1 ... dn, channels] min_timescale: a float max_timescale: a float Returns: a Tensor the same shape as x. """ num_dims = len(x.get_shape().as_list()) - 2 channels = common_layers.shape_list(x)[-1] num_timescales = channels // (num_dims * 2) log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.to_float(num_timescales) - 1)) inv_timescales = min_timescale * tf.exp( tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) for dim in range(num_dims): length = common_layers.shape_list(x)[dim + 1] position = tf.to_float(tf.range(length)) scaled_time = tf.expand_dims(position, 1) * tf.expand_dims( inv_timescales, 0) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) prepad = dim * 2 * num_timescales postpad = channels - (dim + 1) * 2 * num_timescales signal = tf.pad(signal, [[0, 0], [prepad, postpad]]) for _ in range(1 + dim): signal = tf.expand_dims(signal, 0) for _ in range(num_dims - 1 - dim): signal = tf.expand_dims(signal, -2) x += signal return x
python
def add_timing_signal_nd(x, min_timescale=1.0, max_timescale=1.0e4): """Adds a bunch of sinusoids of different frequencies to a Tensor. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase in one of the positional dimensions. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(a+b) and cos(a+b) can be experessed in terms of b, sin(a) and cos(a). x is a Tensor with n "positional" dimensions, e.g. one dimension for a sequence or two dimensions for an image We use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels // (n * 2). For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: x: a Tensor with shape [batch, d1 ... dn, channels] min_timescale: a float max_timescale: a float Returns: a Tensor the same shape as x. """ num_dims = len(x.get_shape().as_list()) - 2 channels = common_layers.shape_list(x)[-1] num_timescales = channels // (num_dims * 2) log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.to_float(num_timescales) - 1)) inv_timescales = min_timescale * tf.exp( tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) for dim in range(num_dims): length = common_layers.shape_list(x)[dim + 1] position = tf.to_float(tf.range(length)) scaled_time = tf.expand_dims(position, 1) * tf.expand_dims( inv_timescales, 0) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) prepad = dim * 2 * num_timescales postpad = channels - (dim + 1) * 2 * num_timescales signal = tf.pad(signal, [[0, 0], [prepad, postpad]]) for _ in range(1 + dim): signal = tf.expand_dims(signal, 0) for _ in range(num_dims - 1 - dim): signal = tf.expand_dims(signal, -2) x += signal return x
[ "def", "add_timing_signal_nd", "(", "x", ",", "min_timescale", "=", "1.0", ",", "max_timescale", "=", "1.0e4", ")", ":", "num_dims", "=", "len", "(", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", ")", "-", "2", "channels", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "-", "1", "]", "num_timescales", "=", "channels", "//", "(", "num_dims", "*", "2", ")", "log_timescale_increment", "=", "(", "math", ".", "log", "(", "float", "(", "max_timescale", ")", "/", "float", "(", "min_timescale", ")", ")", "/", "(", "tf", ".", "to_float", "(", "num_timescales", ")", "-", "1", ")", ")", "inv_timescales", "=", "min_timescale", "*", "tf", ".", "exp", "(", "tf", ".", "to_float", "(", "tf", ".", "range", "(", "num_timescales", ")", ")", "*", "-", "log_timescale_increment", ")", "for", "dim", "in", "range", "(", "num_dims", ")", ":", "length", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "dim", "+", "1", "]", "position", "=", "tf", ".", "to_float", "(", "tf", ".", "range", "(", "length", ")", ")", "scaled_time", "=", "tf", ".", "expand_dims", "(", "position", ",", "1", ")", "*", "tf", ".", "expand_dims", "(", "inv_timescales", ",", "0", ")", "signal", "=", "tf", ".", "concat", "(", "[", "tf", ".", "sin", "(", "scaled_time", ")", ",", "tf", ".", "cos", "(", "scaled_time", ")", "]", ",", "axis", "=", "1", ")", "prepad", "=", "dim", "*", "2", "*", "num_timescales", "postpad", "=", "channels", "-", "(", "dim", "+", "1", ")", "*", "2", "*", "num_timescales", "signal", "=", "tf", ".", "pad", "(", "signal", ",", "[", "[", "0", ",", "0", "]", ",", "[", "prepad", ",", "postpad", "]", "]", ")", "for", "_", "in", "range", "(", "1", "+", "dim", ")", ":", "signal", "=", "tf", ".", "expand_dims", "(", "signal", ",", "0", ")", "for", "_", "in", "range", "(", "num_dims", "-", "1", "-", "dim", ")", ":", "signal", "=", "tf", ".", "expand_dims", "(", "signal", ",", "-", "2", ")", "x", "+=", "signal", "return", "x" ]
Adds a bunch of sinusoids of different frequencies to a Tensor. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase in one of the positional dimensions. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(a+b) and cos(a+b) can be experessed in terms of b, sin(a) and cos(a). x is a Tensor with n "positional" dimensions, e.g. one dimension for a sequence or two dimensions for an image We use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels // (n * 2). For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: x: a Tensor with shape [batch, d1 ... dn, channels] min_timescale: a float max_timescale: a float Returns: a Tensor the same shape as x.
[ "Adds", "a", "bunch", "of", "sinusoids", "of", "different", "frequencies", "to", "a", "Tensor", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L610-L663
train
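A sketch of the n-dimensional variant on a 2-D (image-like) input, assuming TensorFlow 1.x; with two positional dimensions, channels // 4 timescales are allocated to each axis.

import tensorflow as tf
from tensor2tensor.layers import common_attention

x = tf.zeros([1, 8, 8, 16])  # [batch, height, width, channels]
y = common_attention.add_timing_signal_nd(x)  # rows and columns each get their own sinusoids

with tf.Session() as sess:
    print(sess.run(y).shape)  # (1, 8, 8, 16)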
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
add_positional_embedding
def add_positional_embedding(x, max_length, name=None, positions=None): """Adds positional embedding. Args: x: Tensor with shape [batch, length, depth]. max_length: int representing static maximum size of any dimension. name: str representing name of the embedding tf.Variable. positions: Tensor with shape [batch, length]. Returns: Tensor of same shape as x. """ with tf.name_scope("add_positional_embedding"): _, length, depth = common_layers.shape_list(x) var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype) if positions is None: pad_length = tf.maximum(0, length - max_length) sliced = tf.cond( tf.less(length, max_length), lambda: tf.slice(var, [0, 0], [length, -1]), lambda: tf.pad(var, [[0, pad_length], [0, 0]])) return x + tf.expand_dims(sliced, 0) else: return x + tf.gather(var, tf.to_int32(positions))
python
def add_positional_embedding(x, max_length, name=None, positions=None): """Adds positional embedding. Args: x: Tensor with shape [batch, length, depth]. max_length: int representing static maximum size of any dimension. name: str representing name of the embedding tf.Variable. positions: Tensor with shape [batch, length]. Returns: Tensor of same shape as x. """ with tf.name_scope("add_positional_embedding"): _, length, depth = common_layers.shape_list(x) var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype) if positions is None: pad_length = tf.maximum(0, length - max_length) sliced = tf.cond( tf.less(length, max_length), lambda: tf.slice(var, [0, 0], [length, -1]), lambda: tf.pad(var, [[0, pad_length], [0, 0]])) return x + tf.expand_dims(sliced, 0) else: return x + tf.gather(var, tf.to_int32(positions))
[ "def", "add_positional_embedding", "(", "x", ",", "max_length", ",", "name", "=", "None", ",", "positions", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "\"add_positional_embedding\"", ")", ":", "_", ",", "length", ",", "depth", "=", "common_layers", ".", "shape_list", "(", "x", ")", "var", "=", "tf", ".", "cast", "(", "tf", ".", "get_variable", "(", "name", ",", "[", "max_length", ",", "depth", "]", ")", ",", "x", ".", "dtype", ")", "if", "positions", "is", "None", ":", "pad_length", "=", "tf", ".", "maximum", "(", "0", ",", "length", "-", "max_length", ")", "sliced", "=", "tf", ".", "cond", "(", "tf", ".", "less", "(", "length", ",", "max_length", ")", ",", "lambda", ":", "tf", ".", "slice", "(", "var", ",", "[", "0", ",", "0", "]", ",", "[", "length", ",", "-", "1", "]", ")", ",", "lambda", ":", "tf", ".", "pad", "(", "var", ",", "[", "[", "0", ",", "pad_length", "]", ",", "[", "0", ",", "0", "]", "]", ")", ")", "return", "x", "+", "tf", ".", "expand_dims", "(", "sliced", ",", "0", ")", "else", ":", "return", "x", "+", "tf", ".", "gather", "(", "var", ",", "tf", ".", "to_int32", "(", "positions", ")", ")" ]
Adds positional embedding. Args: x: Tensor with shape [batch, length, depth]. max_length: int representing static maximum size of any dimension. name: str representing name of the embedding tf.Variable. positions: Tensor with shape [batch, length]. Returns: Tensor of same shape as x.
[ "Adds", "positional", "embedding", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L666-L689
train
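A sketch for add_positional_embedding, assuming TensorFlow 1.x; `name` must be supplied because it names the embedding variable, and variables need initialization. The variable name below is illustrative.

import tensorflow as tf
from tensor2tensor.layers import common_attention

x = tf.random_normal([2, 20, 64])  # [batch, length, depth]
y = common_attention.add_positional_embedding(x, max_length=128,
                                               name="positional_embedding")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y).shape)  # (2, 20, 64)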
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
add_positional_embedding_nd
def add_positional_embedding_nd(x, max_length, name=None): """Adds n-dimensional positional embedding. The embeddings add to all positional dimensions of the tensor. Args: x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional dimensions, i.e., 1 for text, 2 for images, 3 for video, etc. max_length: int representing static maximum size of any dimension. name: str representing name of the embedding tf.Variable. Returns: Tensor of same shape as x. """ with tf.name_scope("add_positional_embedding_nd"): x_shape = common_layers.shape_list(x) num_dims = len(x_shape) - 2 depth = x_shape[-1] base_shape = [1] * (num_dims + 1) + [depth] base_start = [0] * (num_dims + 2) base_size = [-1] + [1] * num_dims + [depth] for i in range(num_dims): shape = base_shape[:] start = base_start[:] size = base_size[:] shape[i + 1] = max_length size[i + 1] = x_shape[i + 1] var = tf.get_variable( name + "_%d" % i, shape, initializer=tf.random_normal_initializer(0, depth**-0.5)) var = var * depth**0.5 x += tf.slice(var, start, size) return x
python
def add_positional_embedding_nd(x, max_length, name=None): """Adds n-dimensional positional embedding. The embeddings add to all positional dimensions of the tensor. Args: x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional dimensions, i.e., 1 for text, 2 for images, 3 for video, etc. max_length: int representing static maximum size of any dimension. name: str representing name of the embedding tf.Variable. Returns: Tensor of same shape as x. """ with tf.name_scope("add_positional_embedding_nd"): x_shape = common_layers.shape_list(x) num_dims = len(x_shape) - 2 depth = x_shape[-1] base_shape = [1] * (num_dims + 1) + [depth] base_start = [0] * (num_dims + 2) base_size = [-1] + [1] * num_dims + [depth] for i in range(num_dims): shape = base_shape[:] start = base_start[:] size = base_size[:] shape[i + 1] = max_length size[i + 1] = x_shape[i + 1] var = tf.get_variable( name + "_%d" % i, shape, initializer=tf.random_normal_initializer(0, depth**-0.5)) var = var * depth**0.5 x += tf.slice(var, start, size) return x
[ "def", "add_positional_embedding_nd", "(", "x", ",", "max_length", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "\"add_positional_embedding_nd\"", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "num_dims", "=", "len", "(", "x_shape", ")", "-", "2", "depth", "=", "x_shape", "[", "-", "1", "]", "base_shape", "=", "[", "1", "]", "*", "(", "num_dims", "+", "1", ")", "+", "[", "depth", "]", "base_start", "=", "[", "0", "]", "*", "(", "num_dims", "+", "2", ")", "base_size", "=", "[", "-", "1", "]", "+", "[", "1", "]", "*", "num_dims", "+", "[", "depth", "]", "for", "i", "in", "range", "(", "num_dims", ")", ":", "shape", "=", "base_shape", "[", ":", "]", "start", "=", "base_start", "[", ":", "]", "size", "=", "base_size", "[", ":", "]", "shape", "[", "i", "+", "1", "]", "=", "max_length", "size", "[", "i", "+", "1", "]", "=", "x_shape", "[", "i", "+", "1", "]", "var", "=", "tf", ".", "get_variable", "(", "name", "+", "\"_%d\"", "%", "i", ",", "shape", ",", "initializer", "=", "tf", ".", "random_normal_initializer", "(", "0", ",", "depth", "**", "-", "0.5", ")", ")", "var", "=", "var", "*", "depth", "**", "0.5", "x", "+=", "tf", ".", "slice", "(", "var", ",", "start", ",", "size", ")", "return", "x" ]
Adds n-dimensional positional embedding. The embeddings add to all positional dimensions of the tensor. Args: x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional dimensions, i.e., 1 for text, 2 for images, 3 for video, etc. max_length: int representing static maximum size of any dimension. name: str representing name of the embedding tf.Variable. Returns: Tensor of same shape as x.
[ "Adds", "n", "-", "dimensional", "positional", "embedding", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L692-L725
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
make_edge_vectors
def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None): """Gets edge vectors for the edge types in the adjacency matrix. Args: adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints. num_edge_types: Number of different edge types depth: Number of channels name: a string Returns: A [batch, num_nodes, num_nodes, depth] vector of tensors """ with tf.variable_scope(name, default_name="edge_vectors"): att_adj_vectors_shape = [num_edge_types, depth] adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix) adj_vectors = ( tf.get_variable( "adj_vectors", att_adj_vectors_shape, initializer=tf.random_normal_initializer(0, depth**-0.5)) * (depth**0.5)) # Avoiding gathers so that it works on TPUs # adjacency_matrix_one_hot has shape # [batch, num_nodes, num_nodes, num_edge_types] adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types) att_adj_vectors = tf.matmul( tf.reshape(tf.to_float(adjacency_matrix_one_hot), [-1, num_edge_types]), adj_vectors) return tf.reshape(att_adj_vectors, [adjacency_matrix_shape[0], adjacency_matrix_shape[1], adjacency_matrix_shape[2], depth])
python
def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None): """Gets edge vectors for the edge types in the adjacency matrix. Args: adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints. num_edge_types: Number of different edge types depth: Number of channels name: a string Returns: A [batch, num_nodes, num_nodes, depth] vector of tensors """ with tf.variable_scope(name, default_name="edge_vectors"): att_adj_vectors_shape = [num_edge_types, depth] adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix) adj_vectors = ( tf.get_variable( "adj_vectors", att_adj_vectors_shape, initializer=tf.random_normal_initializer(0, depth**-0.5)) * (depth**0.5)) # Avoiding gathers so that it works on TPUs # adjacency_matrix_one_hot has shape # [batch, num_nodes, num_nodes, num_edge_types] adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types) att_adj_vectors = tf.matmul( tf.reshape(tf.to_float(adjacency_matrix_one_hot), [-1, num_edge_types]), adj_vectors) return tf.reshape(att_adj_vectors, [adjacency_matrix_shape[0], adjacency_matrix_shape[1], adjacency_matrix_shape[2], depth])
[ "def", "make_edge_vectors", "(", "adjacency_matrix", ",", "num_edge_types", ",", "depth", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"edge_vectors\"", ")", ":", "att_adj_vectors_shape", "=", "[", "num_edge_types", ",", "depth", "]", "adjacency_matrix_shape", "=", "common_layers", ".", "shape_list", "(", "adjacency_matrix", ")", "adj_vectors", "=", "(", "tf", ".", "get_variable", "(", "\"adj_vectors\"", ",", "att_adj_vectors_shape", ",", "initializer", "=", "tf", ".", "random_normal_initializer", "(", "0", ",", "depth", "**", "-", "0.5", ")", ")", "*", "(", "depth", "**", "0.5", ")", ")", "# Avoiding gathers so that it works on TPUs", "# adjacency_matrix_one_hot has shape", "# [batch, num_nodes, num_nodes, num_edge_types]", "adjacency_matrix_one_hot", "=", "tf", ".", "one_hot", "(", "adjacency_matrix", ",", "num_edge_types", ")", "att_adj_vectors", "=", "tf", ".", "matmul", "(", "tf", ".", "reshape", "(", "tf", ".", "to_float", "(", "adjacency_matrix_one_hot", ")", ",", "[", "-", "1", ",", "num_edge_types", "]", ")", ",", "adj_vectors", ")", "return", "tf", ".", "reshape", "(", "att_adj_vectors", ",", "[", "adjacency_matrix_shape", "[", "0", "]", ",", "adjacency_matrix_shape", "[", "1", "]", ",", "adjacency_matrix_shape", "[", "2", "]", ",", "depth", "]", ")" ]
Gets edge vectors for the edge types in the adjacency matrix. Args: adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints. num_edge_types: Number of different edge types depth: Number of channels name: a string Returns: A [batch, num_nodes, num_nodes, depth] vector of tensors
[ "Gets", "edge", "vectors", "for", "the", "edge", "types", "in", "the", "adjacency", "matrix", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L728-L759
train
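A sketch for make_edge_vectors on a toy graph, assuming TensorFlow 1.x; entries of the adjacency matrix are integer edge-type ids (0 can stand for "no edge"), and each id is mapped to a learned depth-dimensional vector.

import tensorflow as tf
from tensor2tensor.layers import common_attention

# [batch=1, num_nodes=3, num_nodes=3]; values are edge-type ids in [0, num_edge_types).
adjacency = tf.constant([[[0, 1, 0],
                          [2, 0, 1],
                          [0, 2, 0]]])
edge_vectors = common_attention.make_edge_vectors(adjacency, num_edge_types=3, depth=8)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(edge_vectors).shape)  # (1, 3, 3, 8)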
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
padding_to_length
def padding_to_length(padding): """Calculate the length of mask based on padding. Args: padding: a Tensor with shape [..., length]. Returns: a Tensor with shape [...]. """ non_padding = 1.0 - padding return tf.to_int32(tf.reduce_sum(non_padding, axis=-1))
python
def padding_to_length(padding): """Calculate the length of mask based on padding. Args: padding: a Tensor with shape [..., length]. Returns: a Tensor with shape [...]. """ non_padding = 1.0 - padding return tf.to_int32(tf.reduce_sum(non_padding, axis=-1))
[ "def", "padding_to_length", "(", "padding", ")", ":", "non_padding", "=", "1.0", "-", "padding", "return", "tf", ".", "to_int32", "(", "tf", ".", "reduce_sum", "(", "non_padding", ",", "axis", "=", "-", "1", ")", ")" ]
Calculate the length of mask based on padding. Args: padding: a Tensor with shape [..., length]. Returns: a Tensor with shape [...].
[ "Calculate", "the", "length", "of", "mask", "based", "on", "padding", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L860-L869
train
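A tiny check of padding_to_length, assuming TensorFlow 1.x; the input follows the usual convention of 1.0 at padded positions and 0.0 elsewhere.

import tensorflow as tf
from tensor2tensor.layers import common_attention

padding = tf.constant([[0.0, 0.0, 0.0, 1.0, 1.0],
                       [0.0, 1.0, 1.0, 1.0, 1.0]])
lengths = common_attention.padding_to_length(padding)

with tf.Session() as sess:
    print(sess.run(lengths))  # [3 1]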
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
attention_bias_local
def attention_bias_local(length, max_backward, max_forward): """Create an bias tensor to be added to attention logits. A position may attend to positions at most max_distance from it, forward and backwards. This does not actually save any computation. Args: length: int max_backward: int, maximum distance backward to attend. Negative values indicate unlimited. max_forward: int, maximum distance forward to attend. Negative values indicate unlimited. Returns: a `Tensor` with shape [1, 1, length, length]. """ band = common_layers.ones_matrix_band_part( length, length, max_backward, max_forward, out_shape=[1, 1, length, length]) return -1e9 * (1.0 - band)
python
def attention_bias_local(length, max_backward, max_forward): """Create an bias tensor to be added to attention logits. A position may attend to positions at most max_distance from it, forward and backwards. This does not actually save any computation. Args: length: int max_backward: int, maximum distance backward to attend. Negative values indicate unlimited. max_forward: int, maximum distance forward to attend. Negative values indicate unlimited. Returns: a `Tensor` with shape [1, 1, length, length]. """ band = common_layers.ones_matrix_band_part( length, length, max_backward, max_forward, out_shape=[1, 1, length, length]) return -1e9 * (1.0 - band)
[ "def", "attention_bias_local", "(", "length", ",", "max_backward", ",", "max_forward", ")", ":", "band", "=", "common_layers", ".", "ones_matrix_band_part", "(", "length", ",", "length", ",", "max_backward", ",", "max_forward", ",", "out_shape", "=", "[", "1", ",", "1", ",", "length", ",", "length", "]", ")", "return", "-", "1e9", "*", "(", "1.0", "-", "band", ")" ]
Create an bias tensor to be added to attention logits. A position may attend to positions at most max_distance from it, forward and backwards. This does not actually save any computation. Args: length: int max_backward: int, maximum distance backward to attend. Negative values indicate unlimited. max_forward: int, maximum distance forward to attend. Negative values indicate unlimited. Returns: a `Tensor` with shape [1, 1, length, length].
[ "Create", "an", "bias", "tensor", "to", "be", "added", "to", "attention", "logits", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L873-L897
train
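A sketch for attention_bias_local, assuming TensorFlow 1.x; it builds a causal window of width 2, so each position may attend to itself and the two previous positions only. Negative limits mean unlimited in that direction.

import tensorflow as tf
from tensor2tensor.layers import common_attention

# Each query may look at most 2 steps backward and 0 steps forward.
bias = common_attention.attention_bias_local(length=5, max_backward=2, max_forward=0)

with tf.Session() as sess:
    b = sess.run(bias)                 # shape (1, 1, 5, 5)
    print((b[0, 0] < 0).astype(int))   # 1 marks masked (disallowed) positions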
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
attention_bias_same_segment
def attention_bias_same_segment(query_segment_id, memory_segment_id): """Create an bias tensor to be added to attention logits. Positions with the same segment_ids can see each other. Args: query_segment_id: a float `Tensor` with shape [batch, query_length]. memory_segment_id: a float `Tensor` with shape [batch, memory_length]. Returns: a `Tensor` with shape [batch, 1, query_length, memory_length]. """ ret = (tf.to_float( tf.not_equal( tf.expand_dims(query_segment_id, 2), tf.expand_dims(memory_segment_id, 1))) * large_compatible_negative(memory_segment_id.dtype)) return tf.expand_dims(ret, axis=1)
python
def attention_bias_same_segment(query_segment_id, memory_segment_id): """Create an bias tensor to be added to attention logits. Positions with the same segment_ids can see each other. Args: query_segment_id: a float `Tensor` with shape [batch, query_length]. memory_segment_id: a float `Tensor` with shape [batch, memory_length]. Returns: a `Tensor` with shape [batch, 1, query_length, memory_length]. """ ret = (tf.to_float( tf.not_equal( tf.expand_dims(query_segment_id, 2), tf.expand_dims(memory_segment_id, 1))) * large_compatible_negative(memory_segment_id.dtype)) return tf.expand_dims(ret, axis=1)
[ "def", "attention_bias_same_segment", "(", "query_segment_id", ",", "memory_segment_id", ")", ":", "ret", "=", "(", "tf", ".", "to_float", "(", "tf", ".", "not_equal", "(", "tf", ".", "expand_dims", "(", "query_segment_id", ",", "2", ")", ",", "tf", ".", "expand_dims", "(", "memory_segment_id", ",", "1", ")", ")", ")", "*", "large_compatible_negative", "(", "memory_segment_id", ".", "dtype", ")", ")", "return", "tf", ".", "expand_dims", "(", "ret", ",", "axis", "=", "1", ")" ]
Create an bias tensor to be added to attention logits. Positions with the same segment_ids can see each other. Args: query_segment_id: a float `Tensor` with shape [batch, query_length]. memory_segment_id: a float `Tensor` with shape [batch, memory_length]. Returns: a `Tensor` with shape [batch, 1, query_length, memory_length].
[ "Create", "an", "bias", "tensor", "to", "be", "added", "to", "attention", "logits", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L916-L933
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
attention_bias_ignore_padding
def attention_bias_ignore_padding(memory_padding): """Create an bias tensor to be added to attention logits. Args: memory_padding: a float `Tensor` with shape [batch, memory_length]. Returns: a `Tensor` with shape [batch, 1, 1, memory_length]. """ ret = memory_padding * large_compatible_negative(memory_padding.dtype) return tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1)
python
def attention_bias_ignore_padding(memory_padding): """Create an bias tensor to be added to attention logits. Args: memory_padding: a float `Tensor` with shape [batch, memory_length]. Returns: a `Tensor` with shape [batch, 1, 1, memory_length]. """ ret = memory_padding * large_compatible_negative(memory_padding.dtype) return tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1)
[ "def", "attention_bias_ignore_padding", "(", "memory_padding", ")", ":", "ret", "=", "memory_padding", "*", "large_compatible_negative", "(", "memory_padding", ".", "dtype", ")", "return", "tf", ".", "expand_dims", "(", "tf", ".", "expand_dims", "(", "ret", ",", "axis", "=", "1", ")", ",", "axis", "=", "1", ")" ]
Create an bias tensor to be added to attention logits. Args: memory_padding: a float `Tensor` with shape [batch, memory_length]. Returns: a `Tensor` with shape [batch, 1, 1, memory_length].
[ "Create", "an", "bias", "tensor", "to", "be", "added", "to", "attention", "logits", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L937-L947
train
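A sketch for attention_bias_ignore_padding, assuming TensorFlow 1.x; the resulting bias broadcasts over heads and query positions when added to [batch, heads, query_length, memory_length] logits.

import tensorflow as tf
from tensor2tensor.layers import common_attention

memory_padding = tf.constant([[0.0, 0.0, 1.0, 1.0]])  # last two positions are padding
bias = common_attention.attention_bias_ignore_padding(memory_padding)

with tf.Session() as sess:
    print(sess.run(bias).shape)  # (1, 1, 1, 4); large negative values at padded positions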
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
attention_bias_to_padding
def attention_bias_to_padding(attention_bias, cast_fn=tf.to_float): """Inverse of attention_bias_ignore_padding(). Args: attention_bias: a `Tensor` with shape [batch, 1, 1, memory_length], as returned by attention_bias_ignore_padding(). cast_fn: function used to cast to output type. Returns: a Tensor with shape [batch, memory_length] with 1.0 in padding positions and 0.0 in non-padding positions. Type is determined by cast_fn. """ # `attention_bias` is a large negative number in padding positions and 0.0 # elsewhere. return tf.squeeze(cast_fn(tf.less(attention_bias, -1)), axis=[1, 2])
python
def attention_bias_to_padding(attention_bias, cast_fn=tf.to_float): """Inverse of attention_bias_ignore_padding(). Args: attention_bias: a `Tensor` with shape [batch, 1, 1, memory_length], as returned by attention_bias_ignore_padding(). cast_fn: function used to cast to output type. Returns: a Tensor with shape [batch, memory_length] with 1.0 in padding positions and 0.0 in non-padding positions. Type is determined by cast_fn. """ # `attention_bias` is a large negative number in padding positions and 0.0 # elsewhere. return tf.squeeze(cast_fn(tf.less(attention_bias, -1)), axis=[1, 2])
[ "def", "attention_bias_to_padding", "(", "attention_bias", ",", "cast_fn", "=", "tf", ".", "to_float", ")", ":", "# `attention_bias` is a large negative number in padding positions and 0.0", "# elsewhere.", "return", "tf", ".", "squeeze", "(", "cast_fn", "(", "tf", ".", "less", "(", "attention_bias", ",", "-", "1", ")", ")", ",", "axis", "=", "[", "1", ",", "2", "]", ")" ]
Inverse of attention_bias_ignore_padding(). Args: attention_bias: a `Tensor` with shape [batch, 1, 1, memory_length], as returned by attention_bias_ignore_padding(). cast_fn: function used to cast to output type. Returns: a Tensor with shape [batch, memory_length] with 1.0 in padding positions and 0.0 in non-padding positions. Type is determined by cast_fn.
[ "Inverse", "of", "attention_bias_ignore_padding", "()", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L951-L965
train
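A round-trip sketch showing attention_bias_to_padding recovering the padding mask produced by attention_bias_ignore_padding, assuming TensorFlow 1.x.

import tensorflow as tf
from tensor2tensor.layers import common_attention

padding = tf.constant([[0.0, 0.0, 1.0, 1.0]])
bias = common_attention.attention_bias_ignore_padding(padding)
recovered = common_attention.attention_bias_to_padding(bias)

with tf.Session() as sess:
    print(sess.run(recovered))  # [[0. 0. 1. 1.]] -- matches the original padding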
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
attention_bias_prepend_inputs_full_attention
def attention_bias_prepend_inputs_full_attention(padding): """Create a bias tensor for prepend_mode="prepend_inputs_full_attention". See prepend_inputs in common_hparams.py. Produces a bias tensor to be used in self-attention. This bias tensor allows for full connectivity in the "inputs" part of the sequence and masked connectivity in the targets part. Args: padding: a float `Tensor` with shape [batch, length] with ones in positions corresponding to padding. In each row, a single padding position separates the input part from the target part. Returns: a `Tensor` with shape [batch, 1, length, length]. """ # Everything past the first padding position is part of the target. # This Tensor has zeros for the source portion and separator, # and ones for the target portion. in_target = tf.cumsum(padding, axis=1, exclusive=True) # The position within the target, or 0 if part of the source. target_pos = tf.cumsum(in_target, axis=1) # A position with a lesser target_pos cannot see a position with greater # target_pos. illegal_connections = tf.greater( tf.expand_dims(target_pos, 1), tf.expand_dims(target_pos, 2)) bias = tf.to_float(illegal_connections) * -1e9 bias = tf.expand_dims(bias, 1) return bias
python
def attention_bias_prepend_inputs_full_attention(padding): """Create a bias tensor for prepend_mode="prepend_inputs_full_attention". See prepend_inputs in common_hparams.py. Produces a bias tensor to be used in self-attention. This bias tensor allows for full connectivity in the "inputs" part of the sequence and masked connectivity in the targets part. Args: padding: a float `Tensor` with shape [batch, length] with ones in positions corresponding to padding. In each row, a single padding position separates the input part from the target part. Returns: a `Tensor` with shape [batch, 1, length, length]. """ # Everything past the first padding position is part of the target. # This Tensor has zeros for the source portion and separator, # and ones for the target portion. in_target = tf.cumsum(padding, axis=1, exclusive=True) # The position within the target, or 0 if part of the source. target_pos = tf.cumsum(in_target, axis=1) # A position with a lesser target_pos cannot see a position with greater # target_pos. illegal_connections = tf.greater( tf.expand_dims(target_pos, 1), tf.expand_dims(target_pos, 2)) bias = tf.to_float(illegal_connections) * -1e9 bias = tf.expand_dims(bias, 1) return bias
[ "def", "attention_bias_prepend_inputs_full_attention", "(", "padding", ")", ":", "# Everything past the first padding position is part of the target.", "# This Tensor has zeros for the source portion and separator,", "# and ones for the target portion.", "in_target", "=", "tf", ".", "cumsum", "(", "padding", ",", "axis", "=", "1", ",", "exclusive", "=", "True", ")", "# The position within the target, or 0 if part of the source.", "target_pos", "=", "tf", ".", "cumsum", "(", "in_target", ",", "axis", "=", "1", ")", "# A position with a lesser target_pos cannot see a position with greater", "# target_pos.", "illegal_connections", "=", "tf", ".", "greater", "(", "tf", ".", "expand_dims", "(", "target_pos", ",", "1", ")", ",", "tf", ".", "expand_dims", "(", "target_pos", ",", "2", ")", ")", "bias", "=", "tf", ".", "to_float", "(", "illegal_connections", ")", "*", "-", "1e9", "bias", "=", "tf", ".", "expand_dims", "(", "bias", ",", "1", ")", "return", "bias" ]
Create a bias tensor for prepend_mode="prepend_inputs_full_attention". See prepend_inputs in common_hparams.py. Produces a bias tensor to be used in self-attention. This bias tensor allows for full connectivity in the "inputs" part of the sequence and masked connectivity in the targets part. Args: padding: a float `Tensor` with shape [batch, length] with ones in positions corresponding to padding. In each row, a single padding position separates the input part from the target part. Returns: a `Tensor` with shape [batch, 1, length, length].
[ "Create", "a", "bias", "tensor", "for", "prepend_mode", "=", "prepend_inputs_full_attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L969-L999
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
attention_bias_proximal
def attention_bias_proximal(length): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. Returns: a Tensor with shape [1, 1, length, length] """ r = tf.to_float(tf.range(length)) diff = tf.expand_dims(r, 0) - tf.expand_dims(r, 1) return tf.expand_dims(tf.expand_dims(-tf.log1p(tf.abs(diff)), 0), 0)
python
def attention_bias_proximal(length): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. Returns: a Tensor with shape [1, 1, length, length] """ r = tf.to_float(tf.range(length)) diff = tf.expand_dims(r, 0) - tf.expand_dims(r, 1) return tf.expand_dims(tf.expand_dims(-tf.log1p(tf.abs(diff)), 0), 0)
[ "def", "attention_bias_proximal", "(", "length", ")", ":", "r", "=", "tf", ".", "to_float", "(", "tf", ".", "range", "(", "length", ")", ")", "diff", "=", "tf", ".", "expand_dims", "(", "r", ",", "0", ")", "-", "tf", ".", "expand_dims", "(", "r", ",", "1", ")", "return", "tf", ".", "expand_dims", "(", "tf", ".", "expand_dims", "(", "-", "tf", ".", "log1p", "(", "tf", ".", "abs", "(", "diff", ")", ")", ",", "0", ")", ",", "0", ")" ]
Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. Returns: a Tensor with shape [1, 1, length, length]
[ "Bias", "for", "self", "-", "attention", "to", "encourage", "attention", "to", "close", "positions", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1003-L1014
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
attention_bias_batch
def attention_bias_batch(batch_coordinates_q, batch_coordinates_k=None, condition_fn=None): """Generate a mask to prevent the batch to attend to each others. Args: batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the coordinates of the batches batch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the coordinates of the batches. If None, do self-attention. condition_fn: Callable defining the attention mask. Returns: Float-like Tensor of shape [length_q, length_k] containing either 0 or -infinity (-1e9). """ if batch_coordinates_k is None: batch_coordinates_k = batch_coordinates_q # Convert to float first because of b/25387198. def to_float(bc): bc = tf.squeeze(bc, 1) bc = tf.to_float(bc) return bc # Broadcast to create [length_q, length_k] mask. bc_v = tf.expand_dims(to_float(batch_coordinates_q), 1) bc_h = tf.expand_dims(to_float(batch_coordinates_k), 0) bias_batch = bc_h - bc_v bias_batch = condition_fn(bias_batch) bias_batch *= -1e9 return bias_batch
python
def attention_bias_batch(batch_coordinates_q, batch_coordinates_k=None, condition_fn=None): """Generate a mask to prevent the batch to attend to each others. Args: batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the coordinates of the batches batch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the coordinates of the batches. If None, do self-attention. condition_fn: Callable defining the attention mask. Returns: Float-like Tensor of shape [length_q, length_k] containing either 0 or -infinity (-1e9). """ if batch_coordinates_k is None: batch_coordinates_k = batch_coordinates_q # Convert to float first because of b/25387198. def to_float(bc): bc = tf.squeeze(bc, 1) bc = tf.to_float(bc) return bc # Broadcast to create [length_q, length_k] mask. bc_v = tf.expand_dims(to_float(batch_coordinates_q), 1) bc_h = tf.expand_dims(to_float(batch_coordinates_k), 0) bias_batch = bc_h - bc_v bias_batch = condition_fn(bias_batch) bias_batch *= -1e9 return bias_batch
[ "def", "attention_bias_batch", "(", "batch_coordinates_q", ",", "batch_coordinates_k", "=", "None", ",", "condition_fn", "=", "None", ")", ":", "if", "batch_coordinates_k", "is", "None", ":", "batch_coordinates_k", "=", "batch_coordinates_q", "# Convert to float first because of b/25387198.", "def", "to_float", "(", "bc", ")", ":", "bc", "=", "tf", ".", "squeeze", "(", "bc", ",", "1", ")", "bc", "=", "tf", ".", "to_float", "(", "bc", ")", "return", "bc", "# Broadcast to create [length_q, length_k] mask.", "bc_v", "=", "tf", ".", "expand_dims", "(", "to_float", "(", "batch_coordinates_q", ")", ",", "1", ")", "bc_h", "=", "tf", ".", "expand_dims", "(", "to_float", "(", "batch_coordinates_k", ")", ",", "0", ")", "bias_batch", "=", "bc_h", "-", "bc_v", "bias_batch", "=", "condition_fn", "(", "bias_batch", ")", "bias_batch", "*=", "-", "1e9", "return", "bias_batch" ]
Generate a mask to prevent the batch to attend to each others. Args: batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the coordinates of the batches batch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the coordinates of the batches. If None, do self-attention. condition_fn: Callable defining the attention mask. Returns: Float-like Tensor of shape [length_q, length_k] containing either 0 or -infinity (-1e9).
[ "Generate", "a", "mask", "to", "prevent", "the", "batch", "to", "attend", "to", "each", "others", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1018-L1049
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
split_last_dimension
def split_last_dimension(x, n): """Reshape x so that the last dimension becomes two dimensions. The first of these two dimensions is n. Args: x: a Tensor with shape [..., m] n: an integer. Returns: a Tensor with shape [..., n, m/n] """ x_shape = common_layers.shape_list(x) m = x_shape[-1] if isinstance(m, int) and isinstance(n, int): assert m % n == 0 return tf.reshape(x, x_shape[:-1] + [n, m // n])
python
def split_last_dimension(x, n): """Reshape x so that the last dimension becomes two dimensions. The first of these two dimensions is n. Args: x: a Tensor with shape [..., m] n: an integer. Returns: a Tensor with shape [..., n, m/n] """ x_shape = common_layers.shape_list(x) m = x_shape[-1] if isinstance(m, int) and isinstance(n, int): assert m % n == 0 return tf.reshape(x, x_shape[:-1] + [n, m // n])
[ "def", "split_last_dimension", "(", "x", ",", "n", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "m", "=", "x_shape", "[", "-", "1", "]", "if", "isinstance", "(", "m", ",", "int", ")", "and", "isinstance", "(", "n", ",", "int", ")", ":", "assert", "m", "%", "n", "==", "0", "return", "tf", ".", "reshape", "(", "x", ",", "x_shape", "[", ":", "-", "1", "]", "+", "[", "n", ",", "m", "//", "n", "]", ")" ]
Reshape x so that the last dimension becomes two dimensions. The first of these two dimensions is n. Args: x: a Tensor with shape [..., m] n: an integer. Returns: a Tensor with shape [..., n, m/n]
[ "Reshape", "x", "so", "that", "the", "last", "dimension", "becomes", "two", "dimensions", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1069-L1085
train
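A sketch for split_last_dimension, assuming TensorFlow 1.x; this is the reshape used when splitting a hidden dimension into attention heads (split_heads additionally transposes the result).

import tensorflow as tf
from tensor2tensor.layers import common_attention

x = tf.zeros([2, 7, 64])                         # [batch, length, hidden]
y = common_attention.split_last_dimension(x, 8)  # [batch, length, heads=8, hidden/heads=8]
print(y.shape)  # (2, 7, 8, 8) -- static shape, no session needed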
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
combine_last_two_dimensions
def combine_last_two_dimensions(x): """Reshape x so that the last two dimension become one. Args: x: a Tensor with shape [..., a, b] Returns: a Tensor with shape [..., ab] """ x_shape = common_layers.shape_list(x) a, b = x_shape[-2:] return tf.reshape(x, x_shape[:-2] + [a * b])
python
def combine_last_two_dimensions(x): """Reshape x so that the last two dimension become one. Args: x: a Tensor with shape [..., a, b] Returns: a Tensor with shape [..., ab] """ x_shape = common_layers.shape_list(x) a, b = x_shape[-2:] return tf.reshape(x, x_shape[:-2] + [a * b])
[ "def", "combine_last_two_dimensions", "(", "x", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "a", ",", "b", "=", "x_shape", "[", "-", "2", ":", "]", "return", "tf", ".", "reshape", "(", "x", ",", "x_shape", "[", ":", "-", "2", "]", "+", "[", "a", "*", "b", "]", ")" ]
Reshape x so that the last two dimension become one. Args: x: a Tensor with shape [..., a, b] Returns: a Tensor with shape [..., ab]
[ "Reshape", "x", "so", "that", "the", "last", "two", "dimension", "become", "one", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1089-L1100
train
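A sketch showing combine_last_two_dimensions acting as the inverse of split_last_dimension, assuming TensorFlow 1.x.

import tensorflow as tf
from tensor2tensor.layers import common_attention

x = tf.zeros([2, 7, 64])
heads = common_attention.split_last_dimension(x, 8)           # [2, 7, 8, 8]
merged = common_attention.combine_last_two_dimensions(heads)  # back to [2, 7, 64]
print(merged.shape)  # (2, 7, 64)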
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
combine_first_two_dimensions
def combine_first_two_dimensions(x): """Reshape x so that the first two dimension become one. Args: x: a Tensor with shape [a, b, ...] Returns: a Tensor with shape [ab, ...] """ ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0)) old_shape = x.get_shape().dims a, b = old_shape[:2] new_shape = [a * b if a and b else None] + old_shape[2:] ret.set_shape(new_shape) return ret
python
def combine_first_two_dimensions(x): """Reshape x so that the first two dimension become one. Args: x: a Tensor with shape [a, b, ...] Returns: a Tensor with shape [ab, ...] """ ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0)) old_shape = x.get_shape().dims a, b = old_shape[:2] new_shape = [a * b if a and b else None] + old_shape[2:] ret.set_shape(new_shape) return ret
[ "def", "combine_first_two_dimensions", "(", "x", ")", ":", "ret", "=", "tf", ".", "reshape", "(", "x", ",", "tf", ".", "concat", "(", "[", "[", "-", "1", "]", ",", "common_layers", ".", "shape_list", "(", "x", ")", "[", "2", ":", "]", "]", ",", "0", ")", ")", "old_shape", "=", "x", ".", "get_shape", "(", ")", ".", "dims", "a", ",", "b", "=", "old_shape", "[", ":", "2", "]", "new_shape", "=", "[", "a", "*", "b", "if", "a", "and", "b", "else", "None", "]", "+", "old_shape", "[", "2", ":", "]", "ret", ".", "set_shape", "(", "new_shape", ")", "return", "ret" ]
Reshape x so that the first two dimension become one. Args: x: a Tensor with shape [a, b, ...] Returns: a Tensor with shape [ab, ...]
[ "Reshape", "x", "so", "that", "the", "first", "two", "dimension", "become", "one", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1104-L1118
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
attention_image_summary
def attention_image_summary(attn, image_shapes=None): """Compute color image summary. Args: attn: a Tensor with shape [batch, num_heads, query_length, memory_length] image_shapes: optional tuple of integer scalars. If the query positions and memory positions represent the pixels of flattened images, then pass in their dimensions: (query_rows, query_cols, memory_rows, memory_cols). If the query positions and memory positions represent the pixels x channels of flattened images, then pass in their dimensions: (query_rows, query_cols, query_channels, memory_rows, memory_cols, memory_channels). """ attn = tf.cast(attn, tf.float32) num_heads = common_layers.shape_list(attn)[1] # [batch, query_length, memory_length, num_heads] image = tf.transpose(attn, [0, 2, 3, 1]) image = tf.pow(image, 0.2) # for high-dynamic-range # Each head will correspond to one of RGB. # pad the heads to be a multiple of 3 image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, tf.mod(-num_heads, 3)]]) image = split_last_dimension(image, 3) image = tf.reduce_max(image, 4) if image_shapes is not None: if len(image_shapes) == 4: q_rows, q_cols, m_rows, m_cols = list(image_shapes) image = tf.reshape(image, [-1, q_rows, q_cols, m_rows, m_cols, 3]) image = tf.transpose(image, [0, 1, 3, 2, 4, 5]) image = tf.reshape(image, [-1, q_rows * m_rows, q_cols * m_cols, 3]) else: assert len(image_shapes) == 6 q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels = list( image_shapes) image = tf.reshape( image, [-1, q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels, 3]) image = tf.transpose(image, [0, 1, 4, 3, 2, 5, 6, 7]) image = tf.reshape( image, [-1, q_rows * m_rows * q_channnels, q_cols * m_cols * m_channels, 3]) tf.summary.image("attention", image, max_outputs=1)
python
def attention_image_summary(attn, image_shapes=None): """Compute color image summary. Args: attn: a Tensor with shape [batch, num_heads, query_length, memory_length] image_shapes: optional tuple of integer scalars. If the query positions and memory positions represent the pixels of flattened images, then pass in their dimensions: (query_rows, query_cols, memory_rows, memory_cols). If the query positions and memory positions represent the pixels x channels of flattened images, then pass in their dimensions: (query_rows, query_cols, query_channels, memory_rows, memory_cols, memory_channels). """ attn = tf.cast(attn, tf.float32) num_heads = common_layers.shape_list(attn)[1] # [batch, query_length, memory_length, num_heads] image = tf.transpose(attn, [0, 2, 3, 1]) image = tf.pow(image, 0.2) # for high-dynamic-range # Each head will correspond to one of RGB. # pad the heads to be a multiple of 3 image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, tf.mod(-num_heads, 3)]]) image = split_last_dimension(image, 3) image = tf.reduce_max(image, 4) if image_shapes is not None: if len(image_shapes) == 4: q_rows, q_cols, m_rows, m_cols = list(image_shapes) image = tf.reshape(image, [-1, q_rows, q_cols, m_rows, m_cols, 3]) image = tf.transpose(image, [0, 1, 3, 2, 4, 5]) image = tf.reshape(image, [-1, q_rows * m_rows, q_cols * m_cols, 3]) else: assert len(image_shapes) == 6 q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels = list( image_shapes) image = tf.reshape( image, [-1, q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels, 3]) image = tf.transpose(image, [0, 1, 4, 3, 2, 5, 6, 7]) image = tf.reshape( image, [-1, q_rows * m_rows * q_channnels, q_cols * m_cols * m_channels, 3]) tf.summary.image("attention", image, max_outputs=1)
[ "def", "attention_image_summary", "(", "attn", ",", "image_shapes", "=", "None", ")", ":", "attn", "=", "tf", ".", "cast", "(", "attn", ",", "tf", ".", "float32", ")", "num_heads", "=", "common_layers", ".", "shape_list", "(", "attn", ")", "[", "1", "]", "# [batch, query_length, memory_length, num_heads]", "image", "=", "tf", ".", "transpose", "(", "attn", ",", "[", "0", ",", "2", ",", "3", ",", "1", "]", ")", "image", "=", "tf", ".", "pow", "(", "image", ",", "0.2", ")", "# for high-dynamic-range", "# Each head will correspond to one of RGB.", "# pad the heads to be a multiple of 3", "image", "=", "tf", ".", "pad", "(", "image", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "tf", ".", "mod", "(", "-", "num_heads", ",", "3", ")", "]", "]", ")", "image", "=", "split_last_dimension", "(", "image", ",", "3", ")", "image", "=", "tf", ".", "reduce_max", "(", "image", ",", "4", ")", "if", "image_shapes", "is", "not", "None", ":", "if", "len", "(", "image_shapes", ")", "==", "4", ":", "q_rows", ",", "q_cols", ",", "m_rows", ",", "m_cols", "=", "list", "(", "image_shapes", ")", "image", "=", "tf", ".", "reshape", "(", "image", ",", "[", "-", "1", ",", "q_rows", ",", "q_cols", ",", "m_rows", ",", "m_cols", ",", "3", "]", ")", "image", "=", "tf", ".", "transpose", "(", "image", ",", "[", "0", ",", "1", ",", "3", ",", "2", ",", "4", ",", "5", "]", ")", "image", "=", "tf", ".", "reshape", "(", "image", ",", "[", "-", "1", ",", "q_rows", "*", "m_rows", ",", "q_cols", "*", "m_cols", ",", "3", "]", ")", "else", ":", "assert", "len", "(", "image_shapes", ")", "==", "6", "q_rows", ",", "q_cols", ",", "q_channnels", ",", "m_rows", ",", "m_cols", ",", "m_channels", "=", "list", "(", "image_shapes", ")", "image", "=", "tf", ".", "reshape", "(", "image", ",", "[", "-", "1", ",", "q_rows", ",", "q_cols", ",", "q_channnels", ",", "m_rows", ",", "m_cols", ",", "m_channels", ",", "3", "]", ")", "image", "=", "tf", ".", "transpose", "(", "image", ",", "[", "0", ",", "1", ",", "4", ",", "3", ",", "2", ",", "5", ",", "6", ",", "7", "]", ")", "image", "=", "tf", ".", "reshape", "(", "image", ",", "[", "-", "1", ",", "q_rows", "*", "m_rows", "*", "q_channnels", ",", "q_cols", "*", "m_cols", "*", "m_channels", ",", "3", "]", ")", "tf", ".", "summary", ".", "image", "(", "\"attention\"", ",", "image", ",", "max_outputs", "=", "1", ")" ]
Compute color image summary. Args: attn: a Tensor with shape [batch, num_heads, query_length, memory_length] image_shapes: optional tuple of integer scalars. If the query positions and memory positions represent the pixels of flattened images, then pass in their dimensions: (query_rows, query_cols, memory_rows, memory_cols). If the query positions and memory positions represent the pixels x channels of flattened images, then pass in their dimensions: (query_rows, query_cols, query_channels, memory_rows, memory_cols, memory_channels).
[ "Compute", "color", "image", "summary", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1176-L1217
train
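A minimal usage sketch for the attention_image_summary record above; the toy attention shape and the session setup are illustrative assumptions (TensorFlow 1.x graph mode, with tensor2tensor.layers.common_attention importable), not part of the source record.
# Hypothetical example: emit an image summary for random attention weights
# shaped [batch, num_heads, query_length, memory_length].
import tensorflow as tf
from tensor2tensor.layers import common_attention

attn = tf.random_uniform([2, 4, 16, 16])  # assumed toy shape
common_attention.attention_image_summary(attn)  # adds a summary op named "attention"
merged = tf.summary.merge_all()
with tf.Session() as sess:
    summary_bytes = sess.run(merged)  # serialized summary, ready for a FileWriter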
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
grouped_attention_multihead
def grouped_attention_multihead(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, output_depth, num_heads, num_groups, memory_target_density=2.0, multiplicative_overhead=1.25, additive_overhead=8.0, mask_right=False, make_image_summary=True, name=None): """Multi-head dot-product attention with sparsity. For each attention head, the queries are partitioned into groups. For each group, only a subset of the key-value pairs are considered. The choices of groups are selected based on trained predictors of the total attention given the group inclusion. memory_target_density indicates the average how many groups in which a key-value pair should participate. We use auxiliary losses to ensure that each group contains roughly the same number of queries and the same number of key-value pairs. If for a given sequence, the actual number of queries/pairs sent to an expert exceeds this target by a factor of more than multiplicative_overhead, then the last ones are dropped. We use this drop-last policy to avoid bleeding information backwards, which is necessary when using this function with autoregressive prediction. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth num_groups: an integer memory_target_density: a floating point scalar multiplicative_overhead: a floating point scalar additive_overhead: a floating point scalar mask_right: a boolean make_image_summary: a boolean name: an optional string Returns: A Tensor with shape [batch, length_q, output_depth] Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads. """ batch = common_layers.shape_list(query_antecedent)[0] length_q = common_layers.shape_list(query_antecedent)[1] length_kv = common_layers.shape_list(memory_antecedent)[1] if total_key_depth % num_heads != 0: raise ValueError("Key depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_key_depth, num_heads)) depth_qk = total_key_depth // num_heads if total_value_depth % num_heads != 0: raise ValueError("Value depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_value_depth, num_heads)) depth_v = total_value_depth // num_heads with tf.variable_scope( name, default_name="multihead_attention_sparse", values=[query_antecedent, memory_antecedent]): q = common_layers.dense( query_antecedent, total_key_depth, use_bias=False, name="q_transform") kv = common_layers.dense( memory_antecedent, total_key_depth + total_value_depth, use_bias=False, name="kv_transform") q = split_heads(q, num_heads) kv = split_heads(kv, num_heads) # Make predictions about q_total and m_total. # These are used to determine group inclusion. # We will train these by auxiliary losses. We use stop_gradient here # to keep these losses from back-propagating to the rest of the model. # We add biases that help balance the usage of the experts. 
q_pred = common_layers.dense( tf.stop_gradient(query_antecedent), num_heads * num_groups, use_bias=False, name="q_pred") q_pred = split_heads(q_pred, num_heads) q_bias = tf.get_variable("q_bias", [1, num_heads, 1, num_groups]) q_pred_biased = q_pred + q_bias m_pred = common_layers.dense( tf.stop_gradient(memory_antecedent), num_heads * num_groups, use_bias=False, name="m_pred") m_pred = split_heads(m_pred, num_heads) m_bias = tf.get_variable("m_bias", [1, num_heads, 1, num_groups]) m_pred_biased = m_pred + m_bias q *= depth_qk**-0.5 # q, kv, q_pred, m_pred are all [batch, heads, length_[q/m], ?] # now reshape them all to [batch * heads, length, ?] q = combine_first_two_dimensions(q) kv = combine_first_two_dimensions(kv) q_pred = combine_first_two_dimensions(q_pred) m_pred = combine_first_two_dimensions(m_pred) q_pred_biased = combine_first_two_dimensions(q_pred_biased) m_pred_biased = combine_first_two_dimensions(m_pred_biased) q_group = tf.argmax(q_pred_biased, axis=2) q_requests = tf.one_hot(q_group, num_groups, axis=-1) m_requests = tf.to_float(tf.greater(m_pred_biased, 0.0)) # include first memory position in all groups, to avoid division by zero. m_requests = tf.maximum( m_requests, tf.reshape(tf.one_hot([0], length_kv), [1, length_kv, 1])) q_group_size = tf.reduce_sum(q_requests, 1) m_group_size = tf.reduce_sum(m_requests, 1) q_group_target_size = tf.to_float(length_q) / tf.to_float(num_groups) m_group_target_size = ( tf.to_float(length_kv) * memory_target_density / tf.to_float(num_groups)) capacity_q = tf.minimum( length_q, tf.to_int32(q_group_target_size * multiplicative_overhead + additive_overhead)) capacity_m = tf.minimum( length_kv, tf.to_int32(m_group_target_size * multiplicative_overhead + additive_overhead)) q_dispatcher = expert_utils.TruncatingDispatcher(q_requests, capacity_q) m_dispatcher = expert_utils.TruncatingDispatcher(m_requests, capacity_m) q_gates = q_dispatcher.gates() m_gates = m_dispatcher.gates() dispatched_q = q_dispatcher.dispatch(q) dispatched_kv = m_dispatcher.dispatch(kv) # dispatched_q: [batch * num_heads, num_groups, capacity_q, depth_qk] # dispatched_kv: # [batch * num_heads, num_groups, capacity_m, depth_qk + depth_v] k, v = tf.split(dispatched_kv, [depth_qk, depth_v], axis=3) logits = tf.matmul(dispatched_q, k, transpose_b=True) bias = tf.expand_dims((m_dispatcher.nonpadding() - 1.0) * 1e9, 2) if mask_right: q_coordinate = tf.to_float( tf.expand_dims(q_dispatcher.length_coordinate(), 3)) m_coordinate = tf.to_float( tf.expand_dims(m_dispatcher.length_coordinate(), 2)) bias += tf.to_float(tf.greater(m_coordinate, q_coordinate)) * -1e9 logits += bias log_weights = tf.nn.log_softmax(logits) weights = tf.exp(log_weights) # For each query, this is the log of the sum of the unnormalized weights. q_total = tf.stop_gradient(logits[:, :, :, :1] - log_weights[:, :, :, :1]) # For each key, this is the sum of the normalized weights. 
m_total = tf.expand_dims( tf.reduce_sum(tf.stop_gradient(weights), axis=2), -1) o = tf.matmul(weights, v) o = q_dispatcher.combine(o) o = tf.reshape(o, [batch, num_heads, length_q, depth_v]) o = combine_heads(o) o = common_layers.dense( o, output_depth, use_bias=False, name="output_transform") m_total = m_dispatcher.combine(m_total) q_total = q_dispatcher.combine(q_total) q_total = tf.squeeze(q_total, -1) m_total = tf.squeeze(m_total, -1) # Compute summed m predictions for all groups m_pred_used = tf.reduce_sum(tf.exp(m_pred) * m_dispatcher.gates(), axis=2) q_pred_used = tf.reduce_sum(q_pred * q_dispatcher.gates(), axis=2) epsilon = 1e-3 m_pred_used = tf.log(m_pred_used + epsilon) m_total = tf.log(m_total + epsilon) m_loss = tf.nn.l2_loss(m_total - m_pred_used) q_loss = tf.nn.l2_loss( (q_total - q_pred_used) * tf.reduce_sum(q_gates, axis=2)) q_loss /= tf.to_float(batch * length_q) m_loss /= tf.to_float(batch * length_kv) # We would like the query groups to be equal sized. The group # size is discrete, so we need some trick here. We add a loss # proportional to the product of the group size and the # predictions for that group. This encourages the predictions to # decrease for groups that are too big. q_group_deviation = (q_group_size / q_group_target_size) - 1.0 q_balance_loss = tf.reduce_sum( tf.reduce_mean(q_pred_biased, axis=1) * q_group_deviation) / tf.to_float(batch) m_group_deviation = (m_group_size / m_group_target_size) - 1.0 m_balance_loss = tf.reduce_sum( tf.reduce_mean(m_pred_biased, axis=1) * m_group_deviation) / tf.to_float(batch) # The losses in this function only propagate back to variables # defined in this function, and the losses outside of this # function only propagate back to variables outside of this # function. Assuming some kind of adaptive learning algorithm, # it should not matter how much we scale the losses in this function. # Still we scale them down a lot so that they should not show up # much in the overall loss for the model. extra_loss_multiplier = 1e-3 extra_loss = q_loss + m_loss + q_balance_loss + m_balance_loss extra_loss *= extra_loss_multiplier # Show a bunch of summaries. if common_layers.should_generate_summaries() and make_image_summary: tf.summary.histogram("q_group_size", q_group_size) tf.summary.histogram("m_group_size", m_group_size) tf.summary.scalar("q_loss", q_loss) tf.summary.scalar("m_loss", m_loss) tf.summary.scalar("q_balance_loss", q_balance_loss) tf.summary.scalar("m_balance_loss", m_balance_loss) tf.summary.histogram("m_pred_used", m_pred_used) tf.summary.histogram("m_total", m_total) tf.summary.histogram("q_pred_used", q_pred_used) tf.summary.histogram("q_total", q_total) if make_image_summary: # image summaries are expensive. # So we restrict them to head_num<4, query_position<512, batch_index=0. trunc_heads = min(4, num_heads) trunc_length_q = tf.minimum(length_q, 512) # We recompute the attention for the first example, in an inefficient # way - masking. This lets us show pretty pictures. 
# [trunc_heads, length_q, group] q_gates_trunc = q_gates[:trunc_heads, :trunc_length_q, :] # [trunc_heads, length_kv, group] m_gates_trunc = m_gates[:trunc_heads, :, :] grouping_mask = tf.matmul( q_gates_trunc, m_gates_trunc, transpose_b=True) q_trunc = q[:trunc_heads, :trunc_length_q, :] k_trunc = kv[:trunc_heads, :, :depth_qk] logits_trunc = tf.matmul(q_trunc, k_trunc, transpose_b=True) if mask_right: band = common_layers.ones_matrix_band_part(trunc_length_q, length_kv, -1, 0) trunc_bias = tf.expand_dims((1.0 - band) * -1e9, 0) logits_trunc += trunc_bias att_trunc = tf.nn.softmax(logits_trunc) mask_coverage = tf.reduce_sum(grouping_mask * att_trunc) / ( tf.to_float(trunc_length_q) * trunc_heads) tf.summary.scalar("coverage", mask_coverage) att_trunc_hdr = tf.pow(att_trunc, 0.2) # for high-dynamic-range mask_channel = grouping_mask * tf.maximum(att_trunc_hdr, 0.3) image = tf.stack([att_trunc_hdr, mask_channel, mask_channel], axis=3) tf.summary.image("att", image, max_outputs=trunc_heads) # show one group for each head. att_per_group = tf.expand_dims(weights[:trunc_heads, 0, :, :], -1) tf.summary.image( "att_per_group_%d", tf.pow(att_per_group, 0.2), max_outputs=trunc_heads) return o, extra_loss
python
def grouped_attention_multihead(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, output_depth, num_heads, num_groups, memory_target_density=2.0, multiplicative_overhead=1.25, additive_overhead=8.0, mask_right=False, make_image_summary=True, name=None): """Multi-head dot-product attention with sparsity. For each attention head, the queries are partitioned into groups. For each group, only a subset of the key-value pairs are considered. The choices of groups are selected based on trained predictors of the total attention given the group inclusion. memory_target_density indicates the average how many groups in which a key-value pair should participate. We use auxiliary losses to ensure that each group contains roughly the same number of queries and the same number of key-value pairs. If for a given sequence, the actual number of queries/pairs sent to an expert exceeds this target by a factor of more than multiplicative_overhead, then the last ones are dropped. We use this drop-last policy to avoid bleeding information backwards, which is necessary when using this function with autoregressive prediction. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth num_groups: an integer memory_target_density: a floating point scalar multiplicative_overhead: a floating point scalar additive_overhead: a floating point scalar mask_right: a boolean make_image_summary: a boolean name: an optional string Returns: A Tensor with shape [batch, length_q, output_depth] Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads. """ batch = common_layers.shape_list(query_antecedent)[0] length_q = common_layers.shape_list(query_antecedent)[1] length_kv = common_layers.shape_list(memory_antecedent)[1] if total_key_depth % num_heads != 0: raise ValueError("Key depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_key_depth, num_heads)) depth_qk = total_key_depth // num_heads if total_value_depth % num_heads != 0: raise ValueError("Value depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_value_depth, num_heads)) depth_v = total_value_depth // num_heads with tf.variable_scope( name, default_name="multihead_attention_sparse", values=[query_antecedent, memory_antecedent]): q = common_layers.dense( query_antecedent, total_key_depth, use_bias=False, name="q_transform") kv = common_layers.dense( memory_antecedent, total_key_depth + total_value_depth, use_bias=False, name="kv_transform") q = split_heads(q, num_heads) kv = split_heads(kv, num_heads) # Make predictions about q_total and m_total. # These are used to determine group inclusion. # We will train these by auxiliary losses. We use stop_gradient here # to keep these losses from back-propagating to the rest of the model. # We add biases that help balance the usage of the experts. 
q_pred = common_layers.dense( tf.stop_gradient(query_antecedent), num_heads * num_groups, use_bias=False, name="q_pred") q_pred = split_heads(q_pred, num_heads) q_bias = tf.get_variable("q_bias", [1, num_heads, 1, num_groups]) q_pred_biased = q_pred + q_bias m_pred = common_layers.dense( tf.stop_gradient(memory_antecedent), num_heads * num_groups, use_bias=False, name="m_pred") m_pred = split_heads(m_pred, num_heads) m_bias = tf.get_variable("m_bias", [1, num_heads, 1, num_groups]) m_pred_biased = m_pred + m_bias q *= depth_qk**-0.5 # q, kv, q_pred, m_pred are all [batch, heads, length_[q/m], ?] # now reshape them all to [batch * heads, length, ?] q = combine_first_two_dimensions(q) kv = combine_first_two_dimensions(kv) q_pred = combine_first_two_dimensions(q_pred) m_pred = combine_first_two_dimensions(m_pred) q_pred_biased = combine_first_two_dimensions(q_pred_biased) m_pred_biased = combine_first_two_dimensions(m_pred_biased) q_group = tf.argmax(q_pred_biased, axis=2) q_requests = tf.one_hot(q_group, num_groups, axis=-1) m_requests = tf.to_float(tf.greater(m_pred_biased, 0.0)) # include first memory position in all groups, to avoid division by zero. m_requests = tf.maximum( m_requests, tf.reshape(tf.one_hot([0], length_kv), [1, length_kv, 1])) q_group_size = tf.reduce_sum(q_requests, 1) m_group_size = tf.reduce_sum(m_requests, 1) q_group_target_size = tf.to_float(length_q) / tf.to_float(num_groups) m_group_target_size = ( tf.to_float(length_kv) * memory_target_density / tf.to_float(num_groups)) capacity_q = tf.minimum( length_q, tf.to_int32(q_group_target_size * multiplicative_overhead + additive_overhead)) capacity_m = tf.minimum( length_kv, tf.to_int32(m_group_target_size * multiplicative_overhead + additive_overhead)) q_dispatcher = expert_utils.TruncatingDispatcher(q_requests, capacity_q) m_dispatcher = expert_utils.TruncatingDispatcher(m_requests, capacity_m) q_gates = q_dispatcher.gates() m_gates = m_dispatcher.gates() dispatched_q = q_dispatcher.dispatch(q) dispatched_kv = m_dispatcher.dispatch(kv) # dispatched_q: [batch * num_heads, num_groups, capacity_q, depth_qk] # dispatched_kv: # [batch * num_heads, num_groups, capacity_m, depth_qk + depth_v] k, v = tf.split(dispatched_kv, [depth_qk, depth_v], axis=3) logits = tf.matmul(dispatched_q, k, transpose_b=True) bias = tf.expand_dims((m_dispatcher.nonpadding() - 1.0) * 1e9, 2) if mask_right: q_coordinate = tf.to_float( tf.expand_dims(q_dispatcher.length_coordinate(), 3)) m_coordinate = tf.to_float( tf.expand_dims(m_dispatcher.length_coordinate(), 2)) bias += tf.to_float(tf.greater(m_coordinate, q_coordinate)) * -1e9 logits += bias log_weights = tf.nn.log_softmax(logits) weights = tf.exp(log_weights) # For each query, this is the log of the sum of the unnormalized weights. q_total = tf.stop_gradient(logits[:, :, :, :1] - log_weights[:, :, :, :1]) # For each key, this is the sum of the normalized weights. 
m_total = tf.expand_dims( tf.reduce_sum(tf.stop_gradient(weights), axis=2), -1) o = tf.matmul(weights, v) o = q_dispatcher.combine(o) o = tf.reshape(o, [batch, num_heads, length_q, depth_v]) o = combine_heads(o) o = common_layers.dense( o, output_depth, use_bias=False, name="output_transform") m_total = m_dispatcher.combine(m_total) q_total = q_dispatcher.combine(q_total) q_total = tf.squeeze(q_total, -1) m_total = tf.squeeze(m_total, -1) # Compute summed m predictions for all groups m_pred_used = tf.reduce_sum(tf.exp(m_pred) * m_dispatcher.gates(), axis=2) q_pred_used = tf.reduce_sum(q_pred * q_dispatcher.gates(), axis=2) epsilon = 1e-3 m_pred_used = tf.log(m_pred_used + epsilon) m_total = tf.log(m_total + epsilon) m_loss = tf.nn.l2_loss(m_total - m_pred_used) q_loss = tf.nn.l2_loss( (q_total - q_pred_used) * tf.reduce_sum(q_gates, axis=2)) q_loss /= tf.to_float(batch * length_q) m_loss /= tf.to_float(batch * length_kv) # We would like the query groups to be equal sized. The group # size is discrete, so we need some trick here. We add a loss # proportional to the product of the group size and the # predictions for that group. This encourages the predictions to # decrease for groups that are too big. q_group_deviation = (q_group_size / q_group_target_size) - 1.0 q_balance_loss = tf.reduce_sum( tf.reduce_mean(q_pred_biased, axis=1) * q_group_deviation) / tf.to_float(batch) m_group_deviation = (m_group_size / m_group_target_size) - 1.0 m_balance_loss = tf.reduce_sum( tf.reduce_mean(m_pred_biased, axis=1) * m_group_deviation) / tf.to_float(batch) # The losses in this function only propagate back to variables # defined in this function, and the losses outside of this # function only propagate back to variables outside of this # function. Assuming some kind of adaptive learning algorithm, # it should not matter how much we scale the losses in this function. # Still we scale them down a lot so that they should not show up # much in the overall loss for the model. extra_loss_multiplier = 1e-3 extra_loss = q_loss + m_loss + q_balance_loss + m_balance_loss extra_loss *= extra_loss_multiplier # Show a bunch of summaries. if common_layers.should_generate_summaries() and make_image_summary: tf.summary.histogram("q_group_size", q_group_size) tf.summary.histogram("m_group_size", m_group_size) tf.summary.scalar("q_loss", q_loss) tf.summary.scalar("m_loss", m_loss) tf.summary.scalar("q_balance_loss", q_balance_loss) tf.summary.scalar("m_balance_loss", m_balance_loss) tf.summary.histogram("m_pred_used", m_pred_used) tf.summary.histogram("m_total", m_total) tf.summary.histogram("q_pred_used", q_pred_used) tf.summary.histogram("q_total", q_total) if make_image_summary: # image summaries are expensive. # So we restrict them to head_num<4, query_position<512, batch_index=0. trunc_heads = min(4, num_heads) trunc_length_q = tf.minimum(length_q, 512) # We recompute the attention for the first example, in an inefficient # way - masking. This lets us show pretty pictures. 
# [trunc_heads, length_q, group] q_gates_trunc = q_gates[:trunc_heads, :trunc_length_q, :] # [trunc_heads, length_kv, group] m_gates_trunc = m_gates[:trunc_heads, :, :] grouping_mask = tf.matmul( q_gates_trunc, m_gates_trunc, transpose_b=True) q_trunc = q[:trunc_heads, :trunc_length_q, :] k_trunc = kv[:trunc_heads, :, :depth_qk] logits_trunc = tf.matmul(q_trunc, k_trunc, transpose_b=True) if mask_right: band = common_layers.ones_matrix_band_part(trunc_length_q, length_kv, -1, 0) trunc_bias = tf.expand_dims((1.0 - band) * -1e9, 0) logits_trunc += trunc_bias att_trunc = tf.nn.softmax(logits_trunc) mask_coverage = tf.reduce_sum(grouping_mask * att_trunc) / ( tf.to_float(trunc_length_q) * trunc_heads) tf.summary.scalar("coverage", mask_coverage) att_trunc_hdr = tf.pow(att_trunc, 0.2) # for high-dynamic-range mask_channel = grouping_mask * tf.maximum(att_trunc_hdr, 0.3) image = tf.stack([att_trunc_hdr, mask_channel, mask_channel], axis=3) tf.summary.image("att", image, max_outputs=trunc_heads) # show one group for each head. att_per_group = tf.expand_dims(weights[:trunc_heads, 0, :, :], -1) tf.summary.image( "att_per_group_%d", tf.pow(att_per_group, 0.2), max_outputs=trunc_heads) return o, extra_loss
[ "def", "grouped_attention_multihead", "(", "query_antecedent", ",", "memory_antecedent", ",", "total_key_depth", ",", "total_value_depth", ",", "output_depth", ",", "num_heads", ",", "num_groups", ",", "memory_target_density", "=", "2.0", ",", "multiplicative_overhead", "=", "1.25", ",", "additive_overhead", "=", "8.0", ",", "mask_right", "=", "False", ",", "make_image_summary", "=", "True", ",", "name", "=", "None", ")", ":", "batch", "=", "common_layers", ".", "shape_list", "(", "query_antecedent", ")", "[", "0", "]", "length_q", "=", "common_layers", ".", "shape_list", "(", "query_antecedent", ")", "[", "1", "]", "length_kv", "=", "common_layers", ".", "shape_list", "(", "memory_antecedent", ")", "[", "1", "]", "if", "total_key_depth", "%", "num_heads", "!=", "0", ":", "raise", "ValueError", "(", "\"Key depth (%d) must be divisible by the number of \"", "\"attention heads (%d).\"", "%", "(", "total_key_depth", ",", "num_heads", ")", ")", "depth_qk", "=", "total_key_depth", "//", "num_heads", "if", "total_value_depth", "%", "num_heads", "!=", "0", ":", "raise", "ValueError", "(", "\"Value depth (%d) must be divisible by the number of \"", "\"attention heads (%d).\"", "%", "(", "total_value_depth", ",", "num_heads", ")", ")", "depth_v", "=", "total_value_depth", "//", "num_heads", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"multihead_attention_sparse\"", ",", "values", "=", "[", "query_antecedent", ",", "memory_antecedent", "]", ")", ":", "q", "=", "common_layers", ".", "dense", "(", "query_antecedent", ",", "total_key_depth", ",", "use_bias", "=", "False", ",", "name", "=", "\"q_transform\"", ")", "kv", "=", "common_layers", ".", "dense", "(", "memory_antecedent", ",", "total_key_depth", "+", "total_value_depth", ",", "use_bias", "=", "False", ",", "name", "=", "\"kv_transform\"", ")", "q", "=", "split_heads", "(", "q", ",", "num_heads", ")", "kv", "=", "split_heads", "(", "kv", ",", "num_heads", ")", "# Make predictions about q_total and m_total.", "# These are used to determine group inclusion.", "# We will train these by auxiliary losses. 
We use stop_gradient here", "# to keep these losses from back-propagating to the rest of the model.", "# We add biases that help balance the usage of the experts.", "q_pred", "=", "common_layers", ".", "dense", "(", "tf", ".", "stop_gradient", "(", "query_antecedent", ")", ",", "num_heads", "*", "num_groups", ",", "use_bias", "=", "False", ",", "name", "=", "\"q_pred\"", ")", "q_pred", "=", "split_heads", "(", "q_pred", ",", "num_heads", ")", "q_bias", "=", "tf", ".", "get_variable", "(", "\"q_bias\"", ",", "[", "1", ",", "num_heads", ",", "1", ",", "num_groups", "]", ")", "q_pred_biased", "=", "q_pred", "+", "q_bias", "m_pred", "=", "common_layers", ".", "dense", "(", "tf", ".", "stop_gradient", "(", "memory_antecedent", ")", ",", "num_heads", "*", "num_groups", ",", "use_bias", "=", "False", ",", "name", "=", "\"m_pred\"", ")", "m_pred", "=", "split_heads", "(", "m_pred", ",", "num_heads", ")", "m_bias", "=", "tf", ".", "get_variable", "(", "\"m_bias\"", ",", "[", "1", ",", "num_heads", ",", "1", ",", "num_groups", "]", ")", "m_pred_biased", "=", "m_pred", "+", "m_bias", "q", "*=", "depth_qk", "**", "-", "0.5", "# q, kv, q_pred, m_pred are all [batch, heads, length_[q/m], ?]", "# now reshape them all to [batch * heads, length, ?]", "q", "=", "combine_first_two_dimensions", "(", "q", ")", "kv", "=", "combine_first_two_dimensions", "(", "kv", ")", "q_pred", "=", "combine_first_two_dimensions", "(", "q_pred", ")", "m_pred", "=", "combine_first_two_dimensions", "(", "m_pred", ")", "q_pred_biased", "=", "combine_first_two_dimensions", "(", "q_pred_biased", ")", "m_pred_biased", "=", "combine_first_two_dimensions", "(", "m_pred_biased", ")", "q_group", "=", "tf", ".", "argmax", "(", "q_pred_biased", ",", "axis", "=", "2", ")", "q_requests", "=", "tf", ".", "one_hot", "(", "q_group", ",", "num_groups", ",", "axis", "=", "-", "1", ")", "m_requests", "=", "tf", ".", "to_float", "(", "tf", ".", "greater", "(", "m_pred_biased", ",", "0.0", ")", ")", "# include first memory position in all groups, to avoid division by zero.", "m_requests", "=", "tf", ".", "maximum", "(", "m_requests", ",", "tf", ".", "reshape", "(", "tf", ".", "one_hot", "(", "[", "0", "]", ",", "length_kv", ")", ",", "[", "1", ",", "length_kv", ",", "1", "]", ")", ")", "q_group_size", "=", "tf", ".", "reduce_sum", "(", "q_requests", ",", "1", ")", "m_group_size", "=", "tf", ".", "reduce_sum", "(", "m_requests", ",", "1", ")", "q_group_target_size", "=", "tf", ".", "to_float", "(", "length_q", ")", "/", "tf", ".", "to_float", "(", "num_groups", ")", "m_group_target_size", "=", "(", "tf", ".", "to_float", "(", "length_kv", ")", "*", "memory_target_density", "/", "tf", ".", "to_float", "(", "num_groups", ")", ")", "capacity_q", "=", "tf", ".", "minimum", "(", "length_q", ",", "tf", ".", "to_int32", "(", "q_group_target_size", "*", "multiplicative_overhead", "+", "additive_overhead", ")", ")", "capacity_m", "=", "tf", ".", "minimum", "(", "length_kv", ",", "tf", ".", "to_int32", "(", "m_group_target_size", "*", "multiplicative_overhead", "+", "additive_overhead", ")", ")", "q_dispatcher", "=", "expert_utils", ".", "TruncatingDispatcher", "(", "q_requests", ",", "capacity_q", ")", "m_dispatcher", "=", "expert_utils", ".", "TruncatingDispatcher", "(", "m_requests", ",", "capacity_m", ")", "q_gates", "=", "q_dispatcher", ".", "gates", "(", ")", "m_gates", "=", "m_dispatcher", ".", "gates", "(", ")", "dispatched_q", "=", "q_dispatcher", ".", "dispatch", "(", "q", ")", "dispatched_kv", "=", "m_dispatcher", ".", "dispatch", "(", "kv", ")", 
"# dispatched_q: [batch * num_heads, num_groups, capacity_q, depth_qk]", "# dispatched_kv:", "# [batch * num_heads, num_groups, capacity_m, depth_qk + depth_v]", "k", ",", "v", "=", "tf", ".", "split", "(", "dispatched_kv", ",", "[", "depth_qk", ",", "depth_v", "]", ",", "axis", "=", "3", ")", "logits", "=", "tf", ".", "matmul", "(", "dispatched_q", ",", "k", ",", "transpose_b", "=", "True", ")", "bias", "=", "tf", ".", "expand_dims", "(", "(", "m_dispatcher", ".", "nonpadding", "(", ")", "-", "1.0", ")", "*", "1e9", ",", "2", ")", "if", "mask_right", ":", "q_coordinate", "=", "tf", ".", "to_float", "(", "tf", ".", "expand_dims", "(", "q_dispatcher", ".", "length_coordinate", "(", ")", ",", "3", ")", ")", "m_coordinate", "=", "tf", ".", "to_float", "(", "tf", ".", "expand_dims", "(", "m_dispatcher", ".", "length_coordinate", "(", ")", ",", "2", ")", ")", "bias", "+=", "tf", ".", "to_float", "(", "tf", ".", "greater", "(", "m_coordinate", ",", "q_coordinate", ")", ")", "*", "-", "1e9", "logits", "+=", "bias", "log_weights", "=", "tf", ".", "nn", ".", "log_softmax", "(", "logits", ")", "weights", "=", "tf", ".", "exp", "(", "log_weights", ")", "# For each query, this is the log of the sum of the unnormalized weights.", "q_total", "=", "tf", ".", "stop_gradient", "(", "logits", "[", ":", ",", ":", ",", ":", ",", ":", "1", "]", "-", "log_weights", "[", ":", ",", ":", ",", ":", ",", ":", "1", "]", ")", "# For each key, this is the sum of the normalized weights.", "m_total", "=", "tf", ".", "expand_dims", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "stop_gradient", "(", "weights", ")", ",", "axis", "=", "2", ")", ",", "-", "1", ")", "o", "=", "tf", ".", "matmul", "(", "weights", ",", "v", ")", "o", "=", "q_dispatcher", ".", "combine", "(", "o", ")", "o", "=", "tf", ".", "reshape", "(", "o", ",", "[", "batch", ",", "num_heads", ",", "length_q", ",", "depth_v", "]", ")", "o", "=", "combine_heads", "(", "o", ")", "o", "=", "common_layers", ".", "dense", "(", "o", ",", "output_depth", ",", "use_bias", "=", "False", ",", "name", "=", "\"output_transform\"", ")", "m_total", "=", "m_dispatcher", ".", "combine", "(", "m_total", ")", "q_total", "=", "q_dispatcher", ".", "combine", "(", "q_total", ")", "q_total", "=", "tf", ".", "squeeze", "(", "q_total", ",", "-", "1", ")", "m_total", "=", "tf", ".", "squeeze", "(", "m_total", ",", "-", "1", ")", "# Compute summed m predictions for all groups", "m_pred_used", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "exp", "(", "m_pred", ")", "*", "m_dispatcher", ".", "gates", "(", ")", ",", "axis", "=", "2", ")", "q_pred_used", "=", "tf", ".", "reduce_sum", "(", "q_pred", "*", "q_dispatcher", ".", "gates", "(", ")", ",", "axis", "=", "2", ")", "epsilon", "=", "1e-3", "m_pred_used", "=", "tf", ".", "log", "(", "m_pred_used", "+", "epsilon", ")", "m_total", "=", "tf", ".", "log", "(", "m_total", "+", "epsilon", ")", "m_loss", "=", "tf", ".", "nn", ".", "l2_loss", "(", "m_total", "-", "m_pred_used", ")", "q_loss", "=", "tf", ".", "nn", ".", "l2_loss", "(", "(", "q_total", "-", "q_pred_used", ")", "*", "tf", ".", "reduce_sum", "(", "q_gates", ",", "axis", "=", "2", ")", ")", "q_loss", "/=", "tf", ".", "to_float", "(", "batch", "*", "length_q", ")", "m_loss", "/=", "tf", ".", "to_float", "(", "batch", "*", "length_kv", ")", "# We would like the query groups to be equal sized. The group", "# size is discrete, so we need some trick here. We add a loss", "# proportional to the product of the group size and the", "# predictions for that group. 
This encourages the predictions to", "# decrease for groups that are too big.", "q_group_deviation", "=", "(", "q_group_size", "/", "q_group_target_size", ")", "-", "1.0", "q_balance_loss", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "reduce_mean", "(", "q_pred_biased", ",", "axis", "=", "1", ")", "*", "q_group_deviation", ")", "/", "tf", ".", "to_float", "(", "batch", ")", "m_group_deviation", "=", "(", "m_group_size", "/", "m_group_target_size", ")", "-", "1.0", "m_balance_loss", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "reduce_mean", "(", "m_pred_biased", ",", "axis", "=", "1", ")", "*", "m_group_deviation", ")", "/", "tf", ".", "to_float", "(", "batch", ")", "# The losses in this function only propagate back to variables", "# defined in this function, and the losses outside of this", "# function only propagate back to variables outside of this", "# function. Assuming some kind of adaptive learning algorithm,", "# it should not matter how much we scale the losses in this function.", "# Still we scale them down a lot so that they should not show up", "# much in the overall loss for the model.", "extra_loss_multiplier", "=", "1e-3", "extra_loss", "=", "q_loss", "+", "m_loss", "+", "q_balance_loss", "+", "m_balance_loss", "extra_loss", "*=", "extra_loss_multiplier", "# Show a bunch of summaries.", "if", "common_layers", ".", "should_generate_summaries", "(", ")", "and", "make_image_summary", ":", "tf", ".", "summary", ".", "histogram", "(", "\"q_group_size\"", ",", "q_group_size", ")", "tf", ".", "summary", ".", "histogram", "(", "\"m_group_size\"", ",", "m_group_size", ")", "tf", ".", "summary", ".", "scalar", "(", "\"q_loss\"", ",", "q_loss", ")", "tf", ".", "summary", ".", "scalar", "(", "\"m_loss\"", ",", "m_loss", ")", "tf", ".", "summary", ".", "scalar", "(", "\"q_balance_loss\"", ",", "q_balance_loss", ")", "tf", ".", "summary", ".", "scalar", "(", "\"m_balance_loss\"", ",", "m_balance_loss", ")", "tf", ".", "summary", ".", "histogram", "(", "\"m_pred_used\"", ",", "m_pred_used", ")", "tf", ".", "summary", ".", "histogram", "(", "\"m_total\"", ",", "m_total", ")", "tf", ".", "summary", ".", "histogram", "(", "\"q_pred_used\"", ",", "q_pred_used", ")", "tf", ".", "summary", ".", "histogram", "(", "\"q_total\"", ",", "q_total", ")", "if", "make_image_summary", ":", "# image summaries are expensive.", "# So we restrict them to head_num<4, query_position<512, batch_index=0.", "trunc_heads", "=", "min", "(", "4", ",", "num_heads", ")", "trunc_length_q", "=", "tf", ".", "minimum", "(", "length_q", ",", "512", ")", "# We recompute the attention for the first example, in an inefficient", "# way - masking. 
This lets us show pretty pictures.", "# [trunc_heads, length_q, group]", "q_gates_trunc", "=", "q_gates", "[", ":", "trunc_heads", ",", ":", "trunc_length_q", ",", ":", "]", "# [trunc_heads, length_kv, group]", "m_gates_trunc", "=", "m_gates", "[", ":", "trunc_heads", ",", ":", ",", ":", "]", "grouping_mask", "=", "tf", ".", "matmul", "(", "q_gates_trunc", ",", "m_gates_trunc", ",", "transpose_b", "=", "True", ")", "q_trunc", "=", "q", "[", ":", "trunc_heads", ",", ":", "trunc_length_q", ",", ":", "]", "k_trunc", "=", "kv", "[", ":", "trunc_heads", ",", ":", ",", ":", "depth_qk", "]", "logits_trunc", "=", "tf", ".", "matmul", "(", "q_trunc", ",", "k_trunc", ",", "transpose_b", "=", "True", ")", "if", "mask_right", ":", "band", "=", "common_layers", ".", "ones_matrix_band_part", "(", "trunc_length_q", ",", "length_kv", ",", "-", "1", ",", "0", ")", "trunc_bias", "=", "tf", ".", "expand_dims", "(", "(", "1.0", "-", "band", ")", "*", "-", "1e9", ",", "0", ")", "logits_trunc", "+=", "trunc_bias", "att_trunc", "=", "tf", ".", "nn", ".", "softmax", "(", "logits_trunc", ")", "mask_coverage", "=", "tf", ".", "reduce_sum", "(", "grouping_mask", "*", "att_trunc", ")", "/", "(", "tf", ".", "to_float", "(", "trunc_length_q", ")", "*", "trunc_heads", ")", "tf", ".", "summary", ".", "scalar", "(", "\"coverage\"", ",", "mask_coverage", ")", "att_trunc_hdr", "=", "tf", ".", "pow", "(", "att_trunc", ",", "0.2", ")", "# for high-dynamic-range", "mask_channel", "=", "grouping_mask", "*", "tf", ".", "maximum", "(", "att_trunc_hdr", ",", "0.3", ")", "image", "=", "tf", ".", "stack", "(", "[", "att_trunc_hdr", ",", "mask_channel", ",", "mask_channel", "]", ",", "axis", "=", "3", ")", "tf", ".", "summary", ".", "image", "(", "\"att\"", ",", "image", ",", "max_outputs", "=", "trunc_heads", ")", "# show one group for each head.", "att_per_group", "=", "tf", ".", "expand_dims", "(", "weights", "[", ":", "trunc_heads", ",", "0", ",", ":", ",", ":", "]", ",", "-", "1", ")", "tf", ".", "summary", ".", "image", "(", "\"att_per_group_%d\"", ",", "tf", ".", "pow", "(", "att_per_group", ",", "0.2", ")", ",", "max_outputs", "=", "trunc_heads", ")", "return", "o", ",", "extra_loss" ]
Multi-head dot-product attention with sparsity. For each attention head, the queries are partitioned into groups. For each group, only a subset of the key-value pairs are considered. The choices of groups are selected based on trained predictors of the total attention given the group inclusion. memory_target_density indicates, on average, how many groups each key-value pair should participate in. We use auxiliary losses to ensure that each group contains roughly the same number of queries and the same number of key-value pairs. If, for a given sequence, the actual number of queries/pairs sent to an expert exceeds this target by a factor of more than multiplicative_overhead, then the last ones are dropped. We use this drop-last policy to avoid bleeding information backwards, which is necessary when using this function with autoregressive prediction. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth num_groups: an integer memory_target_density: a floating point scalar multiplicative_overhead: a floating point scalar additive_overhead: a floating point scalar mask_right: a boolean make_image_summary: a boolean name: an optional string Returns: A Tensor with shape [batch, length_q, output_depth] and a scalar extra training loss Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads.
[ "Multi", "-", "head", "dot", "-", "product", "attention", "with", "sparsity", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1220-L1472
train
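A sketch of how grouped_attention_multihead might be wired up for self-attention; TensorFlow 1.x is assumed, the shapes and hyperparameters below are toy values chosen for illustration, and the comment on folding in the auxiliary loss is a suggestion rather than the repository's prescribed usage.
# Hypothetical self-attention call; x stands in for encoder activations.
import tensorflow as tf
from tensor2tensor.layers import common_attention

x = tf.random_normal([2, 64, 128])  # [batch, length, channels], assumed
y, extra_loss = common_attention.grouped_attention_multihead(
    query_antecedent=x,
    memory_antecedent=x,
    total_key_depth=128,
    total_value_depth=128,
    output_depth=128,
    num_heads=4,
    num_groups=8,
    mask_right=True,           # drop-last policy keeps this safe for decoding
    make_image_summary=False)
# y: [2, 64, 128]; extra_loss: scalar auxiliary loss to be added to the model loss.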
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
harden_attention_weights
def harden_attention_weights(weights, hard_attention_k): """Make attention weights non-0 only on the top-hard_attention_k ones.""" # Subtract the top-kth weight and zero-out all lower ones. # Note that currently in case of numerical ties it will retain more # than k elements. In the future, we may want to avoid this. weights -= common_layers.top_kth_iterative(weights, hard_attention_k) weights = tf.nn.relu(weights) # Re-normalize the weights. weights_sum = tf.reduce_sum(weights, axis=-1, keep_dims=True) weights_sum = tf.maximum(weights_sum, 1e-6) # Avoid division by 0. weights /= weights_sum return weights
python
def harden_attention_weights(weights, hard_attention_k): """Make attention weights non-0 only on the top-hard_attention_k ones.""" # Subtract the top-kth weight and zero-out all lower ones. # Note that currently in case of numerical ties it will retain more # than k elements. In the future, we may want to avoid this. weights -= common_layers.top_kth_iterative(weights, hard_attention_k) weights = tf.nn.relu(weights) # Re-normalize the weights. weights_sum = tf.reduce_sum(weights, axis=-1, keep_dims=True) weights_sum = tf.maximum(weights_sum, 1e-6) # Avoid division by 0. weights /= weights_sum return weights
[ "def", "harden_attention_weights", "(", "weights", ",", "hard_attention_k", ")", ":", "# Subtract the top-kth weight and zero-out all lower ones.", "# Note that currently in case of numerical ties it will retain more", "# than k elements. In the future, we may want to avoid this.", "weights", "-=", "common_layers", ".", "top_kth_iterative", "(", "weights", ",", "hard_attention_k", ")", "weights", "=", "tf", ".", "nn", ".", "relu", "(", "weights", ")", "# Re-normalize the weights.", "weights_sum", "=", "tf", ".", "reduce_sum", "(", "weights", ",", "axis", "=", "-", "1", ",", "keep_dims", "=", "True", ")", "weights_sum", "=", "tf", ".", "maximum", "(", "weights_sum", ",", "1e-6", ")", "# Avoid division by 0.", "weights", "/=", "weights_sum", "return", "weights" ]
Make attention weights nonzero only on the top hard_attention_k ones.
[ "Make", "attention", "weights", "non", "-", "0", "only", "on", "the", "top", "-", "hard_attention_k", "ones", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1475-L1486
train
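A small sketch of harden_attention_weights on toy softmax weights; the shapes and the value of k are assumptions for illustration (TensorFlow 1.x).
import tensorflow as tf
from tensor2tensor.layers import common_attention

# Assumed toy attention weights of shape [batch, heads, length_q, length_kv].
weights = tf.nn.softmax(tf.random_normal([2, 4, 8, 8]))
hard = common_attention.harden_attention_weights(weights, hard_attention_k=2)
# Each row of "hard" sums to 1 and keeps roughly the top-2 entries
# (numerical ties can keep more, as the function's own comments note).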
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
dot_product_attention
def dot_product_attention(q, k, v, bias, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, save_weights_to=None, dropout_broadcast_dims=None, activation_dtype=None, weight_dtype=None, hard_attention_k=0): """Dot-product attention. Args: q: Tensor with shape [..., length_q, depth_k]. k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must match with q. v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must match with q. bias: bias Tensor (see attention_bias()) dropout_rate: a float. image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() name: an optional string make_image_summary: True if you want an image summary. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). dropout_broadcast_dims: an optional list of integers less than rank of q. Specifies in which dimensions to broadcast the dropout decisions. activation_dtype: Used to define function activation dtype when using mixed precision. weight_dtype: The dtype weights are stored in when using mixed precision hard_attention_k: integer, if > 0 triggers hard attention (picking top-k) Returns: Tensor with shape [..., length_q, depth_v]. """ with tf.variable_scope( name, default_name="dot_product_attention", values=[q, k, v]) as scope: logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv] if bias is not None: bias = common_layers.cast_like(bias, logits) logits += bias # If logits are fp16, upcast before softmax logits = maybe_upcast(logits, activation_dtype, weight_dtype) weights = tf.nn.softmax(logits, name="attention_weights") if hard_attention_k > 0: weights = harden_attention_weights(weights, hard_attention_k) weights = common_layers.cast_like(weights, q) if save_weights_to is not None: save_weights_to[scope.name] = weights save_weights_to[scope.name + "/logits"] = logits # Drop out attention links for each head. weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) return tf.matmul(weights, v)
python
def dot_product_attention(q, k, v, bias, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, save_weights_to=None, dropout_broadcast_dims=None, activation_dtype=None, weight_dtype=None, hard_attention_k=0): """Dot-product attention. Args: q: Tensor with shape [..., length_q, depth_k]. k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must match with q. v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must match with q. bias: bias Tensor (see attention_bias()) dropout_rate: a float. image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() name: an optional string make_image_summary: True if you want an image summary. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). dropout_broadcast_dims: an optional list of integers less than rank of q. Specifies in which dimensions to broadcast the dropout decisions. activation_dtype: Used to define function activation dtype when using mixed precision. weight_dtype: The dtype weights are stored in when using mixed precision hard_attention_k: integer, if > 0 triggers hard attention (picking top-k) Returns: Tensor with shape [..., length_q, depth_v]. """ with tf.variable_scope( name, default_name="dot_product_attention", values=[q, k, v]) as scope: logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv] if bias is not None: bias = common_layers.cast_like(bias, logits) logits += bias # If logits are fp16, upcast before softmax logits = maybe_upcast(logits, activation_dtype, weight_dtype) weights = tf.nn.softmax(logits, name="attention_weights") if hard_attention_k > 0: weights = harden_attention_weights(weights, hard_attention_k) weights = common_layers.cast_like(weights, q) if save_weights_to is not None: save_weights_to[scope.name] = weights save_weights_to[scope.name + "/logits"] = logits # Drop out attention links for each head. weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) return tf.matmul(weights, v)
[ "def", "dot_product_attention", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "dropout_rate", "=", "0.0", ",", "image_shapes", "=", "None", ",", "name", "=", "None", ",", "make_image_summary", "=", "True", ",", "save_weights_to", "=", "None", ",", "dropout_broadcast_dims", "=", "None", ",", "activation_dtype", "=", "None", ",", "weight_dtype", "=", "None", ",", "hard_attention_k", "=", "0", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"dot_product_attention\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", "as", "scope", ":", "logits", "=", "tf", ".", "matmul", "(", "q", ",", "k", ",", "transpose_b", "=", "True", ")", "# [..., length_q, length_kv]", "if", "bias", "is", "not", "None", ":", "bias", "=", "common_layers", ".", "cast_like", "(", "bias", ",", "logits", ")", "logits", "+=", "bias", "# If logits are fp16, upcast before softmax", "logits", "=", "maybe_upcast", "(", "logits", ",", "activation_dtype", ",", "weight_dtype", ")", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "logits", ",", "name", "=", "\"attention_weights\"", ")", "if", "hard_attention_k", ">", "0", ":", "weights", "=", "harden_attention_weights", "(", "weights", ",", "hard_attention_k", ")", "weights", "=", "common_layers", ".", "cast_like", "(", "weights", ",", "q", ")", "if", "save_weights_to", "is", "not", "None", ":", "save_weights_to", "[", "scope", ".", "name", "]", "=", "weights", "save_weights_to", "[", "scope", ".", "name", "+", "\"/logits\"", "]", "=", "logits", "# Drop out attention links for each head.", "weights", "=", "common_layers", ".", "dropout_with_broadcast_dims", "(", "weights", ",", "1.0", "-", "dropout_rate", ",", "broadcast_dims", "=", "dropout_broadcast_dims", ")", "if", "common_layers", ".", "should_generate_summaries", "(", ")", "and", "make_image_summary", ":", "attention_image_summary", "(", "weights", ",", "image_shapes", ")", "return", "tf", ".", "matmul", "(", "weights", ",", "v", ")" ]
Dot-product attention. Args: q: Tensor with shape [..., length_q, depth_k]. k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must match with q. v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must match with q. bias: bias Tensor (see attention_bias()) dropout_rate: a float. image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() name: an optional string make_image_summary: True if you want an image summary. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). dropout_broadcast_dims: an optional list of integers less than rank of q. Specifies in which dimensions to broadcast the dropout decisions. activation_dtype: Used to define function activation dtype when using mixed precision. weight_dtype: The dtype weights are stored in when using mixed precision hard_attention_k: integer, if > 0 triggers hard attention (picking top-k) Returns: Tensor with shape [..., length_q, depth_v].
[ "Dot", "-", "product", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1489-L1549
train
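A usage sketch for dot_product_attention with a causal bias; the toy shapes are assumptions, and the bias helper attention_bias_lower_triangle is assumed to be available in the same common_attention module (bias=None also works when no mask is needed).
import tensorflow as tf
from tensor2tensor.layers import common_attention

# Assumed shapes: [batch, heads, length, depth_k] for q/k and depth_v for v.
q = tf.random_normal([2, 4, 16, 32])
k = tf.random_normal([2, 4, 16, 32])
v = tf.random_normal([2, 4, 16, 64])
bias = common_attention.attention_bias_lower_triangle(16)  # causal mask, assumed helper
out = common_attention.dot_product_attention(q, k, v, bias, dropout_rate=0.1)
# out: [2, 4, 16, 64].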
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
_generate_relative_positions_matrix
def _generate_relative_positions_matrix(length_q, length_k, max_relative_position, cache=False): """Generates matrix of relative positions between inputs.""" if not cache: if length_q == length_k: range_vec_q = range_vec_k = tf.range(length_q) else: range_vec_k = tf.range(length_k) range_vec_q = range_vec_k[-length_q:] distance_mat = range_vec_k[None, :] - range_vec_q[:, None] else: distance_mat = tf.expand_dims(tf.range(-length_k+1, 1, 1), 0) distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position, max_relative_position) # Shift values to be >= 0. Each integer still uniquely identifies a relative # position difference. final_mat = distance_mat_clipped + max_relative_position return final_mat
python
def _generate_relative_positions_matrix(length_q, length_k, max_relative_position, cache=False): """Generates matrix of relative positions between inputs.""" if not cache: if length_q == length_k: range_vec_q = range_vec_k = tf.range(length_q) else: range_vec_k = tf.range(length_k) range_vec_q = range_vec_k[-length_q:] distance_mat = range_vec_k[None, :] - range_vec_q[:, None] else: distance_mat = tf.expand_dims(tf.range(-length_k+1, 1, 1), 0) distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position, max_relative_position) # Shift values to be >= 0. Each integer still uniquely identifies a relative # position difference. final_mat = distance_mat_clipped + max_relative_position return final_mat
[ "def", "_generate_relative_positions_matrix", "(", "length_q", ",", "length_k", ",", "max_relative_position", ",", "cache", "=", "False", ")", ":", "if", "not", "cache", ":", "if", "length_q", "==", "length_k", ":", "range_vec_q", "=", "range_vec_k", "=", "tf", ".", "range", "(", "length_q", ")", "else", ":", "range_vec_k", "=", "tf", ".", "range", "(", "length_k", ")", "range_vec_q", "=", "range_vec_k", "[", "-", "length_q", ":", "]", "distance_mat", "=", "range_vec_k", "[", "None", ",", ":", "]", "-", "range_vec_q", "[", ":", ",", "None", "]", "else", ":", "distance_mat", "=", "tf", ".", "expand_dims", "(", "tf", ".", "range", "(", "-", "length_k", "+", "1", ",", "1", ",", "1", ")", ",", "0", ")", "distance_mat_clipped", "=", "tf", ".", "clip_by_value", "(", "distance_mat", ",", "-", "max_relative_position", ",", "max_relative_position", ")", "# Shift values to be >= 0. Each integer still uniquely identifies a relative", "# position difference.", "final_mat", "=", "distance_mat_clipped", "+", "max_relative_position", "return", "final_mat" ]
Generates matrix of relative positions between inputs.
[ "Generates", "matrix", "of", "relative", "positions", "between", "inputs", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1552-L1570
train
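A worked toy example of _generate_relative_positions_matrix; the lengths and clip value are assumptions, and calling the underscore-prefixed helper directly is for illustration only.
import tensorflow as tf
from tensor2tensor.layers import common_attention

# With length_q == length_k == 5 and max_relative_position == 2, entry [i, j]
# equals clip(j - i, -2, 2) + 2, so all values lie in [0, 4].
mat = common_attention._generate_relative_positions_matrix(5, 5, 2)
with tf.Session() as sess:
    print(sess.run(mat))  # first row: [2 3 4 4 4], last row: [0 0 0 1 2]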
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
_generate_relative_positions_embeddings
def _generate_relative_positions_embeddings(length_q, length_k, depth, max_relative_position, name, cache=False): """Generates tensor of size [1 if cache else length_q, length_k, depth].""" with tf.variable_scope(name): relative_positions_matrix = _generate_relative_positions_matrix( length_q, length_k, max_relative_position, cache=cache) vocab_size = max_relative_position * 2 + 1 # Generates embedding for each relative position of dimension depth. embeddings_table = tf.get_variable("embeddings", [vocab_size, depth]) embeddings = tf.gather(embeddings_table, relative_positions_matrix) return embeddings
python
def _generate_relative_positions_embeddings(length_q, length_k, depth, max_relative_position, name, cache=False): """Generates tensor of size [1 if cache else length_q, length_k, depth].""" with tf.variable_scope(name): relative_positions_matrix = _generate_relative_positions_matrix( length_q, length_k, max_relative_position, cache=cache) vocab_size = max_relative_position * 2 + 1 # Generates embedding for each relative position of dimension depth. embeddings_table = tf.get_variable("embeddings", [vocab_size, depth]) embeddings = tf.gather(embeddings_table, relative_positions_matrix) return embeddings
[ "def", "_generate_relative_positions_embeddings", "(", "length_q", ",", "length_k", ",", "depth", ",", "max_relative_position", ",", "name", ",", "cache", "=", "False", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "relative_positions_matrix", "=", "_generate_relative_positions_matrix", "(", "length_q", ",", "length_k", ",", "max_relative_position", ",", "cache", "=", "cache", ")", "vocab_size", "=", "max_relative_position", "*", "2", "+", "1", "# Generates embedding for each relative position of dimension depth.", "embeddings_table", "=", "tf", ".", "get_variable", "(", "\"embeddings\"", ",", "[", "vocab_size", ",", "depth", "]", ")", "embeddings", "=", "tf", ".", "gather", "(", "embeddings_table", ",", "relative_positions_matrix", ")", "return", "embeddings" ]
Generates tensor of size [1 if cache else length_q, length_k, depth].
[ "Generates", "tensor", "of", "size", "[", "1", "if", "cache", "else", "length_q", "length_k", "depth", "]", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1573-L1584
train
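A sketch of _generate_relative_positions_embeddings; the dimensions and the scope name are illustrative assumptions (the private helper is normally called from the relative-attention functions that follow).
import tensorflow as tf
from tensor2tensor.layers import common_attention

# Builds a [5, 5, 8] tensor of relative-position embeddings backed by an
# "embeddings" variable with vocab size 2 * 4 + 1 = 9.
rel_emb = common_attention._generate_relative_positions_embeddings(
    length_q=5, length_k=5, depth=8, max_relative_position=4,
    name="relative_positions_keys")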
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
_relative_attention_inner
def _relative_attention_inner(x, y, z, transpose): """Relative position-aware dot-product attention inner calculation. This batches matrix multiply calculations to avoid unnecessary broadcasting. Args: x: Tensor with shape [batch_size, heads, length or 1, length or depth]. y: Tensor with shape [batch_size, heads, length or 1, depth]. z: Tensor with shape [length or 1, length, depth]. transpose: Whether to transpose inner matrices of y and z. Should be true if last dimension of x is depth, not length. Returns: A Tensor with shape [batch_size, heads, length, length or depth]. """ batch_size = tf.shape(x)[0] heads = x.get_shape().as_list()[1] length = tf.shape(x)[2] # xy_matmul is [batch_size, heads, length or 1, length or depth] xy_matmul = tf.matmul(x, y, transpose_b=transpose) # x_t is [length or 1, batch_size, heads, length or depth] x_t = tf.transpose(x, [2, 0, 1, 3]) # x_t_r is [length or 1, batch_size * heads, length or depth] x_t_r = tf.reshape(x_t, [length, heads * batch_size, -1]) # x_tz_matmul is [length or 1, batch_size * heads, length or depth] x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose) # x_tz_matmul_r is [length or 1, batch_size, heads, length or depth] x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, -1]) # x_tz_matmul_r_t is [batch_size, heads, length or 1, length or depth] x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3]) return xy_matmul + x_tz_matmul_r_t
python
def _relative_attention_inner(x, y, z, transpose): """Relative position-aware dot-product attention inner calculation. This batches matrix multiply calculations to avoid unnecessary broadcasting. Args: x: Tensor with shape [batch_size, heads, length or 1, length or depth]. y: Tensor with shape [batch_size, heads, length or 1, depth]. z: Tensor with shape [length or 1, length, depth]. transpose: Whether to transpose inner matrices of y and z. Should be true if last dimension of x is depth, not length. Returns: A Tensor with shape [batch_size, heads, length, length or depth]. """ batch_size = tf.shape(x)[0] heads = x.get_shape().as_list()[1] length = tf.shape(x)[2] # xy_matmul is [batch_size, heads, length or 1, length or depth] xy_matmul = tf.matmul(x, y, transpose_b=transpose) # x_t is [length or 1, batch_size, heads, length or depth] x_t = tf.transpose(x, [2, 0, 1, 3]) # x_t_r is [length or 1, batch_size * heads, length or depth] x_t_r = tf.reshape(x_t, [length, heads * batch_size, -1]) # x_tz_matmul is [length or 1, batch_size * heads, length or depth] x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose) # x_tz_matmul_r is [length or 1, batch_size, heads, length or depth] x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, -1]) # x_tz_matmul_r_t is [batch_size, heads, length or 1, length or depth] x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3]) return xy_matmul + x_tz_matmul_r_t
[ "def", "_relative_attention_inner", "(", "x", ",", "y", ",", "z", ",", "transpose", ")", ":", "batch_size", "=", "tf", ".", "shape", "(", "x", ")", "[", "0", "]", "heads", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "1", "]", "length", "=", "tf", ".", "shape", "(", "x", ")", "[", "2", "]", "# xy_matmul is [batch_size, heads, length or 1, length or depth]", "xy_matmul", "=", "tf", ".", "matmul", "(", "x", ",", "y", ",", "transpose_b", "=", "transpose", ")", "# x_t is [length or 1, batch_size, heads, length or depth]", "x_t", "=", "tf", ".", "transpose", "(", "x", ",", "[", "2", ",", "0", ",", "1", ",", "3", "]", ")", "# x_t_r is [length or 1, batch_size * heads, length or depth]", "x_t_r", "=", "tf", ".", "reshape", "(", "x_t", ",", "[", "length", ",", "heads", "*", "batch_size", ",", "-", "1", "]", ")", "# x_tz_matmul is [length or 1, batch_size * heads, length or depth]", "x_tz_matmul", "=", "tf", ".", "matmul", "(", "x_t_r", ",", "z", ",", "transpose_b", "=", "transpose", ")", "# x_tz_matmul_r is [length or 1, batch_size, heads, length or depth]", "x_tz_matmul_r", "=", "tf", ".", "reshape", "(", "x_tz_matmul", ",", "[", "length", ",", "batch_size", ",", "heads", ",", "-", "1", "]", ")", "# x_tz_matmul_r_t is [batch_size, heads, length or 1, length or depth]", "x_tz_matmul_r_t", "=", "tf", ".", "transpose", "(", "x_tz_matmul_r", ",", "[", "1", ",", "2", ",", "0", ",", "3", "]", ")", "return", "xy_matmul", "+", "x_tz_matmul_r_t" ]
Relative position-aware dot-product attention inner calculation. This batches matrix multiply calculations to avoid unnecessary broadcasting. Args: x: Tensor with shape [batch_size, heads, length or 1, length or depth]. y: Tensor with shape [batch_size, heads, length or 1, depth]. z: Tensor with shape [length or 1, length, depth]. transpose: Whether to transpose inner matrices of y and z. Should be true if last dimension of x is depth, not length. Returns: A Tensor with shape [batch_size, heads, length, length or depth].
[ "Relative", "position", "-", "aware", "dot", "-", "product", "attention", "inner", "calculation", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1587-L1618
train
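A minimal NumPy sketch of what the `_relative_attention_inner` entry above computes (written for illustration here; it is not part of the tensor2tensor source): on top of the ordinary batched x @ y^T term, every query position i contributes a product of x[b, h, i, :] against its own slice z[i] of the relative-embedding tensor, which is exactly what the transpose/reshape round trip in the TensorFlow code batches efficiently.

import numpy as np

def relative_attention_inner_ref(x, y, z, transpose):
    # x: [batch, heads, length, depth] when transpose=True, else [batch, heads, length, length]
    # y: [batch, heads, length, depth]
    # z: [length, length, depth]
    xy = np.matmul(x, np.swapaxes(y, -1, -2)) if transpose else np.matmul(x, y)
    if transpose:
        # per-position term: x[b, h, i, :] . z[i, j, :] for every (i, j)
        xz = np.einsum("bhid,ijd->bhij", x, z)
    else:
        # per-position term: sum_j x[b, h, i, j] * z[i, j, :]
        xz = np.einsum("bhij,ijd->bhid", x, z)
    return xy + xz

x = np.random.randn(2, 4, 5, 8)
y = np.random.randn(2, 4, 5, 8)
z = np.random.randn(5, 5, 8)
print(relative_attention_inner_ref(x, y, z, transpose=True).shape)  # (2, 4, 5, 5)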
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
dot_product_attention_relative
def dot_product_attention_relative(q, k, v, bias, max_relative_position, dropout_rate=0.0, image_shapes=None, save_weights_to=None, name=None, make_image_summary=True, cache=False, allow_memory=False, hard_attention_k=0): """Calculate relative position-aware dot-product self-attention. The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v. Args: q: a Tensor with shape [batch, heads, length, depth]. k: a Tensor with shape [batch, heads, length, depth]. v: a Tensor with shape [batch, heads, length, depth]. bias: bias Tensor. max_relative_position: an integer specifying the maximum distance between inputs that unique position embeddings should be learned for. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). name: an optional string. make_image_summary: Whether to make an attention image summary. cache: whether use cache mode allow_memory: whether to assume that recurrent memory is in use. If True, the length dimension of k/v/bias may be longer than the queries, and it is assumed that the extra memory entries precede the non-memory entries. hard_attention_k: integer, if > 0 triggers hard attention (picking top-k) Returns: A Tensor. Raises: ValueError: if max_relative_position is not > 0. """ if not max_relative_position: raise ValueError("Max relative position (%s) should be > 0 when using " "relative self attention." % (max_relative_position)) with tf.variable_scope( name, default_name="dot_product_attention_relative", values=[q, k, v]) as scope: # This calculation only works for self attention. # q, k and v must therefore have the same shape, unless memory is enabled. if not cache and not allow_memory: q.get_shape().assert_is_compatible_with(k.get_shape()) q.get_shape().assert_is_compatible_with(v.get_shape()) # Use separate embeddings suitable for keys and values. depth = k.get_shape().as_list()[3] length_k = common_layers.shape_list(k)[2] length_q = common_layers.shape_list(q)[2] if allow_memory else length_k relations_keys = _generate_relative_positions_embeddings( length_q, length_k, depth, max_relative_position, "relative_positions_keys", cache=cache) relations_values = _generate_relative_positions_embeddings( length_q, length_k, depth, max_relative_position, "relative_positions_values", cache=cache) # Compute self attention considering the relative position embeddings. logits = _relative_attention_inner(q, k, relations_keys, True) if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") if hard_attention_k > 0: weights = harden_attention_weights(weights, hard_attention_k) if save_weights_to is not None: save_weights_to[scope.name] = weights save_weights_to[scope.name + "/logits"] = logits weights = tf.nn.dropout(weights, 1.0 - dropout_rate) if not tf.get_variable_scope().reuse and make_image_summary: attention_image_summary(weights, image_shapes) return _relative_attention_inner(weights, v, relations_values, False)
python
def dot_product_attention_relative(q, k, v, bias, max_relative_position, dropout_rate=0.0, image_shapes=None, save_weights_to=None, name=None, make_image_summary=True, cache=False, allow_memory=False, hard_attention_k=0): """Calculate relative position-aware dot-product self-attention. The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v. Args: q: a Tensor with shape [batch, heads, length, depth]. k: a Tensor with shape [batch, heads, length, depth]. v: a Tensor with shape [batch, heads, length, depth]. bias: bias Tensor. max_relative_position: an integer specifying the maximum distance between inputs that unique position embeddings should be learned for. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). name: an optional string. make_image_summary: Whether to make an attention image summary. cache: whether use cache mode allow_memory: whether to assume that recurrent memory is in use. If True, the length dimension of k/v/bias may be longer than the queries, and it is assumed that the extra memory entries precede the non-memory entries. hard_attention_k: integer, if > 0 triggers hard attention (picking top-k) Returns: A Tensor. Raises: ValueError: if max_relative_position is not > 0. """ if not max_relative_position: raise ValueError("Max relative position (%s) should be > 0 when using " "relative self attention." % (max_relative_position)) with tf.variable_scope( name, default_name="dot_product_attention_relative", values=[q, k, v]) as scope: # This calculation only works for self attention. # q, k and v must therefore have the same shape, unless memory is enabled. if not cache and not allow_memory: q.get_shape().assert_is_compatible_with(k.get_shape()) q.get_shape().assert_is_compatible_with(v.get_shape()) # Use separate embeddings suitable for keys and values. depth = k.get_shape().as_list()[3] length_k = common_layers.shape_list(k)[2] length_q = common_layers.shape_list(q)[2] if allow_memory else length_k relations_keys = _generate_relative_positions_embeddings( length_q, length_k, depth, max_relative_position, "relative_positions_keys", cache=cache) relations_values = _generate_relative_positions_embeddings( length_q, length_k, depth, max_relative_position, "relative_positions_values", cache=cache) # Compute self attention considering the relative position embeddings. logits = _relative_attention_inner(q, k, relations_keys, True) if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") if hard_attention_k > 0: weights = harden_attention_weights(weights, hard_attention_k) if save_weights_to is not None: save_weights_to[scope.name] = weights save_weights_to[scope.name + "/logits"] = logits weights = tf.nn.dropout(weights, 1.0 - dropout_rate) if not tf.get_variable_scope().reuse and make_image_summary: attention_image_summary(weights, image_shapes) return _relative_attention_inner(weights, v, relations_values, False)
[ "def", "dot_product_attention_relative", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "max_relative_position", ",", "dropout_rate", "=", "0.0", ",", "image_shapes", "=", "None", ",", "save_weights_to", "=", "None", ",", "name", "=", "None", ",", "make_image_summary", "=", "True", ",", "cache", "=", "False", ",", "allow_memory", "=", "False", ",", "hard_attention_k", "=", "0", ")", ":", "if", "not", "max_relative_position", ":", "raise", "ValueError", "(", "\"Max relative position (%s) should be > 0 when using \"", "\"relative self attention.\"", "%", "(", "max_relative_position", ")", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"dot_product_attention_relative\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", "as", "scope", ":", "# This calculation only works for self attention.", "# q, k and v must therefore have the same shape, unless memory is enabled.", "if", "not", "cache", "and", "not", "allow_memory", ":", "q", ".", "get_shape", "(", ")", ".", "assert_is_compatible_with", "(", "k", ".", "get_shape", "(", ")", ")", "q", ".", "get_shape", "(", ")", ".", "assert_is_compatible_with", "(", "v", ".", "get_shape", "(", ")", ")", "# Use separate embeddings suitable for keys and values.", "depth", "=", "k", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "3", "]", "length_k", "=", "common_layers", ".", "shape_list", "(", "k", ")", "[", "2", "]", "length_q", "=", "common_layers", ".", "shape_list", "(", "q", ")", "[", "2", "]", "if", "allow_memory", "else", "length_k", "relations_keys", "=", "_generate_relative_positions_embeddings", "(", "length_q", ",", "length_k", ",", "depth", ",", "max_relative_position", ",", "\"relative_positions_keys\"", ",", "cache", "=", "cache", ")", "relations_values", "=", "_generate_relative_positions_embeddings", "(", "length_q", ",", "length_k", ",", "depth", ",", "max_relative_position", ",", "\"relative_positions_values\"", ",", "cache", "=", "cache", ")", "# Compute self attention considering the relative position embeddings.", "logits", "=", "_relative_attention_inner", "(", "q", ",", "k", ",", "relations_keys", ",", "True", ")", "if", "bias", "is", "not", "None", ":", "logits", "+=", "bias", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "logits", ",", "name", "=", "\"attention_weights\"", ")", "if", "hard_attention_k", ">", "0", ":", "weights", "=", "harden_attention_weights", "(", "weights", ",", "hard_attention_k", ")", "if", "save_weights_to", "is", "not", "None", ":", "save_weights_to", "[", "scope", ".", "name", "]", "=", "weights", "save_weights_to", "[", "scope", ".", "name", "+", "\"/logits\"", "]", "=", "logits", "weights", "=", "tf", ".", "nn", ".", "dropout", "(", "weights", ",", "1.0", "-", "dropout_rate", ")", "if", "not", "tf", ".", "get_variable_scope", "(", ")", ".", "reuse", "and", "make_image_summary", ":", "attention_image_summary", "(", "weights", ",", "image_shapes", ")", "return", "_relative_attention_inner", "(", "weights", ",", "v", ",", "relations_values", ",", "False", ")" ]
Calculate relative position-aware dot-product self-attention. The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v. Args: q: a Tensor with shape [batch, heads, length, depth]. k: a Tensor with shape [batch, heads, length, depth]. v: a Tensor with shape [batch, heads, length, depth]. bias: bias Tensor. max_relative_position: an integer specifying the maximum distance between inputs that unique position embeddings should be learned for. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). name: an optional string. make_image_summary: Whether to make an attention image summary. cache: whether to use cache mode. allow_memory: whether to assume that recurrent memory is in use. If True, the length dimension of k/v/bias may be longer than the queries, and it is assumed that the extra memory entries precede the non-memory entries. hard_attention_k: integer, if > 0 triggers hard attention (picking top-k). Returns: A Tensor. Raises: ValueError: if max_relative_position is not > 0.
[ "Calculate", "relative", "position", "-", "aware", "dot", "-", "product", "self", "-", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1621-L1702
train
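The entry above relies on `_generate_relative_positions_embeddings`, which is not reproduced in this section. As a hedged sketch of the construction it presumably follows (the Shaw et al. 2018 scheme; the exact details here are assumptions, not the library's code), relative distances between query and key positions are clipped to max_relative_position and shifted so they index an embedding table with 2*max_relative_position + 1 rows:

import numpy as np

def clipped_relative_positions(length_q, length_k, max_relative_position):
    # distance[i, j] = j - i, clipped to [-max, max], then shifted to [0, 2*max]
    distance = np.arange(length_k)[None, :] - np.arange(length_q)[:, None]
    clipped = np.clip(distance, -max_relative_position, max_relative_position)
    return clipped + max_relative_position

print(clipped_relative_positions(4, 4, 2))
# [[2 3 4 4]
#  [1 2 3 4]
#  [0 1 2 3]
#  [0 0 1 2]]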
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
_relative_position_to_absolute_position_masked
def _relative_position_to_absolute_position_masked(x): """Helper to dot_product_self_attention_relative_v2. Rearrange an attention logits or weights Tensor. The dimensions of the input represent: [batch, heads, query_position, memory_position - query_position + length - 1] The dimensions of the output represent: [batch, heads, query_position, memory_position] Only works with masked_attention. Undefined behavior for regions of the input where memory_position > query_position. Args: x: a Tensor with shape [batch, heads, length, length] Returns: a Tensor with shape [batch, heads, length, length] """ batch, heads, length, _ = common_layers.shape_list(x) x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]]) x = tf.reshape(x, [batch, heads, 1 + length, length]) x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1]) return x
python
def _relative_position_to_absolute_position_masked(x): """Helper to dot_product_self_attention_relative_v2. Rearrange an attention logits or weights Tensor. The dimensions of the input represent: [batch, heads, query_position, memory_position - query_position + length - 1] The dimensions of the output represent: [batch, heads, query_position, memory_position] Only works with masked_attention. Undefined behavior for regions of the input where memory_position > query_position. Args: x: a Tensor with shape [batch, heads, length, length] Returns: a Tensor with shape [batch, heads, length, length] """ batch, heads, length, _ = common_layers.shape_list(x) x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]]) x = tf.reshape(x, [batch, heads, 1 + length, length]) x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1]) return x
[ "def", "_relative_position_to_absolute_position_masked", "(", "x", ")", ":", "batch", ",", "heads", ",", "length", ",", "_", "=", "common_layers", ".", "shape_list", "(", "x", ")", "x", "=", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "1", ",", "0", "]", "]", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "batch", ",", "heads", ",", "1", "+", "length", ",", "length", "]", ")", "x", "=", "tf", ".", "slice", "(", "x", ",", "[", "0", ",", "0", ",", "1", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", "]", ")", "return", "x" ]
Helper to dot_product_self_attention_relative_v2. Rearrange an attention logits or weights Tensor. The dimensions of the input represent: [batch, heads, query_position, memory_position - query_position + length - 1] The dimensions of the output represent: [batch, heads, query_position, memory_position] Only works with masked_attention. Undefined behavior for regions of the input where memory_position > query_position. Args: x: a Tensor with shape [batch, heads, length, length] Returns: a Tensor with shape [batch, heads, length, length]
[ "Helper", "to", "dot_product_self_attention_relative_v2", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1705-L1729
train
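The pad/reshape/slice trick in `_relative_position_to_absolute_position_masked` is compact enough that a direct NumPy port (illustration only, not repository code) is easy to check by hand: for query position q and memory position m <= q, entry [..., q, m - q + length - 1] of the relative-indexed input ends up at [..., q, m] of the output, while the m > q region is left undefined, as the docstring warns.

import numpy as np

def rel_to_abs_masked(x):
    batch, heads, length, _ = x.shape
    x = np.pad(x, [(0, 0), (0, 0), (0, 0), (1, 0)])   # prepend one zero column
    x = x.reshape(batch, heads, length + 1, length)    # reshape skews each row by one slot
    return x[:, :, 1:, :]                              # drop the first (garbage) row

x = np.arange(9, dtype=float).reshape(1, 1, 3, 3)
print(rel_to_abs_masked(x)[0, 0])
# [[2. 0. 3.]
#  [4. 5. 0.]
#  [6. 7. 8.]]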
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
dot_product_self_attention_relative_v2
def dot_product_self_attention_relative_v2(q, k, v, bias, max_relative_position=None, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, dropout_broadcast_dims=None, heads_share_relative_embedding=False, add_relative_to_values=False): """Calculate relative position-aware dot-product self-attention. Only works for masked self-attention (no looking forward). The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v. Args: q: a Tensor with shape [batch, heads, length, depth]. k: a Tensor with shape [batch, heads, length, depth]. v: a Tensor with shape [batch, heads, length, depth]. bias: bias Tensor. max_relative_position: an integer indicating the maximum relative distance to look back - changing this invalidates checkpoints dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. heads_share_relative_embedding: a boolean indicating wheather to share relative embeddings between attention heads. add_relative_to_values: a boolean for whether to add relative component to values. Returns: A Tensor. Raises: ValueError: if max_relative_position is not > 0. """ if not max_relative_position: raise ValueError("Max relative position (%s) should be > 0 when using " "relative self attention." % (max_relative_position)) with tf.variable_scope( name, default_name="dot_product_self_attention_relative_v2", values=[q, k, v]): # This calculation only works for self attention. # q, k and v must therefore have the same shape. q.get_shape().assert_is_compatible_with(k.get_shape()) q.get_shape().assert_is_compatible_with(v.get_shape()) # Use separate embeddings suitable for keys and values. _, num_heads, length, depth_k = common_layers.shape_list(k) # [batch, num_heads, query_length, memory_length] logits = tf.matmul(q, k, transpose_b=True) key_relative_embeddings = get_relative_embeddings_left( max_relative_position, length, depth_k, num_heads, heads_share_relative_embedding, "key_relative_embeddings") rel_logits = matmul_with_relative_keys(q, key_relative_embeddings, heads_share_relative_embedding) rel_logits = _relative_position_to_absolute_position_masked(rel_logits) logits += rel_logits if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") # Dropping out the attention links for each of the heads. weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) output = tf.matmul(weights, v) if add_relative_to_values: # [batch, num_heads, query_length, memory_length] relative_weights = _absolute_position_to_relative_position_masked(weights) depth_v = common_layers.shape_list(v)[3] value_relative_embeddings = get_relative_embeddings_left( max_relative_position, length, depth_v, num_heads, heads_share_relative_embedding, "value_relative_embeddings") output += matmul_with_relative_values( relative_weights, value_relative_embeddings, heads_share_relative_embedding) return output
python
def dot_product_self_attention_relative_v2(q, k, v, bias, max_relative_position=None, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, dropout_broadcast_dims=None, heads_share_relative_embedding=False, add_relative_to_values=False): """Calculate relative position-aware dot-product self-attention. Only works for masked self-attention (no looking forward). The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v. Args: q: a Tensor with shape [batch, heads, length, depth]. k: a Tensor with shape [batch, heads, length, depth]. v: a Tensor with shape [batch, heads, length, depth]. bias: bias Tensor. max_relative_position: an integer indicating the maximum relative distance to look back - changing this invalidates checkpoints dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. heads_share_relative_embedding: a boolean indicating wheather to share relative embeddings between attention heads. add_relative_to_values: a boolean for whether to add relative component to values. Returns: A Tensor. Raises: ValueError: if max_relative_position is not > 0. """ if not max_relative_position: raise ValueError("Max relative position (%s) should be > 0 when using " "relative self attention." % (max_relative_position)) with tf.variable_scope( name, default_name="dot_product_self_attention_relative_v2", values=[q, k, v]): # This calculation only works for self attention. # q, k and v must therefore have the same shape. q.get_shape().assert_is_compatible_with(k.get_shape()) q.get_shape().assert_is_compatible_with(v.get_shape()) # Use separate embeddings suitable for keys and values. _, num_heads, length, depth_k = common_layers.shape_list(k) # [batch, num_heads, query_length, memory_length] logits = tf.matmul(q, k, transpose_b=True) key_relative_embeddings = get_relative_embeddings_left( max_relative_position, length, depth_k, num_heads, heads_share_relative_embedding, "key_relative_embeddings") rel_logits = matmul_with_relative_keys(q, key_relative_embeddings, heads_share_relative_embedding) rel_logits = _relative_position_to_absolute_position_masked(rel_logits) logits += rel_logits if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") # Dropping out the attention links for each of the heads. weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) output = tf.matmul(weights, v) if add_relative_to_values: # [batch, num_heads, query_length, memory_length] relative_weights = _absolute_position_to_relative_position_masked(weights) depth_v = common_layers.shape_list(v)[3] value_relative_embeddings = get_relative_embeddings_left( max_relative_position, length, depth_v, num_heads, heads_share_relative_embedding, "value_relative_embeddings") output += matmul_with_relative_values( relative_weights, value_relative_embeddings, heads_share_relative_embedding) return output
[ "def", "dot_product_self_attention_relative_v2", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "max_relative_position", "=", "None", ",", "dropout_rate", "=", "0.0", ",", "image_shapes", "=", "None", ",", "name", "=", "None", ",", "make_image_summary", "=", "True", ",", "dropout_broadcast_dims", "=", "None", ",", "heads_share_relative_embedding", "=", "False", ",", "add_relative_to_values", "=", "False", ")", ":", "if", "not", "max_relative_position", ":", "raise", "ValueError", "(", "\"Max relative position (%s) should be > 0 when using \"", "\"relative self attention.\"", "%", "(", "max_relative_position", ")", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"dot_product_self_attention_relative_v2\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "# This calculation only works for self attention.", "# q, k and v must therefore have the same shape.", "q", ".", "get_shape", "(", ")", ".", "assert_is_compatible_with", "(", "k", ".", "get_shape", "(", ")", ")", "q", ".", "get_shape", "(", ")", ".", "assert_is_compatible_with", "(", "v", ".", "get_shape", "(", ")", ")", "# Use separate embeddings suitable for keys and values.", "_", ",", "num_heads", ",", "length", ",", "depth_k", "=", "common_layers", ".", "shape_list", "(", "k", ")", "# [batch, num_heads, query_length, memory_length]", "logits", "=", "tf", ".", "matmul", "(", "q", ",", "k", ",", "transpose_b", "=", "True", ")", "key_relative_embeddings", "=", "get_relative_embeddings_left", "(", "max_relative_position", ",", "length", ",", "depth_k", ",", "num_heads", ",", "heads_share_relative_embedding", ",", "\"key_relative_embeddings\"", ")", "rel_logits", "=", "matmul_with_relative_keys", "(", "q", ",", "key_relative_embeddings", ",", "heads_share_relative_embedding", ")", "rel_logits", "=", "_relative_position_to_absolute_position_masked", "(", "rel_logits", ")", "logits", "+=", "rel_logits", "if", "bias", "is", "not", "None", ":", "logits", "+=", "bias", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "logits", ",", "name", "=", "\"attention_weights\"", ")", "# Dropping out the attention links for each of the heads.", "weights", "=", "common_layers", ".", "dropout_with_broadcast_dims", "(", "weights", ",", "1.0", "-", "dropout_rate", ",", "broadcast_dims", "=", "dropout_broadcast_dims", ")", "if", "common_layers", ".", "should_generate_summaries", "(", ")", "and", "make_image_summary", ":", "attention_image_summary", "(", "weights", ",", "image_shapes", ")", "output", "=", "tf", ".", "matmul", "(", "weights", ",", "v", ")", "if", "add_relative_to_values", ":", "# [batch, num_heads, query_length, memory_length]", "relative_weights", "=", "_absolute_position_to_relative_position_masked", "(", "weights", ")", "depth_v", "=", "common_layers", ".", "shape_list", "(", "v", ")", "[", "3", "]", "value_relative_embeddings", "=", "get_relative_embeddings_left", "(", "max_relative_position", ",", "length", ",", "depth_v", ",", "num_heads", ",", "heads_share_relative_embedding", ",", "\"value_relative_embeddings\"", ")", "output", "+=", "matmul_with_relative_values", "(", "relative_weights", ",", "value_relative_embeddings", ",", "heads_share_relative_embedding", ")", "return", "output" ]
Calculate relative position-aware dot-product self-attention. Only works for masked self-attention (no looking forward). The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v. Args: q: a Tensor with shape [batch, heads, length, depth]. k: a Tensor with shape [batch, heads, length, depth]. v: a Tensor with shape [batch, heads, length, depth]. bias: bias Tensor. max_relative_position: an integer indicating the maximum relative distance to look back; changing this invalidates checkpoints. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. Saves memory. heads_share_relative_embedding: a boolean indicating whether to share relative embeddings between attention heads. add_relative_to_values: a boolean for whether to add a relative component to values. Returns: A Tensor. Raises: ValueError: if max_relative_position is not > 0.
[ "Calculate", "relative", "position", "-", "aware", "dot", "-", "product", "self", "-", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1809-L1899
train
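`matmul_with_relative_keys` and `_absolute_position_to_relative_position_masked`, which the entry above calls, are defined outside this section. By analogy with `_matmul_with_relative_keys_2d` further down, the key product is presumably a plain einsum of the queries against either a shared or a per-head relative-key table; the reference below is a guess written for illustration, not the library's implementation.

import numpy as np

def matmul_with_relative_keys_ref(q, rel_k, heads_share_relative_embedding):
    # q: [batch, heads, length, depth]
    # rel_k: [num_rel, depth] if shared across heads, else [heads, num_rel, depth]
    if heads_share_relative_embedding:
        return np.einsum("bhld,md->bhlm", q, rel_k)
    return np.einsum("bhld,hmd->bhlm", q, rel_k)

q = np.random.randn(2, 4, 6, 8)
print(matmul_with_relative_keys_ref(q, np.random.randn(11, 8), True).shape)      # (2, 4, 6, 11)
print(matmul_with_relative_keys_ref(q, np.random.randn(4, 11, 8), False).shape)  # (2, 4, 6, 11)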
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
_absolute_position_to_relative_position_unmasked
def _absolute_position_to_relative_position_unmasked(x): """Helper function for dot_product_unmasked_self_attention_relative_v2. Rearrange an attention logits or weights Tensor. The dimensions of the input represent: [batch, heads, query_position, memory_position] The dimensions of the output represent: [batch, heads, query_position, memory_position - query_position + length - 1] Only works with unmasked_attention. Args: x: a Tensor with shape [batch, heads, length, length] Returns: a Tensor with shape [batch, heads, length, 2*length-1] """ batch, heads, length, _ = common_layers.shape_list(x) # padd along column x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, length-1]]) x_flat = tf.reshape(x, [batch, heads, length**2 + length*(length -1)]) # add 0's in the beginning that will skew the elements after reshape x_flat = tf.pad(x_flat, [[0, 0], [0, 0], [length, 0]]) x = tf.reshape(x_flat, [batch, heads, length, 2*length]) x = tf.slice(x, [0, 0, 0, 1], [batch, heads, length, 2*length -1]) return x
python
def _absolute_position_to_relative_position_unmasked(x): """Helper function for dot_product_unmasked_self_attention_relative_v2. Rearrange an attention logits or weights Tensor. The dimensions of the input represent: [batch, heads, query_position, memory_position] The dimensions of the output represent: [batch, heads, query_position, memory_position - query_position + length - 1] Only works with unmasked_attention. Args: x: a Tensor with shape [batch, heads, length, length] Returns: a Tensor with shape [batch, heads, length, 2*length-1] """ batch, heads, length, _ = common_layers.shape_list(x) # padd along column x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, length-1]]) x_flat = tf.reshape(x, [batch, heads, length**2 + length*(length -1)]) # add 0's in the beginning that will skew the elements after reshape x_flat = tf.pad(x_flat, [[0, 0], [0, 0], [length, 0]]) x = tf.reshape(x_flat, [batch, heads, length, 2*length]) x = tf.slice(x, [0, 0, 0, 1], [batch, heads, length, 2*length -1]) return x
[ "def", "_absolute_position_to_relative_position_unmasked", "(", "x", ")", ":", "batch", ",", "heads", ",", "length", ",", "_", "=", "common_layers", ".", "shape_list", "(", "x", ")", "# padd along column", "x", "=", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "length", "-", "1", "]", "]", ")", "x_flat", "=", "tf", ".", "reshape", "(", "x", ",", "[", "batch", ",", "heads", ",", "length", "**", "2", "+", "length", "*", "(", "length", "-", "1", ")", "]", ")", "# add 0's in the beginning that will skew the elements after reshape", "x_flat", "=", "tf", ".", "pad", "(", "x_flat", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "length", ",", "0", "]", "]", ")", "x", "=", "tf", ".", "reshape", "(", "x_flat", ",", "[", "batch", ",", "heads", ",", "length", ",", "2", "*", "length", "]", ")", "x", "=", "tf", ".", "slice", "(", "x", ",", "[", "0", ",", "0", ",", "0", ",", "1", "]", ",", "[", "batch", ",", "heads", ",", "length", ",", "2", "*", "length", "-", "1", "]", ")", "return", "x" ]
Helper function for dot_product_unmasked_self_attention_relative_v2. Rearrange an attention logits or weights Tensor. The dimensions of the input represent: [batch, heads, query_position, memory_position] The dimensions of the output represent: [batch, heads, query_position, memory_position - query_position + length - 1] Only works with unmasked_attention. Args: x: a Tensor with shape [batch, heads, length, length] Returns: a Tensor with shape [batch, heads, length, 2*length-1]
[ "Helper", "function", "for", "dot_product_unmasked_self_attention_relative_v2", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1902-L1930
train
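A direct NumPy port of `_absolute_position_to_relative_position_unmasked` (illustration only) shows the inverse skew: an absolute-indexed [length, length] slice is spread into [length, 2*length - 1], where column c of query row i holds the value for relative distance j - i = c - (length - 1) and the remaining slots are filler.

import numpy as np

def abs_to_rel_unmasked(x):
    batch, heads, length, _ = x.shape
    x = np.pad(x, [(0, 0), (0, 0), (0, 0), (0, length - 1)])        # pad along columns
    x_flat = x.reshape(batch, heads, length * (2 * length - 1))
    x_flat = np.pad(x_flat, [(0, 0), (0, 0), (length, 0)])          # leading zeros skew the reshape
    x = x_flat.reshape(batch, heads, length, 2 * length)
    return x[:, :, :, 1:]

x = np.arange(9, dtype=float).reshape(1, 1, 3, 3)
print(abs_to_rel_unmasked(x)[0, 0])
# [[0. 0. 0. 1. 2.]
#  [0. 3. 4. 5. 0.]
#  [6. 7. 8. 0. 0.]]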
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
get_relative_embeddings_left_right
def get_relative_embeddings_left_right(max_relative_position, length, depth, num_heads, heads_share_relative_embedding, name): """Instantiate or retrieve relative embeddings, sliced according to length. Use for unmasked case where the relative attention looks both left and right. Args: max_relative_position: an Integer for the number of entries in the relative embedding, which corresponds to the max relative distance that is considered. length: an Integer, specifies the length of the input sequence for which this relative embedding is retrieved for. depth: an Integer, specifies the depth for relative embeddings. num_heads: an Integer, specifies the number of heads. heads_share_relative_embedding: a Boolean specifying if the relative embedding is shared across heads. name: a string giving the name of the embedding variables. Returns: a Tensor with shape [length, depth] """ initializer_stddev = depth**-0.5 max_relative_position_unmasked = 2 * max_relative_position - 1 if heads_share_relative_embedding: embedding_shape = (max_relative_position_unmasked, depth) else: embedding_shape = (num_heads, max_relative_position_unmasked, depth) relative_embeddings = tf.get_variable( name=name, shape=embedding_shape, initializer=tf.random_normal_initializer(stddev=initializer_stddev)) # Pad first before slice to avoid using tf.cond. pad_length = tf.maximum(length - max_relative_position, 0) slice_start_position = tf.maximum(max_relative_position-length, 0) if heads_share_relative_embedding: padded_relative_embeddings = tf.pad( relative_embeddings, [[pad_length, pad_length], [0, 0]]) used_relative_embeddings = tf.slice( padded_relative_embeddings, [slice_start_position, 0], [2 * length - 1, -1]) else: padded_relative_embeddings = tf.pad( relative_embeddings, [[0, 0], [pad_length, pad_length], [0, 0]]) used_relative_embeddings = tf.slice( padded_relative_embeddings, [0, slice_start_position, 0], [-1, 2 * length - 1, -1]) return used_relative_embeddings
python
def get_relative_embeddings_left_right(max_relative_position, length, depth, num_heads, heads_share_relative_embedding, name): """Instantiate or retrieve relative embeddings, sliced according to length. Use for unmasked case where the relative attention looks both left and right. Args: max_relative_position: an Integer for the number of entries in the relative embedding, which corresponds to the max relative distance that is considered. length: an Integer, specifies the length of the input sequence for which this relative embedding is retrieved for. depth: an Integer, specifies the depth for relative embeddings. num_heads: an Integer, specifies the number of heads. heads_share_relative_embedding: a Boolean specifying if the relative embedding is shared across heads. name: a string giving the name of the embedding variables. Returns: a Tensor with shape [length, depth] """ initializer_stddev = depth**-0.5 max_relative_position_unmasked = 2 * max_relative_position - 1 if heads_share_relative_embedding: embedding_shape = (max_relative_position_unmasked, depth) else: embedding_shape = (num_heads, max_relative_position_unmasked, depth) relative_embeddings = tf.get_variable( name=name, shape=embedding_shape, initializer=tf.random_normal_initializer(stddev=initializer_stddev)) # Pad first before slice to avoid using tf.cond. pad_length = tf.maximum(length - max_relative_position, 0) slice_start_position = tf.maximum(max_relative_position-length, 0) if heads_share_relative_embedding: padded_relative_embeddings = tf.pad( relative_embeddings, [[pad_length, pad_length], [0, 0]]) used_relative_embeddings = tf.slice( padded_relative_embeddings, [slice_start_position, 0], [2 * length - 1, -1]) else: padded_relative_embeddings = tf.pad( relative_embeddings, [[0, 0], [pad_length, pad_length], [0, 0]]) used_relative_embeddings = tf.slice( padded_relative_embeddings, [0, slice_start_position, 0], [-1, 2 * length - 1, -1]) return used_relative_embeddings
[ "def", "get_relative_embeddings_left_right", "(", "max_relative_position", ",", "length", ",", "depth", ",", "num_heads", ",", "heads_share_relative_embedding", ",", "name", ")", ":", "initializer_stddev", "=", "depth", "**", "-", "0.5", "max_relative_position_unmasked", "=", "2", "*", "max_relative_position", "-", "1", "if", "heads_share_relative_embedding", ":", "embedding_shape", "=", "(", "max_relative_position_unmasked", ",", "depth", ")", "else", ":", "embedding_shape", "=", "(", "num_heads", ",", "max_relative_position_unmasked", ",", "depth", ")", "relative_embeddings", "=", "tf", ".", "get_variable", "(", "name", "=", "name", ",", "shape", "=", "embedding_shape", ",", "initializer", "=", "tf", ".", "random_normal_initializer", "(", "stddev", "=", "initializer_stddev", ")", ")", "# Pad first before slice to avoid using tf.cond.", "pad_length", "=", "tf", ".", "maximum", "(", "length", "-", "max_relative_position", ",", "0", ")", "slice_start_position", "=", "tf", ".", "maximum", "(", "max_relative_position", "-", "length", ",", "0", ")", "if", "heads_share_relative_embedding", ":", "padded_relative_embeddings", "=", "tf", ".", "pad", "(", "relative_embeddings", ",", "[", "[", "pad_length", ",", "pad_length", "]", ",", "[", "0", ",", "0", "]", "]", ")", "used_relative_embeddings", "=", "tf", ".", "slice", "(", "padded_relative_embeddings", ",", "[", "slice_start_position", ",", "0", "]", ",", "[", "2", "*", "length", "-", "1", ",", "-", "1", "]", ")", "else", ":", "padded_relative_embeddings", "=", "tf", ".", "pad", "(", "relative_embeddings", ",", "[", "[", "0", ",", "0", "]", ",", "[", "pad_length", ",", "pad_length", "]", ",", "[", "0", ",", "0", "]", "]", ")", "used_relative_embeddings", "=", "tf", ".", "slice", "(", "padded_relative_embeddings", ",", "[", "0", ",", "slice_start_position", ",", "0", "]", ",", "[", "-", "1", ",", "2", "*", "length", "-", "1", ",", "-", "1", "]", ")", "return", "used_relative_embeddings" ]
Instantiate or retrieve relative embeddings, sliced according to length. Use for the unmasked case where the relative attention looks both left and right. Args: max_relative_position: an Integer for the number of entries in the relative embedding, which corresponds to the max relative distance that is considered. length: an Integer, specifies the length of the input sequence for which this relative embedding is retrieved. depth: an Integer, specifies the depth for relative embeddings. num_heads: an Integer, specifies the number of heads. heads_share_relative_embedding: a Boolean specifying if the relative embedding is shared across heads. name: a string giving the name of the embedding variables. Returns: a Tensor with shape [2*length - 1, depth] if the embedding is shared across heads, otherwise [num_heads, 2*length - 1, depth].
[ "Instantiate", "or", "retrieve", "relative", "embeddings", "sliced", "according", "to", "length", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1933-L1982
train
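The pad-then-slice logic in `get_relative_embeddings_left_right` avoids a tf.cond on whether length exceeds max_relative_position. A plain-NumPy sketch of the shared-heads branch (written for illustration; variable creation and the per-head branch are omitted) shows how the table of 2*max_relative_position - 1 vectors is cut down or zero-padded to exactly 2*length - 1 rows centred on relative distance 0:

import numpy as np

def slice_relative_embeddings(table, length, max_relative_position):
    # table: [2*max_relative_position - 1, depth]; row r corresponds to distance r - (max - 1)
    pad = max(length - max_relative_position, 0)
    start = max(max_relative_position - length, 0)
    padded = np.pad(table, [(pad, pad), (0, 0)])
    return padded[start:start + 2 * length - 1]

table = np.arange(-2, 3, dtype=float)[:, None]          # max_relative_position = 3, depth = 1
print(slice_relative_embeddings(table, 2, 3)[:, 0])      # [-1.  0.  1.]  (distances -1..1)
print(slice_relative_embeddings(table, 5, 3)[:, 0])      # [ 0.  0. -2. -1.  0.  1.  2.  0.  0.]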
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
dot_product_unmasked_self_attention_relative_v2
def dot_product_unmasked_self_attention_relative_v2( q, k, v, bias, max_relative_position=None, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, dropout_broadcast_dims=None, heads_share_relative_embedding=False, add_relative_to_values=False): """Calculate relative position-aware dot-product self-attention. The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v. Args: q: a Tensor with shape [batch, heads, length, depth]. k: a Tensor with shape [batch, heads, length, depth]. v: a Tensor with shape [batch, heads, length, depth]. bias: bias Tensor. max_relative_position: an integer the max relative embedding considered. Changing this invalidates checkpoints. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. heads_share_relative_embedding: a boolean indicating wheather to share relative embeddings between attention heads. add_relative_to_values: a boolean for whether to add relative component to values. Returns: A Tensor. Raises: ValueError: if max_relative_position is not > 0. """ if not max_relative_position: raise ValueError("Max relative position (%s) should be > 0 when using " "relative self attention." % (max_relative_position)) with tf.variable_scope( name, default_name="dot_product_unmasked_self_attention_relative_v2", values=[q, k, v]): # This calculation only works for self attention. # q, k and v must therefore have the same shape. q.get_shape().assert_is_compatible_with(k.get_shape()) q.get_shape().assert_is_compatible_with(v.get_shape()) # [batch, num_heads, query_length, memory_length] logits = tf.matmul(q, k, transpose_b=True) length = common_layers.shape_list(q)[2] k_shape = common_layers.shape_list(k) num_heads = k_shape[1] depth_k = k_shape[-1] key_relative_embeddings = get_relative_embeddings_left_right( max_relative_position, length, depth_k, num_heads, heads_share_relative_embedding, "key_relative_embeddings") unmasked_rel_logits = matmul_with_relative_keys( q, key_relative_embeddings, heads_share_relative_embedding) unmasked_rel_logits = _relative_position_to_absolute_position_unmasked( unmasked_rel_logits) logits += unmasked_rel_logits if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") # dropping out the attention links for each of the heads weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) # relative_weights.set_shape([None, None, None, max_length]) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) ret = tf.matmul(weights, v) if add_relative_to_values: # Adds the contribution of the weighted relative embeddings to the values. # [batch, num_heads, query_length, 2*memory_length-1] relative_weights = _absolute_position_to_relative_position_unmasked( weights) depth_v = common_layers.shape_list(v)[3] value_relative_embeddings = get_relative_embeddings_left_right( max_relative_position, length, depth_v, num_heads, heads_share_relative_embedding, "value_relative_embeddings") ret += matmul_with_relative_values( relative_weights, value_relative_embeddings, heads_share_relative_embedding) return ret
python
def dot_product_unmasked_self_attention_relative_v2( q, k, v, bias, max_relative_position=None, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, dropout_broadcast_dims=None, heads_share_relative_embedding=False, add_relative_to_values=False): """Calculate relative position-aware dot-product self-attention. The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v. Args: q: a Tensor with shape [batch, heads, length, depth]. k: a Tensor with shape [batch, heads, length, depth]. v: a Tensor with shape [batch, heads, length, depth]. bias: bias Tensor. max_relative_position: an integer the max relative embedding considered. Changing this invalidates checkpoints. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. heads_share_relative_embedding: a boolean indicating wheather to share relative embeddings between attention heads. add_relative_to_values: a boolean for whether to add relative component to values. Returns: A Tensor. Raises: ValueError: if max_relative_position is not > 0. """ if not max_relative_position: raise ValueError("Max relative position (%s) should be > 0 when using " "relative self attention." % (max_relative_position)) with tf.variable_scope( name, default_name="dot_product_unmasked_self_attention_relative_v2", values=[q, k, v]): # This calculation only works for self attention. # q, k and v must therefore have the same shape. q.get_shape().assert_is_compatible_with(k.get_shape()) q.get_shape().assert_is_compatible_with(v.get_shape()) # [batch, num_heads, query_length, memory_length] logits = tf.matmul(q, k, transpose_b=True) length = common_layers.shape_list(q)[2] k_shape = common_layers.shape_list(k) num_heads = k_shape[1] depth_k = k_shape[-1] key_relative_embeddings = get_relative_embeddings_left_right( max_relative_position, length, depth_k, num_heads, heads_share_relative_embedding, "key_relative_embeddings") unmasked_rel_logits = matmul_with_relative_keys( q, key_relative_embeddings, heads_share_relative_embedding) unmasked_rel_logits = _relative_position_to_absolute_position_unmasked( unmasked_rel_logits) logits += unmasked_rel_logits if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") # dropping out the attention links for each of the heads weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) # relative_weights.set_shape([None, None, None, max_length]) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) ret = tf.matmul(weights, v) if add_relative_to_values: # Adds the contribution of the weighted relative embeddings to the values. # [batch, num_heads, query_length, 2*memory_length-1] relative_weights = _absolute_position_to_relative_position_unmasked( weights) depth_v = common_layers.shape_list(v)[3] value_relative_embeddings = get_relative_embeddings_left_right( max_relative_position, length, depth_v, num_heads, heads_share_relative_embedding, "value_relative_embeddings") ret += matmul_with_relative_values( relative_weights, value_relative_embeddings, heads_share_relative_embedding) return ret
[ "def", "dot_product_unmasked_self_attention_relative_v2", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "max_relative_position", "=", "None", ",", "dropout_rate", "=", "0.0", ",", "image_shapes", "=", "None", ",", "name", "=", "None", ",", "make_image_summary", "=", "True", ",", "dropout_broadcast_dims", "=", "None", ",", "heads_share_relative_embedding", "=", "False", ",", "add_relative_to_values", "=", "False", ")", ":", "if", "not", "max_relative_position", ":", "raise", "ValueError", "(", "\"Max relative position (%s) should be > 0 when using \"", "\"relative self attention.\"", "%", "(", "max_relative_position", ")", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"dot_product_unmasked_self_attention_relative_v2\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "# This calculation only works for self attention.", "# q, k and v must therefore have the same shape.", "q", ".", "get_shape", "(", ")", ".", "assert_is_compatible_with", "(", "k", ".", "get_shape", "(", ")", ")", "q", ".", "get_shape", "(", ")", ".", "assert_is_compatible_with", "(", "v", ".", "get_shape", "(", ")", ")", "# [batch, num_heads, query_length, memory_length]", "logits", "=", "tf", ".", "matmul", "(", "q", ",", "k", ",", "transpose_b", "=", "True", ")", "length", "=", "common_layers", ".", "shape_list", "(", "q", ")", "[", "2", "]", "k_shape", "=", "common_layers", ".", "shape_list", "(", "k", ")", "num_heads", "=", "k_shape", "[", "1", "]", "depth_k", "=", "k_shape", "[", "-", "1", "]", "key_relative_embeddings", "=", "get_relative_embeddings_left_right", "(", "max_relative_position", ",", "length", ",", "depth_k", ",", "num_heads", ",", "heads_share_relative_embedding", ",", "\"key_relative_embeddings\"", ")", "unmasked_rel_logits", "=", "matmul_with_relative_keys", "(", "q", ",", "key_relative_embeddings", ",", "heads_share_relative_embedding", ")", "unmasked_rel_logits", "=", "_relative_position_to_absolute_position_unmasked", "(", "unmasked_rel_logits", ")", "logits", "+=", "unmasked_rel_logits", "if", "bias", "is", "not", "None", ":", "logits", "+=", "bias", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "logits", ",", "name", "=", "\"attention_weights\"", ")", "# dropping out the attention links for each of the heads", "weights", "=", "common_layers", ".", "dropout_with_broadcast_dims", "(", "weights", ",", "1.0", "-", "dropout_rate", ",", "broadcast_dims", "=", "dropout_broadcast_dims", ")", "# relative_weights.set_shape([None, None, None, max_length])", "if", "common_layers", ".", "should_generate_summaries", "(", ")", "and", "make_image_summary", ":", "attention_image_summary", "(", "weights", ",", "image_shapes", ")", "ret", "=", "tf", ".", "matmul", "(", "weights", ",", "v", ")", "if", "add_relative_to_values", ":", "# Adds the contribution of the weighted relative embeddings to the values.", "# [batch, num_heads, query_length, 2*memory_length-1]", "relative_weights", "=", "_absolute_position_to_relative_position_unmasked", "(", "weights", ")", "depth_v", "=", "common_layers", ".", "shape_list", "(", "v", ")", "[", "3", "]", "value_relative_embeddings", "=", "get_relative_embeddings_left_right", "(", "max_relative_position", ",", "length", ",", "depth_v", ",", "num_heads", ",", "heads_share_relative_embedding", ",", "\"value_relative_embeddings\"", ")", "ret", "+=", "matmul_with_relative_values", "(", "relative_weights", ",", "value_relative_embeddings", ",", "heads_share_relative_embedding", ")", "return", "ret" ]
Calculate relative position-aware dot-product self-attention. The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v. Args: q: a Tensor with shape [batch, heads, length, depth]. k: a Tensor with shape [batch, heads, length, depth]. v: a Tensor with shape [batch, heads, length, depth]. bias: bias Tensor. max_relative_position: an integer, the maximum relative position considered. Changing this invalidates checkpoints. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. Saves memory. heads_share_relative_embedding: a boolean indicating whether to share relative embeddings between attention heads. add_relative_to_values: a boolean for whether to add a relative component to values. Returns: A Tensor. Raises: ValueError: if max_relative_position is not > 0.
[ "Calculate", "relative", "position", "-", "aware", "dot", "-", "product", "self", "-", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L1985-L2074
train
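For the unmasked entry above, the net effect of matmul_with_relative_keys followed by _relative_position_to_absolute_position_unmasked (the latter defined outside this section) is that logits[b, h, i, j] gains the extra term q[b, h, i] . r_{j-i}. A deliberately naive O(length^2) reference (an illustration under the assumption that the relative-key table has already been sliced to 2*length - 1 rows shared across heads) spells that out:

import numpy as np

def relative_logits_direct(q, rel_k):
    # q: [batch, heads, length, depth]
    # rel_k: [2*length - 1, depth]; row r corresponds to relative distance r - (length - 1)
    b, h, l, d = q.shape
    out = np.zeros((b, h, l, l))
    for i in range(l):
        for j in range(l):
            out[:, :, i, j] = q[:, :, i, :] @ rel_k[j - i + l - 1]
    return out

q = np.random.randn(2, 4, 5, 8)
rel_k = np.random.randn(9, 8)                       # 2*5 - 1 rows
print(relative_logits_direct(q, rel_k).shape)       # (2, 4, 5, 5)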
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
_matmul_with_relative_keys_2d
def _matmul_with_relative_keys_2d(x, y, heads_share_relative_embedding): """Helper function for dot_product_unmasked_self_attention_relative_2d.""" if heads_share_relative_embedding: ret = tf.einsum("bhxyd,md->bhxym", x, y) else: ret = tf.einsum("bhxyd,hmd->bhxym", x, y) return ret
python
def _matmul_with_relative_keys_2d(x, y, heads_share_relative_embedding): """Helper function for dot_product_unmasked_self_attention_relative_2d.""" if heads_share_relative_embedding: ret = tf.einsum("bhxyd,md->bhxym", x, y) else: ret = tf.einsum("bhxyd,hmd->bhxym", x, y) return ret
[ "def", "_matmul_with_relative_keys_2d", "(", "x", ",", "y", ",", "heads_share_relative_embedding", ")", ":", "if", "heads_share_relative_embedding", ":", "ret", "=", "tf", ".", "einsum", "(", "\"bhxyd,md->bhxym\"", ",", "x", ",", "y", ")", "else", ":", "ret", "=", "tf", ".", "einsum", "(", "\"bhxyd,hmd->bhxym\"", ",", "x", ",", "y", ")", "return", "ret" ]
Helper function for dot_product_unmasked_self_attention_relative_2d.
[ "Helper", "function", "for", "dot_product_unmasked_self_attention_relative_2d", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2077-L2083
train
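The `_matmul_with_relative_keys_2d` helper above is a single einsum; a small NumPy shape check (illustration only) makes the two head-sharing layouts explicit:

import numpy as np

q = np.random.randn(2, 4, 3, 5, 8)            # [batch, heads, height, width, depth]
rel_shared = np.random.randn(9, 8)             # [2*max_relative_position - 1, depth]
rel_per_head = np.random.randn(4, 9, 8)        # [heads, 2*max_relative_position - 1, depth]

print(np.einsum("bhxyd,md->bhxym", q, rel_shared).shape)     # (2, 4, 3, 5, 9)
print(np.einsum("bhxyd,hmd->bhxym", q, rel_per_head).shape)  # (2, 4, 3, 5, 9)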
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
dot_product_unmasked_self_attention_relative_2d
def dot_product_unmasked_self_attention_relative_2d( q, k, v, bias, max_relative_position=None, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, dropout_broadcast_dims=None, heads_share_relative_embedding=False, add_relative_to_values=False): """Calculate relative position unmasked dot-product self-attention 2d. The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v in height and width dimensions. for query index (i,j) and key index (l, m), the logit is q_i k_j^T + q_i rh_{l-i}^T + q_i rw_{m-j}^T, where rh and ry are the set of relative embeddings in height and width spatial dimensions, respectively. Args: q: a Tensor with shape [batch, heads, height, width, depth]. k: a Tensor with shape [batch, heads, height, width, depth]. v: a Tensor with shape [batch, heads, height, width, depth]. bias: bias Tensor. max_relative_position: an integer the max relative embedding considered. Changing this invalidates checkpoints. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. heads_share_relative_embedding: a boolean indicating wheather to share relative embeddings between attention heads. add_relative_to_values: a boolean for adding relative embeddings to values. Returns: [batch, heads, height, width, depth] tensor, the output of attention. height_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing settings, which are the relative embeddings for height. width_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing settings, which are the relative embeddings for width. Raises: ValueError: if max_relative_position is not > 0. """ if not max_relative_position: raise ValueError("Max relative position (%s) should be > 0 when using " "relative self attention." % (max_relative_position)) if add_relative_to_values: raise ValueError("Adding relative embeddings to values is not implemented") with tf.variable_scope( name, default_name="dot_product_self_attention_relative_v2", values=[q, k, v]): # This calculation only works for self attention. # q, k and v must therefore have the same shape. 
q.get_shape().assert_is_compatible_with(k.get_shape()) q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) (height, width) = (common_layers.shape_list(q)[2], common_layers.shape_list(q)[3]) k_shape = common_layers.shape_list(k) num_heads = k_shape[1] depth_k = k_shape[-1] depth_v = common_layers.shape_list(v)[-1] # flatten height width flatten_hw = lambda x, d: tf.reshape(x, [-1, num_heads, height*width, d]) # [batch, num_heads, query_length, memory_length] logits = tf.matmul(flatten_hw(q, depth_k), flatten_hw(k, depth_k), transpose_b=True) def _compute_2d_relative_logits( query, key_relative_embeddings, height, width, heads_share_relative_embedding, transpose_mask): """compute relative logits.""" unmasked_rel_logits = _matmul_with_relative_keys_2d( query, key_relative_embeddings, heads_share_relative_embedding) # collapse height and heads unmasked_rel_logits = tf.reshape(unmasked_rel_logits, [-1, num_heads*height, width, 2*width-1]) unmasked_rel_logits = ( _relative_position_to_absolute_position_unmasked( unmasked_rel_logits)) # shape it back for tiling unmasked_rel_logits = tf.reshape( unmasked_rel_logits, [-1, num_heads, height, width, width]) # tiling it height times unmasked_rel_logits = tf.expand_dims( unmasked_rel_logits, axis=3) unmasked_rel_logits = tf.tile(unmasked_rel_logits, [1, 1, 1, height, 1, 1]) # bringing it to the right shape for adding to the logits. unmasked_rel_logits = tf.transpose(unmasked_rel_logits, transpose_mask) unmasked_rel_logits = tf.reshape(unmasked_rel_logits, [-1, num_heads, height*width, height*width]) return unmasked_rel_logits # Relative logits in width dimension first. width_key_relative_embeddings = get_relative_embeddings_left_right( max_relative_position, width, depth_k, num_heads, heads_share_relative_embedding, "width_key_relative_embeddings") # [batch, heads, height, 2*width-1, 2*width-1] width_unmasked_rel_logits = _compute_2d_relative_logits( q, width_key_relative_embeddings, height, width, heads_share_relative_embedding, [0, 1, 2, 4, 3, 5]) logits += width_unmasked_rel_logits # Relative logits in height dimension next. For ease, we transpose # height and width and repeat the above steps, and transpose to eventually # put the logits in their right positions. # [batch, heads, height, 2*height-1, 2*width-1] height_key_relative_embeddings = get_relative_embeddings_left_right( max_relative_position, height, depth_k, num_heads, heads_share_relative_embedding, "height_key_relative_embeddings") height_unmasked_rel_logits = _compute_2d_relative_logits( tf.transpose(q, [0, 1, 3, 2, 4]), height_key_relative_embeddings, width, height, heads_share_relative_embedding, [0, 1, 4, 2, 5, 3]) logits += height_unmasked_rel_logits if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") # dropping out the attention links for each of the heads weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) ret = tf.matmul(weights, flatten_hw(v, depth_v)) # reshape back the same spatial dimensions as q return ( tf.reshape(ret, [-1, num_heads, height, width, depth_v]), height_key_relative_embeddings, width_key_relative_embeddings)
python
def dot_product_unmasked_self_attention_relative_2d( q, k, v, bias, max_relative_position=None, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, dropout_broadcast_dims=None, heads_share_relative_embedding=False, add_relative_to_values=False): """Calculate relative position unmasked dot-product self-attention 2d. The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v in height and width dimensions. for query index (i,j) and key index (l, m), the logit is q_i k_j^T + q_i rh_{l-i}^T + q_i rw_{m-j}^T, where rh and ry are the set of relative embeddings in height and width spatial dimensions, respectively. Args: q: a Tensor with shape [batch, heads, height, width, depth]. k: a Tensor with shape [batch, heads, height, width, depth]. v: a Tensor with shape [batch, heads, height, width, depth]. bias: bias Tensor. max_relative_position: an integer the max relative embedding considered. Changing this invalidates checkpoints. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. heads_share_relative_embedding: a boolean indicating wheather to share relative embeddings between attention heads. add_relative_to_values: a boolean for adding relative embeddings to values. Returns: [batch, heads, height, width, depth] tensor, the output of attention. height_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing settings, which are the relative embeddings for height. width_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing settings, which are the relative embeddings for width. Raises: ValueError: if max_relative_position is not > 0. """ if not max_relative_position: raise ValueError("Max relative position (%s) should be > 0 when using " "relative self attention." % (max_relative_position)) if add_relative_to_values: raise ValueError("Adding relative embeddings to values is not implemented") with tf.variable_scope( name, default_name="dot_product_self_attention_relative_v2", values=[q, k, v]): # This calculation only works for self attention. # q, k and v must therefore have the same shape. 
q.get_shape().assert_is_compatible_with(k.get_shape()) q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) (height, width) = (common_layers.shape_list(q)[2], common_layers.shape_list(q)[3]) k_shape = common_layers.shape_list(k) num_heads = k_shape[1] depth_k = k_shape[-1] depth_v = common_layers.shape_list(v)[-1] # flatten height width flatten_hw = lambda x, d: tf.reshape(x, [-1, num_heads, height*width, d]) # [batch, num_heads, query_length, memory_length] logits = tf.matmul(flatten_hw(q, depth_k), flatten_hw(k, depth_k), transpose_b=True) def _compute_2d_relative_logits( query, key_relative_embeddings, height, width, heads_share_relative_embedding, transpose_mask): """compute relative logits.""" unmasked_rel_logits = _matmul_with_relative_keys_2d( query, key_relative_embeddings, heads_share_relative_embedding) # collapse height and heads unmasked_rel_logits = tf.reshape(unmasked_rel_logits, [-1, num_heads*height, width, 2*width-1]) unmasked_rel_logits = ( _relative_position_to_absolute_position_unmasked( unmasked_rel_logits)) # shape it back for tiling unmasked_rel_logits = tf.reshape( unmasked_rel_logits, [-1, num_heads, height, width, width]) # tiling it height times unmasked_rel_logits = tf.expand_dims( unmasked_rel_logits, axis=3) unmasked_rel_logits = tf.tile(unmasked_rel_logits, [1, 1, 1, height, 1, 1]) # bringing it to the right shape for adding to the logits. unmasked_rel_logits = tf.transpose(unmasked_rel_logits, transpose_mask) unmasked_rel_logits = tf.reshape(unmasked_rel_logits, [-1, num_heads, height*width, height*width]) return unmasked_rel_logits # Relative logits in width dimension first. width_key_relative_embeddings = get_relative_embeddings_left_right( max_relative_position, width, depth_k, num_heads, heads_share_relative_embedding, "width_key_relative_embeddings") # [batch, heads, height, 2*width-1, 2*width-1] width_unmasked_rel_logits = _compute_2d_relative_logits( q, width_key_relative_embeddings, height, width, heads_share_relative_embedding, [0, 1, 2, 4, 3, 5]) logits += width_unmasked_rel_logits # Relative logits in height dimension next. For ease, we transpose # height and width and repeat the above steps, and transpose to eventually # put the logits in their right positions. # [batch, heads, height, 2*height-1, 2*width-1] height_key_relative_embeddings = get_relative_embeddings_left_right( max_relative_position, height, depth_k, num_heads, heads_share_relative_embedding, "height_key_relative_embeddings") height_unmasked_rel_logits = _compute_2d_relative_logits( tf.transpose(q, [0, 1, 3, 2, 4]), height_key_relative_embeddings, width, height, heads_share_relative_embedding, [0, 1, 4, 2, 5, 3]) logits += height_unmasked_rel_logits if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") # dropping out the attention links for each of the heads weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) ret = tf.matmul(weights, flatten_hw(v, depth_v)) # reshape back the same spatial dimensions as q return ( tf.reshape(ret, [-1, num_heads, height, width, depth_v]), height_key_relative_embeddings, width_key_relative_embeddings)
[ "def", "dot_product_unmasked_self_attention_relative_2d", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "max_relative_position", "=", "None", ",", "dropout_rate", "=", "0.0", ",", "image_shapes", "=", "None", ",", "name", "=", "None", ",", "make_image_summary", "=", "True", ",", "dropout_broadcast_dims", "=", "None", ",", "heads_share_relative_embedding", "=", "False", ",", "add_relative_to_values", "=", "False", ")", ":", "if", "not", "max_relative_position", ":", "raise", "ValueError", "(", "\"Max relative position (%s) should be > 0 when using \"", "\"relative self attention.\"", "%", "(", "max_relative_position", ")", ")", "if", "add_relative_to_values", ":", "raise", "ValueError", "(", "\"Adding relative embeddings to values is not implemented\"", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"dot_product_self_attention_relative_v2\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "# This calculation only works for self attention.", "# q, k and v must therefore have the same shape.", "q", ".", "get_shape", "(", ")", ".", "assert_is_compatible_with", "(", "k", ".", "get_shape", "(", ")", ")", "q", ".", "get_shape", "(", ")", "[", ":", "-", "1", "]", ".", "assert_is_compatible_with", "(", "v", ".", "get_shape", "(", ")", "[", ":", "-", "1", "]", ")", "(", "height", ",", "width", ")", "=", "(", "common_layers", ".", "shape_list", "(", "q", ")", "[", "2", "]", ",", "common_layers", ".", "shape_list", "(", "q", ")", "[", "3", "]", ")", "k_shape", "=", "common_layers", ".", "shape_list", "(", "k", ")", "num_heads", "=", "k_shape", "[", "1", "]", "depth_k", "=", "k_shape", "[", "-", "1", "]", "depth_v", "=", "common_layers", ".", "shape_list", "(", "v", ")", "[", "-", "1", "]", "# flatten height width", "flatten_hw", "=", "lambda", "x", ",", "d", ":", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "num_heads", ",", "height", "*", "width", ",", "d", "]", ")", "# [batch, num_heads, query_length, memory_length]", "logits", "=", "tf", ".", "matmul", "(", "flatten_hw", "(", "q", ",", "depth_k", ")", ",", "flatten_hw", "(", "k", ",", "depth_k", ")", ",", "transpose_b", "=", "True", ")", "def", "_compute_2d_relative_logits", "(", "query", ",", "key_relative_embeddings", ",", "height", ",", "width", ",", "heads_share_relative_embedding", ",", "transpose_mask", ")", ":", "\"\"\"compute relative logits.\"\"\"", "unmasked_rel_logits", "=", "_matmul_with_relative_keys_2d", "(", "query", ",", "key_relative_embeddings", ",", "heads_share_relative_embedding", ")", "# collapse height and heads", "unmasked_rel_logits", "=", "tf", ".", "reshape", "(", "unmasked_rel_logits", ",", "[", "-", "1", ",", "num_heads", "*", "height", ",", "width", ",", "2", "*", "width", "-", "1", "]", ")", "unmasked_rel_logits", "=", "(", "_relative_position_to_absolute_position_unmasked", "(", "unmasked_rel_logits", ")", ")", "# shape it back for tiling", "unmasked_rel_logits", "=", "tf", ".", "reshape", "(", "unmasked_rel_logits", ",", "[", "-", "1", ",", "num_heads", ",", "height", ",", "width", ",", "width", "]", ")", "# tiling it height times", "unmasked_rel_logits", "=", "tf", ".", "expand_dims", "(", "unmasked_rel_logits", ",", "axis", "=", "3", ")", "unmasked_rel_logits", "=", "tf", ".", "tile", "(", "unmasked_rel_logits", ",", "[", "1", ",", "1", ",", "1", ",", "height", ",", "1", ",", "1", "]", ")", "# bringing it to the right shape for adding to the logits.", "unmasked_rel_logits", "=", "tf", ".", "transpose", "(", "unmasked_rel_logits", ",", 
"transpose_mask", ")", "unmasked_rel_logits", "=", "tf", ".", "reshape", "(", "unmasked_rel_logits", ",", "[", "-", "1", ",", "num_heads", ",", "height", "*", "width", ",", "height", "*", "width", "]", ")", "return", "unmasked_rel_logits", "# Relative logits in width dimension first.", "width_key_relative_embeddings", "=", "get_relative_embeddings_left_right", "(", "max_relative_position", ",", "width", ",", "depth_k", ",", "num_heads", ",", "heads_share_relative_embedding", ",", "\"width_key_relative_embeddings\"", ")", "# [batch, heads, height, 2*width-1, 2*width-1]", "width_unmasked_rel_logits", "=", "_compute_2d_relative_logits", "(", "q", ",", "width_key_relative_embeddings", ",", "height", ",", "width", ",", "heads_share_relative_embedding", ",", "[", "0", ",", "1", ",", "2", ",", "4", ",", "3", ",", "5", "]", ")", "logits", "+=", "width_unmasked_rel_logits", "# Relative logits in height dimension next. For ease, we transpose", "# height and width and repeat the above steps, and transpose to eventually", "# put the logits in their right positions.", "# [batch, heads, height, 2*height-1, 2*width-1]", "height_key_relative_embeddings", "=", "get_relative_embeddings_left_right", "(", "max_relative_position", ",", "height", ",", "depth_k", ",", "num_heads", ",", "heads_share_relative_embedding", ",", "\"height_key_relative_embeddings\"", ")", "height_unmasked_rel_logits", "=", "_compute_2d_relative_logits", "(", "tf", ".", "transpose", "(", "q", ",", "[", "0", ",", "1", ",", "3", ",", "2", ",", "4", "]", ")", ",", "height_key_relative_embeddings", ",", "width", ",", "height", ",", "heads_share_relative_embedding", ",", "[", "0", ",", "1", ",", "4", ",", "2", ",", "5", ",", "3", "]", ")", "logits", "+=", "height_unmasked_rel_logits", "if", "bias", "is", "not", "None", ":", "logits", "+=", "bias", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "logits", ",", "name", "=", "\"attention_weights\"", ")", "# dropping out the attention links for each of the heads", "weights", "=", "common_layers", ".", "dropout_with_broadcast_dims", "(", "weights", ",", "1.0", "-", "dropout_rate", ",", "broadcast_dims", "=", "dropout_broadcast_dims", ")", "if", "common_layers", ".", "should_generate_summaries", "(", ")", "and", "make_image_summary", ":", "attention_image_summary", "(", "weights", ",", "image_shapes", ")", "ret", "=", "tf", ".", "matmul", "(", "weights", ",", "flatten_hw", "(", "v", ",", "depth_v", ")", ")", "# reshape back the same spatial dimensions as q", "return", "(", "tf", ".", "reshape", "(", "ret", ",", "[", "-", "1", ",", "num_heads", ",", "height", ",", "width", ",", "depth_v", "]", ")", ",", "height_key_relative_embeddings", ",", "width_key_relative_embeddings", ")" ]
Calculate relative position unmasked dot-product self-attention 2d. The attention calculation is augmented with learned representations for the relative position between each element in q and each element in k and v in height and width dimensions. for query index (i,j) and key index (l, m), the logit is q_i k_j^T + q_i rh_{l-i}^T + q_i rw_{m-j}^T, where rh and ry are the set of relative embeddings in height and width spatial dimensions, respectively. Args: q: a Tensor with shape [batch, heads, height, width, depth]. k: a Tensor with shape [batch, heads, height, width, depth]. v: a Tensor with shape [batch, heads, height, width, depth]. bias: bias Tensor. max_relative_position: an integer the max relative embedding considered. Changing this invalidates checkpoints. dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. heads_share_relative_embedding: a boolean indicating wheather to share relative embeddings between attention heads. add_relative_to_values: a boolean for adding relative embeddings to values. Returns: [batch, heads, height, width, depth] tensor, the output of attention. height_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing settings, which are the relative embeddings for height. width_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing settings, which are the relative embeddings for width. Raises: ValueError: if max_relative_position is not > 0.
[ "Calculate", "relative", "position", "unmasked", "dot", "-", "product", "self", "-", "attention", "2d", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2086-L2225
train
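The docstring above defines the per-position logit as q_i k_j^T plus separate height and width relative terms. As a rough standalone illustration of how such relative-position logits are formed (NumPy only, 1-D case, made-up sizes; not the tensor2tensor code path), one could write:

```python
import numpy as np

# Toy sizes for the sketch.
length, depth, max_rel = 4, 8, 4
rng = np.random.RandomState(0)

q = rng.randn(length, depth)
k = rng.randn(length, depth)
# One learned embedding per relative offset in [-(length-1), length-1],
# clipped to [-max_rel, max_rel]; 2*length-1 rows here since max_rel >= length-1.
rel_emb = rng.randn(2 * length - 1, depth)

logits = np.zeros((length, length))
for i in range(length):          # query position
    for j in range(length):      # key position
        offset = np.clip(j - i, -max_rel, max_rel)
        r = rel_emb[offset + length - 1]           # look up relative embedding
        logits[i, j] = q[i] @ k[j] + q[i] @ r      # content term + relative term

weights = np.exp(logits - logits.max(axis=-1, keepdims=True))
weights /= weights.sum(axis=-1, keepdims=True)    # softmax over key positions
print(weights.shape)  # (4, 4)
```

The 2-D function above applies this once for width offsets and once for height offsets (after transposing), then adds both terms into the flattened [height*width, height*width] logits.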
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
_split_along_width
def _split_along_width(x_left_right_blocks): """Helper function for local 2d attention. Takes a tensor of [batch, heads, num_h_blocks, num_w_blocks, height, width, depth] and returns two tensors which contain every alternate position along the width Args: x_left_right_blocks: A [batch, num_h_blocks, num_w_blocks, height, width, depth] tensor Returns: x_left_blocks, x_right_blocks: two [batch, num_h_blocks, (num_w_blocks-2)/2, height, width, depth] tensors """ (_, x_num_h_blocks, x_num_outer_w_blocks, x_memory_flange_h, x_memory_flange_w, depth) = common_layers.shape_list(x_left_right_blocks) x_num_w_blocks = (x_num_outer_w_blocks-1)//2 # get it ready for splitting the left and right memory blocks x_left_right_blocks = tf.reshape(x_left_right_blocks, [-1, x_num_h_blocks, x_num_outer_w_blocks//2, 2, x_memory_flange_h, x_memory_flange_w, depth]) x_left_blocks, x_right_blocks = tf.split(x_left_right_blocks, num_or_size_splits=2, axis=3) x_left_blocks = tf.squeeze(x_left_blocks, axis=3) x_right_blocks = tf.squeeze(x_right_blocks, axis=3) x_left_blocks = tf.slice(x_left_blocks, [0, 0, 0, 0, 0, 0], [-1, -1, x_num_w_blocks, -1, -1, -1]) x_right_blocks = tf.slice(x_right_blocks, [0, 0, 1, 0, 0, 0], [-1, -1, x_num_w_blocks, -1, -1, -1]) return x_left_blocks, x_right_blocks
python
def _split_along_width(x_left_right_blocks): """Helper function for local 2d attention. Takes a tensor of [batch, heads, num_h_blocks, num_w_blocks, height, width, depth] and returns two tensors which contain every alternate position along the width Args: x_left_right_blocks: A [batch, num_h_blocks, num_w_blocks, height, width, depth] tensor Returns: x_left_blocks, x_right_blocks: two [batch, num_h_blocks, (num_w_blocks-2)/2, height, width, depth] tensors """ (_, x_num_h_blocks, x_num_outer_w_blocks, x_memory_flange_h, x_memory_flange_w, depth) = common_layers.shape_list(x_left_right_blocks) x_num_w_blocks = (x_num_outer_w_blocks-1)//2 # get it ready for splitting the left and right memory blocks x_left_right_blocks = tf.reshape(x_left_right_blocks, [-1, x_num_h_blocks, x_num_outer_w_blocks//2, 2, x_memory_flange_h, x_memory_flange_w, depth]) x_left_blocks, x_right_blocks = tf.split(x_left_right_blocks, num_or_size_splits=2, axis=3) x_left_blocks = tf.squeeze(x_left_blocks, axis=3) x_right_blocks = tf.squeeze(x_right_blocks, axis=3) x_left_blocks = tf.slice(x_left_blocks, [0, 0, 0, 0, 0, 0], [-1, -1, x_num_w_blocks, -1, -1, -1]) x_right_blocks = tf.slice(x_right_blocks, [0, 0, 1, 0, 0, 0], [-1, -1, x_num_w_blocks, -1, -1, -1]) return x_left_blocks, x_right_blocks
[ "def", "_split_along_width", "(", "x_left_right_blocks", ")", ":", "(", "_", ",", "x_num_h_blocks", ",", "x_num_outer_w_blocks", ",", "x_memory_flange_h", ",", "x_memory_flange_w", ",", "depth", ")", "=", "common_layers", ".", "shape_list", "(", "x_left_right_blocks", ")", "x_num_w_blocks", "=", "(", "x_num_outer_w_blocks", "-", "1", ")", "//", "2", "# get it ready for splitting the left and right memory blocks", "x_left_right_blocks", "=", "tf", ".", "reshape", "(", "x_left_right_blocks", ",", "[", "-", "1", ",", "x_num_h_blocks", ",", "x_num_outer_w_blocks", "//", "2", ",", "2", ",", "x_memory_flange_h", ",", "x_memory_flange_w", ",", "depth", "]", ")", "x_left_blocks", ",", "x_right_blocks", "=", "tf", ".", "split", "(", "x_left_right_blocks", ",", "num_or_size_splits", "=", "2", ",", "axis", "=", "3", ")", "x_left_blocks", "=", "tf", ".", "squeeze", "(", "x_left_blocks", ",", "axis", "=", "3", ")", "x_right_blocks", "=", "tf", ".", "squeeze", "(", "x_right_blocks", ",", "axis", "=", "3", ")", "x_left_blocks", "=", "tf", ".", "slice", "(", "x_left_blocks", ",", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "x_num_w_blocks", ",", "-", "1", ",", "-", "1", ",", "-", "1", "]", ")", "x_right_blocks", "=", "tf", ".", "slice", "(", "x_right_blocks", ",", "[", "0", ",", "0", ",", "1", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "x_num_w_blocks", ",", "-", "1", ",", "-", "1", ",", "-", "1", "]", ")", "return", "x_left_blocks", ",", "x_right_blocks" ]
Helper function for local 2d attention. Takes a tensor of [batch, heads, num_h_blocks, num_w_blocks, height, width, depth] and returns two tensors which contain every alternate position along the width Args: x_left_right_blocks: A [batch, num_h_blocks, num_w_blocks, height, width, depth] tensor Returns: x_left_blocks, x_right_blocks: two [batch, num_h_blocks, (num_w_blocks-2)/2, height, width, depth] tensors
[ "Helper", "function", "for", "local", "2d", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2228-L2265
train
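_split_along_width peels off the flange blocks that sit to the left and right of each query column by pairing neighbouring width blocks and taking alternate members. A small NumPy sketch of that indexing, using invented toy dimensions rather than the library's tensors, might look like:

```python
import numpy as np

# Toy block grid: 1 batch, 2 h-blocks, 6 outer w-blocks, 2x2 blocks, depth 1.
batch, num_h, num_outer_w, fh, fw, d = 1, 2, 6, 2, 2, 1
x = np.arange(batch * num_h * num_outer_w * fh * fw * d).reshape(
    batch, num_h, num_outer_w, fh, fw, d)

num_w = (num_outer_w - 1) // 2   # number of query columns served

# Pair up neighbouring w-blocks, then peel off the even/odd members.
pairs = x.reshape(batch, num_h, num_outer_w // 2, 2, fh, fw, d)
left = pairs[:, :, :, 0]    # blocks at even w positions 0, 2, 4, ...
right = pairs[:, :, :, 1]   # blocks at odd w positions 1, 3, 5, ...

# Keep the flanges that sit to the left/right of each query column.
left_blocks = left[:, :, :num_w]
right_blocks = right[:, :, 1:1 + num_w]
print(left_blocks.shape, right_blocks.shape)  # (1, 2, 2, 2, 2, 1) twice
```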
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
_get_left_right_blocks
def _get_left_right_blocks(x): """Helper function. Assumes that memory_flange is half of query sizes. This function splits the tensor of width 'n' into two halves, where the first half gets the width indices 0, 2, 4.. and the second half gets the width indices 3, 5, ... We also fuse two blocks along the h dimension. Args: x: a 6-d tensor. Returns: x_left_blocks, x_right_blocks: Two 6-d tensors """ (_, x_num_outer_h_blocks, x_num_outer_w_blocks, x_memory_flange_h, x_memory_flange_w, depth) = common_layers.shape_list(x) x_left_right_blocks = tf.slice(x, [0, 1, 0, 0, 0, 0], [-1, x_num_outer_h_blocks-2, -1, -1, -1, -1]) num_blocks_h = (x_num_outer_h_blocks-2)//2 x_left_right_blocks = tf.reshape(x_left_right_blocks, [-1, num_blocks_h, 2, x_num_outer_w_blocks, x_memory_flange_h, x_memory_flange_w, depth]) x_left_right_blocks = tf.transpose(x_left_right_blocks, [0, 1, 3, 2, 4, 5, 6]) x_left_right_blocks = tf.reshape(x_left_right_blocks, [-1, num_blocks_h, x_num_outer_w_blocks, 2*x_memory_flange_h, x_memory_flange_w, depth]) # get it ready for splitting the left and right memory blocks x_left_blocks, x_right_blocks = _split_along_width(x_left_right_blocks) return x_left_blocks, x_right_blocks
python
def _get_left_right_blocks(x): """Helper function. Assumes that memory_flange is half of query sizes. This function splits the tensor of width 'n' into two halves, where the first half gets the width indices 0, 2, 4.. and the second half gets the width indices 3, 5, ... We also fuse two blocks along the h dimension. Args: x: a 6-d tensor. Returns: x_left_blocks, x_right_blocks: Two 6-d tensors """ (_, x_num_outer_h_blocks, x_num_outer_w_blocks, x_memory_flange_h, x_memory_flange_w, depth) = common_layers.shape_list(x) x_left_right_blocks = tf.slice(x, [0, 1, 0, 0, 0, 0], [-1, x_num_outer_h_blocks-2, -1, -1, -1, -1]) num_blocks_h = (x_num_outer_h_blocks-2)//2 x_left_right_blocks = tf.reshape(x_left_right_blocks, [-1, num_blocks_h, 2, x_num_outer_w_blocks, x_memory_flange_h, x_memory_flange_w, depth]) x_left_right_blocks = tf.transpose(x_left_right_blocks, [0, 1, 3, 2, 4, 5, 6]) x_left_right_blocks = tf.reshape(x_left_right_blocks, [-1, num_blocks_h, x_num_outer_w_blocks, 2*x_memory_flange_h, x_memory_flange_w, depth]) # get it ready for splitting the left and right memory blocks x_left_blocks, x_right_blocks = _split_along_width(x_left_right_blocks) return x_left_blocks, x_right_blocks
[ "def", "_get_left_right_blocks", "(", "x", ")", ":", "(", "_", ",", "x_num_outer_h_blocks", ",", "x_num_outer_w_blocks", ",", "x_memory_flange_h", ",", "x_memory_flange_w", ",", "depth", ")", "=", "common_layers", ".", "shape_list", "(", "x", ")", "x_left_right_blocks", "=", "tf", ".", "slice", "(", "x", ",", "[", "0", ",", "1", ",", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "x_num_outer_h_blocks", "-", "2", ",", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", "]", ")", "num_blocks_h", "=", "(", "x_num_outer_h_blocks", "-", "2", ")", "//", "2", "x_left_right_blocks", "=", "tf", ".", "reshape", "(", "x_left_right_blocks", ",", "[", "-", "1", ",", "num_blocks_h", ",", "2", ",", "x_num_outer_w_blocks", ",", "x_memory_flange_h", ",", "x_memory_flange_w", ",", "depth", "]", ")", "x_left_right_blocks", "=", "tf", ".", "transpose", "(", "x_left_right_blocks", ",", "[", "0", ",", "1", ",", "3", ",", "2", ",", "4", ",", "5", ",", "6", "]", ")", "x_left_right_blocks", "=", "tf", ".", "reshape", "(", "x_left_right_blocks", ",", "[", "-", "1", ",", "num_blocks_h", ",", "x_num_outer_w_blocks", ",", "2", "*", "x_memory_flange_h", ",", "x_memory_flange_w", ",", "depth", "]", ")", "# get it ready for splitting the left and right memory blocks", "x_left_blocks", ",", "x_right_blocks", "=", "_split_along_width", "(", "x_left_right_blocks", ")", "return", "x_left_blocks", ",", "x_right_blocks" ]
Helper function. Assumes that memory_flange is half of query sizes. This function splits the tensor of width 'n' into two halves, where the first half gets the width indices 0, 2, 4.. and the second half gets the width indices 3, 5, ... We also fuse two blocks along the h dimension. Args: x: a 6-d tensor. Returns: x_left_blocks, x_right_blocks: Two 6-d tensors
[ "Helper", "function", ".", "Assumes", "that", "memory_flange", "is", "half", "of", "query", "sizes", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2268-L2303
train
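The height-fusion step that _get_left_right_blocks performs before the width split can be mimicked with plain reshapes. The following NumPy sketch (toy shapes, standalone, not the TF implementation) shows how dropping the top and bottom flange rows and fusing vertical pairs yields blocks of height 2*fh:

```python
import numpy as np

# Outer memory blocks: [batch, num_outer_h, num_outer_w, fh, fw, depth].
batch, num_outer_h, num_outer_w, fh, fw, d = 1, 6, 6, 2, 2, 1
x = np.zeros((batch, num_outer_h, num_outer_w, fh, fw, d))

# Drop the top and bottom rows of flange blocks, then fuse vertical pairs so
# each fused block has height 2*fh (the height of one query block when the
# flange is half the query size).
inner = x[:, 1:-1]                                            # (1, 4, 6, 2, 2, 1)
num_h = (num_outer_h - 2) // 2
fused = inner.reshape(batch, num_h, 2, num_outer_w, fh, fw, d)
fused = fused.transpose(0, 1, 3, 2, 4, 5, 6)                  # pair axis next to fh
fused = fused.reshape(batch, num_h, num_outer_w, 2 * fh, fw, d)
print(fused.shape)  # (1, 2, 6, 4, 2, 1) -- ready for the alternate-width split
```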
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
_extract_blocks
def _extract_blocks(x, block_h, block_w): """Helper function for local 2d attention. Args: x: a [batch, height, width, depth] tensor block_h: An integer. block height block_w: An integer. block width Returns: a [batch, height/block_h, width/block_w, block_h, block_w, depth] tensor """ (_, height, width, depth) = common_layers.shape_list(x) assert height % block_h == 0 assert width % block_w == 0 x = tf.reshape(x, [-1, height//block_h, block_h, width//block_w, block_w, depth]) return tf.transpose(x, [0, 1, 3, 2, 4, 5])
python
def _extract_blocks(x, block_h, block_w): """Helper function for local 2d attention. Args: x: a [batch, height, width, depth] tensor block_h: An integer. block height block_w: An integer. block width Returns: a [batch, height/block_h, width/block_w, block_h, block_w, depth] tensor """ (_, height, width, depth) = common_layers.shape_list(x) assert height % block_h == 0 assert width % block_w == 0 x = tf.reshape(x, [-1, height//block_h, block_h, width//block_w, block_w, depth]) return tf.transpose(x, [0, 1, 3, 2, 4, 5])
[ "def", "_extract_blocks", "(", "x", ",", "block_h", ",", "block_w", ")", ":", "(", "_", ",", "height", ",", "width", ",", "depth", ")", "=", "common_layers", ".", "shape_list", "(", "x", ")", "assert", "height", "%", "block_h", "==", "0", "assert", "width", "%", "block_w", "==", "0", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "height", "//", "block_h", ",", "block_h", ",", "width", "//", "block_w", ",", "block_w", ",", "depth", "]", ")", "return", "tf", ".", "transpose", "(", "x", ",", "[", "0", ",", "1", ",", "3", ",", "2", ",", "4", ",", "5", "]", ")" ]
Helper function for local 2d attention. Args: x: a [batch, height, width, depth] tensor block_h: An integer. block height block_w: An integer. block width Returns: a [batch, height/block_h, width/block_w, block_h, block_w, depth] tensor
[ "Helper", "function", "for", "local", "2d", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2307-L2323
train
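_extract_blocks is a reshape-and-transpose that cuts a feature map into non-overlapping blocks. A tiny NumPy equivalent (illustrative values only) makes the resulting block layout concrete:

```python
import numpy as np

# A 4x6 "image" with 1 channel, cut into non-overlapping 2x3 blocks.
batch, height, width, depth = 1, 4, 6, 1
block_h, block_w = 2, 3
x = np.arange(batch * height * width * depth).reshape(batch, height, width, depth)

blocks = x.reshape(batch, height // block_h, block_h,
                   width // block_w, block_w, depth)
blocks = blocks.transpose(0, 1, 3, 2, 4, 5)   # group the two block-grid axes
print(blocks.shape)              # (1, 2, 2, 2, 3, 1)
print(blocks[0, 0, 0, ..., 0])   # top-left block: [[0 1 2], [6 7 8]]
```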
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
get_2d_local_memory
def get_2d_local_memory(x, query_shape, memory_flange): """Stitches together the local 2d memory blocks. Args: x: a [batch, height, width, depth tensor] query_shape: 2-d integer list of query shape memory_flange: 2-d integer list of memory flanges Returns: x: A [batch, num_h_blocks, num_w_blocks, query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]] tensor. """ (_, height, width, depth_x) = common_layers.shape_list(x) x_center_blocks = _extract_blocks(x, query_shape[0], query_shape[1]) # add extra padding to x so that we can extract the memory region # around the center paddings = [[0, 0], [memory_flange[0], memory_flange[0]], [memory_flange[1], memory_flange[1]], [0, 0]] padded_x = tf.pad(x, paddings) padded_x.set_shape([None, height+2*memory_flange[0], width+2*memory_flange[1], depth_x]) x_outer_memory_blocks = _extract_blocks(padded_x, memory_flange[0], memory_flange[1]) # We'll extract left and right memory blocks, top and bottom memory blocks, # and then the corner memory blocks # Each of these after will have shape # [batch, num_h_blocks, num_w_blocks, query_shape[0], # memory_flange[1], depth] x_left_blocks, x_right_blocks = _get_left_right_blocks( x_outer_memory_blocks) t_hw_block = lambda x: tf.transpose(x, [0, 2, 1, 4, 3, 5]) # now to get top and bottom blocks, we should just transpose the outer # blocks, call the same function and transpose back to get shape # [batch, num_h_blocks, num_w_blocks, memory_flange[0], # query_shape[1], depth] x_top_center_blocks, x_bottom_center_blocks = ( map(t_hw_block, _get_left_right_blocks( t_hw_block(x_outer_memory_blocks)))) # now to get the corner blocks x_left_corner_blocks, x_right_corner_blocks = _split_along_width( x_outer_memory_blocks) # now to extract top and bottom for both k and v # we need to transpose because _split_along_width separates along # the width # each of these should have shape [batch, num_h_blocks, # num_w_blocks, memory_flange[0], memory_flange[1], depth] t_hw = lambda x: tf.transpose(x, [0, 2, 1, 3, 4, 5]) x_top_left_corner_blocks, x_bottom_left_corner_blocks = ( map(t_hw, _split_along_width(t_hw(x_left_corner_blocks)))) x_top_right_corner_blocks, x_bottom_right_corner_blocks = ( map(t_hw, _split_along_width(t_hw(x_right_corner_blocks)))) # The memory is top_left top_center top_right # left_center middle right_center # bottom_left bottom_center bottom_right # Assembling the above row by row # first [x_top_left, x_top, x_top_right] # to get [batch, num_h_blocks, num_w_blocks, memory_flange[0], # query_shape[1]+2*memory_flange[1], depth] # then [x_left, x_center, x_right] # then [x_bottom_left, x_bottom, x_bottom_right] x_top_memory = tf.concat( [x_top_left_corner_blocks, x_top_center_blocks, x_top_right_corner_blocks], axis=4) x_middle_memory = tf.concat( [x_left_blocks, x_center_blocks, x_right_blocks], axis=4) x_bottom_memory = tf.concat( [x_bottom_left_corner_blocks, x_bottom_center_blocks, x_bottom_right_corner_blocks], axis=4) # concat along height x = tf.concat([x_top_memory, x_middle_memory, x_bottom_memory], axis=3) return x
python
def get_2d_local_memory(x, query_shape, memory_flange): """Stitches together the local 2d memory blocks. Args: x: a [batch, height, width, depth tensor] query_shape: 2-d integer list of query shape memory_flange: 2-d integer list of memory flanges Returns: x: A [batch, num_h_blocks, num_w_blocks, query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]] tensor. """ (_, height, width, depth_x) = common_layers.shape_list(x) x_center_blocks = _extract_blocks(x, query_shape[0], query_shape[1]) # add extra padding to x so that we can extract the memory region # around the center paddings = [[0, 0], [memory_flange[0], memory_flange[0]], [memory_flange[1], memory_flange[1]], [0, 0]] padded_x = tf.pad(x, paddings) padded_x.set_shape([None, height+2*memory_flange[0], width+2*memory_flange[1], depth_x]) x_outer_memory_blocks = _extract_blocks(padded_x, memory_flange[0], memory_flange[1]) # We'll extract left and right memory blocks, top and bottom memory blocks, # and then the corner memory blocks # Each of these after will have shape # [batch, num_h_blocks, num_w_blocks, query_shape[0], # memory_flange[1], depth] x_left_blocks, x_right_blocks = _get_left_right_blocks( x_outer_memory_blocks) t_hw_block = lambda x: tf.transpose(x, [0, 2, 1, 4, 3, 5]) # now to get top and bottom blocks, we should just transpose the outer # blocks, call the same function and transpose back to get shape # [batch, num_h_blocks, num_w_blocks, memory_flange[0], # query_shape[1], depth] x_top_center_blocks, x_bottom_center_blocks = ( map(t_hw_block, _get_left_right_blocks( t_hw_block(x_outer_memory_blocks)))) # now to get the corner blocks x_left_corner_blocks, x_right_corner_blocks = _split_along_width( x_outer_memory_blocks) # now to extract top and bottom for both k and v # we need to transpose because _split_along_width separates along # the width # each of these should have shape [batch, num_h_blocks, # num_w_blocks, memory_flange[0], memory_flange[1], depth] t_hw = lambda x: tf.transpose(x, [0, 2, 1, 3, 4, 5]) x_top_left_corner_blocks, x_bottom_left_corner_blocks = ( map(t_hw, _split_along_width(t_hw(x_left_corner_blocks)))) x_top_right_corner_blocks, x_bottom_right_corner_blocks = ( map(t_hw, _split_along_width(t_hw(x_right_corner_blocks)))) # The memory is top_left top_center top_right # left_center middle right_center # bottom_left bottom_center bottom_right # Assembling the above row by row # first [x_top_left, x_top, x_top_right] # to get [batch, num_h_blocks, num_w_blocks, memory_flange[0], # query_shape[1]+2*memory_flange[1], depth] # then [x_left, x_center, x_right] # then [x_bottom_left, x_bottom, x_bottom_right] x_top_memory = tf.concat( [x_top_left_corner_blocks, x_top_center_blocks, x_top_right_corner_blocks], axis=4) x_middle_memory = tf.concat( [x_left_blocks, x_center_blocks, x_right_blocks], axis=4) x_bottom_memory = tf.concat( [x_bottom_left_corner_blocks, x_bottom_center_blocks, x_bottom_right_corner_blocks], axis=4) # concat along height x = tf.concat([x_top_memory, x_middle_memory, x_bottom_memory], axis=3) return x
[ "def", "get_2d_local_memory", "(", "x", ",", "query_shape", ",", "memory_flange", ")", ":", "(", "_", ",", "height", ",", "width", ",", "depth_x", ")", "=", "common_layers", ".", "shape_list", "(", "x", ")", "x_center_blocks", "=", "_extract_blocks", "(", "x", ",", "query_shape", "[", "0", "]", ",", "query_shape", "[", "1", "]", ")", "# add extra padding to x so that we can extract the memory region", "# around the center", "paddings", "=", "[", "[", "0", ",", "0", "]", ",", "[", "memory_flange", "[", "0", "]", ",", "memory_flange", "[", "0", "]", "]", ",", "[", "memory_flange", "[", "1", "]", ",", "memory_flange", "[", "1", "]", "]", ",", "[", "0", ",", "0", "]", "]", "padded_x", "=", "tf", ".", "pad", "(", "x", ",", "paddings", ")", "padded_x", ".", "set_shape", "(", "[", "None", ",", "height", "+", "2", "*", "memory_flange", "[", "0", "]", ",", "width", "+", "2", "*", "memory_flange", "[", "1", "]", ",", "depth_x", "]", ")", "x_outer_memory_blocks", "=", "_extract_blocks", "(", "padded_x", ",", "memory_flange", "[", "0", "]", ",", "memory_flange", "[", "1", "]", ")", "# We'll extract left and right memory blocks, top and bottom memory blocks,", "# and then the corner memory blocks", "# Each of these after will have shape", "# [batch, num_h_blocks, num_w_blocks, query_shape[0],", "# memory_flange[1], depth]", "x_left_blocks", ",", "x_right_blocks", "=", "_get_left_right_blocks", "(", "x_outer_memory_blocks", ")", "t_hw_block", "=", "lambda", "x", ":", "tf", ".", "transpose", "(", "x", ",", "[", "0", ",", "2", ",", "1", ",", "4", ",", "3", ",", "5", "]", ")", "# now to get top and bottom blocks, we should just transpose the outer", "# blocks, call the same function and transpose back to get shape", "# [batch, num_h_blocks, num_w_blocks, memory_flange[0],", "# query_shape[1], depth]", "x_top_center_blocks", ",", "x_bottom_center_blocks", "=", "(", "map", "(", "t_hw_block", ",", "_get_left_right_blocks", "(", "t_hw_block", "(", "x_outer_memory_blocks", ")", ")", ")", ")", "# now to get the corner blocks", "x_left_corner_blocks", ",", "x_right_corner_blocks", "=", "_split_along_width", "(", "x_outer_memory_blocks", ")", "# now to extract top and bottom for both k and v", "# we need to transpose because _split_along_width separates along", "# the width", "# each of these should have shape [batch, num_h_blocks,", "# num_w_blocks, memory_flange[0], memory_flange[1], depth]", "t_hw", "=", "lambda", "x", ":", "tf", ".", "transpose", "(", "x", ",", "[", "0", ",", "2", ",", "1", ",", "3", ",", "4", ",", "5", "]", ")", "x_top_left_corner_blocks", ",", "x_bottom_left_corner_blocks", "=", "(", "map", "(", "t_hw", ",", "_split_along_width", "(", "t_hw", "(", "x_left_corner_blocks", ")", ")", ")", ")", "x_top_right_corner_blocks", ",", "x_bottom_right_corner_blocks", "=", "(", "map", "(", "t_hw", ",", "_split_along_width", "(", "t_hw", "(", "x_right_corner_blocks", ")", ")", ")", ")", "# The memory is top_left top_center top_right", "# left_center middle right_center", "# bottom_left bottom_center bottom_right", "# Assembling the above row by row", "# first [x_top_left, x_top, x_top_right]", "# to get [batch, num_h_blocks, num_w_blocks, memory_flange[0],", "# query_shape[1]+2*memory_flange[1], depth]", "# then [x_left, x_center, x_right]", "# then [x_bottom_left, x_bottom, x_bottom_right]", "x_top_memory", "=", "tf", ".", "concat", "(", "[", "x_top_left_corner_blocks", ",", "x_top_center_blocks", ",", "x_top_right_corner_blocks", "]", ",", "axis", "=", "4", ")", "x_middle_memory", "=", "tf", ".", 
"concat", "(", "[", "x_left_blocks", ",", "x_center_blocks", ",", "x_right_blocks", "]", ",", "axis", "=", "4", ")", "x_bottom_memory", "=", "tf", ".", "concat", "(", "[", "x_bottom_left_corner_blocks", ",", "x_bottom_center_blocks", ",", "x_bottom_right_corner_blocks", "]", ",", "axis", "=", "4", ")", "# concat along height", "x", "=", "tf", ".", "concat", "(", "[", "x_top_memory", ",", "x_middle_memory", ",", "x_bottom_memory", "]", ",", "axis", "=", "3", ")", "return", "x" ]
Stitches together the local 2d memory blocks. Args: x: a [batch, height, width, depth] tensor query_shape: 2-d integer list of query shape memory_flange: 2-d integer list of memory flanges Returns: x: A [batch, num_h_blocks, num_w_blocks, query_shape[0]+2*memory_flange[0], query_shape[1]+2*memory_flange[1], depth] tensor.
[ "Stitches", "together", "the", "local", "2d", "memory", "blocks", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2326-L2404
train
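get_2d_local_memory assembles, for every query block, a memory window that extends one flange beyond the block on each side. The same windows can be obtained directly by padding and slicing, as in this standalone NumPy sketch (toy sizes, explicit loops instead of the corner/edge/center concatenation used above):

```python
import numpy as np

# Toy feature map and block geometry (values chosen only for the sketch).
batch, height, width, depth = 1, 4, 4, 1
query_shape = (2, 2)
memory_flange = (1, 1)
x = np.arange(batch * height * width * depth).reshape(batch, height, width, depth)

# Zero-pad by the flange, then slice one memory window per query block.
padded = np.pad(x, [(0, 0), (memory_flange[0],) * 2, (memory_flange[1],) * 2, (0, 0)])
num_h = height // query_shape[0]
num_w = width // query_shape[1]
mem_h = query_shape[0] + 2 * memory_flange[0]
mem_w = query_shape[1] + 2 * memory_flange[1]

memory = np.zeros((batch, num_h, num_w, mem_h, mem_w, depth), dtype=x.dtype)
for i in range(num_h):
    for j in range(num_w):
        r, c = i * query_shape[0], j * query_shape[1]
        memory[:, i, j] = padded[:, r:r + mem_h, c:c + mem_w]
print(memory.shape)  # (1, 2, 2, 4, 4, 1)
```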
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
get_2d_local_memory_v2
def get_2d_local_memory_v2(x, query_shape, memory_flange): """Gathering memory blocks around query blocks. flange is half of query . Only works if memory flanges are half of query sizes. Args: x: a [batch, height, width, depth tensor] query_shape: 2-d integer list of query shape memory_flange: 2-d integer list of memory flanges Returns: x: A [batch, num_h_blocks, num_w_blocks, query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]] tensor. """ (_, height, width, depth_x) = common_layers.shape_list(x) # add extra padding to x so that we can extract the memory region # around the center paddings = [[0, 0], [memory_flange[0], memory_flange[0]], [memory_flange[1], memory_flange[1]], [0, 0]] padded_x = tf.pad(x, paddings) padded_x.set_shape([None, height+2*memory_flange[0], width+2*memory_flange[1], depth_x]) num_h_memory_blocks = height//query_shape[0] + 1 num_w_memory_blocks = width//query_shape[1] + 1 x_memory_blocks = _extract_blocks(padded_x, query_shape[0], query_shape[1]) x_width_blocks = tf.split(x_memory_blocks, num_w_memory_blocks, 2) x_left_width = tf.concat(x_width_blocks[:num_w_memory_blocks - 1], axis=2) x_right_width = tf.concat(x_width_blocks[1:], axis=2) x_memory_blocks = tf.concat([x_left_width, x_right_width], axis=4) x_height_blocks = tf.split(x_memory_blocks, num_h_memory_blocks, 1) x_top_height = tf.concat(x_height_blocks[:num_h_memory_blocks - 1], axis=1) x_bottom_height = tf.concat(x_height_blocks[1:], axis=1) x = tf.concat([x_top_height, x_bottom_height], axis=3) return x
python
def get_2d_local_memory_v2(x, query_shape, memory_flange): """Gathering memory blocks around query blocks. flange is half of query . Only works if memory flanges are half of query sizes. Args: x: a [batch, height, width, depth tensor] query_shape: 2-d integer list of query shape memory_flange: 2-d integer list of memory flanges Returns: x: A [batch, num_h_blocks, num_w_blocks, query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]] tensor. """ (_, height, width, depth_x) = common_layers.shape_list(x) # add extra padding to x so that we can extract the memory region # around the center paddings = [[0, 0], [memory_flange[0], memory_flange[0]], [memory_flange[1], memory_flange[1]], [0, 0]] padded_x = tf.pad(x, paddings) padded_x.set_shape([None, height+2*memory_flange[0], width+2*memory_flange[1], depth_x]) num_h_memory_blocks = height//query_shape[0] + 1 num_w_memory_blocks = width//query_shape[1] + 1 x_memory_blocks = _extract_blocks(padded_x, query_shape[0], query_shape[1]) x_width_blocks = tf.split(x_memory_blocks, num_w_memory_blocks, 2) x_left_width = tf.concat(x_width_blocks[:num_w_memory_blocks - 1], axis=2) x_right_width = tf.concat(x_width_blocks[1:], axis=2) x_memory_blocks = tf.concat([x_left_width, x_right_width], axis=4) x_height_blocks = tf.split(x_memory_blocks, num_h_memory_blocks, 1) x_top_height = tf.concat(x_height_blocks[:num_h_memory_blocks - 1], axis=1) x_bottom_height = tf.concat(x_height_blocks[1:], axis=1) x = tf.concat([x_top_height, x_bottom_height], axis=3) return x
[ "def", "get_2d_local_memory_v2", "(", "x", ",", "query_shape", ",", "memory_flange", ")", ":", "(", "_", ",", "height", ",", "width", ",", "depth_x", ")", "=", "common_layers", ".", "shape_list", "(", "x", ")", "# add extra padding to x so that we can extract the memory region", "# around the center", "paddings", "=", "[", "[", "0", ",", "0", "]", ",", "[", "memory_flange", "[", "0", "]", ",", "memory_flange", "[", "0", "]", "]", ",", "[", "memory_flange", "[", "1", "]", ",", "memory_flange", "[", "1", "]", "]", ",", "[", "0", ",", "0", "]", "]", "padded_x", "=", "tf", ".", "pad", "(", "x", ",", "paddings", ")", "padded_x", ".", "set_shape", "(", "[", "None", ",", "height", "+", "2", "*", "memory_flange", "[", "0", "]", ",", "width", "+", "2", "*", "memory_flange", "[", "1", "]", ",", "depth_x", "]", ")", "num_h_memory_blocks", "=", "height", "//", "query_shape", "[", "0", "]", "+", "1", "num_w_memory_blocks", "=", "width", "//", "query_shape", "[", "1", "]", "+", "1", "x_memory_blocks", "=", "_extract_blocks", "(", "padded_x", ",", "query_shape", "[", "0", "]", ",", "query_shape", "[", "1", "]", ")", "x_width_blocks", "=", "tf", ".", "split", "(", "x_memory_blocks", ",", "num_w_memory_blocks", ",", "2", ")", "x_left_width", "=", "tf", ".", "concat", "(", "x_width_blocks", "[", ":", "num_w_memory_blocks", "-", "1", "]", ",", "axis", "=", "2", ")", "x_right_width", "=", "tf", ".", "concat", "(", "x_width_blocks", "[", "1", ":", "]", ",", "axis", "=", "2", ")", "x_memory_blocks", "=", "tf", ".", "concat", "(", "[", "x_left_width", ",", "x_right_width", "]", ",", "axis", "=", "4", ")", "x_height_blocks", "=", "tf", ".", "split", "(", "x_memory_blocks", ",", "num_h_memory_blocks", ",", "1", ")", "x_top_height", "=", "tf", ".", "concat", "(", "x_height_blocks", "[", ":", "num_h_memory_blocks", "-", "1", "]", ",", "axis", "=", "1", ")", "x_bottom_height", "=", "tf", ".", "concat", "(", "x_height_blocks", "[", "1", ":", "]", ",", "axis", "=", "1", ")", "x", "=", "tf", ".", "concat", "(", "[", "x_top_height", ",", "x_bottom_height", "]", ",", "axis", "=", "3", ")", "return", "x" ]
Gathering memory blocks around query blocks. flange is half of query . Only works if memory flanges are half of query sizes. Args: x: a [batch, height, width, depth] tensor query_shape: 2-d integer list of query shape memory_flange: 2-d integer list of memory flanges Returns: x: A [batch, num_h_blocks, num_w_blocks, query_shape[0]+2*memory_flange[0], query_shape[1]+2*memory_flange[1], depth] tensor.
[ "Gathering", "memory", "blocks", "around", "query", "blocks", ".", "flange", "is", "half", "of", "query", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2407-L2445
train
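get_2d_local_memory_v2 relies on the flange being half the query size, so each memory window is simply two adjacent blocks of the padded input concatenated. A 1-D NumPy sketch of that trick (made-up lengths; the function applies it along both height and width):

```python
import numpy as np

# 1-D version of the overlapping-window trick, assuming the memory flange
# equals half the query length.
length, query, flange = 8, 4, 2
x = np.arange(length)

padded = np.pad(x, (flange, flange))            # length + 2*flange = 12
blocks = padded.reshape(-1, query)              # 3 blocks of size 4
# Each memory window is two consecutive blocks: query + 2*flange = 8 wide.
memory = np.concatenate([blocks[:-1], blocks[1:]], axis=1)
print(memory)
# [[0 0 0 1 2 3 4 5]    <- window around queries 0..3 (zeros are padding)
#  [2 3 4 5 6 7 0 0]]   <- window around queries 4..7
```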
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
dot_product_unmasked_attention_local_2d_tpu
def dot_product_unmasked_attention_local_2d_tpu( q, k, v, bias, max_relative_position=None, query_shape=(8, 8), dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=False, dropout_broadcast_dims=None): """Calculate unmasked dot-product local self-attention 2d on tpu. Args: q: a Tensor with shape [batch, heads, height, width, depth]. k: a Tensor with shape [batch, heads, height, width, depth]. v: a Tensor with shape [batch, heads, height, width, depth]. bias: bias Tensor. max_relative_position: an integer the max relative embedding considered. Changing this invalidates checkpoints. query_shape: a two tuple indicating query shape dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. Returns: [batch, heads, height, width, depth] tensor, the output of attention. """ if max_relative_position: raise ValueError("Relative local 2d attention not implemented") with tf.variable_scope( name, default_name="dot_product_unmasked_attention_local_2d_tpu", values=[q, k, v]): # This calculation only works for self attention. # q, k and v must therefore have the same shape. q.get_shape().assert_is_compatible_with(k.get_shape()) q.get_shape().assert_is_compatible_with(v.get_shape()) orig_q_shape = common_layers.shape_list(q) # Pad query, key, value to ensure multiple of corresponding lengths. memory_flange = [int(query_shape[0]//2), int(query_shape[1]//2)] q = pad_to_multiple_2d(q, query_shape) k = pad_to_multiple_2d(k, query_shape) v = pad_to_multiple_2d(v, query_shape) q_shape = common_layers.shape_list(q) (height, width) = (q_shape[2], q_shape[3]) _, num_heads, height, width, depth_k = common_layers.shape_list(k) depth_v = common_layers.shape_list(v)[-1] num_h_blocks = height//query_shape[0] num_w_blocks = width//query_shape[1] # Extract center queries, keys, and values q = tf.reshape(q, [-1, height, width, depth_k]) queries = _extract_blocks( q, query_shape[0], query_shape[1]) k = tf.reshape(k, [-1, height, width, depth_k]) keys = get_2d_local_memory_v2( k, query_shape, memory_flange) v = tf.reshape(v, [-1, height, width, depth_v]) values = get_2d_local_memory_v2( v, query_shape, memory_flange) memory_h = query_shape[0] + 2*memory_flange[0] memory_w = query_shape[1] + 2*memory_flange[1] queries = tf.reshape(queries, [-1, num_heads, num_h_blocks, num_w_blocks, query_shape[0]*query_shape[1], depth_k]) keys = tf.reshape(keys, [-1, num_heads, num_h_blocks, num_w_blocks, memory_h*memory_w, depth_k]) values = tf.reshape(values, [-1, num_heads, num_h_blocks, num_w_blocks, memory_h*memory_w, depth_v]) logits = tf.matmul(queries, keys, transpose_b=True) if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") # Dropping out the attention links for each of the heads weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) ret = tf.matmul(weights, values) # we need to get it back to shape [batch, heads, height, width] ret = tf.reshape(ret, [-1, num_heads, num_h_blocks, num_w_blocks, query_shape[0], query_shape[1], depth_v]) ret = tf.transpose(ret, [0, 1, 2, 4, 3, 5, 6]) ret = tf.reshape(ret, [-1, num_heads, 
num_h_blocks*query_shape[0], num_w_blocks*query_shape[1], depth_v]) # slice if padding was introduced ret = tf.slice(ret, [0, 0, 0, 0, 0], [-1, -1, orig_q_shape[2], orig_q_shape[3], -1]) return ret
python
def dot_product_unmasked_attention_local_2d_tpu( q, k, v, bias, max_relative_position=None, query_shape=(8, 8), dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=False, dropout_broadcast_dims=None): """Calculate unmasked dot-product local self-attention 2d on tpu. Args: q: a Tensor with shape [batch, heads, height, width, depth]. k: a Tensor with shape [batch, heads, height, width, depth]. v: a Tensor with shape [batch, heads, height, width, depth]. bias: bias Tensor. max_relative_position: an integer the max relative embedding considered. Changing this invalidates checkpoints. query_shape: a two tuple indicating query shape dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. Returns: [batch, heads, height, width, depth] tensor, the output of attention. """ if max_relative_position: raise ValueError("Relative local 2d attention not implemented") with tf.variable_scope( name, default_name="dot_product_unmasked_attention_local_2d_tpu", values=[q, k, v]): # This calculation only works for self attention. # q, k and v must therefore have the same shape. q.get_shape().assert_is_compatible_with(k.get_shape()) q.get_shape().assert_is_compatible_with(v.get_shape()) orig_q_shape = common_layers.shape_list(q) # Pad query, key, value to ensure multiple of corresponding lengths. memory_flange = [int(query_shape[0]//2), int(query_shape[1]//2)] q = pad_to_multiple_2d(q, query_shape) k = pad_to_multiple_2d(k, query_shape) v = pad_to_multiple_2d(v, query_shape) q_shape = common_layers.shape_list(q) (height, width) = (q_shape[2], q_shape[3]) _, num_heads, height, width, depth_k = common_layers.shape_list(k) depth_v = common_layers.shape_list(v)[-1] num_h_blocks = height//query_shape[0] num_w_blocks = width//query_shape[1] # Extract center queries, keys, and values q = tf.reshape(q, [-1, height, width, depth_k]) queries = _extract_blocks( q, query_shape[0], query_shape[1]) k = tf.reshape(k, [-1, height, width, depth_k]) keys = get_2d_local_memory_v2( k, query_shape, memory_flange) v = tf.reshape(v, [-1, height, width, depth_v]) values = get_2d_local_memory_v2( v, query_shape, memory_flange) memory_h = query_shape[0] + 2*memory_flange[0] memory_w = query_shape[1] + 2*memory_flange[1] queries = tf.reshape(queries, [-1, num_heads, num_h_blocks, num_w_blocks, query_shape[0]*query_shape[1], depth_k]) keys = tf.reshape(keys, [-1, num_heads, num_h_blocks, num_w_blocks, memory_h*memory_w, depth_k]) values = tf.reshape(values, [-1, num_heads, num_h_blocks, num_w_blocks, memory_h*memory_w, depth_v]) logits = tf.matmul(queries, keys, transpose_b=True) if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") # Dropping out the attention links for each of the heads weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) ret = tf.matmul(weights, values) # we need to get it back to shape [batch, heads, height, width] ret = tf.reshape(ret, [-1, num_heads, num_h_blocks, num_w_blocks, query_shape[0], query_shape[1], depth_v]) ret = tf.transpose(ret, [0, 1, 2, 4, 3, 5, 6]) ret = tf.reshape(ret, [-1, num_heads, 
num_h_blocks*query_shape[0], num_w_blocks*query_shape[1], depth_v]) # slice if padding was introduced ret = tf.slice(ret, [0, 0, 0, 0, 0], [-1, -1, orig_q_shape[2], orig_q_shape[3], -1]) return ret
[ "def", "dot_product_unmasked_attention_local_2d_tpu", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "max_relative_position", "=", "None", ",", "query_shape", "=", "(", "8", ",", "8", ")", ",", "dropout_rate", "=", "0.0", ",", "image_shapes", "=", "None", ",", "name", "=", "None", ",", "make_image_summary", "=", "False", ",", "dropout_broadcast_dims", "=", "None", ")", ":", "if", "max_relative_position", ":", "raise", "ValueError", "(", "\"Relative local 2d attention not implemented\"", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"dot_product_unmasked_attention_local_2d_tpu\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "# This calculation only works for self attention.", "# q, k and v must therefore have the same shape.", "q", ".", "get_shape", "(", ")", ".", "assert_is_compatible_with", "(", "k", ".", "get_shape", "(", ")", ")", "q", ".", "get_shape", "(", ")", ".", "assert_is_compatible_with", "(", "v", ".", "get_shape", "(", ")", ")", "orig_q_shape", "=", "common_layers", ".", "shape_list", "(", "q", ")", "# Pad query, key, value to ensure multiple of corresponding lengths.", "memory_flange", "=", "[", "int", "(", "query_shape", "[", "0", "]", "//", "2", ")", ",", "int", "(", "query_shape", "[", "1", "]", "//", "2", ")", "]", "q", "=", "pad_to_multiple_2d", "(", "q", ",", "query_shape", ")", "k", "=", "pad_to_multiple_2d", "(", "k", ",", "query_shape", ")", "v", "=", "pad_to_multiple_2d", "(", "v", ",", "query_shape", ")", "q_shape", "=", "common_layers", ".", "shape_list", "(", "q", ")", "(", "height", ",", "width", ")", "=", "(", "q_shape", "[", "2", "]", ",", "q_shape", "[", "3", "]", ")", "_", ",", "num_heads", ",", "height", ",", "width", ",", "depth_k", "=", "common_layers", ".", "shape_list", "(", "k", ")", "depth_v", "=", "common_layers", ".", "shape_list", "(", "v", ")", "[", "-", "1", "]", "num_h_blocks", "=", "height", "//", "query_shape", "[", "0", "]", "num_w_blocks", "=", "width", "//", "query_shape", "[", "1", "]", "# Extract center queries, keys, and values", "q", "=", "tf", ".", "reshape", "(", "q", ",", "[", "-", "1", ",", "height", ",", "width", ",", "depth_k", "]", ")", "queries", "=", "_extract_blocks", "(", "q", ",", "query_shape", "[", "0", "]", ",", "query_shape", "[", "1", "]", ")", "k", "=", "tf", ".", "reshape", "(", "k", ",", "[", "-", "1", ",", "height", ",", "width", ",", "depth_k", "]", ")", "keys", "=", "get_2d_local_memory_v2", "(", "k", ",", "query_shape", ",", "memory_flange", ")", "v", "=", "tf", ".", "reshape", "(", "v", ",", "[", "-", "1", ",", "height", ",", "width", ",", "depth_v", "]", ")", "values", "=", "get_2d_local_memory_v2", "(", "v", ",", "query_shape", ",", "memory_flange", ")", "memory_h", "=", "query_shape", "[", "0", "]", "+", "2", "*", "memory_flange", "[", "0", "]", "memory_w", "=", "query_shape", "[", "1", "]", "+", "2", "*", "memory_flange", "[", "1", "]", "queries", "=", "tf", ".", "reshape", "(", "queries", ",", "[", "-", "1", ",", "num_heads", ",", "num_h_blocks", ",", "num_w_blocks", ",", "query_shape", "[", "0", "]", "*", "query_shape", "[", "1", "]", ",", "depth_k", "]", ")", "keys", "=", "tf", ".", "reshape", "(", "keys", ",", "[", "-", "1", ",", "num_heads", ",", "num_h_blocks", ",", "num_w_blocks", ",", "memory_h", "*", "memory_w", ",", "depth_k", "]", ")", "values", "=", "tf", ".", "reshape", "(", "values", ",", "[", "-", "1", ",", "num_heads", ",", "num_h_blocks", ",", "num_w_blocks", ",", "memory_h", "*", "memory_w", ",", "depth_v", "]", ")", "logits", 
"=", "tf", ".", "matmul", "(", "queries", ",", "keys", ",", "transpose_b", "=", "True", ")", "if", "bias", "is", "not", "None", ":", "logits", "+=", "bias", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "logits", ",", "name", "=", "\"attention_weights\"", ")", "# Dropping out the attention links for each of the heads", "weights", "=", "common_layers", ".", "dropout_with_broadcast_dims", "(", "weights", ",", "1.0", "-", "dropout_rate", ",", "broadcast_dims", "=", "dropout_broadcast_dims", ")", "if", "common_layers", ".", "should_generate_summaries", "(", ")", "and", "make_image_summary", ":", "attention_image_summary", "(", "weights", ",", "image_shapes", ")", "ret", "=", "tf", ".", "matmul", "(", "weights", ",", "values", ")", "# we need to get it back to shape [batch, heads, height, width]", "ret", "=", "tf", ".", "reshape", "(", "ret", ",", "[", "-", "1", ",", "num_heads", ",", "num_h_blocks", ",", "num_w_blocks", ",", "query_shape", "[", "0", "]", ",", "query_shape", "[", "1", "]", ",", "depth_v", "]", ")", "ret", "=", "tf", ".", "transpose", "(", "ret", ",", "[", "0", ",", "1", ",", "2", ",", "4", ",", "3", ",", "5", ",", "6", "]", ")", "ret", "=", "tf", ".", "reshape", "(", "ret", ",", "[", "-", "1", ",", "num_heads", ",", "num_h_blocks", "*", "query_shape", "[", "0", "]", ",", "num_w_blocks", "*", "query_shape", "[", "1", "]", ",", "depth_v", "]", ")", "# slice if padding was introduced", "ret", "=", "tf", ".", "slice", "(", "ret", ",", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "orig_q_shape", "[", "2", "]", ",", "orig_q_shape", "[", "3", "]", ",", "-", "1", "]", ")", "return", "ret" ]
Calculate unmasked dot-product local self-attention 2d on tpu. Args: q: a Tensor with shape [batch, heads, height, width, depth]. k: a Tensor with shape [batch, heads, height, width, depth]. v: a Tensor with shape [batch, heads, height, width, depth]. bias: bias Tensor. max_relative_position: an integer the max relative embedding considered. Changing this invalidates checkpoints. query_shape: a two tuple indicating query shape dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. name: an optional string. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. Returns: [batch, heads, height, width, depth] tensor, the output of attention.
[ "Calculate", "unmasked", "dot", "-", "product", "local", "self", "-", "attention", "2d", "on", "tpu", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2448-L2537
train
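Putting the pieces together, dot_product_unmasked_attention_local_2d_tpu runs ordinary dot-product attention independently inside each query block against its flanged memory window. A single-head NumPy sketch of that blockwise computation (toy shapes, no bias, no dropout; an illustration rather than the TPU implementation):

```python
import numpy as np

def softmax(z, axis=-1):
    z = z - z.max(axis=axis, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

# Toy single-head local 2-D attention over 2x2 query blocks (flange = 1x1).
rng = np.random.RandomState(0)
height, width, depth = 4, 4, 8
q = rng.randn(height, width, depth)
k = rng.randn(height, width, depth)
v = rng.randn(height, width, depth)
qs, fl = (2, 2), (1, 1)

pad = lambda t: np.pad(t, [(fl[0],) * 2, (fl[1],) * 2, (0, 0)])
kp, vp = pad(k), pad(v)
out = np.zeros_like(v)
for i in range(0, height, qs[0]):
    for j in range(0, width, qs[1]):
        q_blk = q[i:i + qs[0], j:j + qs[1]].reshape(-1, depth)
        k_blk = kp[i:i + qs[0] + 2 * fl[0], j:j + qs[1] + 2 * fl[1]].reshape(-1, depth)
        v_blk = vp[i:i + qs[0] + 2 * fl[0], j:j + qs[1] + 2 * fl[1]].reshape(-1, depth)
        w = softmax(q_blk @ k_blk.T)                     # [4, 16] attention weights
        out[i:i + qs[0], j:j + qs[1]] = (w @ v_blk).reshape(qs[0], qs[1], depth)
print(out.shape)  # (4, 4, 8)
```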
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
dot_product_unmasked_attention_local_2d_tpu_simple
def dot_product_unmasked_attention_local_2d_tpu_simple( x, bias, total_key_depth, total_value_depth, num_heads, query_shape=(8, 8), dropout_rate=0.0, image_shapes=None, make_image_summary=False, dropout_broadcast_dims=None): """Calculate simple unmasked dot-product local self-attention 2d on tpu. The query, key, and value blocks are the same. We do not do a second linear transformation after computing the values Args: x: a Tensor with shape [batch, height, width, depth]. bias: bias Tensor. total_key_depth: the dimensions of the keys total_value_depth: the dimensions of the values num_heads: number of heads query_shape: a two tuple indicating query shape dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. Returns: ret: [batch, height, width, total_value_depth] tensor, the output of attention. q: [batch, height, width, total_key_depth] query tensor k: [batch, height, width, total_key_depth] key tensor v: [batch, height, width, total_value_depth] value tensor """ # This calculation only works for self attention. # q, k and v must therefore have the same shape. orig_x_shape = common_layers.shape_list(x) # Pad query, key, value to ensure multiple of corresponding lengths if # necessary is_padded = False if (orig_x_shape[1]%query_shape[0]) != 0 or ( orig_x_shape[2]%query_shape[1]) != 0: x = pad_to_multiple_2d(x, query_shape) is_padded = True _, height, width, depth = common_layers.shape_list(x) assert depth%num_heads == 0 num_h_blocks = height//query_shape[0] num_w_blocks = width//query_shape[1] # Extract center queries, keys, and values x_blocks = _extract_blocks(x, query_shape[0], query_shape[1]) x_blocks = tf.reshape(x_blocks, [-1, query_shape[0]*query_shape[1], depth]) q, k, v = compute_qkv(x_blocks, None, total_key_depth, total_value_depth) hsplit = lambda x: split_heads(x, num_heads) q, k, v = map(hsplit, [q, k, v]) logits = tf.matmul(q, k, transpose_b=True) if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") # Dropping out the attention links for each of the heads weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) output = tf.matmul(weights, v) output = combine_heads(output) # we need to get it back to shape [batch, height, width] ret = tf.reshape(output, [-1, num_h_blocks, num_w_blocks, query_shape[0], query_shape[1], total_value_depth]) ret = tf.transpose(ret, [0, 1, 3, 2, 4, 5]) ret = tf.reshape(ret, [-1, num_h_blocks*query_shape[0], num_w_blocks*query_shape[1], total_value_depth]) # slice if padding was introduced if is_padded: ret = tf.slice(ret, [0, 0, 0, 0], [-1, orig_x_shape[1], orig_x_shape[2], -1]) return ret, q, k, v
python
def dot_product_unmasked_attention_local_2d_tpu_simple( x, bias, total_key_depth, total_value_depth, num_heads, query_shape=(8, 8), dropout_rate=0.0, image_shapes=None, make_image_summary=False, dropout_broadcast_dims=None): """Calculate simple unmasked dot-product local self-attention 2d on tpu. The query, key, and value blocks are the same. We do not do a second linear transformation after computing the values Args: x: a Tensor with shape [batch, height, width, depth]. bias: bias Tensor. total_key_depth: the dimensions of the keys total_value_depth: the dimensions of the values num_heads: number of heads query_shape: a two tuple indicating query shape dropout_rate: a floating point number. image_shapes: optional tuple of integer scalars. make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. Returns: ret: [batch, height, width, total_value_depth] tensor, the output of attention. q: [batch, height, width, total_key_depth] query tensor k: [batch, height, width, total_key_depth] key tensor v: [batch, height, width, total_value_depth] value tensor """ # This calculation only works for self attention. # q, k and v must therefore have the same shape. orig_x_shape = common_layers.shape_list(x) # Pad query, key, value to ensure multiple of corresponding lengths if # necessary is_padded = False if (orig_x_shape[1]%query_shape[0]) != 0 or ( orig_x_shape[2]%query_shape[1]) != 0: x = pad_to_multiple_2d(x, query_shape) is_padded = True _, height, width, depth = common_layers.shape_list(x) assert depth%num_heads == 0 num_h_blocks = height//query_shape[0] num_w_blocks = width//query_shape[1] # Extract center queries, keys, and values x_blocks = _extract_blocks(x, query_shape[0], query_shape[1]) x_blocks = tf.reshape(x_blocks, [-1, query_shape[0]*query_shape[1], depth]) q, k, v = compute_qkv(x_blocks, None, total_key_depth, total_value_depth) hsplit = lambda x: split_heads(x, num_heads) q, k, v = map(hsplit, [q, k, v]) logits = tf.matmul(q, k, transpose_b=True) if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") # Dropping out the attention links for each of the heads weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(weights, image_shapes) output = tf.matmul(weights, v) output = combine_heads(output) # we need to get it back to shape [batch, height, width] ret = tf.reshape(output, [-1, num_h_blocks, num_w_blocks, query_shape[0], query_shape[1], total_value_depth]) ret = tf.transpose(ret, [0, 1, 3, 2, 4, 5]) ret = tf.reshape(ret, [-1, num_h_blocks*query_shape[0], num_w_blocks*query_shape[1], total_value_depth]) # slice if padding was introduced if is_padded: ret = tf.slice(ret, [0, 0, 0, 0], [-1, orig_x_shape[1], orig_x_shape[2], -1]) return ret, q, k, v
[ "def", "dot_product_unmasked_attention_local_2d_tpu_simple", "(", "x", ",", "bias", ",", "total_key_depth", ",", "total_value_depth", ",", "num_heads", ",", "query_shape", "=", "(", "8", ",", "8", ")", ",", "dropout_rate", "=", "0.0", ",", "image_shapes", "=", "None", ",", "make_image_summary", "=", "False", ",", "dropout_broadcast_dims", "=", "None", ")", ":", "# This calculation only works for self attention.", "# q, k and v must therefore have the same shape.", "orig_x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "# Pad query, key, value to ensure multiple of corresponding lengths if", "# necessary", "is_padded", "=", "False", "if", "(", "orig_x_shape", "[", "1", "]", "%", "query_shape", "[", "0", "]", ")", "!=", "0", "or", "(", "orig_x_shape", "[", "2", "]", "%", "query_shape", "[", "1", "]", ")", "!=", "0", ":", "x", "=", "pad_to_multiple_2d", "(", "x", ",", "query_shape", ")", "is_padded", "=", "True", "_", ",", "height", ",", "width", ",", "depth", "=", "common_layers", ".", "shape_list", "(", "x", ")", "assert", "depth", "%", "num_heads", "==", "0", "num_h_blocks", "=", "height", "//", "query_shape", "[", "0", "]", "num_w_blocks", "=", "width", "//", "query_shape", "[", "1", "]", "# Extract center queries, keys, and values", "x_blocks", "=", "_extract_blocks", "(", "x", ",", "query_shape", "[", "0", "]", ",", "query_shape", "[", "1", "]", ")", "x_blocks", "=", "tf", ".", "reshape", "(", "x_blocks", ",", "[", "-", "1", ",", "query_shape", "[", "0", "]", "*", "query_shape", "[", "1", "]", ",", "depth", "]", ")", "q", ",", "k", ",", "v", "=", "compute_qkv", "(", "x_blocks", ",", "None", ",", "total_key_depth", ",", "total_value_depth", ")", "hsplit", "=", "lambda", "x", ":", "split_heads", "(", "x", ",", "num_heads", ")", "q", ",", "k", ",", "v", "=", "map", "(", "hsplit", ",", "[", "q", ",", "k", ",", "v", "]", ")", "logits", "=", "tf", ".", "matmul", "(", "q", ",", "k", ",", "transpose_b", "=", "True", ")", "if", "bias", "is", "not", "None", ":", "logits", "+=", "bias", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "logits", ",", "name", "=", "\"attention_weights\"", ")", "# Dropping out the attention links for each of the heads", "weights", "=", "common_layers", ".", "dropout_with_broadcast_dims", "(", "weights", ",", "1.0", "-", "dropout_rate", ",", "broadcast_dims", "=", "dropout_broadcast_dims", ")", "if", "common_layers", ".", "should_generate_summaries", "(", ")", "and", "make_image_summary", ":", "attention_image_summary", "(", "weights", ",", "image_shapes", ")", "output", "=", "tf", ".", "matmul", "(", "weights", ",", "v", ")", "output", "=", "combine_heads", "(", "output", ")", "# we need to get it back to shape [batch, height, width]", "ret", "=", "tf", ".", "reshape", "(", "output", ",", "[", "-", "1", ",", "num_h_blocks", ",", "num_w_blocks", ",", "query_shape", "[", "0", "]", ",", "query_shape", "[", "1", "]", ",", "total_value_depth", "]", ")", "ret", "=", "tf", ".", "transpose", "(", "ret", ",", "[", "0", ",", "1", ",", "3", ",", "2", ",", "4", ",", "5", "]", ")", "ret", "=", "tf", ".", "reshape", "(", "ret", ",", "[", "-", "1", ",", "num_h_blocks", "*", "query_shape", "[", "0", "]", ",", "num_w_blocks", "*", "query_shape", "[", "1", "]", ",", "total_value_depth", "]", ")", "# slice if padding was introduced", "if", "is_padded", ":", "ret", "=", "tf", ".", "slice", "(", "ret", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "orig_x_shape", "[", "1", "]", ",", "orig_x_shape", "[", "2", "]", ",", "-", "1", "]", 
")", "return", "ret", ",", "q", ",", "k", ",", "v" ]
Calculate simple unmasked dot-product local self-attention 2d on tpu.

The query, key, and value blocks are the same. We do not do a second linear
transformation after computing the values

Args:
  x: a Tensor with shape [batch, height, width, depth].
  bias: bias Tensor.
  total_key_depth: the dimensions of the keys
  total_value_depth: the dimensions of the values
  num_heads: number of heads
  query_shape: a two tuple indicating query shape
  dropout_rate: a floating point number.
  image_shapes: optional tuple of integer scalars.
  make_image_summary: Whether to make an attention image summary.
  dropout_broadcast_dims: an optional list of integers less than 4
    specifying in which dimensions to broadcast the dropout decisions.
    saves memory.

Returns:
  ret: [batch, height, width, total_value_depth] tensor, the output of
    attention.
  q: [batch, height, width, total_key_depth] query tensor
  k: [batch, height, width, total_key_depth] key tensor
  v: [batch, height, width, total_value_depth] value tensor
[ "Calculate", "simple", "unmasked", "dot", "-", "product", "local", "self", "-", "attention", "2d", "on", "tpu", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2540-L2615
train
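A hedged usage sketch for the simple variant recorded above, assuming TF 1.x graph mode and that common_attention can be imported from the tensor2tensor package named in the path field; the dimensions are chosen so no padding is needed (32 is a multiple of the 8x8 query_shape and the depth of 64 divides evenly by 8 heads).

import tensorflow as tf
from tensor2tensor.layers import common_attention

# [batch, height, width, depth] input feature map.
x = tf.random_normal([2, 32, 32, 64])
ret, q, k, v = common_attention.dot_product_unmasked_attention_local_2d_tpu_simple(
    x, bias=None, total_key_depth=64, total_value_depth=64,
    num_heads=8, query_shape=(8, 8))
# ret keeps the spatial shape of x, with total_value_depth output channels.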
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
masked_within_block_local_attention_1d
def masked_within_block_local_attention_1d(q, k, v, block_length=64, name=None): """Attention to the source and a neighborhood to the left within a block. The sequence is divided into blocks of length block_length. Attention for a given query position can only see memory positions less than or equal to the query position in the corresponding block. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v] """ with tf.variable_scope( name, default_name="within_local_attention_1d", values=[q, k, v]): batch, heads, length, depth_k = common_layers.shape_list(q) depth_v = common_layers.shape_list(v)[-1] if isinstance(block_length, tf.Tensor): const = tf.contrib.util.constant_value(block_length) if const is not None: block_length = int(const) # Pad query, key, value to ensure multiple of block length. original_length = length padding_size = tf.mod(-length, block_length) length += padding_size padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] q = tf.pad(q, padding) k = tf.pad(k, padding) v = tf.pad(v, padding) # Compute attention for all subsequent query blocks. num_blocks = tf.div(length, block_length) q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) # [batch, heads, num_blocks, block_length, block_length] attention = tf.matmul(q, k, transpose_b=True) attention += tf.reshape(attention_bias_lower_triangle(block_length), [1, 1, 1, block_length, block_length]) attention = tf.nn.softmax(attention) # [batch, heads, num_blocks, block_length, depth_v] output = tf.matmul(attention, v) output = tf.reshape(output, [batch, heads, -1, depth_v]) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in (batch, heads, length, depth_v)]) return output
python
def masked_within_block_local_attention_1d(q, k, v, block_length=64, name=None): """Attention to the source and a neighborhood to the left within a block. The sequence is divided into blocks of length block_length. Attention for a given query position can only see memory positions less than or equal to the query position in the corresponding block. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v] """ with tf.variable_scope( name, default_name="within_local_attention_1d", values=[q, k, v]): batch, heads, length, depth_k = common_layers.shape_list(q) depth_v = common_layers.shape_list(v)[-1] if isinstance(block_length, tf.Tensor): const = tf.contrib.util.constant_value(block_length) if const is not None: block_length = int(const) # Pad query, key, value to ensure multiple of block length. original_length = length padding_size = tf.mod(-length, block_length) length += padding_size padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] q = tf.pad(q, padding) k = tf.pad(k, padding) v = tf.pad(v, padding) # Compute attention for all subsequent query blocks. num_blocks = tf.div(length, block_length) q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) # [batch, heads, num_blocks, block_length, block_length] attention = tf.matmul(q, k, transpose_b=True) attention += tf.reshape(attention_bias_lower_triangle(block_length), [1, 1, 1, block_length, block_length]) attention = tf.nn.softmax(attention) # [batch, heads, num_blocks, block_length, depth_v] output = tf.matmul(attention, v) output = tf.reshape(output, [batch, heads, -1, depth_v]) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in (batch, heads, length, depth_v)]) return output
[ "def", "masked_within_block_local_attention_1d", "(", "q", ",", "k", ",", "v", ",", "block_length", "=", "64", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"within_local_attention_1d\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "batch", ",", "heads", ",", "length", ",", "depth_k", "=", "common_layers", ".", "shape_list", "(", "q", ")", "depth_v", "=", "common_layers", ".", "shape_list", "(", "v", ")", "[", "-", "1", "]", "if", "isinstance", "(", "block_length", ",", "tf", ".", "Tensor", ")", ":", "const", "=", "tf", ".", "contrib", ".", "util", ".", "constant_value", "(", "block_length", ")", "if", "const", "is", "not", "None", ":", "block_length", "=", "int", "(", "const", ")", "# Pad query, key, value to ensure multiple of block length.", "original_length", "=", "length", "padding_size", "=", "tf", ".", "mod", "(", "-", "length", ",", "block_length", ")", "length", "+=", "padding_size", "padding", "=", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "padding_size", "]", ",", "[", "0", ",", "0", "]", "]", "q", "=", "tf", ".", "pad", "(", "q", ",", "padding", ")", "k", "=", "tf", ".", "pad", "(", "k", ",", "padding", ")", "v", "=", "tf", ".", "pad", "(", "v", ",", "padding", ")", "# Compute attention for all subsequent query blocks.", "num_blocks", "=", "tf", ".", "div", "(", "length", ",", "block_length", ")", "q", "=", "tf", ".", "reshape", "(", "q", ",", "[", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ",", "depth_k", "]", ")", "k", "=", "tf", ".", "reshape", "(", "k", ",", "[", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ",", "depth_k", "]", ")", "v", "=", "tf", ".", "reshape", "(", "v", ",", "[", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ",", "depth_v", "]", ")", "# [batch, heads, num_blocks, block_length, block_length]", "attention", "=", "tf", ".", "matmul", "(", "q", ",", "k", ",", "transpose_b", "=", "True", ")", "attention", "+=", "tf", ".", "reshape", "(", "attention_bias_lower_triangle", "(", "block_length", ")", ",", "[", "1", ",", "1", ",", "1", ",", "block_length", ",", "block_length", "]", ")", "attention", "=", "tf", ".", "nn", ".", "softmax", "(", "attention", ")", "# [batch, heads, num_blocks, block_length, depth_v]", "output", "=", "tf", ".", "matmul", "(", "attention", ",", "v", ")", "output", "=", "tf", ".", "reshape", "(", "output", ",", "[", "batch", ",", "heads", ",", "-", "1", ",", "depth_v", "]", ")", "# Remove the padding if introduced.", "output", "=", "tf", ".", "slice", "(", "output", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "original_length", ",", "-", "1", "]", ")", "output", ".", "set_shape", "(", "[", "None", "if", "isinstance", "(", "dim", ",", "tf", ".", "Tensor", ")", "else", "dim", "for", "dim", "in", "(", "batch", ",", "heads", ",", "length", ",", "depth_v", ")", "]", ")", "return", "output" ]
Attention to the source and a neighborhood to the left within a block.

The sequence is divided into blocks of length block_length. Attention for a
given query position can only see memory positions less than or equal to the
query position in the corresponding block.

Args:
  q: a Tensor with shape [batch, heads, length, depth_k]
  k: a Tensor with shape [batch, heads, length, depth_k]
  v: a Tensor with shape [batch, heads, length, depth_v]
  block_length: an integer
  name: an optional string

Returns:
  a Tensor of shape [batch, heads, length, depth_v]
[ "Attention", "to", "the", "source", "and", "a", "neighborhood", "to", "the", "left", "within", "a", "block", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2618-L2671
train
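The within-block variant above never looks outside a position's own block. A small NumPy reference of the same masking pattern (not the library code, which operates on tf Tensors with possibly dynamic shapes):

import numpy as np

def within_block_causal_attention(q, k, v, block_length=64):
  # q, k: [batch, heads, length, depth_k]; v: [batch, heads, length, depth_v].
  b, h, length, _ = q.shape
  dv = v.shape[-1]
  pad = (-length) % block_length

  def pad_and_block(x):
    x = np.pad(x, [(0, 0), (0, 0), (0, pad), (0, 0)])
    return x.reshape(b, h, -1, block_length, x.shape[-1])

  qb, kb, vb = pad_and_block(q), pad_and_block(k), pad_and_block(v)
  logits = np.einsum("bhnqd,bhnkd->bhnqk", qb, kb)
  causal = np.tril(np.ones((block_length, block_length), dtype=bool))
  logits = np.where(causal, logits, -1e9)  # hide positions to the right
  weights = np.exp(logits - logits.max(axis=-1, keepdims=True))
  weights /= weights.sum(axis=-1, keepdims=True)
  out = np.einsum("bhnqk,bhnkd->bhnqd", weights, vb)
  # Merge the blocks back and drop any padding.
  return out.reshape(b, h, -1, dv)[:, :, :length, :]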
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
_relative_position_to_absolute_position_unmasked
def _relative_position_to_absolute_position_unmasked(x):
  """Converts tensor from relative to absolute indexing for local attention.

  Args:
    x: a Tensor of shape [batch (or batch*num_blocks), heads,
      length, 2 * length - 1]

  Returns:
    A Tensor of shape [batch (or batch*num_blocks), heads, length, length-1]
  """
  x_shape = common_layers.shape_list(x)
  batch = x_shape[0]
  heads = x_shape[1]
  length = x_shape[2]
  # Concat columns of pad to shift from relative to absolute indexing.
  col_pad = tf.zeros((batch, heads, length, 1))
  x = tf.concat([x, col_pad], axis=3)
  # Concat extra elements so to add up to shape (len+1, 2*len-1).
  flat_x = tf.reshape(x, [batch, heads, length * 2 * length])
  flat_pad = tf.zeros((batch, heads, length-1))
  flat_x_padded = tf.concat([flat_x, flat_pad], axis=2)
  # Reshape and slice out the padded elements.
  final_x = tf.reshape(flat_x_padded, [batch, heads, length+1, 2*length-1])
  final_x = final_x[:, :, :, length-1:]
  final_x = final_x[:, :, :length, :]
  return final_x
python
def _relative_position_to_absolute_position_unmasked(x):
  """Converts tensor from relative to absolute indexing for local attention.

  Args:
    x: a Tensor of shape [batch (or batch*num_blocks), heads,
      length, 2 * length - 1]

  Returns:
    A Tensor of shape [batch (or batch*num_blocks), heads, length, length-1]
  """
  x_shape = common_layers.shape_list(x)
  batch = x_shape[0]
  heads = x_shape[1]
  length = x_shape[2]
  # Concat columns of pad to shift from relative to absolute indexing.
  col_pad = tf.zeros((batch, heads, length, 1))
  x = tf.concat([x, col_pad], axis=3)
  # Concat extra elements so to add up to shape (len+1, 2*len-1).
  flat_x = tf.reshape(x, [batch, heads, length * 2 * length])
  flat_pad = tf.zeros((batch, heads, length-1))
  flat_x_padded = tf.concat([flat_x, flat_pad], axis=2)
  # Reshape and slice out the padded elements.
  final_x = tf.reshape(flat_x_padded, [batch, heads, length+1, 2*length-1])
  final_x = final_x[:, :, :, length-1:]
  final_x = final_x[:, :, :length, :]
  return final_x
[ "def", "_relative_position_to_absolute_position_unmasked", "(", "x", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "batch", "=", "x_shape", "[", "0", "]", "heads", "=", "x_shape", "[", "1", "]", "length", "=", "x_shape", "[", "2", "]", "# Concat columns of pad to shift from relative to absolute indexing.", "col_pad", "=", "tf", ".", "zeros", "(", "(", "batch", ",", "heads", ",", "length", ",", "1", ")", ")", "x", "=", "tf", ".", "concat", "(", "[", "x", ",", "col_pad", "]", ",", "axis", "=", "3", ")", "# Concat extra elements so to add up to shape (len+1, 2*len-1).", "flat_x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "batch", ",", "heads", ",", "length", "*", "2", "*", "length", "]", ")", "flat_pad", "=", "tf", ".", "zeros", "(", "(", "batch", ",", "heads", ",", "length", "-", "1", ")", ")", "flat_x_padded", "=", "tf", ".", "concat", "(", "[", "flat_x", ",", "flat_pad", "]", ",", "axis", "=", "2", ")", "# Reshape and slice out the padded elements.", "final_x", "=", "tf", ".", "reshape", "(", "flat_x_padded", ",", "[", "batch", ",", "heads", ",", "length", "+", "1", ",", "2", "*", "length", "-", "1", "]", ")", "final_x", "=", "final_x", "[", ":", ",", ":", ",", ":", ",", "length", "-", "1", ":", "]", "final_x", "=", "final_x", "[", ":", ",", ":", ",", ":", "length", ",", ":", "]", "return", "final_x" ]
Converts tensor from relative to absolute indexing for local attention.

Args:
  x: a Tensor of shape [batch (or batch*num_blocks), heads, length,
    2 * length - 1]

Returns:
  A Tensor of shape [batch (or batch*num_blocks), heads, length, length-1]
[ "Converts", "tensor", "from", "relative", "to", "aboslute", "indexing", "for", "local", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2674-L2701
train
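The pad-reshape-slice trick in the record above is easiest to see on a tiny example. The following NumPy mirror (an illustration, not the library code) shows how the relative-position axis is realigned so that entry [i, j] of the output picks up the relative logit for offset j - i:

import numpy as np

def rel_to_abs_unmasked(x):
  # x: [batch, heads, length, 2*length - 1], last axis indexes relative
  # offsets -(length-1) .. (length-1).
  batch, heads, length, _ = x.shape
  # Pad one column, flatten, pad length-1 extra elements, then reshape so
  # each row ends up shifted by one position relative to the row above it.
  x = np.concatenate([x, np.zeros((batch, heads, length, 1))], axis=-1)
  flat = x.reshape(batch, heads, length * 2 * length)
  flat = np.concatenate([flat, np.zeros((batch, heads, length - 1))], axis=-1)
  final = flat.reshape(batch, heads, length + 1, 2 * length - 1)
  return final[:, :, :length, length - 1:]

# Each of the 3 query rows holds the relative logits [0, 1, 2, 3, 4].
x = np.arange(5, dtype=float).reshape(1, 1, 1, 5).repeat(3, axis=2)
print(rel_to_abs_unmasked(x)[0, 0])
# [[2. 3. 4.]
#  [1. 2. 3.]
#  [0. 1. 2.]]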
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
masked_local_attention_1d
def masked_local_attention_1d(q, k, v, block_length=128, make_image_summary=False, dropout_rate=0., name=None): """Attention to the source position and a neighborhood to the left of it. The sequence is divided into blocks of length block_length. Attention for a given query position can only see memory positions less than or equal to the query position, in the corresponding block and the previous block. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer make_image_summary: a boolean, whether to make an attention image summary. dropout_rate: Dropout rate for attention dropout name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v] """ with tf.variable_scope( name, default_name="local_attention_1d", values=[q, k, v]): batch, heads, length, depth_k = common_layers.shape_list(q) depth_v = common_layers.shape_list(v)[-1] if isinstance(block_length, tf.Tensor): const = tf.contrib.util.constant_value(block_length) if const is not None: block_length = int(const) # If (length < 2 * block_length), then we use only one block. if isinstance(length, int) and isinstance(block_length, int): block_length = length if length < block_length * 2 else block_length else: block_length = tf.where( tf.less(length, block_length * 2), length, block_length) # Pad query, key, value to ensure multiple of block length. original_length = length padding_size = tf.mod(-length, block_length) length += padding_size padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] q = tf.pad(q, padding) k = tf.pad(k, padding) v = tf.pad(v, padding) if isinstance(length, int) and isinstance(block_length, int): num_blocks = length // block_length else: num_blocks = tf.div(length, block_length) # Compute attention for the first query block. first_q = tf.slice(q, [0, 0, 0, 0], [-1, -1, block_length, -1]) first_k = tf.slice(k, [0, 0, 0, 0], [-1, -1, block_length, -1]) first_v = tf.slice(v, [0, 0, 0, 0], [-1, -1, block_length, -1]) first_output = dot_product_attention( first_q, first_k, first_v, attention_bias_lower_triangle(block_length), dropout_rate=dropout_rate, make_image_summary=make_image_summary, name="first_block") # Compute attention for all subsequent query blocks. q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) local_k = _make_local_block(k, depth_k, batch, heads, num_blocks, block_length) local_v = _make_local_block(v, depth_v, batch, heads, num_blocks, block_length) tail_q = tf.slice(q, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1]) tail_q = tf.reshape(tail_q, [batch, heads, num_blocks - 1, block_length, depth_k]) local_length = common_layers.shape_list(local_k)[3] # make sure source_pos <= target_pos good_part = common_layers.ones_matrix_band_part( block_length, local_length, -1, block_length, out_shape=[1, 1, 1, block_length, local_length]) bias = (1.0 - good_part) * -1e9 # TODO(noam): figure out how to show a summary for the remaining blocks. # The naive way currently causes errors due to empty tensors. 
# output: [batch, heads, num_blocks-1, block_length, depth_v] tail_output = dot_product_attention( tail_q, local_k, local_v, bias, dropout_rate=dropout_rate, make_image_summary=False, name="tail_block") tail_output = tf.reshape( tail_output, [batch, heads, (num_blocks - 1) * block_length, depth_v]) output = tf.concat([first_output, tail_output], axis=2) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output = tf.reshape(output, [batch, heads, original_length, depth_v]) return output
python
def masked_local_attention_1d(q, k, v, block_length=128, make_image_summary=False, dropout_rate=0., name=None): """Attention to the source position and a neighborhood to the left of it. The sequence is divided into blocks of length block_length. Attention for a given query position can only see memory positions less than or equal to the query position, in the corresponding block and the previous block. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer make_image_summary: a boolean, whether to make an attention image summary. dropout_rate: Dropout rate for attention dropout name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v] """ with tf.variable_scope( name, default_name="local_attention_1d", values=[q, k, v]): batch, heads, length, depth_k = common_layers.shape_list(q) depth_v = common_layers.shape_list(v)[-1] if isinstance(block_length, tf.Tensor): const = tf.contrib.util.constant_value(block_length) if const is not None: block_length = int(const) # If (length < 2 * block_length), then we use only one block. if isinstance(length, int) and isinstance(block_length, int): block_length = length if length < block_length * 2 else block_length else: block_length = tf.where( tf.less(length, block_length * 2), length, block_length) # Pad query, key, value to ensure multiple of block length. original_length = length padding_size = tf.mod(-length, block_length) length += padding_size padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] q = tf.pad(q, padding) k = tf.pad(k, padding) v = tf.pad(v, padding) if isinstance(length, int) and isinstance(block_length, int): num_blocks = length // block_length else: num_blocks = tf.div(length, block_length) # Compute attention for the first query block. first_q = tf.slice(q, [0, 0, 0, 0], [-1, -1, block_length, -1]) first_k = tf.slice(k, [0, 0, 0, 0], [-1, -1, block_length, -1]) first_v = tf.slice(v, [0, 0, 0, 0], [-1, -1, block_length, -1]) first_output = dot_product_attention( first_q, first_k, first_v, attention_bias_lower_triangle(block_length), dropout_rate=dropout_rate, make_image_summary=make_image_summary, name="first_block") # Compute attention for all subsequent query blocks. q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) local_k = _make_local_block(k, depth_k, batch, heads, num_blocks, block_length) local_v = _make_local_block(v, depth_v, batch, heads, num_blocks, block_length) tail_q = tf.slice(q, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1]) tail_q = tf.reshape(tail_q, [batch, heads, num_blocks - 1, block_length, depth_k]) local_length = common_layers.shape_list(local_k)[3] # make sure source_pos <= target_pos good_part = common_layers.ones_matrix_band_part( block_length, local_length, -1, block_length, out_shape=[1, 1, 1, block_length, local_length]) bias = (1.0 - good_part) * -1e9 # TODO(noam): figure out how to show a summary for the remaining blocks. # The naive way currently causes errors due to empty tensors. 
# output: [batch, heads, num_blocks-1, block_length, depth_v] tail_output = dot_product_attention( tail_q, local_k, local_v, bias, dropout_rate=dropout_rate, make_image_summary=False, name="tail_block") tail_output = tf.reshape( tail_output, [batch, heads, (num_blocks - 1) * block_length, depth_v]) output = tf.concat([first_output, tail_output], axis=2) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output = tf.reshape(output, [batch, heads, original_length, depth_v]) return output
[ "def", "masked_local_attention_1d", "(", "q", ",", "k", ",", "v", ",", "block_length", "=", "128", ",", "make_image_summary", "=", "False", ",", "dropout_rate", "=", "0.", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"local_attention_1d\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "batch", ",", "heads", ",", "length", ",", "depth_k", "=", "common_layers", ".", "shape_list", "(", "q", ")", "depth_v", "=", "common_layers", ".", "shape_list", "(", "v", ")", "[", "-", "1", "]", "if", "isinstance", "(", "block_length", ",", "tf", ".", "Tensor", ")", ":", "const", "=", "tf", ".", "contrib", ".", "util", ".", "constant_value", "(", "block_length", ")", "if", "const", "is", "not", "None", ":", "block_length", "=", "int", "(", "const", ")", "# If (length < 2 * block_length), then we use only one block.", "if", "isinstance", "(", "length", ",", "int", ")", "and", "isinstance", "(", "block_length", ",", "int", ")", ":", "block_length", "=", "length", "if", "length", "<", "block_length", "*", "2", "else", "block_length", "else", ":", "block_length", "=", "tf", ".", "where", "(", "tf", ".", "less", "(", "length", ",", "block_length", "*", "2", ")", ",", "length", ",", "block_length", ")", "# Pad query, key, value to ensure multiple of block length.", "original_length", "=", "length", "padding_size", "=", "tf", ".", "mod", "(", "-", "length", ",", "block_length", ")", "length", "+=", "padding_size", "padding", "=", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "padding_size", "]", ",", "[", "0", ",", "0", "]", "]", "q", "=", "tf", ".", "pad", "(", "q", ",", "padding", ")", "k", "=", "tf", ".", "pad", "(", "k", ",", "padding", ")", "v", "=", "tf", ".", "pad", "(", "v", ",", "padding", ")", "if", "isinstance", "(", "length", ",", "int", ")", "and", "isinstance", "(", "block_length", ",", "int", ")", ":", "num_blocks", "=", "length", "//", "block_length", "else", ":", "num_blocks", "=", "tf", ".", "div", "(", "length", ",", "block_length", ")", "# Compute attention for the first query block.", "first_q", "=", "tf", ".", "slice", "(", "q", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "block_length", ",", "-", "1", "]", ")", "first_k", "=", "tf", ".", "slice", "(", "k", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "block_length", ",", "-", "1", "]", ")", "first_v", "=", "tf", ".", "slice", "(", "v", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "block_length", ",", "-", "1", "]", ")", "first_output", "=", "dot_product_attention", "(", "first_q", ",", "first_k", ",", "first_v", ",", "attention_bias_lower_triangle", "(", "block_length", ")", ",", "dropout_rate", "=", "dropout_rate", ",", "make_image_summary", "=", "make_image_summary", ",", "name", "=", "\"first_block\"", ")", "# Compute attention for all subsequent query blocks.", "q", "=", "tf", ".", "reshape", "(", "q", ",", "[", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ",", "depth_k", "]", ")", "k", "=", "tf", ".", "reshape", "(", "k", ",", "[", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ",", "depth_k", "]", ")", "v", "=", "tf", ".", "reshape", "(", "v", ",", "[", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ",", "depth_v", "]", ")", "local_k", "=", "_make_local_block", "(", "k", ",", "depth_k", ",", "batch", ",", "heads", ",", 
"num_blocks", ",", "block_length", ")", "local_v", "=", "_make_local_block", "(", "v", ",", "depth_v", ",", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ")", "tail_q", "=", "tf", ".", "slice", "(", "q", ",", "[", "0", ",", "0", ",", "1", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", "]", ")", "tail_q", "=", "tf", ".", "reshape", "(", "tail_q", ",", "[", "batch", ",", "heads", ",", "num_blocks", "-", "1", ",", "block_length", ",", "depth_k", "]", ")", "local_length", "=", "common_layers", ".", "shape_list", "(", "local_k", ")", "[", "3", "]", "# make sure source_pos <= target_pos", "good_part", "=", "common_layers", ".", "ones_matrix_band_part", "(", "block_length", ",", "local_length", ",", "-", "1", ",", "block_length", ",", "out_shape", "=", "[", "1", ",", "1", ",", "1", ",", "block_length", ",", "local_length", "]", ")", "bias", "=", "(", "1.0", "-", "good_part", ")", "*", "-", "1e9", "# TODO(noam): figure out how to show a summary for the remaining blocks.", "# The naive way currently causes errors due to empty tensors.", "# output: [batch, heads, num_blocks-1, block_length, depth_v]", "tail_output", "=", "dot_product_attention", "(", "tail_q", ",", "local_k", ",", "local_v", ",", "bias", ",", "dropout_rate", "=", "dropout_rate", ",", "make_image_summary", "=", "False", ",", "name", "=", "\"tail_block\"", ")", "tail_output", "=", "tf", ".", "reshape", "(", "tail_output", ",", "[", "batch", ",", "heads", ",", "(", "num_blocks", "-", "1", ")", "*", "block_length", ",", "depth_v", "]", ")", "output", "=", "tf", ".", "concat", "(", "[", "first_output", ",", "tail_output", "]", ",", "axis", "=", "2", ")", "# Remove the padding if introduced.", "output", "=", "tf", ".", "slice", "(", "output", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "original_length", ",", "-", "1", "]", ")", "output", "=", "tf", ".", "reshape", "(", "output", ",", "[", "batch", ",", "heads", ",", "original_length", ",", "depth_v", "]", ")", "return", "output" ]
Attention to the source position and a neighborhood to the left of it.

The sequence is divided into blocks of length block_length. Attention for a
given query position can only see memory positions less than or equal to the
query position, in the corresponding block and the previous block.

Args:
  q: a Tensor with shape [batch, heads, length, depth_k]
  k: a Tensor with shape [batch, heads, length, depth_k]
  v: a Tensor with shape [batch, heads, length, depth_v]
  block_length: an integer
  make_image_summary: a boolean, whether to make an attention image summary.
  dropout_rate: Dropout rate for attention dropout
  name: an optional string

Returns:
  a Tensor of shape [batch, heads, length, depth_v]
[ "Attention", "to", "the", "source", "position", "and", "a", "neighborhood", "to", "the", "left", "of", "it", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2704-L2812
train
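A hedged usage sketch for masked_local_attention_1d, again assuming TF 1.x and the tensor2tensor import path from the record. The point of the blocking is that cost grows roughly as length * 2 * block_length instead of length squared for full attention.

import tensorflow as tf
from tensor2tensor.layers import common_attention

# Self-attention inputs: [batch, heads, length, depth].
q = tf.random_normal([2, 4, 1024, 64])
k = tf.random_normal([2, 4, 1024, 64])
v = tf.random_normal([2, 4, 1024, 64])
out = common_attention.masked_local_attention_1d(
    q, k, v, block_length=128, dropout_rate=0.0, name="local_attn")
# out: [2, 4, 1024, 64]; each query attends to itself, earlier positions in
# its own block, and all positions in the previous block.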
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
_make_local_block
def _make_local_block(x, depth, batch, heads, num_blocks, block_length):
  """Helper function to create a local version of the keys or values for 1d."""
  prev_block = tf.slice(x, [0, 0, 0, 0, 0],
                        [-1, -1, num_blocks - 1, -1, -1])
  cur_block = tf.slice(x, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1])
  local_block = tf.concat([prev_block, cur_block], 3)
  return tf.reshape(local_block,
                    [batch, heads, num_blocks - 1, block_length * 2, depth])
python
def _make_local_block(x, depth, batch, heads, num_blocks, block_length):
  """Helper function to create a local version of the keys or values for 1d."""
  prev_block = tf.slice(x, [0, 0, 0, 0, 0],
                        [-1, -1, num_blocks - 1, -1, -1])
  cur_block = tf.slice(x, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1])
  local_block = tf.concat([prev_block, cur_block], 3)
  return tf.reshape(local_block,
                    [batch, heads, num_blocks - 1, block_length * 2, depth])
[ "def", "_make_local_block", "(", "x", ",", "depth", ",", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ")", ":", "prev_block", "=", "tf", ".", "slice", "(", "x", ",", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "num_blocks", "-", "1", ",", "-", "1", ",", "-", "1", "]", ")", "cur_block", "=", "tf", ".", "slice", "(", "x", ",", "[", "0", ",", "0", ",", "1", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", "]", ")", "local_block", "=", "tf", ".", "concat", "(", "[", "prev_block", ",", "cur_block", "]", ",", "3", ")", "return", "tf", ".", "reshape", "(", "local_block", ",", "[", "batch", ",", "heads", ",", "num_blocks", "-", "1", ",", "block_length", "*", "2", ",", "depth", "]", ")" ]
Helper function to create a local version of the keys or values for 1d.
[ "Helper", "function", "to", "create", "a", "local", "version", "of", "the", "keys", "or", "values", "for", "1d", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2815-L2822
train
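The helper above builds, for every query block after the first, the keys and values it is allowed to see: its own block concatenated with the block before it. A NumPy sketch of the same layout (not the library code):

import numpy as np

def make_local_block_reference(x):
  # x: [batch, heads, num_blocks, block_length, depth]. Output block i is the
  # concatenation of input blocks i and i + 1, giving shape
  # [batch, heads, num_blocks - 1, 2 * block_length, depth].
  return np.concatenate([x[:, :, :-1], x[:, :, 1:]], axis=3)

x = np.arange(2 * 1 * 3 * 2 * 1).reshape(2, 1, 3, 2, 1)
print(make_local_block_reference(x).shape)  # (2, 1, 2, 4, 1)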
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
masked_relative_local_attention_1d
def masked_relative_local_attention_1d(q, k, v, block_length=128, make_image_summary=False, dropout_rate=0., heads_share_relative_embedding=False, add_relative_to_values=False, name=None): """Masked local 1d attention with relative positions. The sequence is divided into blocks of length block_size. Attention for a given query position can only see memory positions less than or equal to the query position, in the corresponding block and the previous block. If mask_right is True, then a target position cannot see greater source positions. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer make_image_summary: a boolean, whether to make an attention image summary. dropout_rate: Dropout rate for attention dropout heads_share_relative_embedding: a boolean for sharing relative embeddings. add_relative_to_values: a boolean for whether to add relative component to values. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v] Raises: ValueError: wwhen the name for the variable scope is not passed. """ if not name: raise ValueError("Name must be assigned since reuse for variable scope is " "set to tf.AUTO_REUSE, in order to reuse relative " "embeddings of keys and values.") # Reuse flag is set to auto_reuse to reuse relative embeddings of keys and # values across blocks (first and tail blocks). with tf.variable_scope( name, default_name="masked_relative_local_attention_1d", values=[q, k, v], reuse=tf.AUTO_REUSE): default_block_length = block_length batch = common_layers.shape_list(q)[0] heads = common_layers.shape_list(q)[1] length = common_layers.shape_list(q)[2] # If (length < 2 * block_length), then we use only one block. if isinstance(length, int) and isinstance(block_length, int): block_length = length if length < block_length * 2 else block_length else: block_length = tf.where( tf.less(length, block_length * 2), length, block_length) depth_k = common_layers.shape_list(k)[3] depth_v = common_layers.shape_list(v)[3] original_length = length padding_size = tf.mod(-length, block_length) length += padding_size padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] q = tf.pad(q, padding) k = tf.pad(k, padding) v = tf.pad(v, padding) num_blocks = length // block_length # compute attention for the first query block. first_q = tf.slice(q, [0, 0, 0, 0], [-1, -1, block_length, -1]) first_k = tf.slice(k, [0, 0, 0, 0], [-1, -1, block_length, -1]) first_v = tf.slice(v, [0, 0, 0, 0], [-1, -1, block_length, -1]) # Relative embeddings will be used later as well. # TODO(avaswani,annahuang): check why 2*bl was breaking for music # Needs to be known at static shape inference time, hence cannot be # 2 * block_length. rel_embed_length = 4 * default_block_length # We only multiply with the needed embeddings as we slice them out. 
first_rel_embeddings = get_relative_embeddings_left( rel_embed_length, block_length, depth_k, heads, heads_share_relative_embedding, "relative_embeddings") first_rel_logits = matmul_with_relative_keys( first_q, first_rel_embeddings, heads_share_relative_embedding) first_logits = tf.matmul(first_q, first_k, transpose_b=True) first_logits += ( _relative_position_to_absolute_position_masked(first_rel_logits)) # adding a mask first_logits += ( common_layers.cast_like(attention_bias_lower_triangle(block_length), first_logits)) first_att = tf.nn.softmax(first_logits, name="first_attention_weights") # dropping out the attention links for each of the heads first_att = common_layers.dropout_with_broadcast_dims( first_att, 1.0 - dropout_rate, broadcast_dims=None) # only call image summary for the first block if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(first_att, None) first_output = tf.matmul(first_att, first_v) # compute attention for all subsequent query blocks. q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) local_k = _make_local_block(k, depth_k, batch, heads, num_blocks, block_length) local_v = _make_local_block(v, depth_v, batch, heads, num_blocks, block_length) tail_q = tf.slice(q, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1]) tail_q = tf.reshape(tail_q, [batch, heads, num_blocks - 1, block_length, depth_k]) local_length = common_layers.shape_list(local_k)[3] # collapsing num blocks and batch size so that we can reuse # functions def _reshape_for_relative(x): x_shape = common_layers.shape_list(x) # [batch, num_blocks, heads, length, depth] x = tf.transpose(x, [0, 2, 1, 3, 4]) x = tf.reshape(x, [batch*x_shape[2], heads, x_shape[3], x_shape[4]]) return x rel_tail_q = _reshape_for_relative(tail_q) rel_k = _reshape_for_relative(local_k) rel_v = _reshape_for_relative(local_v) rel_embeddings = get_relative_embeddings_left( rel_embed_length, 2 * block_length, depth_k, heads, heads_share_relative_embedding, "relative_embeddings") rel_logits = matmul_with_relative_keys( rel_tail_q, rel_embeddings, heads_share_relative_embedding) # Computing relative logits separately for the masked and unmasked parts # because the reshaping logic is different for both masked_rel_logits = tf.slice(rel_logits, [0, 0, 0, block_length], [-1, -1, -1, -1]) masked_rel_logits = _relative_position_to_absolute_position_masked( masked_rel_logits) unmasked_rel_logits = tf.slice(rel_logits, [0, 0, 0, 0], [-1, -1, -1, 2*block_length-1]) unmasked_rel_logits = _relative_position_to_absolute_position_unmasked( unmasked_rel_logits) all_rel_logits = tf.concat([unmasked_rel_logits, masked_rel_logits], axis=3) all_logits = ( tf.matmul(rel_tail_q, rel_k, transpose_b=True) + all_rel_logits) # make sure source_pos <= target_pos good_part = common_layers.ones_matrix_band_part(block_length, local_length, -1, block_length) mask = (1.0 - good_part) * -1e9 mask = common_layers.cast_like(mask, all_logits) all_logits += tf.reshape(mask, [1, 1, block_length, local_length]) weights = tf.nn.softmax(all_logits, name="attention_weights") # [batch (* num_blocks), heads, query_length (=block_length), # key_length (=2*block_length)] weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=None) output = tf.matmul(weights, rel_v) if add_relative_to_values: # Adds the contribution of the weighted relative embeddings to 
the values. weights_for_unmasked, weights_for_masked = ( tf.split(weights, 2, axis=3)) rel_weights_unmasked = _absolute_position_to_relative_position_unmasked( weights_for_unmasked) rel_weights_masked = _absolute_position_to_relative_position_masked( weights_for_masked) value_rel_embeddings_unmasked = get_relative_embeddings_left( rel_embed_length, 2 * block_length, depth_v, heads, heads_share_relative_embedding, "value_relative_embeddings") # The unmasked part starts with index -1 as opposed 0 has take uptil last. if heads_share_relative_embedding: value_rel_embeddings_unmasked = value_rel_embeddings_unmasked[:-1, :] else: value_rel_embeddings_unmasked = value_rel_embeddings_unmasked[:, :-1, :] value_rel_embeddings_masked = get_relative_embeddings_left( rel_embed_length, block_length, depth_v, heads, heads_share_relative_embedding, "value_relative_embeddings") # [batch (*num_blocks), heads, query length, key length] rel_weights = tf.concat( [rel_weights_unmasked, rel_weights_masked], axis=3) if heads_share_relative_embedding: value_rel_embeddings_concat_axis = 0 else: value_rel_embeddings_concat_axis = 1 value_rel_embeddings = tf.concat( [value_rel_embeddings_unmasked, value_rel_embeddings_masked], axis=value_rel_embeddings_concat_axis) output_rel = matmul_with_relative_values( rel_weights, value_rel_embeddings, heads_share_relative_embedding) output += output_rel # bring to [batch, heads, num_blocks-1, block_length, depth] output = tf.reshape(output, [batch, num_blocks-1, heads, block_length, depth_v]) output = tf.transpose(output, [0, 2, 1, 3, 4]) output = tf.reshape( output, [batch, heads, (num_blocks - 1) * block_length, depth_v]) output = tf.concat([first_output, output], axis=2) output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output = tf.reshape(output, [batch, heads, original_length, depth_v]) return output
python
def masked_relative_local_attention_1d(q, k, v, block_length=128, make_image_summary=False, dropout_rate=0., heads_share_relative_embedding=False, add_relative_to_values=False, name=None): """Masked local 1d attention with relative positions. The sequence is divided into blocks of length block_size. Attention for a given query position can only see memory positions less than or equal to the query position, in the corresponding block and the previous block. If mask_right is True, then a target position cannot see greater source positions. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer make_image_summary: a boolean, whether to make an attention image summary. dropout_rate: Dropout rate for attention dropout heads_share_relative_embedding: a boolean for sharing relative embeddings. add_relative_to_values: a boolean for whether to add relative component to values. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v] Raises: ValueError: wwhen the name for the variable scope is not passed. """ if not name: raise ValueError("Name must be assigned since reuse for variable scope is " "set to tf.AUTO_REUSE, in order to reuse relative " "embeddings of keys and values.") # Reuse flag is set to auto_reuse to reuse relative embeddings of keys and # values across blocks (first and tail blocks). with tf.variable_scope( name, default_name="masked_relative_local_attention_1d", values=[q, k, v], reuse=tf.AUTO_REUSE): default_block_length = block_length batch = common_layers.shape_list(q)[0] heads = common_layers.shape_list(q)[1] length = common_layers.shape_list(q)[2] # If (length < 2 * block_length), then we use only one block. if isinstance(length, int) and isinstance(block_length, int): block_length = length if length < block_length * 2 else block_length else: block_length = tf.where( tf.less(length, block_length * 2), length, block_length) depth_k = common_layers.shape_list(k)[3] depth_v = common_layers.shape_list(v)[3] original_length = length padding_size = tf.mod(-length, block_length) length += padding_size padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] q = tf.pad(q, padding) k = tf.pad(k, padding) v = tf.pad(v, padding) num_blocks = length // block_length # compute attention for the first query block. first_q = tf.slice(q, [0, 0, 0, 0], [-1, -1, block_length, -1]) first_k = tf.slice(k, [0, 0, 0, 0], [-1, -1, block_length, -1]) first_v = tf.slice(v, [0, 0, 0, 0], [-1, -1, block_length, -1]) # Relative embeddings will be used later as well. # TODO(avaswani,annahuang): check why 2*bl was breaking for music # Needs to be known at static shape inference time, hence cannot be # 2 * block_length. rel_embed_length = 4 * default_block_length # We only multiply with the needed embeddings as we slice them out. 
first_rel_embeddings = get_relative_embeddings_left( rel_embed_length, block_length, depth_k, heads, heads_share_relative_embedding, "relative_embeddings") first_rel_logits = matmul_with_relative_keys( first_q, first_rel_embeddings, heads_share_relative_embedding) first_logits = tf.matmul(first_q, first_k, transpose_b=True) first_logits += ( _relative_position_to_absolute_position_masked(first_rel_logits)) # adding a mask first_logits += ( common_layers.cast_like(attention_bias_lower_triangle(block_length), first_logits)) first_att = tf.nn.softmax(first_logits, name="first_attention_weights") # dropping out the attention links for each of the heads first_att = common_layers.dropout_with_broadcast_dims( first_att, 1.0 - dropout_rate, broadcast_dims=None) # only call image summary for the first block if common_layers.should_generate_summaries() and make_image_summary: attention_image_summary(first_att, None) first_output = tf.matmul(first_att, first_v) # compute attention for all subsequent query blocks. q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) local_k = _make_local_block(k, depth_k, batch, heads, num_blocks, block_length) local_v = _make_local_block(v, depth_v, batch, heads, num_blocks, block_length) tail_q = tf.slice(q, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1]) tail_q = tf.reshape(tail_q, [batch, heads, num_blocks - 1, block_length, depth_k]) local_length = common_layers.shape_list(local_k)[3] # collapsing num blocks and batch size so that we can reuse # functions def _reshape_for_relative(x): x_shape = common_layers.shape_list(x) # [batch, num_blocks, heads, length, depth] x = tf.transpose(x, [0, 2, 1, 3, 4]) x = tf.reshape(x, [batch*x_shape[2], heads, x_shape[3], x_shape[4]]) return x rel_tail_q = _reshape_for_relative(tail_q) rel_k = _reshape_for_relative(local_k) rel_v = _reshape_for_relative(local_v) rel_embeddings = get_relative_embeddings_left( rel_embed_length, 2 * block_length, depth_k, heads, heads_share_relative_embedding, "relative_embeddings") rel_logits = matmul_with_relative_keys( rel_tail_q, rel_embeddings, heads_share_relative_embedding) # Computing relative logits separately for the masked and unmasked parts # because the reshaping logic is different for both masked_rel_logits = tf.slice(rel_logits, [0, 0, 0, block_length], [-1, -1, -1, -1]) masked_rel_logits = _relative_position_to_absolute_position_masked( masked_rel_logits) unmasked_rel_logits = tf.slice(rel_logits, [0, 0, 0, 0], [-1, -1, -1, 2*block_length-1]) unmasked_rel_logits = _relative_position_to_absolute_position_unmasked( unmasked_rel_logits) all_rel_logits = tf.concat([unmasked_rel_logits, masked_rel_logits], axis=3) all_logits = ( tf.matmul(rel_tail_q, rel_k, transpose_b=True) + all_rel_logits) # make sure source_pos <= target_pos good_part = common_layers.ones_matrix_band_part(block_length, local_length, -1, block_length) mask = (1.0 - good_part) * -1e9 mask = common_layers.cast_like(mask, all_logits) all_logits += tf.reshape(mask, [1, 1, block_length, local_length]) weights = tf.nn.softmax(all_logits, name="attention_weights") # [batch (* num_blocks), heads, query_length (=block_length), # key_length (=2*block_length)] weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=None) output = tf.matmul(weights, rel_v) if add_relative_to_values: # Adds the contribution of the weighted relative embeddings to 
the values. weights_for_unmasked, weights_for_masked = ( tf.split(weights, 2, axis=3)) rel_weights_unmasked = _absolute_position_to_relative_position_unmasked( weights_for_unmasked) rel_weights_masked = _absolute_position_to_relative_position_masked( weights_for_masked) value_rel_embeddings_unmasked = get_relative_embeddings_left( rel_embed_length, 2 * block_length, depth_v, heads, heads_share_relative_embedding, "value_relative_embeddings") # The unmasked part starts with index -1 as opposed 0 has take uptil last. if heads_share_relative_embedding: value_rel_embeddings_unmasked = value_rel_embeddings_unmasked[:-1, :] else: value_rel_embeddings_unmasked = value_rel_embeddings_unmasked[:, :-1, :] value_rel_embeddings_masked = get_relative_embeddings_left( rel_embed_length, block_length, depth_v, heads, heads_share_relative_embedding, "value_relative_embeddings") # [batch (*num_blocks), heads, query length, key length] rel_weights = tf.concat( [rel_weights_unmasked, rel_weights_masked], axis=3) if heads_share_relative_embedding: value_rel_embeddings_concat_axis = 0 else: value_rel_embeddings_concat_axis = 1 value_rel_embeddings = tf.concat( [value_rel_embeddings_unmasked, value_rel_embeddings_masked], axis=value_rel_embeddings_concat_axis) output_rel = matmul_with_relative_values( rel_weights, value_rel_embeddings, heads_share_relative_embedding) output += output_rel # bring to [batch, heads, num_blocks-1, block_length, depth] output = tf.reshape(output, [batch, num_blocks-1, heads, block_length, depth_v]) output = tf.transpose(output, [0, 2, 1, 3, 4]) output = tf.reshape( output, [batch, heads, (num_blocks - 1) * block_length, depth_v]) output = tf.concat([first_output, output], axis=2) output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output = tf.reshape(output, [batch, heads, original_length, depth_v]) return output
[ "def", "masked_relative_local_attention_1d", "(", "q", ",", "k", ",", "v", ",", "block_length", "=", "128", ",", "make_image_summary", "=", "False", ",", "dropout_rate", "=", "0.", ",", "heads_share_relative_embedding", "=", "False", ",", "add_relative_to_values", "=", "False", ",", "name", "=", "None", ")", ":", "if", "not", "name", ":", "raise", "ValueError", "(", "\"Name must be assigned since reuse for variable scope is \"", "\"set to tf.AUTO_REUSE, in order to reuse relative \"", "\"embeddings of keys and values.\"", ")", "# Reuse flag is set to auto_reuse to reuse relative embeddings of keys and", "# values across blocks (first and tail blocks).", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"masked_relative_local_attention_1d\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", ":", "default_block_length", "=", "block_length", "batch", "=", "common_layers", ".", "shape_list", "(", "q", ")", "[", "0", "]", "heads", "=", "common_layers", ".", "shape_list", "(", "q", ")", "[", "1", "]", "length", "=", "common_layers", ".", "shape_list", "(", "q", ")", "[", "2", "]", "# If (length < 2 * block_length), then we use only one block.", "if", "isinstance", "(", "length", ",", "int", ")", "and", "isinstance", "(", "block_length", ",", "int", ")", ":", "block_length", "=", "length", "if", "length", "<", "block_length", "*", "2", "else", "block_length", "else", ":", "block_length", "=", "tf", ".", "where", "(", "tf", ".", "less", "(", "length", ",", "block_length", "*", "2", ")", ",", "length", ",", "block_length", ")", "depth_k", "=", "common_layers", ".", "shape_list", "(", "k", ")", "[", "3", "]", "depth_v", "=", "common_layers", ".", "shape_list", "(", "v", ")", "[", "3", "]", "original_length", "=", "length", "padding_size", "=", "tf", ".", "mod", "(", "-", "length", ",", "block_length", ")", "length", "+=", "padding_size", "padding", "=", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "padding_size", "]", ",", "[", "0", ",", "0", "]", "]", "q", "=", "tf", ".", "pad", "(", "q", ",", "padding", ")", "k", "=", "tf", ".", "pad", "(", "k", ",", "padding", ")", "v", "=", "tf", ".", "pad", "(", "v", ",", "padding", ")", "num_blocks", "=", "length", "//", "block_length", "# compute attention for the first query block.", "first_q", "=", "tf", ".", "slice", "(", "q", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "block_length", ",", "-", "1", "]", ")", "first_k", "=", "tf", ".", "slice", "(", "k", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "block_length", ",", "-", "1", "]", ")", "first_v", "=", "tf", ".", "slice", "(", "v", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "block_length", ",", "-", "1", "]", ")", "# Relative embeddings will be used later as well.", "# TODO(avaswani,annahuang): check why 2*bl was breaking for music", "# Needs to be known at static shape inference time, hence cannot be", "# 2 * block_length.", "rel_embed_length", "=", "4", "*", "default_block_length", "# We only multiply with the needed embeddings as we slice them out.", "first_rel_embeddings", "=", "get_relative_embeddings_left", "(", "rel_embed_length", ",", "block_length", ",", "depth_k", ",", "heads", ",", "heads_share_relative_embedding", ",", "\"relative_embeddings\"", ")", "first_rel_logits", "=", "matmul_with_relative_keys", "(", "first_q", ",", 
"first_rel_embeddings", ",", "heads_share_relative_embedding", ")", "first_logits", "=", "tf", ".", "matmul", "(", "first_q", ",", "first_k", ",", "transpose_b", "=", "True", ")", "first_logits", "+=", "(", "_relative_position_to_absolute_position_masked", "(", "first_rel_logits", ")", ")", "# adding a mask", "first_logits", "+=", "(", "common_layers", ".", "cast_like", "(", "attention_bias_lower_triangle", "(", "block_length", ")", ",", "first_logits", ")", ")", "first_att", "=", "tf", ".", "nn", ".", "softmax", "(", "first_logits", ",", "name", "=", "\"first_attention_weights\"", ")", "# dropping out the attention links for each of the heads", "first_att", "=", "common_layers", ".", "dropout_with_broadcast_dims", "(", "first_att", ",", "1.0", "-", "dropout_rate", ",", "broadcast_dims", "=", "None", ")", "# only call image summary for the first block", "if", "common_layers", ".", "should_generate_summaries", "(", ")", "and", "make_image_summary", ":", "attention_image_summary", "(", "first_att", ",", "None", ")", "first_output", "=", "tf", ".", "matmul", "(", "first_att", ",", "first_v", ")", "# compute attention for all subsequent query blocks.", "q", "=", "tf", ".", "reshape", "(", "q", ",", "[", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ",", "depth_k", "]", ")", "k", "=", "tf", ".", "reshape", "(", "k", ",", "[", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ",", "depth_k", "]", ")", "v", "=", "tf", ".", "reshape", "(", "v", ",", "[", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ",", "depth_v", "]", ")", "local_k", "=", "_make_local_block", "(", "k", ",", "depth_k", ",", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ")", "local_v", "=", "_make_local_block", "(", "v", ",", "depth_v", ",", "batch", ",", "heads", ",", "num_blocks", ",", "block_length", ")", "tail_q", "=", "tf", ".", "slice", "(", "q", ",", "[", "0", ",", "0", ",", "1", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", "]", ")", "tail_q", "=", "tf", ".", "reshape", "(", "tail_q", ",", "[", "batch", ",", "heads", ",", "num_blocks", "-", "1", ",", "block_length", ",", "depth_k", "]", ")", "local_length", "=", "common_layers", ".", "shape_list", "(", "local_k", ")", "[", "3", "]", "# collapsing num blocks and batch size so that we can reuse", "# functions", "def", "_reshape_for_relative", "(", "x", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "# [batch, num_blocks, heads, length, depth]", "x", "=", "tf", ".", "transpose", "(", "x", ",", "[", "0", ",", "2", ",", "1", ",", "3", ",", "4", "]", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "batch", "*", "x_shape", "[", "2", "]", ",", "heads", ",", "x_shape", "[", "3", "]", ",", "x_shape", "[", "4", "]", "]", ")", "return", "x", "rel_tail_q", "=", "_reshape_for_relative", "(", "tail_q", ")", "rel_k", "=", "_reshape_for_relative", "(", "local_k", ")", "rel_v", "=", "_reshape_for_relative", "(", "local_v", ")", "rel_embeddings", "=", "get_relative_embeddings_left", "(", "rel_embed_length", ",", "2", "*", "block_length", ",", "depth_k", ",", "heads", ",", "heads_share_relative_embedding", ",", "\"relative_embeddings\"", ")", "rel_logits", "=", "matmul_with_relative_keys", "(", "rel_tail_q", ",", "rel_embeddings", ",", "heads_share_relative_embedding", ")", "# Computing relative logits separately for the masked and unmasked parts", "# because the reshaping logic is different for both", "masked_rel_logits", "=", "tf", 
".", "slice", "(", "rel_logits", ",", "[", "0", ",", "0", ",", "0", ",", "block_length", "]", ",", "[", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", "]", ")", "masked_rel_logits", "=", "_relative_position_to_absolute_position_masked", "(", "masked_rel_logits", ")", "unmasked_rel_logits", "=", "tf", ".", "slice", "(", "rel_logits", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "-", "1", ",", "2", "*", "block_length", "-", "1", "]", ")", "unmasked_rel_logits", "=", "_relative_position_to_absolute_position_unmasked", "(", "unmasked_rel_logits", ")", "all_rel_logits", "=", "tf", ".", "concat", "(", "[", "unmasked_rel_logits", ",", "masked_rel_logits", "]", ",", "axis", "=", "3", ")", "all_logits", "=", "(", "tf", ".", "matmul", "(", "rel_tail_q", ",", "rel_k", ",", "transpose_b", "=", "True", ")", "+", "all_rel_logits", ")", "# make sure source_pos <= target_pos", "good_part", "=", "common_layers", ".", "ones_matrix_band_part", "(", "block_length", ",", "local_length", ",", "-", "1", ",", "block_length", ")", "mask", "=", "(", "1.0", "-", "good_part", ")", "*", "-", "1e9", "mask", "=", "common_layers", ".", "cast_like", "(", "mask", ",", "all_logits", ")", "all_logits", "+=", "tf", ".", "reshape", "(", "mask", ",", "[", "1", ",", "1", ",", "block_length", ",", "local_length", "]", ")", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "all_logits", ",", "name", "=", "\"attention_weights\"", ")", "# [batch (* num_blocks), heads, query_length (=block_length),", "# key_length (=2*block_length)]", "weights", "=", "common_layers", ".", "dropout_with_broadcast_dims", "(", "weights", ",", "1.0", "-", "dropout_rate", ",", "broadcast_dims", "=", "None", ")", "output", "=", "tf", ".", "matmul", "(", "weights", ",", "rel_v", ")", "if", "add_relative_to_values", ":", "# Adds the contribution of the weighted relative embeddings to the values.", "weights_for_unmasked", ",", "weights_for_masked", "=", "(", "tf", ".", "split", "(", "weights", ",", "2", ",", "axis", "=", "3", ")", ")", "rel_weights_unmasked", "=", "_absolute_position_to_relative_position_unmasked", "(", "weights_for_unmasked", ")", "rel_weights_masked", "=", "_absolute_position_to_relative_position_masked", "(", "weights_for_masked", ")", "value_rel_embeddings_unmasked", "=", "get_relative_embeddings_left", "(", "rel_embed_length", ",", "2", "*", "block_length", ",", "depth_v", ",", "heads", ",", "heads_share_relative_embedding", ",", "\"value_relative_embeddings\"", ")", "# The unmasked part starts with index -1 as opposed 0 has take uptil last.", "if", "heads_share_relative_embedding", ":", "value_rel_embeddings_unmasked", "=", "value_rel_embeddings_unmasked", "[", ":", "-", "1", ",", ":", "]", "else", ":", "value_rel_embeddings_unmasked", "=", "value_rel_embeddings_unmasked", "[", ":", ",", ":", "-", "1", ",", ":", "]", "value_rel_embeddings_masked", "=", "get_relative_embeddings_left", "(", "rel_embed_length", ",", "block_length", ",", "depth_v", ",", "heads", ",", "heads_share_relative_embedding", ",", "\"value_relative_embeddings\"", ")", "# [batch (*num_blocks), heads, query length, key length]", "rel_weights", "=", "tf", ".", "concat", "(", "[", "rel_weights_unmasked", ",", "rel_weights_masked", "]", ",", "axis", "=", "3", ")", "if", "heads_share_relative_embedding", ":", "value_rel_embeddings_concat_axis", "=", "0", "else", ":", "value_rel_embeddings_concat_axis", "=", "1", "value_rel_embeddings", "=", "tf", ".", "concat", "(", "[", "value_rel_embeddings_unmasked", ",", 
"value_rel_embeddings_masked", "]", ",", "axis", "=", "value_rel_embeddings_concat_axis", ")", "output_rel", "=", "matmul_with_relative_values", "(", "rel_weights", ",", "value_rel_embeddings", ",", "heads_share_relative_embedding", ")", "output", "+=", "output_rel", "# bring to [batch, heads, num_blocks-1, block_length, depth]", "output", "=", "tf", ".", "reshape", "(", "output", ",", "[", "batch", ",", "num_blocks", "-", "1", ",", "heads", ",", "block_length", ",", "depth_v", "]", ")", "output", "=", "tf", ".", "transpose", "(", "output", ",", "[", "0", ",", "2", ",", "1", ",", "3", ",", "4", "]", ")", "output", "=", "tf", ".", "reshape", "(", "output", ",", "[", "batch", ",", "heads", ",", "(", "num_blocks", "-", "1", ")", "*", "block_length", ",", "depth_v", "]", ")", "output", "=", "tf", ".", "concat", "(", "[", "first_output", ",", "output", "]", ",", "axis", "=", "2", ")", "output", "=", "tf", ".", "slice", "(", "output", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "original_length", ",", "-", "1", "]", ")", "output", "=", "tf", ".", "reshape", "(", "output", ",", "[", "batch", ",", "heads", ",", "original_length", ",", "depth_v", "]", ")", "return", "output" ]
Masked local 1d attention with relative positions. The sequence is divided into blocks of length block_length. Attention for a given query position can only see memory positions less than or equal to the query position, in the corresponding block and the previous block. Attention is always masked, so a target position cannot see greater source positions. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer make_image_summary: a boolean, whether to make an attention image summary. dropout_rate: Dropout rate for attention dropout heads_share_relative_embedding: a boolean for sharing relative embeddings. add_relative_to_values: a boolean for whether to add relative component to values. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v] Raises: ValueError: when the name for the variable scope is not passed.
[ "Masked", "local", "1d", "attention", "with", "relative", "positions", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L2825-L3033
train
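Since the record above documents masked_relative_local_attention_1d, a minimal usage sketch may help. It assumes TensorFlow 1.x graph mode and a tensor2tensor checkout matching this revision; the batch/head/length/depth sizes are purely illustrative. Note that a name is mandatory because the relative embeddings are created under reuse=tf.AUTO_REUSE.

import tensorflow as tf  # TF 1.x graph mode assumed
from tensor2tensor.layers import common_attention

# Illustrative shapes: [batch, heads, length, depth]; length need not be a
# multiple of block_length, padding is handled internally.
q = tf.random_normal([2, 4, 300, 32])
k = tf.random_normal([2, 4, 300, 32])
v = tf.random_normal([2, 4, 300, 32])

# A name is required so relative embeddings can be reused across blocks.
out = common_attention.masked_relative_local_attention_1d(
    q, k, v,
    block_length=128,
    heads_share_relative_embedding=True,
    name="rel_local_attn")

with tf.Session() as sess:
    # The call creates relative-embedding variables, so initialize first.
    sess.run(tf.global_variables_initializer())
    print(sess.run(out).shape)  # (2, 4, 300, 32)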
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
local_attention_1d
def local_attention_1d(q, k, v, block_length=128, filter_width=100, name=None): """Strided block local self-attention. The sequence is divided into blocks of length block_length. Attention for a given query position can see all memory positions in the corresponding block and filter_width many positions to the left and right of the block. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer filter_width: an integer indicating how much to look left and right of the block. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v] """ with tf.variable_scope( name, default_name="local_self_attention_1d", values=[q, k, v]): # Check that q, k, v have the same shape except in their depth dimension. q.get_shape()[:-1].assert_is_compatible_with(k.get_shape()[:-1]) q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) batch_size, num_heads, original_length, _ = common_layers.shape_list(q) # Pad query, key, value to ensure multiple of corresponding lengths. def pad_to_multiple(x, pad_length): x_length = common_layers.shape_list(x)[2] return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) def pad_l_and_r(x, pad_length): return tf.pad(x, [[0, 0], [0, 0], [pad_length, pad_length], [0, 0]]) # Set up query blocks. # [batch, heads, blocks_q, block_length, depth_k] q = pad_to_multiple(q, block_length) q = reshape_by_blocks(q, common_layers.shape_list(q), block_length) total_query_blocks = common_layers.shape_list(q)[2] # Set up key and value blocks. # [batch, heads, blocks_k, block_length, depth_k] blocks_per_filter_width = filter_width // block_length remaining_items = filter_width % block_length k = pad_to_multiple(k, block_length) v = pad_to_multiple(v, block_length) k = pad_l_and_r(k, filter_width + block_length - remaining_items) v = pad_l_and_r(v, filter_width + block_length - remaining_items) k = reshape_by_blocks(k, common_layers.shape_list(k), block_length) v = reshape_by_blocks(v, common_layers.shape_list(v), block_length) total_kv_blocks = common_layers.shape_list(k)[2] slices = [] # prepare the left-most and right-most partial blocks if needed if remaining_items: first_partial_block_k = tf.slice( k, [0, 0, 0, block_length - remaining_items, 0], [-1, -1, total_query_blocks, -1, -1]) first_partial_block_v = tf.slice( v, [0, 0, 0, block_length - remaining_items, 0], [-1, -1, total_query_blocks, -1, -1]) last_partial_block_k = tf.slice( k, [0, 0, total_kv_blocks - total_query_blocks, 0, 0], [-1, -1, -1, remaining_items, -1]) last_partial_block_v = tf.slice( v, [0, 0, total_kv_blocks - total_query_blocks, 0, 0], [-1, -1, -1, remaining_items, -1]) slices.append((first_partial_block_k, first_partial_block_v)) slices.append((last_partial_block_k, last_partial_block_v)) # Prepare the rest of the blocks first_block_index = 1 if remaining_items else 0 attention_blocks = 2 * blocks_per_filter_width + 1 for i in range(first_block_index, attention_blocks + first_block_index): block_k = tf.slice(k, [0, 0, i, 0, 0], [-1, -1, total_query_blocks, -1, -1]) block_v = tf.slice(v, [0, 0, i, 0, 0], [-1, -1, total_query_blocks, -1, -1]) slices.append((block_k, block_v)) # [batch, heads, blocks_q, block_length + 2 * filter_width, depth_k] k = tf.concat([s[0] for s in slices], axis=3) v = tf.concat([s[1] for s in slices], axis=3) attention_bias = tf.expand_dims(embedding_to_padding(k) * -1e9, axis=-2) depth_v = 
common_layers.shape_list(v)[-1] output = dot_product_attention( q, k, v, attention_bias, dropout_rate=0., name="local_1d", make_image_summary=False) output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in (batch_size, num_heads, original_length, depth_v)]) return output
python
def local_attention_1d(q, k, v, block_length=128, filter_width=100, name=None): """Strided block local self-attention. The sequence is divided into blocks of length block_length. Attention for a given query position can see all memory positions in the corresponding block and filter_width many positions to the left and right of the block. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer filter_width: an integer indicating how much to look left and right of the block. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v] """ with tf.variable_scope( name, default_name="local_self_attention_1d", values=[q, k, v]): # Check that q, k, v have the same shape except in their depth dimension. q.get_shape()[:-1].assert_is_compatible_with(k.get_shape()[:-1]) q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) batch_size, num_heads, original_length, _ = common_layers.shape_list(q) # Pad query, key, value to ensure multiple of corresponding lengths. def pad_to_multiple(x, pad_length): x_length = common_layers.shape_list(x)[2] return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) def pad_l_and_r(x, pad_length): return tf.pad(x, [[0, 0], [0, 0], [pad_length, pad_length], [0, 0]]) # Set up query blocks. # [batch, heads, blocks_q, block_length, depth_k] q = pad_to_multiple(q, block_length) q = reshape_by_blocks(q, common_layers.shape_list(q), block_length) total_query_blocks = common_layers.shape_list(q)[2] # Set up key and value blocks. # [batch, heads, blocks_k, block_length, depth_k] blocks_per_filter_width = filter_width // block_length remaining_items = filter_width % block_length k = pad_to_multiple(k, block_length) v = pad_to_multiple(v, block_length) k = pad_l_and_r(k, filter_width + block_length - remaining_items) v = pad_l_and_r(v, filter_width + block_length - remaining_items) k = reshape_by_blocks(k, common_layers.shape_list(k), block_length) v = reshape_by_blocks(v, common_layers.shape_list(v), block_length) total_kv_blocks = common_layers.shape_list(k)[2] slices = [] # prepare the left-most and right-most partial blocks if needed if remaining_items: first_partial_block_k = tf.slice( k, [0, 0, 0, block_length - remaining_items, 0], [-1, -1, total_query_blocks, -1, -1]) first_partial_block_v = tf.slice( v, [0, 0, 0, block_length - remaining_items, 0], [-1, -1, total_query_blocks, -1, -1]) last_partial_block_k = tf.slice( k, [0, 0, total_kv_blocks - total_query_blocks, 0, 0], [-1, -1, -1, remaining_items, -1]) last_partial_block_v = tf.slice( v, [0, 0, total_kv_blocks - total_query_blocks, 0, 0], [-1, -1, -1, remaining_items, -1]) slices.append((first_partial_block_k, first_partial_block_v)) slices.append((last_partial_block_k, last_partial_block_v)) # Prepare the rest of the blocks first_block_index = 1 if remaining_items else 0 attention_blocks = 2 * blocks_per_filter_width + 1 for i in range(first_block_index, attention_blocks + first_block_index): block_k = tf.slice(k, [0, 0, i, 0, 0], [-1, -1, total_query_blocks, -1, -1]) block_v = tf.slice(v, [0, 0, i, 0, 0], [-1, -1, total_query_blocks, -1, -1]) slices.append((block_k, block_v)) # [batch, heads, blocks_q, block_length + 2 * filter_width, depth_k] k = tf.concat([s[0] for s in slices], axis=3) v = tf.concat([s[1] for s in slices], axis=3) attention_bias = tf.expand_dims(embedding_to_padding(k) * -1e9, axis=-2) depth_v = 
common_layers.shape_list(v)[-1] output = dot_product_attention( q, k, v, attention_bias, dropout_rate=0., name="local_1d", make_image_summary=False) output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in (batch_size, num_heads, original_length, depth_v)]) return output
[ "def", "local_attention_1d", "(", "q", ",", "k", ",", "v", ",", "block_length", "=", "128", ",", "filter_width", "=", "100", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"local_self_attention_1d\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "# Check that q, k, v have the same shape except in their depth dimension.", "q", ".", "get_shape", "(", ")", "[", ":", "-", "1", "]", ".", "assert_is_compatible_with", "(", "k", ".", "get_shape", "(", ")", "[", ":", "-", "1", "]", ")", "q", ".", "get_shape", "(", ")", "[", ":", "-", "1", "]", ".", "assert_is_compatible_with", "(", "v", ".", "get_shape", "(", ")", "[", ":", "-", "1", "]", ")", "batch_size", ",", "num_heads", ",", "original_length", ",", "_", "=", "common_layers", ".", "shape_list", "(", "q", ")", "# Pad query, key, value to ensure multiple of corresponding lengths.", "def", "pad_to_multiple", "(", "x", ",", "pad_length", ")", ":", "x_length", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "2", "]", "return", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "-", "x_length", "%", "pad_length", "]", ",", "[", "0", ",", "0", "]", "]", ")", "def", "pad_l_and_r", "(", "x", ",", "pad_length", ")", ":", "return", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "pad_length", ",", "pad_length", "]", ",", "[", "0", ",", "0", "]", "]", ")", "# Set up query blocks.", "# [batch, heads, blocks_q, block_length, depth_k]", "q", "=", "pad_to_multiple", "(", "q", ",", "block_length", ")", "q", "=", "reshape_by_blocks", "(", "q", ",", "common_layers", ".", "shape_list", "(", "q", ")", ",", "block_length", ")", "total_query_blocks", "=", "common_layers", ".", "shape_list", "(", "q", ")", "[", "2", "]", "# Set up key and value blocks.", "# [batch, heads, blocks_k, block_length, depth_k]", "blocks_per_filter_width", "=", "filter_width", "//", "block_length", "remaining_items", "=", "filter_width", "%", "block_length", "k", "=", "pad_to_multiple", "(", "k", ",", "block_length", ")", "v", "=", "pad_to_multiple", "(", "v", ",", "block_length", ")", "k", "=", "pad_l_and_r", "(", "k", ",", "filter_width", "+", "block_length", "-", "remaining_items", ")", "v", "=", "pad_l_and_r", "(", "v", ",", "filter_width", "+", "block_length", "-", "remaining_items", ")", "k", "=", "reshape_by_blocks", "(", "k", ",", "common_layers", ".", "shape_list", "(", "k", ")", ",", "block_length", ")", "v", "=", "reshape_by_blocks", "(", "v", ",", "common_layers", ".", "shape_list", "(", "v", ")", ",", "block_length", ")", "total_kv_blocks", "=", "common_layers", ".", "shape_list", "(", "k", ")", "[", "2", "]", "slices", "=", "[", "]", "# prepare the left-most and right-most partial blocks if needed", "if", "remaining_items", ":", "first_partial_block_k", "=", "tf", ".", "slice", "(", "k", ",", "[", "0", ",", "0", ",", "0", ",", "block_length", "-", "remaining_items", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "total_query_blocks", ",", "-", "1", ",", "-", "1", "]", ")", "first_partial_block_v", "=", "tf", ".", "slice", "(", "v", ",", "[", "0", ",", "0", ",", "0", ",", "block_length", "-", "remaining_items", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "total_query_blocks", ",", "-", "1", ",", "-", "1", "]", ")", "last_partial_block_k", "=", "tf", ".", "slice", "(", "k", ",", "[", "0", ",", "0", ",", "total_kv_blocks", "-", 
"total_query_blocks", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "-", "1", ",", "remaining_items", ",", "-", "1", "]", ")", "last_partial_block_v", "=", "tf", ".", "slice", "(", "v", ",", "[", "0", ",", "0", ",", "total_kv_blocks", "-", "total_query_blocks", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "-", "1", ",", "remaining_items", ",", "-", "1", "]", ")", "slices", ".", "append", "(", "(", "first_partial_block_k", ",", "first_partial_block_v", ")", ")", "slices", ".", "append", "(", "(", "last_partial_block_k", ",", "last_partial_block_v", ")", ")", "# Prepare the rest of the blocks", "first_block_index", "=", "1", "if", "remaining_items", "else", "0", "attention_blocks", "=", "2", "*", "blocks_per_filter_width", "+", "1", "for", "i", "in", "range", "(", "first_block_index", ",", "attention_blocks", "+", "first_block_index", ")", ":", "block_k", "=", "tf", ".", "slice", "(", "k", ",", "[", "0", ",", "0", ",", "i", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "total_query_blocks", ",", "-", "1", ",", "-", "1", "]", ")", "block_v", "=", "tf", ".", "slice", "(", "v", ",", "[", "0", ",", "0", ",", "i", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "total_query_blocks", ",", "-", "1", ",", "-", "1", "]", ")", "slices", ".", "append", "(", "(", "block_k", ",", "block_v", ")", ")", "# [batch, heads, blocks_q, block_length + 2 * filter_width, depth_k]", "k", "=", "tf", ".", "concat", "(", "[", "s", "[", "0", "]", "for", "s", "in", "slices", "]", ",", "axis", "=", "3", ")", "v", "=", "tf", ".", "concat", "(", "[", "s", "[", "1", "]", "for", "s", "in", "slices", "]", ",", "axis", "=", "3", ")", "attention_bias", "=", "tf", ".", "expand_dims", "(", "embedding_to_padding", "(", "k", ")", "*", "-", "1e9", ",", "axis", "=", "-", "2", ")", "depth_v", "=", "common_layers", ".", "shape_list", "(", "v", ")", "[", "-", "1", "]", "output", "=", "dot_product_attention", "(", "q", ",", "k", ",", "v", ",", "attention_bias", ",", "dropout_rate", "=", "0.", ",", "name", "=", "\"local_1d\"", ",", "make_image_summary", "=", "False", ")", "output", "=", "tf", ".", "reshape", "(", "output", ",", "[", "batch_size", ",", "num_heads", ",", "-", "1", ",", "depth_v", "]", ")", "# Remove the padding if introduced.", "output", "=", "tf", ".", "slice", "(", "output", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "original_length", ",", "-", "1", "]", ")", "output", ".", "set_shape", "(", "[", "None", "if", "isinstance", "(", "dim", ",", "tf", ".", "Tensor", ")", "else", "dim", "for", "dim", "in", "(", "batch_size", ",", "num_heads", ",", "original_length", ",", "depth_v", ")", "]", ")", "return", "output" ]
Strided block local self-attention. The sequence is divided into blocks of length block_length. Attention for a given query position can see all memory positions in the corresponding block and filter_width many positions to the left and right of the block. Args: q: a Tensor with shape [batch, heads, length, depth_k] k: a Tensor with shape [batch, heads, length, depth_k] v: a Tensor with shape [batch, heads, length, depth_v] block_length: an integer filter_width: an integer indicating how much to look left and right of the block. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth_v]
[ "Strided", "block", "local", "self", "-", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3052-L3154
train
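A minimal sketch of calling local_attention_1d, again assuming TensorFlow 1.x and tensor2tensor are installed; the sizes are illustrative, and the sequence length deliberately is not a multiple of block_length to show that padding is applied and removed internally.

import tensorflow as tf  # TF 1.x assumed
from tensor2tensor.layers import common_attention

# [batch, heads, length, depth]
q = tf.random_normal([2, 8, 500, 64])
k = tf.random_normal([2, 8, 500, 64])
v = tf.random_normal([2, 8, 500, 64])

# Each query block attends to its own block plus filter_width positions
# on both sides.
out = common_attention.local_attention_1d(
    q, k, v, block_length=128, filter_width=100, name="local_1d")
# out has the same leading shape as the inputs: [2, 8, 500, 64]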
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
reshape_by_blocks
def reshape_by_blocks(x, x_shape, memory_block_size): """Reshapes input by splitting its length over blocks of memory_block_size. Args: x: a Tensor with shape [batch, heads, length, depth] x_shape: tf.TensorShape of x. memory_block_size: Integer which divides length. Returns: Tensor with shape [batch, heads, length // memory_block_size, memory_block_size, depth]. """ x = tf.reshape(x, [ x_shape[0], x_shape[1], x_shape[2] // memory_block_size, memory_block_size, x_shape[3] ]) return x
python
def reshape_by_blocks(x, x_shape, memory_block_size): """Reshapes input by splitting its length over blocks of memory_block_size. Args: x: a Tensor with shape [batch, heads, length, depth] x_shape: tf.TensorShape of x. memory_block_size: Integer which divides length. Returns: Tensor with shape [batch, heads, length // memory_block_size, memory_block_size, depth]. """ x = tf.reshape(x, [ x_shape[0], x_shape[1], x_shape[2] // memory_block_size, memory_block_size, x_shape[3] ]) return x
[ "def", "reshape_by_blocks", "(", "x", ",", "x_shape", ",", "memory_block_size", ")", ":", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "x_shape", "[", "0", "]", ",", "x_shape", "[", "1", "]", ",", "x_shape", "[", "2", "]", "//", "memory_block_size", ",", "memory_block_size", ",", "x_shape", "[", "3", "]", "]", ")", "return", "x" ]
Reshapes input by splitting its length over blocks of memory_block_size. Args: x: a Tensor with shape [batch, heads, length, depth] x_shape: tf.TensorShape of x. memory_block_size: Integer which divides length. Returns: Tensor with shape [batch, heads, length // memory_block_size, memory_block_size, depth].
[ "Reshapes", "input", "by", "splitting", "its", "length", "over", "blocks", "of", "memory_block_size", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3157-L3173
train
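reshape_by_blocks is a small helper used by the block-local attention functions in this file; the sketch below, assuming TensorFlow 1.x and illustrative shapes, shows the block split it performs.

import tensorflow as tf  # TF 1.x assumed
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers

x = tf.random_normal([2, 8, 256, 64])          # [batch, heads, length, depth]
x_blocks = common_attention.reshape_by_blocks(
    x, common_layers.shape_list(x), memory_block_size=64)
# x_blocks: [2, 8, 4, 64, 64], i.e.
# [batch, heads, length // memory_block_size, memory_block_size, depth]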
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
dilated_self_attention_1d
def dilated_self_attention_1d(q, k, v, query_block_size=128, memory_block_size=128, gap_size=2, num_memory_blocks=2, name=None): """Dilated self-attention. Args: q: a Tensor with shape [batch, heads, length, depth] k: a Tensor with shape [batch, heads, length, depth] v: a Tensor with shape [batch, heads, length, depth] query_block_size: an integer indicating size of query block memory_block_size: an integer indicating the size of a memory block. gap_size: an integer indicating the gap size num_memory_blocks: how many memory blocks to look at to the left and right. Each will be separated by gap_size. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth] """ with tf.variable_scope( name, default_name="dilated_self_attention_1d", values=[q, k, v]): v_list_shape = v.get_shape().as_list() assert v_list_shape == k.shape.as_list(), "K and V depths must be equal" v_shape = common_layers.shape_list(v) depth_v = v_shape[3] batch_size = v_shape[0] num_heads = v_shape[1] original_length = common_layers.shape_list(q)[2] # Pad query, key, value to ensure multiple of corresponding lengths. def pad_to_multiple(x, pad_length): x_length = common_layers.shape_list(x)[2] return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) def pad_l_and_r(x, pad_length): return tf.pad(x, [[0, 0], [0, 0], [pad_length, pad_length], [0, 0]]) q = pad_to_multiple(q, query_block_size) v = pad_to_multiple(v, query_block_size) k = pad_to_multiple(k, query_block_size) # Set up query blocks. new_q_shape = common_layers.shape_list(q) q = reshape_by_blocks(q, new_q_shape, query_block_size) self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size) self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size) # Set up key and value windows. k_v_padding = (gap_size + memory_block_size) * num_memory_blocks k = pad_l_and_r(k, k_v_padding) v = pad_l_and_r(v, k_v_padding) # Get gather indices. index_length = (new_q_shape[2] - query_block_size + memory_block_size) indices = tf.range(0, index_length, delta=1, name="index_range") indices = tf.reshape(indices, [1, -1, 1]) # [1, length, 1] for convs kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1) gather_indices = tf.nn.conv1d( tf.cast(indices, tf.float32), kernel, query_block_size, padding="VALID", name="gather_conv") gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0) # Get left and right memory blocks for each query. 
# [length, batch, heads, dim] k_t = tf.transpose(k, [2, 0, 1, 3]) v_t = tf.transpose(v, [2, 0, 1, 3]) left_k = gather_dilated_memory_blocks( k_t[:-k_v_padding, :, :, :], num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices) left_v = gather_dilated_memory_blocks( v_t[:-k_v_padding, :, :, :], num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices) right_k = gather_dilated_memory_blocks( k_t[k_v_padding:, :, :, :], num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices, direction="right") right_v = gather_dilated_memory_blocks( v_t[k_v_padding:, :, :, :], num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices, direction="right") k_windows = tf.concat([left_k, self_k_part, right_k], axis=3) v_windows = tf.concat([left_v, self_v_part, right_v], axis=3) attention_bias = tf.expand_dims( embedding_to_padding(k_windows) * -1e9, axis=-2) output = dot_product_attention( q, k_windows, v_windows, attention_bias, dropout_rate=0., name="dilated_1d", make_image_summary=False) output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output.set_shape(v_list_shape) return output
python
def dilated_self_attention_1d(q, k, v, query_block_size=128, memory_block_size=128, gap_size=2, num_memory_blocks=2, name=None): """Dilated self-attention. Args: q: a Tensor with shape [batch, heads, length, depth] k: a Tensor with shape [batch, heads, length, depth] v: a Tensor with shape [batch, heads, length, depth] query_block_size: an integer indicating size of query block memory_block_size: an integer indicating the size of a memory block. gap_size: an integer indicating the gap size num_memory_blocks: how many memory blocks to look at to the left and right. Each will be separated by gap_size. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth] """ with tf.variable_scope( name, default_name="dilated_self_attention_1d", values=[q, k, v]): v_list_shape = v.get_shape().as_list() assert v_list_shape == k.shape.as_list(), "K and V depths must be equal" v_shape = common_layers.shape_list(v) depth_v = v_shape[3] batch_size = v_shape[0] num_heads = v_shape[1] original_length = common_layers.shape_list(q)[2] # Pad query, key, value to ensure multiple of corresponding lengths. def pad_to_multiple(x, pad_length): x_length = common_layers.shape_list(x)[2] return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) def pad_l_and_r(x, pad_length): return tf.pad(x, [[0, 0], [0, 0], [pad_length, pad_length], [0, 0]]) q = pad_to_multiple(q, query_block_size) v = pad_to_multiple(v, query_block_size) k = pad_to_multiple(k, query_block_size) # Set up query blocks. new_q_shape = common_layers.shape_list(q) q = reshape_by_blocks(q, new_q_shape, query_block_size) self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size) self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size) # Set up key and value windows. k_v_padding = (gap_size + memory_block_size) * num_memory_blocks k = pad_l_and_r(k, k_v_padding) v = pad_l_and_r(v, k_v_padding) # Get gather indices. index_length = (new_q_shape[2] - query_block_size + memory_block_size) indices = tf.range(0, index_length, delta=1, name="index_range") indices = tf.reshape(indices, [1, -1, 1]) # [1, length, 1] for convs kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1) gather_indices = tf.nn.conv1d( tf.cast(indices, tf.float32), kernel, query_block_size, padding="VALID", name="gather_conv") gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0) # Get left and right memory blocks for each query. 
# [length, batch, heads, dim] k_t = tf.transpose(k, [2, 0, 1, 3]) v_t = tf.transpose(v, [2, 0, 1, 3]) left_k = gather_dilated_memory_blocks( k_t[:-k_v_padding, :, :, :], num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices) left_v = gather_dilated_memory_blocks( v_t[:-k_v_padding, :, :, :], num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices) right_k = gather_dilated_memory_blocks( k_t[k_v_padding:, :, :, :], num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices, direction="right") right_v = gather_dilated_memory_blocks( v_t[k_v_padding:, :, :, :], num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices, direction="right") k_windows = tf.concat([left_k, self_k_part, right_k], axis=3) v_windows = tf.concat([left_v, self_v_part, right_v], axis=3) attention_bias = tf.expand_dims( embedding_to_padding(k_windows) * -1e9, axis=-2) output = dot_product_attention( q, k_windows, v_windows, attention_bias, dropout_rate=0., name="dilated_1d", make_image_summary=False) output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output.set_shape(v_list_shape) return output
[ "def", "dilated_self_attention_1d", "(", "q", ",", "k", ",", "v", ",", "query_block_size", "=", "128", ",", "memory_block_size", "=", "128", ",", "gap_size", "=", "2", ",", "num_memory_blocks", "=", "2", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"dilated_self_attention_1d\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "v_list_shape", "=", "v", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "assert", "v_list_shape", "==", "k", ".", "shape", ".", "as_list", "(", ")", ",", "\"K and V depths must be equal\"", "v_shape", "=", "common_layers", ".", "shape_list", "(", "v", ")", "depth_v", "=", "v_shape", "[", "3", "]", "batch_size", "=", "v_shape", "[", "0", "]", "num_heads", "=", "v_shape", "[", "1", "]", "original_length", "=", "common_layers", ".", "shape_list", "(", "q", ")", "[", "2", "]", "# Pad query, key, value to ensure multiple of corresponding lengths.", "def", "pad_to_multiple", "(", "x", ",", "pad_length", ")", ":", "x_length", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "2", "]", "return", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "-", "x_length", "%", "pad_length", "]", ",", "[", "0", ",", "0", "]", "]", ")", "def", "pad_l_and_r", "(", "x", ",", "pad_length", ")", ":", "return", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "pad_length", ",", "pad_length", "]", ",", "[", "0", ",", "0", "]", "]", ")", "q", "=", "pad_to_multiple", "(", "q", ",", "query_block_size", ")", "v", "=", "pad_to_multiple", "(", "v", ",", "query_block_size", ")", "k", "=", "pad_to_multiple", "(", "k", ",", "query_block_size", ")", "# Set up query blocks.", "new_q_shape", "=", "common_layers", ".", "shape_list", "(", "q", ")", "q", "=", "reshape_by_blocks", "(", "q", ",", "new_q_shape", ",", "query_block_size", ")", "self_k_part", "=", "reshape_by_blocks", "(", "k", ",", "new_q_shape", ",", "query_block_size", ")", "self_v_part", "=", "reshape_by_blocks", "(", "v", ",", "new_q_shape", ",", "query_block_size", ")", "# Set up key and value windows.", "k_v_padding", "=", "(", "gap_size", "+", "memory_block_size", ")", "*", "num_memory_blocks", "k", "=", "pad_l_and_r", "(", "k", ",", "k_v_padding", ")", "v", "=", "pad_l_and_r", "(", "v", ",", "k_v_padding", ")", "# Get gather indices.", "index_length", "=", "(", "new_q_shape", "[", "2", "]", "-", "query_block_size", "+", "memory_block_size", ")", "indices", "=", "tf", ".", "range", "(", "0", ",", "index_length", ",", "delta", "=", "1", ",", "name", "=", "\"index_range\"", ")", "indices", "=", "tf", ".", "reshape", "(", "indices", ",", "[", "1", ",", "-", "1", ",", "1", "]", ")", "# [1, length, 1] for convs", "kernel", "=", "tf", ".", "expand_dims", "(", "tf", ".", "eye", "(", "memory_block_size", ")", ",", "axis", "=", "1", ")", "gather_indices", "=", "tf", ".", "nn", ".", "conv1d", "(", "tf", ".", "cast", "(", "indices", ",", "tf", ".", "float32", ")", ",", "kernel", ",", "query_block_size", ",", "padding", "=", "\"VALID\"", ",", "name", "=", "\"gather_conv\"", ")", "gather_indices", "=", "tf", ".", "squeeze", "(", "tf", ".", "cast", "(", "gather_indices", ",", "tf", ".", "int32", ")", ",", "axis", "=", "0", ")", "# Get left and right memory blocks for each query.", "# [length, batch, heads, dim]", "k_t", "=", "tf", ".", "transpose", "(", "k", ",", "[", "2", ",", "0", ",", "1", ",", "3", "]", ")", "v_t", "=", "tf", ".", 
"transpose", "(", "v", ",", "[", "2", ",", "0", ",", "1", ",", "3", "]", ")", "left_k", "=", "gather_dilated_memory_blocks", "(", "k_t", "[", ":", "-", "k_v_padding", ",", ":", ",", ":", ",", ":", "]", ",", "num_memory_blocks", ",", "gap_size", ",", "query_block_size", ",", "memory_block_size", ",", "gather_indices", ")", "left_v", "=", "gather_dilated_memory_blocks", "(", "v_t", "[", ":", "-", "k_v_padding", ",", ":", ",", ":", ",", ":", "]", ",", "num_memory_blocks", ",", "gap_size", ",", "query_block_size", ",", "memory_block_size", ",", "gather_indices", ")", "right_k", "=", "gather_dilated_memory_blocks", "(", "k_t", "[", "k_v_padding", ":", ",", ":", ",", ":", ",", ":", "]", ",", "num_memory_blocks", ",", "gap_size", ",", "query_block_size", ",", "memory_block_size", ",", "gather_indices", ",", "direction", "=", "\"right\"", ")", "right_v", "=", "gather_dilated_memory_blocks", "(", "v_t", "[", "k_v_padding", ":", ",", ":", ",", ":", ",", ":", "]", ",", "num_memory_blocks", ",", "gap_size", ",", "query_block_size", ",", "memory_block_size", ",", "gather_indices", ",", "direction", "=", "\"right\"", ")", "k_windows", "=", "tf", ".", "concat", "(", "[", "left_k", ",", "self_k_part", ",", "right_k", "]", ",", "axis", "=", "3", ")", "v_windows", "=", "tf", ".", "concat", "(", "[", "left_v", ",", "self_v_part", ",", "right_v", "]", ",", "axis", "=", "3", ")", "attention_bias", "=", "tf", ".", "expand_dims", "(", "embedding_to_padding", "(", "k_windows", ")", "*", "-", "1e9", ",", "axis", "=", "-", "2", ")", "output", "=", "dot_product_attention", "(", "q", ",", "k_windows", ",", "v_windows", ",", "attention_bias", ",", "dropout_rate", "=", "0.", ",", "name", "=", "\"dilated_1d\"", ",", "make_image_summary", "=", "False", ")", "output", "=", "tf", ".", "reshape", "(", "output", ",", "[", "batch_size", ",", "num_heads", ",", "-", "1", ",", "depth_v", "]", ")", "# Remove the padding if introduced.", "output", "=", "tf", ".", "slice", "(", "output", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "original_length", ",", "-", "1", "]", ")", "output", ".", "set_shape", "(", "v_list_shape", ")", "return", "output" ]
Dilated self-attention. Args: q: a Tensor with shape [batch, heads, length, depth] k: a Tensor with shape [batch, heads, length, depth] v: a Tensor with shape [batch, heads, length, depth] query_block_size: an integer indicating size of query block memory_block_size: an integer indicating the size of a memory block. gap_size: an integer indicating the gap size num_memory_blocks: how many memory blocks to look at to the left and right. Each will be separated by gap_size. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth]
[ "Dilated", "self", "-", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3176-L3293
train
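A hedged usage sketch for dilated_self_attention_1d, assuming TensorFlow 1.x; the block sizes, gap size and tensor shapes are illustrative. Because the function asserts that k and v have identical static shapes, the inputs below are built with fully defined shapes.

import tensorflow as tf  # TF 1.x assumed
from tensor2tensor.layers import common_attention

# Unmasked dilated attention: each query block sees its own block plus
# num_memory_blocks gapped memory blocks on each side.
q = tf.random_normal([1, 4, 512, 32])
k = tf.random_normal([1, 4, 512, 32])
v = tf.random_normal([1, 4, 512, 32])
out = common_attention.dilated_self_attention_1d(
    q, k, v,
    query_block_size=128, memory_block_size=128,
    gap_size=2, num_memory_blocks=2, name="dilated_1d")
# out keeps the input shape: [1, 4, 512, 32]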
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
gather_dilated_memory_blocks
def gather_dilated_memory_blocks(x, num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices, direction="left"): """Gathers blocks with gaps in between. Args: x: Tensor of shape [length, batch, heads, depth] num_memory_blocks: how many memory blocks to look in "direction". Each will be separated by gap_size. gap_size: an integer indicating the gap size query_block_size: an integer indicating size of query block memory_block_size: an integer indicating the size of a memory block. gather_indices: The indices to gather from. direction: left or right Returns: Tensor of shape [batch, heads, blocks, block_length, depth] """ gathered_blocks = [] # gathering memory blocks for block_id in range(num_memory_blocks): block_end_index = -(query_block_size + gap_size * (block_id + 1) + memory_block_size * block_id) block_start_index = ( (memory_block_size + gap_size) * (num_memory_blocks - (block_id + 1))) if direction != "left": [block_end_index, block_start_index] = [-block_start_index, -block_end_index] if block_end_index == 0: x_block = x[block_start_index:] else: x_block = x[block_start_index:block_end_index] def gather_dilated_1d_blocks(x, gather_indices): x_new = tf.gather(x, gather_indices) # [batch, heads, blocks, block_length, dim] return tf.transpose(x_new, [2, 3, 0, 1, 4]) gathered_blocks.append(gather_dilated_1d_blocks(x_block, gather_indices)) return tf.concat(gathered_blocks, 3)
python
def gather_dilated_memory_blocks(x, num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices, direction="left"): """Gathers blocks with gaps in between. Args: x: Tensor of shape [length, batch, heads, depth] num_memory_blocks: how many memory blocks to look in "direction". Each will be separated by gap_size. gap_size: an integer indicating the gap size query_block_size: an integer indicating size of query block memory_block_size: an integer indicating the size of a memory block. gather_indices: The indices to gather from. direction: left or right Returns: Tensor of shape [batch, heads, blocks, block_length, depth] """ gathered_blocks = [] # gathering memory blocks for block_id in range(num_memory_blocks): block_end_index = -(query_block_size + gap_size * (block_id + 1) + memory_block_size * block_id) block_start_index = ( (memory_block_size + gap_size) * (num_memory_blocks - (block_id + 1))) if direction != "left": [block_end_index, block_start_index] = [-block_start_index, -block_end_index] if block_end_index == 0: x_block = x[block_start_index:] else: x_block = x[block_start_index:block_end_index] def gather_dilated_1d_blocks(x, gather_indices): x_new = tf.gather(x, gather_indices) # [batch, heads, blocks, block_length, dim] return tf.transpose(x_new, [2, 3, 0, 1, 4]) gathered_blocks.append(gather_dilated_1d_blocks(x_block, gather_indices)) return tf.concat(gathered_blocks, 3)
[ "def", "gather_dilated_memory_blocks", "(", "x", ",", "num_memory_blocks", ",", "gap_size", ",", "query_block_size", ",", "memory_block_size", ",", "gather_indices", ",", "direction", "=", "\"left\"", ")", ":", "gathered_blocks", "=", "[", "]", "# gathering memory blocks", "for", "block_id", "in", "range", "(", "num_memory_blocks", ")", ":", "block_end_index", "=", "-", "(", "query_block_size", "+", "gap_size", "*", "(", "block_id", "+", "1", ")", "+", "memory_block_size", "*", "block_id", ")", "block_start_index", "=", "(", "(", "memory_block_size", "+", "gap_size", ")", "*", "(", "num_memory_blocks", "-", "(", "block_id", "+", "1", ")", ")", ")", "if", "direction", "!=", "\"left\"", ":", "[", "block_end_index", ",", "block_start_index", "]", "=", "[", "-", "block_start_index", ",", "-", "block_end_index", "]", "if", "block_end_index", "==", "0", ":", "x_block", "=", "x", "[", "block_start_index", ":", "]", "else", ":", "x_block", "=", "x", "[", "block_start_index", ":", "block_end_index", "]", "def", "gather_dilated_1d_blocks", "(", "x", ",", "gather_indices", ")", ":", "x_new", "=", "tf", ".", "gather", "(", "x", ",", "gather_indices", ")", "# [batch, heads, blocks, block_length, dim]", "return", "tf", ".", "transpose", "(", "x_new", ",", "[", "2", ",", "3", ",", "0", ",", "1", ",", "4", "]", ")", "gathered_blocks", ".", "append", "(", "gather_dilated_1d_blocks", "(", "x_block", ",", "gather_indices", ")", ")", "return", "tf", ".", "concat", "(", "gathered_blocks", ",", "3", ")" ]
Gathers blocks with gaps in between. Args: x: Tensor of shape [length, batch, heads, depth] num_memory_blocks: how many memory blocks to look in "direction". Each will be separated by gap_size. gap_size: an integer indicating the gap size query_block_size: an integer indicating size of query block memory_block_size: an integer indicating the size of a memory block. gather_indices: The indices to gather from. direction: left or right Returns: Tensor of shape [batch, heads, blocks, block_length, depth]
[ "Gathers", "blocks", "with", "gaps", "in", "between", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3296-L3339
train
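gather_dilated_memory_blocks is an internal helper, so a standalone sketch has to rebuild the gather indices the same way its callers do. The example below assumes TensorFlow 1.x and uses small hypothetical block sizes; x stands in for the already left-and-right-trimmed keys of shape [length, batch, heads, depth] that dilated_self_attention_1d passes in.

import tensorflow as tf  # TF 1.x assumed
from tensor2tensor.layers import common_attention

# Hypothetical sizes, mirroring how dilated_self_attention_1d prepares inputs.
query_block_size = memory_block_size = 4
gap_size, num_memory_blocks = 1, 2
length = 16                                    # multiple of query_block_size
k_v_padding = (gap_size + memory_block_size) * num_memory_blocks

# x plays the role of the padded keys: [length + k_v_padding, batch, heads, depth].
x = tf.random_normal([length + k_v_padding, 2, 2, 8])

# The caller builds gather indices with a strided identity convolution.
index_length = length - query_block_size + memory_block_size
indices = tf.reshape(tf.range(index_length), [1, -1, 1])
kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1)
gather_indices = tf.nn.conv1d(tf.cast(indices, tf.float32), kernel,
                              query_block_size, padding="VALID")
gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0)

# Result: [batch, heads, blocks, num_memory_blocks * memory_block_size, depth].
left_blocks = common_attention.gather_dilated_memory_blocks(
    x, num_memory_blocks, gap_size, query_block_size, memory_block_size,
    gather_indices)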
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
masked_dilated_self_attention_1d
def masked_dilated_self_attention_1d(q, k, v, query_block_size=64, memory_block_size=64, gap_size=2, num_memory_blocks=2, name=None): """Dilated self-attention. TODO(avaswani): Try it and write a paper on it. Args: q: a Tensor with shape [batch, heads, length, depth] k: a Tensor with shape [batch, heads, length, depth] v: a Tensor with shape [batch, heads, length, depth] query_block_size: an integer memory_block_size: an integer indicating how much to look left. gap_size: an integer indicating the gap size num_memory_blocks: how many memory blocks to look at to the left. Each will be separated by gap_size. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth] """ with tf.variable_scope( name, default_name="masked_dilated_self_attention_1d", values=[q, k, v]): v_list_shape = v.get_shape().as_list() assert v_list_shape == k.shape.as_list(), "K and V depths must be equal" v_shape = common_layers.shape_list(v) depth_v = v_shape[3] batch_size = v_shape[0] num_heads = v_shape[1] original_length = common_layers.shape_list(q)[2] # Pad query, key, value to ensure multiple of corresponding lengths. def pad_to_multiple(x, pad_length): x_length = common_layers.shape_list(x)[2] return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) def pad_l(x, left_pad_length): return tf.pad(x, [[0, 0], [0, 0], [left_pad_length, 0], [0, 0]]) q = pad_to_multiple(q, query_block_size) v = pad_to_multiple(v, query_block_size) k = pad_to_multiple(k, query_block_size) # Set up query blocks. new_q_shape = common_layers.shape_list(q) q = reshape_by_blocks(q, new_q_shape, query_block_size) # Set up key and value windows. self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size) self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size) k_v_padding = (gap_size + memory_block_size) * num_memory_blocks k = pad_l(k, k_v_padding) v = pad_l(v, k_v_padding) # Get gather indices. index_length = (new_q_shape[2] - query_block_size + memory_block_size) indices = tf.range(0, index_length, delta=1, name="index_range") indices = tf.reshape(indices, [1, -1, 1]) # [1, length, 1] for convs kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1) gather_indices = tf.nn.conv1d( tf.cast(indices, tf.float32), kernel, query_block_size, padding="VALID", name="gather_conv") gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0) # Get left and right memory blocks for each query. # [length, batch, heads, dim] k_t = tf.transpose(k, [2, 0, 1, 3]) v_t = tf.transpose(v, [2, 0, 1, 3]) k_unmasked_windows = gather_dilated_memory_blocks( k_t, num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices) v_unmasked_windows = gather_dilated_memory_blocks( v_t, num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices) # Combine memory windows. 
block_q_shape = common_layers.shape_list(q) masked_attention_bias = tf.tile( tf.expand_dims(attention_bias_lower_triangle(query_block_size), axis=0), [block_q_shape[0], block_q_shape[1], block_q_shape[2], 1, 1]) padding_attention_bias = tf.expand_dims( embedding_to_padding(k_unmasked_windows) * -1e9, axis=-2) padding_attention_bias = tf.tile(padding_attention_bias, [1, 1, 1, query_block_size, 1]) attention_bias = tf.concat( [masked_attention_bias, padding_attention_bias], axis=-1) # combine memory windows k_windows = tf.concat([self_k_part, k_unmasked_windows], 3) v_windows = tf.concat([self_v_part, v_unmasked_windows], 3) output = dot_product_attention( q, k_windows, v_windows, attention_bias, dropout_rate=0., name="dilated_1d", make_image_summary=False) output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output.set_shape(v_list_shape) return output
python
def masked_dilated_self_attention_1d(q, k, v, query_block_size=64, memory_block_size=64, gap_size=2, num_memory_blocks=2, name=None): """Dilated self-attention. TODO(avaswani): Try it and write a paper on it. Args: q: a Tensor with shape [batch, heads, length, depth] k: a Tensor with shape [batch, heads, length, depth] v: a Tensor with shape [batch, heads, length, depth] query_block_size: an integer memory_block_size: an integer indicating how much to look left. gap_size: an integer indicating the gap size num_memory_blocks: how many memory blocks to look at to the left. Each will be separated by gap_size. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth] """ with tf.variable_scope( name, default_name="masked_dilated_self_attention_1d", values=[q, k, v]): v_list_shape = v.get_shape().as_list() assert v_list_shape == k.shape.as_list(), "K and V depths must be equal" v_shape = common_layers.shape_list(v) depth_v = v_shape[3] batch_size = v_shape[0] num_heads = v_shape[1] original_length = common_layers.shape_list(q)[2] # Pad query, key, value to ensure multiple of corresponding lengths. def pad_to_multiple(x, pad_length): x_length = common_layers.shape_list(x)[2] return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) def pad_l(x, left_pad_length): return tf.pad(x, [[0, 0], [0, 0], [left_pad_length, 0], [0, 0]]) q = pad_to_multiple(q, query_block_size) v = pad_to_multiple(v, query_block_size) k = pad_to_multiple(k, query_block_size) # Set up query blocks. new_q_shape = common_layers.shape_list(q) q = reshape_by_blocks(q, new_q_shape, query_block_size) # Set up key and value windows. self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size) self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size) k_v_padding = (gap_size + memory_block_size) * num_memory_blocks k = pad_l(k, k_v_padding) v = pad_l(v, k_v_padding) # Get gather indices. index_length = (new_q_shape[2] - query_block_size + memory_block_size) indices = tf.range(0, index_length, delta=1, name="index_range") indices = tf.reshape(indices, [1, -1, 1]) # [1, length, 1] for convs kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1) gather_indices = tf.nn.conv1d( tf.cast(indices, tf.float32), kernel, query_block_size, padding="VALID", name="gather_conv") gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0) # Get left and right memory blocks for each query. # [length, batch, heads, dim] k_t = tf.transpose(k, [2, 0, 1, 3]) v_t = tf.transpose(v, [2, 0, 1, 3]) k_unmasked_windows = gather_dilated_memory_blocks( k_t, num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices) v_unmasked_windows = gather_dilated_memory_blocks( v_t, num_memory_blocks, gap_size, query_block_size, memory_block_size, gather_indices) # Combine memory windows. 
block_q_shape = common_layers.shape_list(q) masked_attention_bias = tf.tile( tf.expand_dims(attention_bias_lower_triangle(query_block_size), axis=0), [block_q_shape[0], block_q_shape[1], block_q_shape[2], 1, 1]) padding_attention_bias = tf.expand_dims( embedding_to_padding(k_unmasked_windows) * -1e9, axis=-2) padding_attention_bias = tf.tile(padding_attention_bias, [1, 1, 1, query_block_size, 1]) attention_bias = tf.concat( [masked_attention_bias, padding_attention_bias], axis=-1) # combine memory windows k_windows = tf.concat([self_k_part, k_unmasked_windows], 3) v_windows = tf.concat([self_v_part, v_unmasked_windows], 3) output = dot_product_attention( q, k_windows, v_windows, attention_bias, dropout_rate=0., name="dilated_1d", make_image_summary=False) output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) output.set_shape(v_list_shape) return output
[ "def", "masked_dilated_self_attention_1d", "(", "q", ",", "k", ",", "v", ",", "query_block_size", "=", "64", ",", "memory_block_size", "=", "64", ",", "gap_size", "=", "2", ",", "num_memory_blocks", "=", "2", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"masked_dilated_self_attention_1d\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "v_list_shape", "=", "v", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "assert", "v_list_shape", "==", "k", ".", "shape", ".", "as_list", "(", ")", ",", "\"K and V depths must be equal\"", "v_shape", "=", "common_layers", ".", "shape_list", "(", "v", ")", "depth_v", "=", "v_shape", "[", "3", "]", "batch_size", "=", "v_shape", "[", "0", "]", "num_heads", "=", "v_shape", "[", "1", "]", "original_length", "=", "common_layers", ".", "shape_list", "(", "q", ")", "[", "2", "]", "# Pad query, key, value to ensure multiple of corresponding lengths.", "def", "pad_to_multiple", "(", "x", ",", "pad_length", ")", ":", "x_length", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "2", "]", "return", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "-", "x_length", "%", "pad_length", "]", ",", "[", "0", ",", "0", "]", "]", ")", "def", "pad_l", "(", "x", ",", "left_pad_length", ")", ":", "return", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "left_pad_length", ",", "0", "]", ",", "[", "0", ",", "0", "]", "]", ")", "q", "=", "pad_to_multiple", "(", "q", ",", "query_block_size", ")", "v", "=", "pad_to_multiple", "(", "v", ",", "query_block_size", ")", "k", "=", "pad_to_multiple", "(", "k", ",", "query_block_size", ")", "# Set up query blocks.", "new_q_shape", "=", "common_layers", ".", "shape_list", "(", "q", ")", "q", "=", "reshape_by_blocks", "(", "q", ",", "new_q_shape", ",", "query_block_size", ")", "# Set up key and value windows.", "self_k_part", "=", "reshape_by_blocks", "(", "k", ",", "new_q_shape", ",", "query_block_size", ")", "self_v_part", "=", "reshape_by_blocks", "(", "v", ",", "new_q_shape", ",", "query_block_size", ")", "k_v_padding", "=", "(", "gap_size", "+", "memory_block_size", ")", "*", "num_memory_blocks", "k", "=", "pad_l", "(", "k", ",", "k_v_padding", ")", "v", "=", "pad_l", "(", "v", ",", "k_v_padding", ")", "# Get gather indices.", "index_length", "=", "(", "new_q_shape", "[", "2", "]", "-", "query_block_size", "+", "memory_block_size", ")", "indices", "=", "tf", ".", "range", "(", "0", ",", "index_length", ",", "delta", "=", "1", ",", "name", "=", "\"index_range\"", ")", "indices", "=", "tf", ".", "reshape", "(", "indices", ",", "[", "1", ",", "-", "1", ",", "1", "]", ")", "# [1, length, 1] for convs", "kernel", "=", "tf", ".", "expand_dims", "(", "tf", ".", "eye", "(", "memory_block_size", ")", ",", "axis", "=", "1", ")", "gather_indices", "=", "tf", ".", "nn", ".", "conv1d", "(", "tf", ".", "cast", "(", "indices", ",", "tf", ".", "float32", ")", ",", "kernel", ",", "query_block_size", ",", "padding", "=", "\"VALID\"", ",", "name", "=", "\"gather_conv\"", ")", "gather_indices", "=", "tf", ".", "squeeze", "(", "tf", ".", "cast", "(", "gather_indices", ",", "tf", ".", "int32", ")", ",", "axis", "=", "0", ")", "# Get left and right memory blocks for each query.", "# [length, batch, heads, dim]", "k_t", "=", "tf", ".", "transpose", "(", "k", ",", "[", "2", ",", "0", ",", "1", ",", "3", "]", ")", "v_t", "=", "tf", ".", 
"transpose", "(", "v", ",", "[", "2", ",", "0", ",", "1", ",", "3", "]", ")", "k_unmasked_windows", "=", "gather_dilated_memory_blocks", "(", "k_t", ",", "num_memory_blocks", ",", "gap_size", ",", "query_block_size", ",", "memory_block_size", ",", "gather_indices", ")", "v_unmasked_windows", "=", "gather_dilated_memory_blocks", "(", "v_t", ",", "num_memory_blocks", ",", "gap_size", ",", "query_block_size", ",", "memory_block_size", ",", "gather_indices", ")", "# Combine memory windows.", "block_q_shape", "=", "common_layers", ".", "shape_list", "(", "q", ")", "masked_attention_bias", "=", "tf", ".", "tile", "(", "tf", ".", "expand_dims", "(", "attention_bias_lower_triangle", "(", "query_block_size", ")", ",", "axis", "=", "0", ")", ",", "[", "block_q_shape", "[", "0", "]", ",", "block_q_shape", "[", "1", "]", ",", "block_q_shape", "[", "2", "]", ",", "1", ",", "1", "]", ")", "padding_attention_bias", "=", "tf", ".", "expand_dims", "(", "embedding_to_padding", "(", "k_unmasked_windows", ")", "*", "-", "1e9", ",", "axis", "=", "-", "2", ")", "padding_attention_bias", "=", "tf", ".", "tile", "(", "padding_attention_bias", ",", "[", "1", ",", "1", ",", "1", ",", "query_block_size", ",", "1", "]", ")", "attention_bias", "=", "tf", ".", "concat", "(", "[", "masked_attention_bias", ",", "padding_attention_bias", "]", ",", "axis", "=", "-", "1", ")", "# combine memory windows", "k_windows", "=", "tf", ".", "concat", "(", "[", "self_k_part", ",", "k_unmasked_windows", "]", ",", "3", ")", "v_windows", "=", "tf", ".", "concat", "(", "[", "self_v_part", ",", "v_unmasked_windows", "]", ",", "3", ")", "output", "=", "dot_product_attention", "(", "q", ",", "k_windows", ",", "v_windows", ",", "attention_bias", ",", "dropout_rate", "=", "0.", ",", "name", "=", "\"dilated_1d\"", ",", "make_image_summary", "=", "False", ")", "output", "=", "tf", ".", "reshape", "(", "output", ",", "[", "batch_size", ",", "num_heads", ",", "-", "1", ",", "depth_v", "]", ")", "# Remove the padding if introduced.", "output", "=", "tf", ".", "slice", "(", "output", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "original_length", ",", "-", "1", "]", ")", "output", ".", "set_shape", "(", "v_list_shape", ")", "return", "output" ]
Dilated self-attention. TODO(avaswani): Try it and write a paper on it. Args: q: a Tensor with shape [batch, heads, length, depth] k: a Tensor with shape [batch, heads, length, depth] v: a Tensor with shape [batch, heads, length, depth] query_block_size: an integer memory_block_size: an integer indicating how much to look left. gap_size: an integer indicating the gap size num_memory_blocks: how many memory blocks to look at to the left. Each will be separated by gap_size. name: an optional string Returns: a Tensor of shape [batch, heads, length, depth]
[ "Dilated", "self", "-", "attention", ".", "TODO", "(", "avaswani", ")", ":", "Try", "it", "and", "write", "a", "paper", "on", "it", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3342-L3452
train
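A minimal usage sketch for masked_dilated_self_attention_1d above, not part of the record itself; it assumes a TensorFlow 1.x environment with tensor2tensor installed at the referenced commit, and the tensor sizes and block settings are illustrative assumptions.

import tensorflow as tf
from tensor2tensor.layers import common_attention

# Illustrative shapes: [batch, heads, length, depth]; length is a multiple of
# query_block_size, so no internal padding is triggered.
q = tf.random_normal([2, 4, 128, 32])
k = tf.random_normal([2, 4, 128, 32])
v = tf.random_normal([2, 4, 128, 32])  # depth_v must equal depth_k here

out = common_attention.masked_dilated_self_attention_1d(
    q, k, v,
    query_block_size=16,   # each block attends to itself causally
    memory_block_size=16,  # plus fixed-size memory blocks to its left
    gap_size=2,            # separated by this many skipped positions
    num_memory_blocks=2)   # number of left memory blocks per query block
print(out.shape)           # (2, 4, 128, 32), same shape as v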
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
local_attention_2d
def local_attention_2d(q, k, v, query_shape=(8, 16), memory_flange=(8, 16), name=None): """Strided block local self-attention. The 2-D sequence is divided into 2-D blocks of shape query_shape. Attention for a given query position can only see memory positions less than or equal to the query position. The memory positions are the corresponding block with memory_flange many positions to add to the height and width of the block (namely, left, top, and right). Args: q: a Tensor with shape [batch, heads, h, w, depth_k] k: a Tensor with shape [batch, heads, h, w, depth_k] v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current implementation, depth_v must be equal to depth_k. query_shape: an tuple indicating the height and width of each query block. memory_flange: an integer indicating how much to look in height and width from each query block. name: an optional string Returns: a Tensor of shape [batch, heads, h, w, depth_v] """ with tf.variable_scope( name, default_name="local_self_attention_2d", values=[q, k, v]): v_shape = common_layers.shape_list(v) # Pad query, key, value to ensure multiple of corresponding lengths. q = pad_to_multiple_2d(q, query_shape) k = pad_to_multiple_2d(k, query_shape) v = pad_to_multiple_2d(v, query_shape) paddings = [[0, 0], [0, 0], [memory_flange[0], memory_flange[1]], [memory_flange[0], memory_flange[1]], [0, 0]] k = tf.pad(k, paddings) v = tf.pad(v, paddings) # Set up query blocks. q_indices = gather_indices_2d(q, query_shape, query_shape) q_new = gather_blocks_2d(q, q_indices) # Set up key and value blocks. memory_shape = (query_shape[0] + 2 * memory_flange[0], query_shape[1] + 2 * memory_flange[1]) k_and_v_indices = gather_indices_2d(k, memory_shape, query_shape) k_new = gather_blocks_2d(k, k_and_v_indices) v_new = gather_blocks_2d(v, k_and_v_indices) attention_bias = tf.expand_dims( tf.to_float(embedding_to_padding(k_new)) * -1e9, axis=-2) output = dot_product_attention( q_new, k_new, v_new, attention_bias, dropout_rate=0., name="local_2d", make_image_summary=False) # Put representations back into original shapes. padded_q_shape = common_layers.shape_list(q) output = scatter_blocks_2d(output, q_indices, padded_q_shape) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0, 0], [-1, -1, v_shape[2], v_shape[3], -1]) return output
python
def local_attention_2d(q, k, v, query_shape=(8, 16), memory_flange=(8, 16), name=None): """Strided block local self-attention. The 2-D sequence is divided into 2-D blocks of shape query_shape. Attention for a given query position can only see memory positions less than or equal to the query position. The memory positions are the corresponding block with memory_flange many positions to add to the height and width of the block (namely, left, top, and right). Args: q: a Tensor with shape [batch, heads, h, w, depth_k] k: a Tensor with shape [batch, heads, h, w, depth_k] v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current implementation, depth_v must be equal to depth_k. query_shape: an tuple indicating the height and width of each query block. memory_flange: an integer indicating how much to look in height and width from each query block. name: an optional string Returns: a Tensor of shape [batch, heads, h, w, depth_v] """ with tf.variable_scope( name, default_name="local_self_attention_2d", values=[q, k, v]): v_shape = common_layers.shape_list(v) # Pad query, key, value to ensure multiple of corresponding lengths. q = pad_to_multiple_2d(q, query_shape) k = pad_to_multiple_2d(k, query_shape) v = pad_to_multiple_2d(v, query_shape) paddings = [[0, 0], [0, 0], [memory_flange[0], memory_flange[1]], [memory_flange[0], memory_flange[1]], [0, 0]] k = tf.pad(k, paddings) v = tf.pad(v, paddings) # Set up query blocks. q_indices = gather_indices_2d(q, query_shape, query_shape) q_new = gather_blocks_2d(q, q_indices) # Set up key and value blocks. memory_shape = (query_shape[0] + 2 * memory_flange[0], query_shape[1] + 2 * memory_flange[1]) k_and_v_indices = gather_indices_2d(k, memory_shape, query_shape) k_new = gather_blocks_2d(k, k_and_v_indices) v_new = gather_blocks_2d(v, k_and_v_indices) attention_bias = tf.expand_dims( tf.to_float(embedding_to_padding(k_new)) * -1e9, axis=-2) output = dot_product_attention( q_new, k_new, v_new, attention_bias, dropout_rate=0., name="local_2d", make_image_summary=False) # Put representations back into original shapes. padded_q_shape = common_layers.shape_list(q) output = scatter_blocks_2d(output, q_indices, padded_q_shape) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0, 0], [-1, -1, v_shape[2], v_shape[3], -1]) return output
[ "def", "local_attention_2d", "(", "q", ",", "k", ",", "v", ",", "query_shape", "=", "(", "8", ",", "16", ")", ",", "memory_flange", "=", "(", "8", ",", "16", ")", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"local_self_attention_2d\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "v_shape", "=", "common_layers", ".", "shape_list", "(", "v", ")", "# Pad query, key, value to ensure multiple of corresponding lengths.", "q", "=", "pad_to_multiple_2d", "(", "q", ",", "query_shape", ")", "k", "=", "pad_to_multiple_2d", "(", "k", ",", "query_shape", ")", "v", "=", "pad_to_multiple_2d", "(", "v", ",", "query_shape", ")", "paddings", "=", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "memory_flange", "[", "0", "]", ",", "memory_flange", "[", "1", "]", "]", ",", "[", "memory_flange", "[", "0", "]", ",", "memory_flange", "[", "1", "]", "]", ",", "[", "0", ",", "0", "]", "]", "k", "=", "tf", ".", "pad", "(", "k", ",", "paddings", ")", "v", "=", "tf", ".", "pad", "(", "v", ",", "paddings", ")", "# Set up query blocks.", "q_indices", "=", "gather_indices_2d", "(", "q", ",", "query_shape", ",", "query_shape", ")", "q_new", "=", "gather_blocks_2d", "(", "q", ",", "q_indices", ")", "# Set up key and value blocks.", "memory_shape", "=", "(", "query_shape", "[", "0", "]", "+", "2", "*", "memory_flange", "[", "0", "]", ",", "query_shape", "[", "1", "]", "+", "2", "*", "memory_flange", "[", "1", "]", ")", "k_and_v_indices", "=", "gather_indices_2d", "(", "k", ",", "memory_shape", ",", "query_shape", ")", "k_new", "=", "gather_blocks_2d", "(", "k", ",", "k_and_v_indices", ")", "v_new", "=", "gather_blocks_2d", "(", "v", ",", "k_and_v_indices", ")", "attention_bias", "=", "tf", ".", "expand_dims", "(", "tf", ".", "to_float", "(", "embedding_to_padding", "(", "k_new", ")", ")", "*", "-", "1e9", ",", "axis", "=", "-", "2", ")", "output", "=", "dot_product_attention", "(", "q_new", ",", "k_new", ",", "v_new", ",", "attention_bias", ",", "dropout_rate", "=", "0.", ",", "name", "=", "\"local_2d\"", ",", "make_image_summary", "=", "False", ")", "# Put representations back into original shapes.", "padded_q_shape", "=", "common_layers", ".", "shape_list", "(", "q", ")", "output", "=", "scatter_blocks_2d", "(", "output", ",", "q_indices", ",", "padded_q_shape", ")", "# Remove the padding if introduced.", "output", "=", "tf", ".", "slice", "(", "output", ",", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "v_shape", "[", "2", "]", ",", "v_shape", "[", "3", "]", ",", "-", "1", "]", ")", "return", "output" ]
Strided block local self-attention.

The 2-D sequence is divided into 2-D blocks of shape query_shape. Attention
for a given query position can only see memory positions less than or equal
to the query position. The memory positions are the corresponding block with
memory_flange many positions to add to the height and width of the block
(namely, left, top, and right).

Args:
  q: a Tensor with shape [batch, heads, h, w, depth_k]
  k: a Tensor with shape [batch, heads, h, w, depth_k]
  v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current
    implementation, depth_v must be equal to depth_k.
  query_shape: a tuple indicating the height and width of each query block.
  memory_flange: a tuple of ints indicating how much to look in height and
    width from each query block.
  name: an optional string

Returns:
  a Tensor of shape [batch, heads, h, w, depth_v]
[ "Strided", "block", "local", "self", "-", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3455-L3523
train
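A usage sketch for local_attention_2d, assuming TensorFlow 1.x with tensor2tensor installed at the referenced commit; the shapes and block sizes are illustrative assumptions, with equal flange values in both dimensions for simplicity.

import tensorflow as tf
from tensor2tensor.layers import common_attention

# Illustrative shapes: [batch, heads, height, width, depth]; height and width
# are already multiples of query_shape.
q = tf.random_normal([2, 4, 8, 8, 32])
k = tf.random_normal([2, 4, 8, 8, 32])
v = tf.random_normal([2, 4, 8, 8, 32])

out = common_attention.local_attention_2d(
    q, k, v, query_shape=(4, 4), memory_flange=(4, 4))
print(out.shape)  # (2, 4, 8, 8, 32): spatial shape is preserved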
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
pad_to_multiple_2d
def pad_to_multiple_2d(x, block_shape): """Making sure x is a multiple of shape. Args: x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor block_shape: a 2-d list of integer shapes Returns: padded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor """ old_shape = x.get_shape().dims last = old_shape[-1] if len(old_shape) == 4: height_padding = -common_layers.shape_list(x)[1] % block_shape[0] width_padding = -common_layers.shape_list(x)[2] % block_shape[1] paddings = [[0, 0], [0, height_padding], [0, width_padding], [0, 0]] elif len(old_shape) == 5: height_padding = -common_layers.shape_list(x)[2] % block_shape[0] width_padding = -common_layers.shape_list(x)[3] % block_shape[1] paddings = [[0, 0], [0, 0], [0, height_padding], [0, width_padding], [0, 0]] padded_x = tf.pad(x, paddings) padded_shape = padded_x.get_shape().as_list() padded_shape = padded_shape[:-1] + [last] padded_x.set_shape(padded_shape) return padded_x
python
def pad_to_multiple_2d(x, block_shape): """Making sure x is a multiple of shape. Args: x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor block_shape: a 2-d list of integer shapes Returns: padded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor """ old_shape = x.get_shape().dims last = old_shape[-1] if len(old_shape) == 4: height_padding = -common_layers.shape_list(x)[1] % block_shape[0] width_padding = -common_layers.shape_list(x)[2] % block_shape[1] paddings = [[0, 0], [0, height_padding], [0, width_padding], [0, 0]] elif len(old_shape) == 5: height_padding = -common_layers.shape_list(x)[2] % block_shape[0] width_padding = -common_layers.shape_list(x)[3] % block_shape[1] paddings = [[0, 0], [0, 0], [0, height_padding], [0, width_padding], [0, 0]] padded_x = tf.pad(x, paddings) padded_shape = padded_x.get_shape().as_list() padded_shape = padded_shape[:-1] + [last] padded_x.set_shape(padded_shape) return padded_x
[ "def", "pad_to_multiple_2d", "(", "x", ",", "block_shape", ")", ":", "old_shape", "=", "x", ".", "get_shape", "(", ")", ".", "dims", "last", "=", "old_shape", "[", "-", "1", "]", "if", "len", "(", "old_shape", ")", "==", "4", ":", "height_padding", "=", "-", "common_layers", ".", "shape_list", "(", "x", ")", "[", "1", "]", "%", "block_shape", "[", "0", "]", "width_padding", "=", "-", "common_layers", ".", "shape_list", "(", "x", ")", "[", "2", "]", "%", "block_shape", "[", "1", "]", "paddings", "=", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "height_padding", "]", ",", "[", "0", ",", "width_padding", "]", ",", "[", "0", ",", "0", "]", "]", "elif", "len", "(", "old_shape", ")", "==", "5", ":", "height_padding", "=", "-", "common_layers", ".", "shape_list", "(", "x", ")", "[", "2", "]", "%", "block_shape", "[", "0", "]", "width_padding", "=", "-", "common_layers", ".", "shape_list", "(", "x", ")", "[", "3", "]", "%", "block_shape", "[", "1", "]", "paddings", "=", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "height_padding", "]", ",", "[", "0", ",", "width_padding", "]", ",", "[", "0", ",", "0", "]", "]", "padded_x", "=", "tf", ".", "pad", "(", "x", ",", "paddings", ")", "padded_shape", "=", "padded_x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "padded_shape", "=", "padded_shape", "[", ":", "-", "1", "]", "+", "[", "last", "]", "padded_x", ".", "set_shape", "(", "padded_shape", ")", "return", "padded_x" ]
Making sure x is a multiple of shape. Args: x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor block_shape: a 2-d list of integer shapes Returns: padded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor
[ "Making", "sure", "x", "is", "a", "multiple", "of", "shape", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3526-L3551
train
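The padding amounts in pad_to_multiple_2d come from the -length % block idiom; a small standalone check in plain Python (no TensorFlow needed) of what that expression computes:

# -length % block is the smallest non-negative pad that makes length a
# multiple of block, and 0 when it already is.
block = 8
for length in [5, 8, 13, 16]:
    pad = -length % block
    assert (length + pad) % block == 0
    print(length, "->", pad)  # 5 -> 3, 8 -> 0, 13 -> 3, 16 -> 0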
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
reshape_range
def reshape_range(tensor, i, j, shape): """Reshapes a tensor between dimensions i and j.""" t_shape = common_layers.shape_list(tensor) target_shape = t_shape[:i] + shape + t_shape[j:] return tf.reshape(tensor, target_shape)
python
def reshape_range(tensor, i, j, shape): """Reshapes a tensor between dimensions i and j.""" t_shape = common_layers.shape_list(tensor) target_shape = t_shape[:i] + shape + t_shape[j:] return tf.reshape(tensor, target_shape)
[ "def", "reshape_range", "(", "tensor", ",", "i", ",", "j", ",", "shape", ")", ":", "t_shape", "=", "common_layers", ".", "shape_list", "(", "tensor", ")", "target_shape", "=", "t_shape", "[", ":", "i", "]", "+", "shape", "+", "t_shape", "[", "j", ":", "]", "return", "tf", ".", "reshape", "(", "tensor", ",", "target_shape", ")" ]
Reshapes a tensor between dimensions i and j.
[ "Reshapes", "a", "tensor", "between", "dimensions", "i", "and", "j", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3554-L3558
train
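A small NumPy analogue of reshape_range, with made-up dimensions, showing how dimensions i..j-1 are replaced by the requested shape while the remaining dimensions are kept:

import numpy as np

x = np.zeros((2, 3, 4, 5))
i, j, new_shape = 1, 3, [12]              # merge dims 1 and 2 into one axis
target = list(x.shape)[:i] + new_shape + list(x.shape)[j:]
print(target)                             # [2, 12, 5]
print(x.reshape(target).shape)            # (2, 12, 5)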
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
gather_blocks_2d
def gather_blocks_2d(x, indices): """Gathers flattened blocks from x.""" x_shape = common_layers.shape_list(x) x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])]) # [length, batch, heads, dim] x_t = tf.transpose(x, [2, 0, 1, 3]) x_new = tf.gather(x_t, indices) # returns [batch, heads, num_blocks, block_length ** 2, dim] return tf.transpose(x_new, [2, 3, 0, 1, 4])
python
def gather_blocks_2d(x, indices): """Gathers flattened blocks from x.""" x_shape = common_layers.shape_list(x) x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])]) # [length, batch, heads, dim] x_t = tf.transpose(x, [2, 0, 1, 3]) x_new = tf.gather(x_t, indices) # returns [batch, heads, num_blocks, block_length ** 2, dim] return tf.transpose(x_new, [2, 3, 0, 1, 4])
[ "def", "gather_blocks_2d", "(", "x", ",", "indices", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "x", "=", "reshape_range", "(", "x", ",", "2", ",", "4", ",", "[", "tf", ".", "reduce_prod", "(", "x_shape", "[", "2", ":", "4", "]", ")", "]", ")", "# [length, batch, heads, dim]", "x_t", "=", "tf", ".", "transpose", "(", "x", ",", "[", "2", ",", "0", ",", "1", ",", "3", "]", ")", "x_new", "=", "tf", ".", "gather", "(", "x_t", ",", "indices", ")", "# returns [batch, heads, num_blocks, block_length ** 2, dim]", "return", "tf", ".", "transpose", "(", "x_new", ",", "[", "2", ",", "3", ",", "0", ",", "1", ",", "4", "]", ")" ]
Gathers flattened blocks from x.
[ "Gathers", "flattened", "blocks", "from", "x", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3561-L3569
train
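A NumPy sketch of the gather step on a tiny hypothetical 4x4 grid flattened row-major, showing how per-block index lists pull out 2x2 blocks; the real function additionally carries batch and heads axes and transposes so the gather runs over the flattened spatial axis.

import numpy as np

grid = np.arange(16).reshape(4, 4)          # values double as positions
flat = grid.reshape(-1)                     # flatten h*w, as reshape_range does
block_indices = np.array([[0, 1, 4, 5],       # top-left 2x2 block
                          [2, 3, 6, 7],       # top-right
                          [8, 9, 12, 13],     # bottom-left
                          [10, 11, 14, 15]])  # bottom-right
print(flat[block_indices])                  # each row is one flattened block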
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
scatter_blocks_2d
def scatter_blocks_2d(x, indices, shape): """scatters blocks from x into shape with indices.""" x_shape = common_layers.shape_list(x) # [length, batch, heads, dim] x_t = tf.transpose( tf.reshape(x, [x_shape[0], x_shape[1], -1, x_shape[-1]]), [2, 0, 1, 3]) x_t_shape = common_layers.shape_list(x_t) indices = tf.reshape(indices, [-1, 1]) scattered_x = tf.scatter_nd(indices, x_t, x_t_shape) scattered_x = tf.transpose(scattered_x, [1, 2, 0, 3]) return tf.reshape(scattered_x, shape)
python
def scatter_blocks_2d(x, indices, shape): """scatters blocks from x into shape with indices.""" x_shape = common_layers.shape_list(x) # [length, batch, heads, dim] x_t = tf.transpose( tf.reshape(x, [x_shape[0], x_shape[1], -1, x_shape[-1]]), [2, 0, 1, 3]) x_t_shape = common_layers.shape_list(x_t) indices = tf.reshape(indices, [-1, 1]) scattered_x = tf.scatter_nd(indices, x_t, x_t_shape) scattered_x = tf.transpose(scattered_x, [1, 2, 0, 3]) return tf.reshape(scattered_x, shape)
[ "def", "scatter_blocks_2d", "(", "x", ",", "indices", ",", "shape", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "# [length, batch, heads, dim]", "x_t", "=", "tf", ".", "transpose", "(", "tf", ".", "reshape", "(", "x", ",", "[", "x_shape", "[", "0", "]", ",", "x_shape", "[", "1", "]", ",", "-", "1", ",", "x_shape", "[", "-", "1", "]", "]", ")", ",", "[", "2", ",", "0", ",", "1", ",", "3", "]", ")", "x_t_shape", "=", "common_layers", ".", "shape_list", "(", "x_t", ")", "indices", "=", "tf", ".", "reshape", "(", "indices", ",", "[", "-", "1", ",", "1", "]", ")", "scattered_x", "=", "tf", ".", "scatter_nd", "(", "indices", ",", "x_t", ",", "x_t_shape", ")", "scattered_x", "=", "tf", ".", "transpose", "(", "scattered_x", ",", "[", "1", ",", "2", ",", "0", ",", "3", "]", ")", "return", "tf", ".", "reshape", "(", "scattered_x", ",", "shape", ")" ]
scatters blocks from x into shape with indices.
[ "scatters", "blocks", "from", "x", "into", "shape", "with", "indices", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3572-L3582
train
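The scatter step is the inverse of that gather; a NumPy round-trip on the same tiny grid, again a conceptual sketch that ignores the batch and heads axes of the real function:

import numpy as np

grid = np.arange(16).reshape(4, 4)
flat = grid.reshape(-1)
block_indices = np.array([[0, 1, 4, 5], [2, 3, 6, 7],
                          [8, 9, 12, 13], [10, 11, 14, 15]])
blocks = flat[block_indices]                              # gather

restored = np.zeros_like(flat)
restored[block_indices.reshape(-1)] = blocks.reshape(-1)  # scatter back
assert (restored.reshape(4, 4) == grid).all()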
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
gather_indices_2d
def gather_indices_2d(x, block_shape, block_stride): """Getting gather indices.""" # making an identity matrix kernel kernel = tf.eye(block_shape[0] * block_shape[1]) kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1]) # making indices [1, h, w, 1] to appy convs x_shape = common_layers.shape_list(x) indices = tf.range(x_shape[2] * x_shape[3]) indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1]) indices = tf.nn.conv2d( tf.cast(indices, tf.float32), kernel, strides=[1, block_stride[0], block_stride[1], 1], padding="VALID") # making indices [num_blocks, dim] to gather dims = common_layers.shape_list(indices)[:3] if all([isinstance(dim, int) for dim in dims]): num_blocks = functools.reduce(operator.mul, dims, 1) else: num_blocks = tf.reduce_prod(dims) indices = tf.reshape(indices, [num_blocks, -1]) return tf.cast(indices, tf.int32)
python
def gather_indices_2d(x, block_shape, block_stride): """Getting gather indices.""" # making an identity matrix kernel kernel = tf.eye(block_shape[0] * block_shape[1]) kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1]) # making indices [1, h, w, 1] to appy convs x_shape = common_layers.shape_list(x) indices = tf.range(x_shape[2] * x_shape[3]) indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1]) indices = tf.nn.conv2d( tf.cast(indices, tf.float32), kernel, strides=[1, block_stride[0], block_stride[1], 1], padding="VALID") # making indices [num_blocks, dim] to gather dims = common_layers.shape_list(indices)[:3] if all([isinstance(dim, int) for dim in dims]): num_blocks = functools.reduce(operator.mul, dims, 1) else: num_blocks = tf.reduce_prod(dims) indices = tf.reshape(indices, [num_blocks, -1]) return tf.cast(indices, tf.int32)
[ "def", "gather_indices_2d", "(", "x", ",", "block_shape", ",", "block_stride", ")", ":", "# making an identity matrix kernel", "kernel", "=", "tf", ".", "eye", "(", "block_shape", "[", "0", "]", "*", "block_shape", "[", "1", "]", ")", "kernel", "=", "reshape_range", "(", "kernel", ",", "0", ",", "1", ",", "[", "block_shape", "[", "0", "]", ",", "block_shape", "[", "1", "]", ",", "1", "]", ")", "# making indices [1, h, w, 1] to appy convs", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "indices", "=", "tf", ".", "range", "(", "x_shape", "[", "2", "]", "*", "x_shape", "[", "3", "]", ")", "indices", "=", "tf", ".", "reshape", "(", "indices", ",", "[", "1", ",", "x_shape", "[", "2", "]", ",", "x_shape", "[", "3", "]", ",", "1", "]", ")", "indices", "=", "tf", ".", "nn", ".", "conv2d", "(", "tf", ".", "cast", "(", "indices", ",", "tf", ".", "float32", ")", ",", "kernel", ",", "strides", "=", "[", "1", ",", "block_stride", "[", "0", "]", ",", "block_stride", "[", "1", "]", ",", "1", "]", ",", "padding", "=", "\"VALID\"", ")", "# making indices [num_blocks, dim] to gather", "dims", "=", "common_layers", ".", "shape_list", "(", "indices", ")", "[", ":", "3", "]", "if", "all", "(", "[", "isinstance", "(", "dim", ",", "int", ")", "for", "dim", "in", "dims", "]", ")", ":", "num_blocks", "=", "functools", ".", "reduce", "(", "operator", ".", "mul", ",", "dims", ",", "1", ")", "else", ":", "num_blocks", "=", "tf", ".", "reduce_prod", "(", "dims", ")", "indices", "=", "tf", ".", "reshape", "(", "indices", ",", "[", "num_blocks", ",", "-", "1", "]", ")", "return", "tf", ".", "cast", "(", "indices", ",", "tf", ".", "int32", ")" ]
Getting gather indices.
[ "Getting", "gather", "indices", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3585-L3606
train
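The identity-kernel convolution above is a trick for reading off, per block position, the flattened indices the block covers; a NumPy sketch of the same computation with explicit loops, for a hypothetical 4x4 input with 2x2 blocks and stride 2:

import numpy as np

h, w = 4, 4
bh, bw = 2, 2                                # block_shape
sh, sw = 2, 2                                # block_stride
index_map = np.arange(h * w).reshape(h, w)   # flattened position of each pixel

indices = []
for r in range(0, h - bh + 1, sh):           # "VALID" convolution positions
    for c in range(0, w - bw + 1, sw):
        indices.append(index_map[r:r + bh, c:c + bw].reshape(-1))
print(np.stack(indices))
# [[ 0  1  4  5]  [ 2  3  6  7]  [ 8  9 12 13]  [10 11 14 15]]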
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
make_2d_block_raster_mask
def make_2d_block_raster_mask(query_shape, memory_flange): """Creates a mask for 2d block raster scan. The query mask can look to the left, top left, top, and top right, but not to the right. Inside the query, we have the standard raster scan masking. Args: query_shape: A tuple of ints (query_height, query_width) memory_flange: A tuple of ints (memory_flange_height, memory_flange_width) Returns: A tensor of shape query_size, memory_size """ # mask inside the query block query_triangle = common_layers.ones_matrix_band_part( np.prod(query_shape), np.prod(query_shape), -1, 0) split_query_masks = tf.split(query_triangle, query_shape[0], axis=1) # adding mask for left and right mask_pieces = [ tf.concat( # pylint: disable=g-complex-comprehension [tf.ones([np.prod(query_shape), memory_flange[1]]), split_query_masks[i], tf.zeros([np.prod(query_shape), memory_flange[1]])], axis=1) for i in range(query_shape[0]) ] # adding mask for top final_mask = tf.concat( [ tf.ones([ np.prod(query_shape), (query_shape[1] + 2 * memory_flange[1]) * memory_flange[0] ]), tf.concat(mask_pieces, axis=1) ], axis=1) # 0.0 is visible location, 1.0 is masked. return 1. - final_mask
python
def make_2d_block_raster_mask(query_shape, memory_flange): """Creates a mask for 2d block raster scan. The query mask can look to the left, top left, top, and top right, but not to the right. Inside the query, we have the standard raster scan masking. Args: query_shape: A tuple of ints (query_height, query_width) memory_flange: A tuple of ints (memory_flange_height, memory_flange_width) Returns: A tensor of shape query_size, memory_size """ # mask inside the query block query_triangle = common_layers.ones_matrix_band_part( np.prod(query_shape), np.prod(query_shape), -1, 0) split_query_masks = tf.split(query_triangle, query_shape[0], axis=1) # adding mask for left and right mask_pieces = [ tf.concat( # pylint: disable=g-complex-comprehension [tf.ones([np.prod(query_shape), memory_flange[1]]), split_query_masks[i], tf.zeros([np.prod(query_shape), memory_flange[1]])], axis=1) for i in range(query_shape[0]) ] # adding mask for top final_mask = tf.concat( [ tf.ones([ np.prod(query_shape), (query_shape[1] + 2 * memory_flange[1]) * memory_flange[0] ]), tf.concat(mask_pieces, axis=1) ], axis=1) # 0.0 is visible location, 1.0 is masked. return 1. - final_mask
[ "def", "make_2d_block_raster_mask", "(", "query_shape", ",", "memory_flange", ")", ":", "# mask inside the query block", "query_triangle", "=", "common_layers", ".", "ones_matrix_band_part", "(", "np", ".", "prod", "(", "query_shape", ")", ",", "np", ".", "prod", "(", "query_shape", ")", ",", "-", "1", ",", "0", ")", "split_query_masks", "=", "tf", ".", "split", "(", "query_triangle", ",", "query_shape", "[", "0", "]", ",", "axis", "=", "1", ")", "# adding mask for left and right", "mask_pieces", "=", "[", "tf", ".", "concat", "(", "# pylint: disable=g-complex-comprehension", "[", "tf", ".", "ones", "(", "[", "np", ".", "prod", "(", "query_shape", ")", ",", "memory_flange", "[", "1", "]", "]", ")", ",", "split_query_masks", "[", "i", "]", ",", "tf", ".", "zeros", "(", "[", "np", ".", "prod", "(", "query_shape", ")", ",", "memory_flange", "[", "1", "]", "]", ")", "]", ",", "axis", "=", "1", ")", "for", "i", "in", "range", "(", "query_shape", "[", "0", "]", ")", "]", "# adding mask for top", "final_mask", "=", "tf", ".", "concat", "(", "[", "tf", ".", "ones", "(", "[", "np", ".", "prod", "(", "query_shape", ")", ",", "(", "query_shape", "[", "1", "]", "+", "2", "*", "memory_flange", "[", "1", "]", ")", "*", "memory_flange", "[", "0", "]", "]", ")", ",", "tf", ".", "concat", "(", "mask_pieces", ",", "axis", "=", "1", ")", "]", ",", "axis", "=", "1", ")", "# 0.0 is visible location, 1.0 is masked.", "return", "1.", "-", "final_mask" ]
Creates a mask for 2d block raster scan. The query mask can look to the left, top left, top, and top right, but not to the right. Inside the query, we have the standard raster scan masking. Args: query_shape: A tuple of ints (query_height, query_width) memory_flange: A tuple of ints (memory_flange_height, memory_flange_width) Returns: A tensor of shape query_size, memory_size
[ "Creates", "a", "mask", "for", "2d", "block", "raster", "scan", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3609-L3646
train
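A sketch that materializes the mask for a tiny, made-up block configuration, assuming TensorFlow 1.x with tensor2tensor installed; rows index the raster-scan positions inside one query block, columns index the surrounding memory positions, and 1.0 marks positions that must be hidden.

import tensorflow as tf
from tensor2tensor.layers import common_attention

mask = common_attention.make_2d_block_raster_mask(
    query_shape=(2, 2), memory_flange=(1, 1))
with tf.Session() as sess:
    m = sess.run(mask)
print(m.shape)  # 4 rows, one per position of the 2x2 query block
print(m)        # 0.0 = visible, 1.0 = masked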
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
get_memory_region
def get_memory_region(x, query_block_shape, memory_flange, q_indices): """Get the memory regions that surround a 2d query. The memory regions will be the left and top right. Args: x: A tensor with shape [batch, heads, height, width, depth] query_block_shape: a 2-d tuple of integers memory_flange: a 2-d tuple of integers q_indices: a tensor of indices for each of the center blocks. [num_blocks, block_length] Returns: x_flange: A tensor of shape [batch, heads, #blocks, block_length, depth] """ # Padding x to be multiple of query_shape and then # extracting the memory blocks from the same regions as the query blocks x_query_padded = pad_to_multiple_2d(x, query_block_shape) x_center = gather_blocks_2d(x_query_padded, q_indices) # Then padding the flange region paddings = [[0, 0], [0, 0], [memory_flange[0], 0], [memory_flange[1], memory_flange[1]], [0, 0]] x_memory_padded = tf.pad(x_query_padded, paddings) left_x = None top_x = None # Extracting the memory regions around the query block. left_x_region extends # to the left and the top_x_region is the combination of top left, top, and # top right of the query block # if no left region if memory_flange[1] > 0: left_x_region = x_memory_padded[:, :, memory_flange[ 0]:, :-(query_block_shape[1] + memory_flange[1]), :] left_memory_shape = (query_block_shape[0], memory_flange[1]) left_indices = gather_indices_2d(left_x_region, left_memory_shape, query_block_shape) left_x = gather_blocks_2d(left_x_region, left_indices) # if no top region if memory_flange[0] > 0: top_x_region = x_memory_padded[:, :, :-query_block_shape[0], :, :] top_memory_shape = (memory_flange[0], query_block_shape[1] + 2 * memory_flange[1]) top_indices = gather_indices_2d(top_x_region, top_memory_shape, query_block_shape) top_x = gather_blocks_2d(top_x_region, top_indices) x_flange = None if top_x is not None and left_x is not None: x_flange = tf.concat([top_x, left_x], axis=3) else: x_flange = top_x if top_x is not None else left_x return x_flange, x_center
python
def get_memory_region(x, query_block_shape, memory_flange, q_indices): """Get the memory regions that surround a 2d query. The memory regions will be the left and top right. Args: x: A tensor with shape [batch, heads, height, width, depth] query_block_shape: a 2-d tuple of integers memory_flange: a 2-d tuple of integers q_indices: a tensor of indices for each of the center blocks. [num_blocks, block_length] Returns: x_flange: A tensor of shape [batch, heads, #blocks, block_length, depth] """ # Padding x to be multiple of query_shape and then # extracting the memory blocks from the same regions as the query blocks x_query_padded = pad_to_multiple_2d(x, query_block_shape) x_center = gather_blocks_2d(x_query_padded, q_indices) # Then padding the flange region paddings = [[0, 0], [0, 0], [memory_flange[0], 0], [memory_flange[1], memory_flange[1]], [0, 0]] x_memory_padded = tf.pad(x_query_padded, paddings) left_x = None top_x = None # Extracting the memory regions around the query block. left_x_region extends # to the left and the top_x_region is the combination of top left, top, and # top right of the query block # if no left region if memory_flange[1] > 0: left_x_region = x_memory_padded[:, :, memory_flange[ 0]:, :-(query_block_shape[1] + memory_flange[1]), :] left_memory_shape = (query_block_shape[0], memory_flange[1]) left_indices = gather_indices_2d(left_x_region, left_memory_shape, query_block_shape) left_x = gather_blocks_2d(left_x_region, left_indices) # if no top region if memory_flange[0] > 0: top_x_region = x_memory_padded[:, :, :-query_block_shape[0], :, :] top_memory_shape = (memory_flange[0], query_block_shape[1] + 2 * memory_flange[1]) top_indices = gather_indices_2d(top_x_region, top_memory_shape, query_block_shape) top_x = gather_blocks_2d(top_x_region, top_indices) x_flange = None if top_x is not None and left_x is not None: x_flange = tf.concat([top_x, left_x], axis=3) else: x_flange = top_x if top_x is not None else left_x return x_flange, x_center
[ "def", "get_memory_region", "(", "x", ",", "query_block_shape", ",", "memory_flange", ",", "q_indices", ")", ":", "# Padding x to be multiple of query_shape and then", "# extracting the memory blocks from the same regions as the query blocks", "x_query_padded", "=", "pad_to_multiple_2d", "(", "x", ",", "query_block_shape", ")", "x_center", "=", "gather_blocks_2d", "(", "x_query_padded", ",", "q_indices", ")", "# Then padding the flange region", "paddings", "=", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "memory_flange", "[", "0", "]", ",", "0", "]", ",", "[", "memory_flange", "[", "1", "]", ",", "memory_flange", "[", "1", "]", "]", ",", "[", "0", ",", "0", "]", "]", "x_memory_padded", "=", "tf", ".", "pad", "(", "x_query_padded", ",", "paddings", ")", "left_x", "=", "None", "top_x", "=", "None", "# Extracting the memory regions around the query block. left_x_region extends", "# to the left and the top_x_region is the combination of top left, top, and", "# top right of the query block", "# if no left region", "if", "memory_flange", "[", "1", "]", ">", "0", ":", "left_x_region", "=", "x_memory_padded", "[", ":", ",", ":", ",", "memory_flange", "[", "0", "]", ":", ",", ":", "-", "(", "query_block_shape", "[", "1", "]", "+", "memory_flange", "[", "1", "]", ")", ",", ":", "]", "left_memory_shape", "=", "(", "query_block_shape", "[", "0", "]", ",", "memory_flange", "[", "1", "]", ")", "left_indices", "=", "gather_indices_2d", "(", "left_x_region", ",", "left_memory_shape", ",", "query_block_shape", ")", "left_x", "=", "gather_blocks_2d", "(", "left_x_region", ",", "left_indices", ")", "# if no top region", "if", "memory_flange", "[", "0", "]", ">", "0", ":", "top_x_region", "=", "x_memory_padded", "[", ":", ",", ":", ",", ":", "-", "query_block_shape", "[", "0", "]", ",", ":", ",", ":", "]", "top_memory_shape", "=", "(", "memory_flange", "[", "0", "]", ",", "query_block_shape", "[", "1", "]", "+", "2", "*", "memory_flange", "[", "1", "]", ")", "top_indices", "=", "gather_indices_2d", "(", "top_x_region", ",", "top_memory_shape", ",", "query_block_shape", ")", "top_x", "=", "gather_blocks_2d", "(", "top_x_region", ",", "top_indices", ")", "x_flange", "=", "None", "if", "top_x", "is", "not", "None", "and", "left_x", "is", "not", "None", ":", "x_flange", "=", "tf", ".", "concat", "(", "[", "top_x", ",", "left_x", "]", ",", "axis", "=", "3", ")", "else", ":", "x_flange", "=", "top_x", "if", "top_x", "is", "not", "None", "else", "left_x", "return", "x_flange", ",", "x_center" ]
Get the memory regions that surround a 2d query. The memory regions will be the left and top right. Args: x: A tensor with shape [batch, heads, height, width, depth] query_block_shape: a 2-d tuple of integers memory_flange: a 2-d tuple of integers q_indices: a tensor of indices for each of the center blocks. [num_blocks, block_length] Returns: x_flange: A tensor of shape [batch, heads, #blocks, block_length, depth]
[ "Get", "the", "memory", "regions", "that", "surround", "a", "2d", "query", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3649-L3700
train
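A shape-checking sketch for get_memory_region, assuming TensorFlow 1.x with tensor2tensor installed; the query indices are built the same way masked_local_attention_2d builds them, and all sizes below are illustrative assumptions.

import tensorflow as tf
from tensor2tensor.layers import common_attention

query_shape, memory_flange = (4, 8), (4, 8)
x = tf.random_normal([1, 2, 8, 16, 16])      # [batch, heads, h, w, depth]

x_padded = common_attention.pad_to_multiple_2d(x, query_shape)
q_indices = common_attention.gather_indices_2d(
    x_padded, query_shape, query_shape)
flange, center = common_attention.get_memory_region(
    x, query_shape, memory_flange, q_indices)
print(center.shape)  # (1, 2, 4, 32, 16): 4 blocks of 4*8 center positions
print(flange.shape)  # (1, 2, 4, 128, 16): left plus top memory per block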
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
get_shifted_center_blocks
def get_shifted_center_blocks(x, indices): """Get right shifted blocks for masked local attention 2d. Args: x: A tensor with shape [batch, heads, height, width, depth] indices: The indices to gather blocks Returns: x_shifted: a tensor of extracted blocks, each block right shifted along length. """ center_x = gather_blocks_2d(x, indices) # Shift right along the length dimension def shift_right_2d_blocks(x): """Shift the second to last dimension of x right by one.""" shifted_targets = ( tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :, :-1, :]) return shifted_targets x_shifted = shift_right_2d_blocks(center_x) return x_shifted
python
def get_shifted_center_blocks(x, indices): """Get right shifted blocks for masked local attention 2d. Args: x: A tensor with shape [batch, heads, height, width, depth] indices: The indices to gather blocks Returns: x_shifted: a tensor of extracted blocks, each block right shifted along length. """ center_x = gather_blocks_2d(x, indices) # Shift right along the length dimension def shift_right_2d_blocks(x): """Shift the second to last dimension of x right by one.""" shifted_targets = ( tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :, :-1, :]) return shifted_targets x_shifted = shift_right_2d_blocks(center_x) return x_shifted
[ "def", "get_shifted_center_blocks", "(", "x", ",", "indices", ")", ":", "center_x", "=", "gather_blocks_2d", "(", "x", ",", "indices", ")", "# Shift right along the length dimension", "def", "shift_right_2d_blocks", "(", "x", ")", ":", "\"\"\"Shift the second to last dimension of x right by one.\"\"\"", "shifted_targets", "=", "(", "tf", ".", "pad", "(", "x", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "1", ",", "0", "]", ",", "[", "0", ",", "0", "]", "]", ")", "[", ":", ",", ":", ",", ":", ",", ":", "-", "1", ",", ":", "]", ")", "return", "shifted_targets", "x_shifted", "=", "shift_right_2d_blocks", "(", "center_x", ")", "return", "x_shifted" ]
Get right shifted blocks for masked local attention 2d. Args: x: A tensor with shape [batch, heads, height, width, depth] indices: The indices to gather blocks Returns: x_shifted: a tensor of extracted blocks, each block right shifted along length.
[ "Get", "right", "shifted", "blocks", "for", "masked", "local", "attention", "2d", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3703-L3724
train
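The internal right shift is a pad-then-slice along the block-length axis; a NumPy sketch on a hypothetical [blocks, block_length, depth] array:

import numpy as np

x = np.arange(8).reshape(1, 4, 2)          # [blocks, block_length, depth]
shifted = np.pad(x, ((0, 0), (1, 0), (0, 0)), mode="constant")[:, :-1, :]
print(x[0, :, 0])        # [0 2 4 6]
print(shifted[0, :, 0])  # [0 0 2 4]: zero enters in front, last position drops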
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
right_shift_blockwise
def right_shift_blockwise(x, query_shape, name=None): """Right shifts once in every block. Args: x: a tensor of shape [batch, height, width, depth] query_shape: A 2d tuple of ints name: a string Returns: output: a tensor of the same shape as x """ with tf.variable_scope( name, default_name="right_shift_blockwise", values=[x]): x_list_shape = x.get_shape().as_list() x_shape = common_layers.shape_list(x) # Add a dummy dimension for heads. x = tf.expand_dims(x, axis=1) x = pad_to_multiple_2d(x, query_shape) padded_x_shape = common_layers.shape_list(x) # Set up q blocks. x_indices = gather_indices_2d(x, query_shape, query_shape) x_new = get_shifted_center_blocks(x, x_indices) # Put representations back into original shapes. output = scatter_blocks_2d(x_new, x_indices, padded_x_shape) # Remove the dummy head dimension. output = tf.squeeze(output, axis=1) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, x_shape[1], x_shape[2], -1]) output.set_shape(x_list_shape) return output
python
def right_shift_blockwise(x, query_shape, name=None): """Right shifts once in every block. Args: x: a tensor of shape [batch, height, width, depth] query_shape: A 2d tuple of ints name: a string Returns: output: a tensor of the same shape as x """ with tf.variable_scope( name, default_name="right_shift_blockwise", values=[x]): x_list_shape = x.get_shape().as_list() x_shape = common_layers.shape_list(x) # Add a dummy dimension for heads. x = tf.expand_dims(x, axis=1) x = pad_to_multiple_2d(x, query_shape) padded_x_shape = common_layers.shape_list(x) # Set up q blocks. x_indices = gather_indices_2d(x, query_shape, query_shape) x_new = get_shifted_center_blocks(x, x_indices) # Put representations back into original shapes. output = scatter_blocks_2d(x_new, x_indices, padded_x_shape) # Remove the dummy head dimension. output = tf.squeeze(output, axis=1) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0], [-1, x_shape[1], x_shape[2], -1]) output.set_shape(x_list_shape) return output
[ "def", "right_shift_blockwise", "(", "x", ",", "query_shape", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"right_shift_blockwise\"", ",", "values", "=", "[", "x", "]", ")", ":", "x_list_shape", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "# Add a dummy dimension for heads.", "x", "=", "tf", ".", "expand_dims", "(", "x", ",", "axis", "=", "1", ")", "x", "=", "pad_to_multiple_2d", "(", "x", ",", "query_shape", ")", "padded_x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "# Set up q blocks.", "x_indices", "=", "gather_indices_2d", "(", "x", ",", "query_shape", ",", "query_shape", ")", "x_new", "=", "get_shifted_center_blocks", "(", "x", ",", "x_indices", ")", "# Put representations back into original shapes.", "output", "=", "scatter_blocks_2d", "(", "x_new", ",", "x_indices", ",", "padded_x_shape", ")", "# Remove the dummy head dimension.", "output", "=", "tf", ".", "squeeze", "(", "output", ",", "axis", "=", "1", ")", "# Remove the padding if introduced.", "output", "=", "tf", ".", "slice", "(", "output", ",", "[", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "x_shape", "[", "1", "]", ",", "x_shape", "[", "2", "]", ",", "-", "1", "]", ")", "output", ".", "set_shape", "(", "x_list_shape", ")", "return", "output" ]
Right shifts once in every block. Args: x: a tensor of shape [batch, height, width, depth] query_shape: A 2d tuple of ints name: a string Returns: output: a tensor of the same shape as x
[ "Right", "shifts", "once", "in", "every", "block", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3727-L3757
train
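A usage sketch for right_shift_blockwise, assuming TensorFlow 1.x with tensor2tensor installed; the input is a small made-up [batch, height, width, depth] tensor so the per-block shift is easy to inspect.

import tensorflow as tf
from tensor2tensor.layers import common_attention

x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
shifted = common_attention.right_shift_blockwise(x, query_shape=(2, 2))
with tf.Session() as sess:
    print(sess.run(shifted)[0, :, :, 0])
# Within every 2x2 block the values move one step forward in raster order,
# and a zero enters at the first position of each block.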
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
masked_local_attention_2d
def masked_local_attention_2d(q, k, v, query_shape=(8, 16), memory_flange=(8, 16), name=None): """Strided block local self-attention. Each position in a query block can attend to all the generated queries in the query block, which are generated in raster scan, and positions that are generated to the left and top. The shapes are specified by query shape and memory flange. Note that if you're using this function, you do not need to right shift. Right shifting happens inside this function separately for each block. Args: q: a Tensor with shape [batch, heads, h, w, depth_k] k: a Tensor with shape [batch, heads, h, w, depth_k] v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current implementation, depth_v must be equal to depth_k. query_shape: an tuple indicating the height and width of each query block. query_shape = block_shape memory_flange: an integer indicating how much to look in height and width from each query block. memory shape = query_shape + (block_flange[0], 2*block_flange[1]) name: an optional string Returns: a Tensor of shape [batch, heads, h, w, depth_v] """ with tf.variable_scope( name, default_name="local_masked_self_attention_2d", values=[q, k, v]): v_shape = common_layers.shape_list(v) # Pad query to ensure multiple of corresponding lengths. q = pad_to_multiple_2d(q, query_shape) # Set up query blocks. q_indices = gather_indices_2d(q, query_shape, query_shape) q_new = gather_blocks_2d(q, q_indices) # Set up key and value blocks. k_flange, k_center = get_memory_region(k, query_shape, memory_flange, q_indices) v_flange, v_center = get_memory_region(v, query_shape, memory_flange, q_indices) if k_flange is not None: k_new = tf.concat([k_flange, k_center], axis=3) v_new = tf.concat([v_flange, v_center], axis=3) else: k_new = k_center v_new = v_center # Set up the masks. query_elements = np.prod(query_shape) padding_mask = None if k_flange is not None: padding_mask = tf.expand_dims( embedding_to_padding(k_flange) * -1e9, axis=-2) padding_mask = tf.tile(padding_mask, [1, 1, 1, query_elements, 1]) center_attention_bias = attention_bias_lower_triangle( np.prod(query_elements)) center_attention_bias = tf.reshape( center_attention_bias, [1, 1, 1, query_elements, query_elements]) v_center_shape = common_layers.shape_list(v_center) center_attention_bias = tf.tile( center_attention_bias, [v_center_shape[0], v_center_shape[1], v_center_shape[2], 1, 1]) if padding_mask is not None: # Combine the mask for padding and visible region. attention_bias = tf.concat([padding_mask, center_attention_bias], axis=4) else: attention_bias = center_attention_bias output = dot_product_attention( q_new, k_new, v_new, attention_bias, dropout_rate=0., name="masked_local_2d", make_image_summary=False) # Put representations back into original shapes. padded_q_shape = common_layers.shape_list(q) output = scatter_blocks_2d(output, q_indices, padded_q_shape) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0, 0], [-1, -1, v_shape[2], v_shape[3], -1]) return output
python
def masked_local_attention_2d(q, k, v, query_shape=(8, 16), memory_flange=(8, 16), name=None): """Strided block local self-attention. Each position in a query block can attend to all the generated queries in the query block, which are generated in raster scan, and positions that are generated to the left and top. The shapes are specified by query shape and memory flange. Note that if you're using this function, you do not need to right shift. Right shifting happens inside this function separately for each block. Args: q: a Tensor with shape [batch, heads, h, w, depth_k] k: a Tensor with shape [batch, heads, h, w, depth_k] v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current implementation, depth_v must be equal to depth_k. query_shape: an tuple indicating the height and width of each query block. query_shape = block_shape memory_flange: an integer indicating how much to look in height and width from each query block. memory shape = query_shape + (block_flange[0], 2*block_flange[1]) name: an optional string Returns: a Tensor of shape [batch, heads, h, w, depth_v] """ with tf.variable_scope( name, default_name="local_masked_self_attention_2d", values=[q, k, v]): v_shape = common_layers.shape_list(v) # Pad query to ensure multiple of corresponding lengths. q = pad_to_multiple_2d(q, query_shape) # Set up query blocks. q_indices = gather_indices_2d(q, query_shape, query_shape) q_new = gather_blocks_2d(q, q_indices) # Set up key and value blocks. k_flange, k_center = get_memory_region(k, query_shape, memory_flange, q_indices) v_flange, v_center = get_memory_region(v, query_shape, memory_flange, q_indices) if k_flange is not None: k_new = tf.concat([k_flange, k_center], axis=3) v_new = tf.concat([v_flange, v_center], axis=3) else: k_new = k_center v_new = v_center # Set up the masks. query_elements = np.prod(query_shape) padding_mask = None if k_flange is not None: padding_mask = tf.expand_dims( embedding_to_padding(k_flange) * -1e9, axis=-2) padding_mask = tf.tile(padding_mask, [1, 1, 1, query_elements, 1]) center_attention_bias = attention_bias_lower_triangle( np.prod(query_elements)) center_attention_bias = tf.reshape( center_attention_bias, [1, 1, 1, query_elements, query_elements]) v_center_shape = common_layers.shape_list(v_center) center_attention_bias = tf.tile( center_attention_bias, [v_center_shape[0], v_center_shape[1], v_center_shape[2], 1, 1]) if padding_mask is not None: # Combine the mask for padding and visible region. attention_bias = tf.concat([padding_mask, center_attention_bias], axis=4) else: attention_bias = center_attention_bias output = dot_product_attention( q_new, k_new, v_new, attention_bias, dropout_rate=0., name="masked_local_2d", make_image_summary=False) # Put representations back into original shapes. padded_q_shape = common_layers.shape_list(q) output = scatter_blocks_2d(output, q_indices, padded_q_shape) # Remove the padding if introduced. output = tf.slice(output, [0, 0, 0, 0, 0], [-1, -1, v_shape[2], v_shape[3], -1]) return output
[ "def", "masked_local_attention_2d", "(", "q", ",", "k", ",", "v", ",", "query_shape", "=", "(", "8", ",", "16", ")", ",", "memory_flange", "=", "(", "8", ",", "16", ")", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"local_masked_self_attention_2d\"", ",", "values", "=", "[", "q", ",", "k", ",", "v", "]", ")", ":", "v_shape", "=", "common_layers", ".", "shape_list", "(", "v", ")", "# Pad query to ensure multiple of corresponding lengths.", "q", "=", "pad_to_multiple_2d", "(", "q", ",", "query_shape", ")", "# Set up query blocks.", "q_indices", "=", "gather_indices_2d", "(", "q", ",", "query_shape", ",", "query_shape", ")", "q_new", "=", "gather_blocks_2d", "(", "q", ",", "q_indices", ")", "# Set up key and value blocks.", "k_flange", ",", "k_center", "=", "get_memory_region", "(", "k", ",", "query_shape", ",", "memory_flange", ",", "q_indices", ")", "v_flange", ",", "v_center", "=", "get_memory_region", "(", "v", ",", "query_shape", ",", "memory_flange", ",", "q_indices", ")", "if", "k_flange", "is", "not", "None", ":", "k_new", "=", "tf", ".", "concat", "(", "[", "k_flange", ",", "k_center", "]", ",", "axis", "=", "3", ")", "v_new", "=", "tf", ".", "concat", "(", "[", "v_flange", ",", "v_center", "]", ",", "axis", "=", "3", ")", "else", ":", "k_new", "=", "k_center", "v_new", "=", "v_center", "# Set up the masks.", "query_elements", "=", "np", ".", "prod", "(", "query_shape", ")", "padding_mask", "=", "None", "if", "k_flange", "is", "not", "None", ":", "padding_mask", "=", "tf", ".", "expand_dims", "(", "embedding_to_padding", "(", "k_flange", ")", "*", "-", "1e9", ",", "axis", "=", "-", "2", ")", "padding_mask", "=", "tf", ".", "tile", "(", "padding_mask", ",", "[", "1", ",", "1", ",", "1", ",", "query_elements", ",", "1", "]", ")", "center_attention_bias", "=", "attention_bias_lower_triangle", "(", "np", ".", "prod", "(", "query_elements", ")", ")", "center_attention_bias", "=", "tf", ".", "reshape", "(", "center_attention_bias", ",", "[", "1", ",", "1", ",", "1", ",", "query_elements", ",", "query_elements", "]", ")", "v_center_shape", "=", "common_layers", ".", "shape_list", "(", "v_center", ")", "center_attention_bias", "=", "tf", ".", "tile", "(", "center_attention_bias", ",", "[", "v_center_shape", "[", "0", "]", ",", "v_center_shape", "[", "1", "]", ",", "v_center_shape", "[", "2", "]", ",", "1", ",", "1", "]", ")", "if", "padding_mask", "is", "not", "None", ":", "# Combine the mask for padding and visible region.", "attention_bias", "=", "tf", ".", "concat", "(", "[", "padding_mask", ",", "center_attention_bias", "]", ",", "axis", "=", "4", ")", "else", ":", "attention_bias", "=", "center_attention_bias", "output", "=", "dot_product_attention", "(", "q_new", ",", "k_new", ",", "v_new", ",", "attention_bias", ",", "dropout_rate", "=", "0.", ",", "name", "=", "\"masked_local_2d\"", ",", "make_image_summary", "=", "False", ")", "# Put representations back into original shapes.", "padded_q_shape", "=", "common_layers", ".", "shape_list", "(", "q", ")", "output", "=", "scatter_blocks_2d", "(", "output", ",", "q_indices", ",", "padded_q_shape", ")", "# Remove the padding if introduced.", "output", "=", "tf", ".", "slice", "(", "output", ",", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", "]", ",", "[", "-", "1", ",", "-", "1", ",", "v_shape", "[", "2", "]", ",", "v_shape", "[", "3", "]", ",", "-", "1", "]", ")", "return", "output" ]
Strided block local self-attention.

Each position in a query block can attend to all the generated queries in
the query block, which are generated in raster scan, and positions that are
generated to the left and top. The shapes are specified by query shape and
memory flange. Note that if you're using this function, you do not need to
right shift. Right shifting happens inside this function separately for each
block.

Args:
  q: a Tensor with shape [batch, heads, h, w, depth_k]
  k: a Tensor with shape [batch, heads, h, w, depth_k]
  v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current
    implementation, depth_v must be equal to depth_k.
  query_shape: a tuple indicating the height and width of each query block.
    query_shape = block_shape
  memory_flange: a tuple of ints indicating how much to look in height and
    width from each query block.
    memory shape = query_shape + (block_flange[0], 2*block_flange[1])
  name: an optional string

Returns:
  a Tensor of shape [batch, heads, h, w, depth_v]
[ "Strided", "block", "local", "self", "-", "attention", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3760-L3850
train
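A usage sketch for masked_local_attention_2d, assuming TensorFlow 1.x with tensor2tensor installed; per the docstring no separate right shift of the inputs is needed, and the shapes and block sizes below are illustrative assumptions.

import tensorflow as tf
from tensor2tensor.layers import common_attention

q = tf.random_normal([2, 4, 8, 16, 32])   # [batch, heads, h, w, depth]
k = tf.random_normal([2, 4, 8, 16, 32])
v = tf.random_normal([2, 4, 8, 16, 32])

out = common_attention.masked_local_attention_2d(
    q, k, v, query_shape=(4, 8), memory_flange=(4, 8))
print(out.shape)  # (2, 4, 8, 16, 32)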
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
compute_attention_component
def compute_attention_component(antecedent, total_depth, filter_width=1, padding="VALID", name="c", vars_3d_num_heads=0, layer_collection=None): """Computes attention compoenent (query, key or value). Args: antecedent: a Tensor with shape [batch, length, channels] total_depth: an integer filter_width: An integer specifying how wide you want the attention component to be. padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. name: a string specifying scope name. vars_3d_num_heads: an optional integer (if we want to use 3d variables) layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. Returns: c : [batch, length, depth] tensor """ if layer_collection is not None: if filter_width != 1 or vars_3d_num_heads != 0: raise ValueError( "KFAC implementation only supports filter_width=1 (actual: {}) and " "vars_3d_num_heads=0 (actual: {}).".format( filter_width, vars_3d_num_heads)) if vars_3d_num_heads > 0: assert filter_width == 1 input_depth = antecedent.get_shape().as_list()[-1] depth_per_head = total_depth // vars_3d_num_heads initializer_stddev = input_depth ** -0.5 if "q" in name: initializer_stddev *= depth_per_head ** -0.5 var = tf.get_variable( name, [input_depth, vars_3d_num_heads, total_depth // vars_3d_num_heads], initializer=tf.random_normal_initializer(stddev=initializer_stddev)) var = tf.cast(var, antecedent.dtype) var = tf.reshape(var, [input_depth, total_depth]) return tf.tensordot(antecedent, var, axes=1) if filter_width == 1: return common_layers.dense( antecedent, total_depth, use_bias=False, name=name, layer_collection=layer_collection) else: return common_layers.conv1d( antecedent, total_depth, filter_width, padding=padding, name=name)
python
def compute_attention_component(antecedent, total_depth, filter_width=1, padding="VALID", name="c", vars_3d_num_heads=0, layer_collection=None): """Computes attention compoenent (query, key or value). Args: antecedent: a Tensor with shape [batch, length, channels] total_depth: an integer filter_width: An integer specifying how wide you want the attention component to be. padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. name: a string specifying scope name. vars_3d_num_heads: an optional integer (if we want to use 3d variables) layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. Returns: c : [batch, length, depth] tensor """ if layer_collection is not None: if filter_width != 1 or vars_3d_num_heads != 0: raise ValueError( "KFAC implementation only supports filter_width=1 (actual: {}) and " "vars_3d_num_heads=0 (actual: {}).".format( filter_width, vars_3d_num_heads)) if vars_3d_num_heads > 0: assert filter_width == 1 input_depth = antecedent.get_shape().as_list()[-1] depth_per_head = total_depth // vars_3d_num_heads initializer_stddev = input_depth ** -0.5 if "q" in name: initializer_stddev *= depth_per_head ** -0.5 var = tf.get_variable( name, [input_depth, vars_3d_num_heads, total_depth // vars_3d_num_heads], initializer=tf.random_normal_initializer(stddev=initializer_stddev)) var = tf.cast(var, antecedent.dtype) var = tf.reshape(var, [input_depth, total_depth]) return tf.tensordot(antecedent, var, axes=1) if filter_width == 1: return common_layers.dense( antecedent, total_depth, use_bias=False, name=name, layer_collection=layer_collection) else: return common_layers.conv1d( antecedent, total_depth, filter_width, padding=padding, name=name)
[ "def", "compute_attention_component", "(", "antecedent", ",", "total_depth", ",", "filter_width", "=", "1", ",", "padding", "=", "\"VALID\"", ",", "name", "=", "\"c\"", ",", "vars_3d_num_heads", "=", "0", ",", "layer_collection", "=", "None", ")", ":", "if", "layer_collection", "is", "not", "None", ":", "if", "filter_width", "!=", "1", "or", "vars_3d_num_heads", "!=", "0", ":", "raise", "ValueError", "(", "\"KFAC implementation only supports filter_width=1 (actual: {}) and \"", "\"vars_3d_num_heads=0 (actual: {}).\"", ".", "format", "(", "filter_width", ",", "vars_3d_num_heads", ")", ")", "if", "vars_3d_num_heads", ">", "0", ":", "assert", "filter_width", "==", "1", "input_depth", "=", "antecedent", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "-", "1", "]", "depth_per_head", "=", "total_depth", "//", "vars_3d_num_heads", "initializer_stddev", "=", "input_depth", "**", "-", "0.5", "if", "\"q\"", "in", "name", ":", "initializer_stddev", "*=", "depth_per_head", "**", "-", "0.5", "var", "=", "tf", ".", "get_variable", "(", "name", ",", "[", "input_depth", ",", "vars_3d_num_heads", ",", "total_depth", "//", "vars_3d_num_heads", "]", ",", "initializer", "=", "tf", ".", "random_normal_initializer", "(", "stddev", "=", "initializer_stddev", ")", ")", "var", "=", "tf", ".", "cast", "(", "var", ",", "antecedent", ".", "dtype", ")", "var", "=", "tf", ".", "reshape", "(", "var", ",", "[", "input_depth", ",", "total_depth", "]", ")", "return", "tf", ".", "tensordot", "(", "antecedent", ",", "var", ",", "axes", "=", "1", ")", "if", "filter_width", "==", "1", ":", "return", "common_layers", ".", "dense", "(", "antecedent", ",", "total_depth", ",", "use_bias", "=", "False", ",", "name", "=", "name", ",", "layer_collection", "=", "layer_collection", ")", "else", ":", "return", "common_layers", ".", "conv1d", "(", "antecedent", ",", "total_depth", ",", "filter_width", ",", "padding", "=", "padding", ",", "name", "=", "name", ")" ]
Computes attention component (query, key or value).

Args:
  antecedent: a Tensor with shape [batch, length, channels]
  total_depth: an integer
  filter_width: An integer specifying how wide you want the attention
    component to be.
  padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
  name: a string specifying scope name.
  vars_3d_num_heads: an optional integer (if we want to use 3d variables)
  layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
    KFAC optimizer. Default is None.

Returns:
  c : [batch, length, depth] tensor
[ "Computes", "attention", "compoenent", "(", "query", "key", "or", "value", ")", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3853-L3903
train
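A minimal usage sketch for compute_attention_component, assuming TF1-style graph mode and that the module is importable as tensor2tensor.layers.common_attention; the shapes and depths below are illustrative only.

import tensorflow as tf
from tensor2tensor.layers import common_attention

antecedent = tf.random_normal([2, 5, 16])   # [batch, length, channels]

# filter_width=1: a plain dense projection to total_depth.
q = common_attention.compute_attention_component(
    antecedent, total_depth=32, filter_width=1, name="q")

# filter_width>1: falls through to a 1-D convolution over the length axis.
k = common_attention.compute_attention_component(
    antecedent, total_depth=32, filter_width=3, padding="SAME", name="k")

# Both q and k have shape [2, 5, 32].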
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
compute_qkv
def compute_qkv(query_antecedent,
                memory_antecedent,
                total_key_depth,
                total_value_depth,
                q_filter_width=1,
                kv_filter_width=1,
                q_padding="VALID",
                kv_padding="VALID",
                vars_3d_num_heads=0,
                layer_collection=None):
  """Computes query, key and value.

  Args:
    query_antecedent: a Tensor with shape [batch, length_q, channels]
    memory_antecedent: a Tensor with shape [batch, length_m, channels]
    total_key_depth: an integer
    total_value_depth: an integer
    q_filter_width: An integer specifying how wide you want the query to be.
    kv_filter_width: An integer specifying how wide you want the keys and
      values to be.
    q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    vars_3d_num_heads: an optional integer (if we want to use 3d variables)
    layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
      KFAC optimizer. Default is None.

  Returns:
    q, k, v : [batch, length, depth] tensors
  """
  if memory_antecedent is None:
    memory_antecedent = query_antecedent
  q = compute_attention_component(
      query_antecedent,
      total_key_depth,
      q_filter_width,
      q_padding,
      "q",
      vars_3d_num_heads=vars_3d_num_heads,
      layer_collection=layer_collection)
  k = compute_attention_component(
      memory_antecedent,
      total_key_depth,
      kv_filter_width,
      kv_padding,
      "k",
      vars_3d_num_heads=vars_3d_num_heads,
      layer_collection=layer_collection)
  v = compute_attention_component(
      memory_antecedent,
      total_value_depth,
      kv_filter_width,
      kv_padding,
      "v",
      vars_3d_num_heads=vars_3d_num_heads,
      layer_collection=layer_collection)
  return q, k, v
python
def compute_qkv(query_antecedent,
                memory_antecedent,
                total_key_depth,
                total_value_depth,
                q_filter_width=1,
                kv_filter_width=1,
                q_padding="VALID",
                kv_padding="VALID",
                vars_3d_num_heads=0,
                layer_collection=None):
  """Computes query, key and value.

  Args:
    query_antecedent: a Tensor with shape [batch, length_q, channels]
    memory_antecedent: a Tensor with shape [batch, length_m, channels]
    total_key_depth: an integer
    total_value_depth: an integer
    q_filter_width: An integer specifying how wide you want the query to be.
    kv_filter_width: An integer specifying how wide you want the keys and
      values to be.
    q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    vars_3d_num_heads: an optional integer (if we want to use 3d variables)
    layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
      KFAC optimizer. Default is None.

  Returns:
    q, k, v : [batch, length, depth] tensors
  """
  if memory_antecedent is None:
    memory_antecedent = query_antecedent
  q = compute_attention_component(
      query_antecedent,
      total_key_depth,
      q_filter_width,
      q_padding,
      "q",
      vars_3d_num_heads=vars_3d_num_heads,
      layer_collection=layer_collection)
  k = compute_attention_component(
      memory_antecedent,
      total_key_depth,
      kv_filter_width,
      kv_padding,
      "k",
      vars_3d_num_heads=vars_3d_num_heads,
      layer_collection=layer_collection)
  v = compute_attention_component(
      memory_antecedent,
      total_value_depth,
      kv_filter_width,
      kv_padding,
      "v",
      vars_3d_num_heads=vars_3d_num_heads,
      layer_collection=layer_collection)
  return q, k, v
[ "def", "compute_qkv", "(", "query_antecedent", ",", "memory_antecedent", ",", "total_key_depth", ",", "total_value_depth", ",", "q_filter_width", "=", "1", ",", "kv_filter_width", "=", "1", ",", "q_padding", "=", "\"VALID\"", ",", "kv_padding", "=", "\"VALID\"", ",", "vars_3d_num_heads", "=", "0", ",", "layer_collection", "=", "None", ")", ":", "if", "memory_antecedent", "is", "None", ":", "memory_antecedent", "=", "query_antecedent", "q", "=", "compute_attention_component", "(", "query_antecedent", ",", "total_key_depth", ",", "q_filter_width", ",", "q_padding", ",", "\"q\"", ",", "vars_3d_num_heads", "=", "vars_3d_num_heads", ",", "layer_collection", "=", "layer_collection", ")", "k", "=", "compute_attention_component", "(", "memory_antecedent", ",", "total_key_depth", ",", "kv_filter_width", ",", "kv_padding", ",", "\"k\"", ",", "vars_3d_num_heads", "=", "vars_3d_num_heads", ",", "layer_collection", "=", "layer_collection", ")", "v", "=", "compute_attention_component", "(", "memory_antecedent", ",", "total_value_depth", ",", "kv_filter_width", ",", "kv_padding", ",", "\"v\"", ",", "vars_3d_num_heads", "=", "vars_3d_num_heads", ",", "layer_collection", "=", "layer_collection", ")", "return", "q", ",", "k", ",", "v" ]
Computes query, key and value.

Args:
  query_antecedent: a Tensor with shape [batch, length_q, channels]
  memory_antecedent: a Tensor with shape [batch, length_m, channels]
  total_key_depth: an integer
  total_value_depth: an integer
  q_filter_width: An integer specifying how wide you want the query to be.
  kv_filter_width: An integer specifying how wide you want the keys and
    values to be.
  q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
  kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
  vars_3d_num_heads: an optional integer (if we want to use 3d variables)
  layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
    KFAC optimizer. Default is None.

Returns:
  q, k, v : [batch, length, depth] tensors
[ "Computes", "query", "key", "and", "value", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3906-L3961
train
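A minimal sketch of compute_qkv for the self-attention case, under the same import assumption; passing memory_antecedent=None makes the keys and values projections of the query antecedent.

import tensorflow as tf
from tensor2tensor.layers import common_attention

x = tf.random_normal([2, 7, 64])   # [batch, length, channels]
q, k, v = common_attention.compute_qkv(
    query_antecedent=x,
    memory_antecedent=None,   # self-attention: k and v are computed from x
    total_key_depth=128,
    total_value_depth=256)
# q and k have shape [2, 7, 128]; v has shape [2, 7, 256].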
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
multihead_attention
def multihead_attention(query_antecedent, memory_antecedent, bias, total_key_depth, total_value_depth, output_depth, num_heads, dropout_rate, attention_type="dot_product", max_relative_position=None, heads_share_relative_embedding=False, add_relative_to_values=False, image_shapes=None, block_length=128, block_width=128, q_filter_width=1, kv_filter_width=1, q_padding="VALID", kv_padding="VALID", cache=None, gap_size=0, num_memory_blocks=2, name="multihead_attention", save_weights_to=None, make_image_summary=True, dropout_broadcast_dims=None, vars_3d=False, layer_collection=None, recurrent_memory=None, chunk_number=None, hard_attention_k=0, max_area_width=1, max_area_height=1, memory_height=1, area_key_mode="mean", area_value_mode="sum", training=True, **kwargs): """Multihead scaled-dot-product attention with input/output transformations. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] or None bias: bias Tensor (see attention_bias()) total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number attention_type: a string, either "dot_product", "dot_product_relative", "local_mask_right", "local_unmasked", "masked_dilated_1d", "unmasked_dilated_1d", graph, or any attention function with the signature (query, key, value, **kwargs) max_relative_position: Maximum distance between inputs to generate unique relation embeddings for. Only relevant when using "dot_product_relative" attention. heads_share_relative_embedding: boolean to share relative embeddings add_relative_to_values: a boolean for whether to add relative component to values. image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() block_length: an integer - relevant for "local_mask_right" block_width: an integer - relevant for "local_unmasked" q_filter_width: An integer specifying how wide you want the query to be. kv_filter_width: An integer specifying how wide you want the keys and values to be. q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID": no padding. cache: dict containing Tensors which are the results of previous attentions, used for fast decoding. Expects the dict to contrain two keys ('k' and 'v'), for the initial call the values for these keys should be empty Tensors of the appropriate shape. 'k' [batch_size, 0, key_channels] 'v' [batch_size, 0, value_channels] gap_size: Integer option for dilated attention to indicate spacing between memory blocks. num_memory_blocks: Integer option to indicate how many memory blocks to look at. name: an optional string. save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. vars_3d: use 3-dimensional variables for input/output transformations layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. recurrent_memory: An optional transformer_memory.RecurrentMemory, which retains state across chunks. Default is None. 
chunk_number: an optional integer Tensor with shape [batch] used to operate the recurrent_memory. hard_attention_k: integer, if > 0 triggers hard attention (picking top-k). max_area_width: the max width allowed for an area. max_area_height: the max height allowed for an area. memory_height: the height of the memory. area_key_mode: the mode for computing area keys, which can be "mean", "concat", "sum", "sample_concat", and "sample_sum". area_value_mode: the mode for computing area values, which can be either "mean", or "sum". training: indicating if it is in the training mode. **kwargs (dict): Parameters for the attention function. Caching: WARNING: For decoder self-attention, i.e. when memory_antecedent == None, the caching assumes that the bias contains future masking. The caching works by saving all the previous key and value values so that you are able to send just the last query location to this attention function. I.e. if the cache dict is provided it assumes the query is of the shape [batch_size, 1, hidden_dim] rather than the full memory. Returns: The result of the attention transformation. The output shape is [batch_size, length_q, hidden_dim] unless the cache dict is provided in which case only the last memory position is calculated and the output shape is [batch_size, 1, hidden_dim] Optionally returns an additional loss parameters (ex: load balance loss for the experts) returned by the attention_type function. Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads. """ if total_key_depth % num_heads != 0: raise ValueError("Key depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_key_depth, num_heads)) if total_value_depth % num_heads != 0: raise ValueError("Value depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_value_depth, num_heads)) vars_3d_num_heads = num_heads if vars_3d else 0 if layer_collection is not None: if cache is not None: raise ValueError("KFAC implementation only supports cache is None.") if vars_3d: raise ValueError("KFAC implementation does not support 3d vars.") if recurrent_memory is not None: if memory_antecedent is not None: raise ValueError("Recurrent memory requires memory_antecedent is None.") if cache is not None: raise ValueError("Cache is not supported when using recurrent memory.") if vars_3d: raise ValueError("3d vars are not supported when using recurrent memory.") if layer_collection is not None: raise ValueError("KFAC is not supported when using recurrent memory.") if chunk_number is None: raise ValueError("chunk_number is required when using recurrent memory.") with tf.variable_scope(name, default_name="multihead_attention", values=[query_antecedent, memory_antecedent]): if recurrent_memory is not None: ( recurrent_memory_transaction, query_antecedent, memory_antecedent, bias, ) = recurrent_memory.pre_attention( chunk_number, query_antecedent, memory_antecedent, bias, ) if cache is None or memory_antecedent is None: q, k, v = compute_qkv(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, q_filter_width, kv_filter_width, q_padding, kv_padding, vars_3d_num_heads=vars_3d_num_heads, layer_collection=layer_collection) if cache is not None: if attention_type not in ["dot_product", "dot_product_relative"]: # TODO(petershaw): Support caching when using relative position # representations, i.e. "dot_product_relative" attention. 
raise NotImplementedError( "Caching is not guaranteed to work with attention types other than" " dot_product.") if bias is None: raise ValueError("Bias required for caching. See function docstring " "for details.") if memory_antecedent is not None: # Encoder-Decoder Attention Cache q = compute_attention_component(query_antecedent, total_key_depth, q_filter_width, q_padding, "q", vars_3d_num_heads=vars_3d_num_heads) k = cache["k_encdec"] v = cache["v_encdec"] else: k = split_heads(k, num_heads) v = split_heads(v, num_heads) decode_loop_step = kwargs.get("decode_loop_step") if decode_loop_step is None: k = cache["k"] = tf.concat([cache["k"], k], axis=2) v = cache["v"] = tf.concat([cache["v"], v], axis=2) else: # Inplace update is required for inference on TPU. # Inplace_ops only supports inplace_update on the first dimension. # The performance of current implementation is better than updating # the tensor by adding the result of matmul(one_hot, # update_in_current_step) tmp_k = tf.transpose(cache["k"], perm=[2, 0, 1, 3]) tmp_k = inplace_ops.alias_inplace_update( tmp_k, decode_loop_step, tf.squeeze(k, axis=2)) k = cache["k"] = tf.transpose(tmp_k, perm=[1, 2, 0, 3]) tmp_v = tf.transpose(cache["v"], perm=[2, 0, 1, 3]) tmp_v = inplace_ops.alias_inplace_update( tmp_v, decode_loop_step, tf.squeeze(v, axis=2)) v = cache["v"] = tf.transpose(tmp_v, perm=[1, 2, 0, 3]) q = split_heads(q, num_heads) if cache is None: k = split_heads(k, num_heads) v = split_heads(v, num_heads) key_depth_per_head = total_key_depth // num_heads if not vars_3d: q *= key_depth_per_head**-0.5 additional_returned_value = None if callable(attention_type): # Generic way to extend multihead_attention x = attention_type(q, k, v, **kwargs) if isinstance(x, tuple): x, additional_returned_value = x # Unpack elif attention_type == "dot_product": if max_area_width > 1 or max_area_height > 1: x = area_attention.dot_product_area_attention( q, k, v, bias, dropout_rate, image_shapes, save_weights_to=save_weights_to, dropout_broadcast_dims=dropout_broadcast_dims, max_area_width=max_area_width, max_area_height=max_area_height, memory_height=memory_height, area_key_mode=area_key_mode, area_value_mode=area_value_mode, training=training) else: x = dot_product_attention(q, k, v, bias, dropout_rate, image_shapes, save_weights_to=save_weights_to, make_image_summary=make_image_summary, dropout_broadcast_dims=dropout_broadcast_dims, activation_dtype=kwargs.get( "activation_dtype"), hard_attention_k=hard_attention_k) elif attention_type == "dot_product_relative": x = dot_product_attention_relative( q, k, v, bias, max_relative_position, dropout_rate, image_shapes, save_weights_to=save_weights_to, make_image_summary=make_image_summary, cache=cache is not None, allow_memory=recurrent_memory is not None, hard_attention_k=hard_attention_k) elif attention_type == "dot_product_unmasked_relative_v2": x = dot_product_unmasked_self_attention_relative_v2( q, k, v, bias, max_relative_position, dropout_rate, image_shapes, make_image_summary=make_image_summary, dropout_broadcast_dims=dropout_broadcast_dims, heads_share_relative_embedding=heads_share_relative_embedding, add_relative_to_values=add_relative_to_values) elif attention_type == "dot_product_relative_v2": x = dot_product_self_attention_relative_v2( q, k, v, bias, max_relative_position, dropout_rate, image_shapes, make_image_summary=make_image_summary, dropout_broadcast_dims=dropout_broadcast_dims, heads_share_relative_embedding=heads_share_relative_embedding, add_relative_to_values=add_relative_to_values) 
elif attention_type == "local_within_block_mask_right": x = masked_within_block_local_attention_1d( q, k, v, block_length=block_length) elif attention_type == "local_relative_mask_right": x = masked_relative_local_attention_1d( q, k, v, block_length=block_length, make_image_summary=make_image_summary, dropout_rate=dropout_rate, heads_share_relative_embedding=heads_share_relative_embedding, add_relative_to_values=add_relative_to_values, name="masked_relative_local_attention_1d") elif attention_type == "local_mask_right": x = masked_local_attention_1d( q, k, v, block_length=block_length, make_image_summary=make_image_summary) elif attention_type == "local_unmasked": x = local_attention_1d( q, k, v, block_length=block_length, filter_width=block_width) elif attention_type == "masked_dilated_1d": x = masked_dilated_self_attention_1d(q, k, v, block_length, block_width, gap_size, num_memory_blocks) else: assert attention_type == "unmasked_dilated_1d" x = dilated_self_attention_1d(q, k, v, block_length, block_width, gap_size, num_memory_blocks) x = combine_heads(x) # Set last dim specifically. x.set_shape(x.shape.as_list()[:-1] + [total_value_depth]) if vars_3d: o_var = tf.get_variable( "o", [num_heads, total_value_depth // num_heads, output_depth]) o_var = tf.cast(o_var, x.dtype) o_var = tf.reshape(o_var, [total_value_depth, output_depth]) x = tf.tensordot(x, o_var, axes=1) else: x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform", layer_collection=layer_collection) if recurrent_memory is not None: x = recurrent_memory.post_attention(recurrent_memory_transaction, x) if additional_returned_value is not None: return x, additional_returned_value return x
python
def multihead_attention(query_antecedent, memory_antecedent, bias, total_key_depth, total_value_depth, output_depth, num_heads, dropout_rate, attention_type="dot_product", max_relative_position=None, heads_share_relative_embedding=False, add_relative_to_values=False, image_shapes=None, block_length=128, block_width=128, q_filter_width=1, kv_filter_width=1, q_padding="VALID", kv_padding="VALID", cache=None, gap_size=0, num_memory_blocks=2, name="multihead_attention", save_weights_to=None, make_image_summary=True, dropout_broadcast_dims=None, vars_3d=False, layer_collection=None, recurrent_memory=None, chunk_number=None, hard_attention_k=0, max_area_width=1, max_area_height=1, memory_height=1, area_key_mode="mean", area_value_mode="sum", training=True, **kwargs): """Multihead scaled-dot-product attention with input/output transformations. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] or None bias: bias Tensor (see attention_bias()) total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number attention_type: a string, either "dot_product", "dot_product_relative", "local_mask_right", "local_unmasked", "masked_dilated_1d", "unmasked_dilated_1d", graph, or any attention function with the signature (query, key, value, **kwargs) max_relative_position: Maximum distance between inputs to generate unique relation embeddings for. Only relevant when using "dot_product_relative" attention. heads_share_relative_embedding: boolean to share relative embeddings add_relative_to_values: a boolean for whether to add relative component to values. image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() block_length: an integer - relevant for "local_mask_right" block_width: an integer - relevant for "local_unmasked" q_filter_width: An integer specifying how wide you want the query to be. kv_filter_width: An integer specifying how wide you want the keys and values to be. q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID": no padding. cache: dict containing Tensors which are the results of previous attentions, used for fast decoding. Expects the dict to contrain two keys ('k' and 'v'), for the initial call the values for these keys should be empty Tensors of the appropriate shape. 'k' [batch_size, 0, key_channels] 'v' [batch_size, 0, value_channels] gap_size: Integer option for dilated attention to indicate spacing between memory blocks. num_memory_blocks: Integer option to indicate how many memory blocks to look at. name: an optional string. save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. vars_3d: use 3-dimensional variables for input/output transformations layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. recurrent_memory: An optional transformer_memory.RecurrentMemory, which retains state across chunks. Default is None. 
chunk_number: an optional integer Tensor with shape [batch] used to operate the recurrent_memory. hard_attention_k: integer, if > 0 triggers hard attention (picking top-k). max_area_width: the max width allowed for an area. max_area_height: the max height allowed for an area. memory_height: the height of the memory. area_key_mode: the mode for computing area keys, which can be "mean", "concat", "sum", "sample_concat", and "sample_sum". area_value_mode: the mode for computing area values, which can be either "mean", or "sum". training: indicating if it is in the training mode. **kwargs (dict): Parameters for the attention function. Caching: WARNING: For decoder self-attention, i.e. when memory_antecedent == None, the caching assumes that the bias contains future masking. The caching works by saving all the previous key and value values so that you are able to send just the last query location to this attention function. I.e. if the cache dict is provided it assumes the query is of the shape [batch_size, 1, hidden_dim] rather than the full memory. Returns: The result of the attention transformation. The output shape is [batch_size, length_q, hidden_dim] unless the cache dict is provided in which case only the last memory position is calculated and the output shape is [batch_size, 1, hidden_dim] Optionally returns an additional loss parameters (ex: load balance loss for the experts) returned by the attention_type function. Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads. """ if total_key_depth % num_heads != 0: raise ValueError("Key depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_key_depth, num_heads)) if total_value_depth % num_heads != 0: raise ValueError("Value depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_value_depth, num_heads)) vars_3d_num_heads = num_heads if vars_3d else 0 if layer_collection is not None: if cache is not None: raise ValueError("KFAC implementation only supports cache is None.") if vars_3d: raise ValueError("KFAC implementation does not support 3d vars.") if recurrent_memory is not None: if memory_antecedent is not None: raise ValueError("Recurrent memory requires memory_antecedent is None.") if cache is not None: raise ValueError("Cache is not supported when using recurrent memory.") if vars_3d: raise ValueError("3d vars are not supported when using recurrent memory.") if layer_collection is not None: raise ValueError("KFAC is not supported when using recurrent memory.") if chunk_number is None: raise ValueError("chunk_number is required when using recurrent memory.") with tf.variable_scope(name, default_name="multihead_attention", values=[query_antecedent, memory_antecedent]): if recurrent_memory is not None: ( recurrent_memory_transaction, query_antecedent, memory_antecedent, bias, ) = recurrent_memory.pre_attention( chunk_number, query_antecedent, memory_antecedent, bias, ) if cache is None or memory_antecedent is None: q, k, v = compute_qkv(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, q_filter_width, kv_filter_width, q_padding, kv_padding, vars_3d_num_heads=vars_3d_num_heads, layer_collection=layer_collection) if cache is not None: if attention_type not in ["dot_product", "dot_product_relative"]: # TODO(petershaw): Support caching when using relative position # representations, i.e. "dot_product_relative" attention. 
raise NotImplementedError( "Caching is not guaranteed to work with attention types other than" " dot_product.") if bias is None: raise ValueError("Bias required for caching. See function docstring " "for details.") if memory_antecedent is not None: # Encoder-Decoder Attention Cache q = compute_attention_component(query_antecedent, total_key_depth, q_filter_width, q_padding, "q", vars_3d_num_heads=vars_3d_num_heads) k = cache["k_encdec"] v = cache["v_encdec"] else: k = split_heads(k, num_heads) v = split_heads(v, num_heads) decode_loop_step = kwargs.get("decode_loop_step") if decode_loop_step is None: k = cache["k"] = tf.concat([cache["k"], k], axis=2) v = cache["v"] = tf.concat([cache["v"], v], axis=2) else: # Inplace update is required for inference on TPU. # Inplace_ops only supports inplace_update on the first dimension. # The performance of current implementation is better than updating # the tensor by adding the result of matmul(one_hot, # update_in_current_step) tmp_k = tf.transpose(cache["k"], perm=[2, 0, 1, 3]) tmp_k = inplace_ops.alias_inplace_update( tmp_k, decode_loop_step, tf.squeeze(k, axis=2)) k = cache["k"] = tf.transpose(tmp_k, perm=[1, 2, 0, 3]) tmp_v = tf.transpose(cache["v"], perm=[2, 0, 1, 3]) tmp_v = inplace_ops.alias_inplace_update( tmp_v, decode_loop_step, tf.squeeze(v, axis=2)) v = cache["v"] = tf.transpose(tmp_v, perm=[1, 2, 0, 3]) q = split_heads(q, num_heads) if cache is None: k = split_heads(k, num_heads) v = split_heads(v, num_heads) key_depth_per_head = total_key_depth // num_heads if not vars_3d: q *= key_depth_per_head**-0.5 additional_returned_value = None if callable(attention_type): # Generic way to extend multihead_attention x = attention_type(q, k, v, **kwargs) if isinstance(x, tuple): x, additional_returned_value = x # Unpack elif attention_type == "dot_product": if max_area_width > 1 or max_area_height > 1: x = area_attention.dot_product_area_attention( q, k, v, bias, dropout_rate, image_shapes, save_weights_to=save_weights_to, dropout_broadcast_dims=dropout_broadcast_dims, max_area_width=max_area_width, max_area_height=max_area_height, memory_height=memory_height, area_key_mode=area_key_mode, area_value_mode=area_value_mode, training=training) else: x = dot_product_attention(q, k, v, bias, dropout_rate, image_shapes, save_weights_to=save_weights_to, make_image_summary=make_image_summary, dropout_broadcast_dims=dropout_broadcast_dims, activation_dtype=kwargs.get( "activation_dtype"), hard_attention_k=hard_attention_k) elif attention_type == "dot_product_relative": x = dot_product_attention_relative( q, k, v, bias, max_relative_position, dropout_rate, image_shapes, save_weights_to=save_weights_to, make_image_summary=make_image_summary, cache=cache is not None, allow_memory=recurrent_memory is not None, hard_attention_k=hard_attention_k) elif attention_type == "dot_product_unmasked_relative_v2": x = dot_product_unmasked_self_attention_relative_v2( q, k, v, bias, max_relative_position, dropout_rate, image_shapes, make_image_summary=make_image_summary, dropout_broadcast_dims=dropout_broadcast_dims, heads_share_relative_embedding=heads_share_relative_embedding, add_relative_to_values=add_relative_to_values) elif attention_type == "dot_product_relative_v2": x = dot_product_self_attention_relative_v2( q, k, v, bias, max_relative_position, dropout_rate, image_shapes, make_image_summary=make_image_summary, dropout_broadcast_dims=dropout_broadcast_dims, heads_share_relative_embedding=heads_share_relative_embedding, add_relative_to_values=add_relative_to_values) 
elif attention_type == "local_within_block_mask_right": x = masked_within_block_local_attention_1d( q, k, v, block_length=block_length) elif attention_type == "local_relative_mask_right": x = masked_relative_local_attention_1d( q, k, v, block_length=block_length, make_image_summary=make_image_summary, dropout_rate=dropout_rate, heads_share_relative_embedding=heads_share_relative_embedding, add_relative_to_values=add_relative_to_values, name="masked_relative_local_attention_1d") elif attention_type == "local_mask_right": x = masked_local_attention_1d( q, k, v, block_length=block_length, make_image_summary=make_image_summary) elif attention_type == "local_unmasked": x = local_attention_1d( q, k, v, block_length=block_length, filter_width=block_width) elif attention_type == "masked_dilated_1d": x = masked_dilated_self_attention_1d(q, k, v, block_length, block_width, gap_size, num_memory_blocks) else: assert attention_type == "unmasked_dilated_1d" x = dilated_self_attention_1d(q, k, v, block_length, block_width, gap_size, num_memory_blocks) x = combine_heads(x) # Set last dim specifically. x.set_shape(x.shape.as_list()[:-1] + [total_value_depth]) if vars_3d: o_var = tf.get_variable( "o", [num_heads, total_value_depth // num_heads, output_depth]) o_var = tf.cast(o_var, x.dtype) o_var = tf.reshape(o_var, [total_value_depth, output_depth]) x = tf.tensordot(x, o_var, axes=1) else: x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform", layer_collection=layer_collection) if recurrent_memory is not None: x = recurrent_memory.post_attention(recurrent_memory_transaction, x) if additional_returned_value is not None: return x, additional_returned_value return x
[ "def", "multihead_attention", "(", "query_antecedent", ",", "memory_antecedent", ",", "bias", ",", "total_key_depth", ",", "total_value_depth", ",", "output_depth", ",", "num_heads", ",", "dropout_rate", ",", "attention_type", "=", "\"dot_product\"", ",", "max_relative_position", "=", "None", ",", "heads_share_relative_embedding", "=", "False", ",", "add_relative_to_values", "=", "False", ",", "image_shapes", "=", "None", ",", "block_length", "=", "128", ",", "block_width", "=", "128", ",", "q_filter_width", "=", "1", ",", "kv_filter_width", "=", "1", ",", "q_padding", "=", "\"VALID\"", ",", "kv_padding", "=", "\"VALID\"", ",", "cache", "=", "None", ",", "gap_size", "=", "0", ",", "num_memory_blocks", "=", "2", ",", "name", "=", "\"multihead_attention\"", ",", "save_weights_to", "=", "None", ",", "make_image_summary", "=", "True", ",", "dropout_broadcast_dims", "=", "None", ",", "vars_3d", "=", "False", ",", "layer_collection", "=", "None", ",", "recurrent_memory", "=", "None", ",", "chunk_number", "=", "None", ",", "hard_attention_k", "=", "0", ",", "max_area_width", "=", "1", ",", "max_area_height", "=", "1", ",", "memory_height", "=", "1", ",", "area_key_mode", "=", "\"mean\"", ",", "area_value_mode", "=", "\"sum\"", ",", "training", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "total_key_depth", "%", "num_heads", "!=", "0", ":", "raise", "ValueError", "(", "\"Key depth (%d) must be divisible by the number of \"", "\"attention heads (%d).\"", "%", "(", "total_key_depth", ",", "num_heads", ")", ")", "if", "total_value_depth", "%", "num_heads", "!=", "0", ":", "raise", "ValueError", "(", "\"Value depth (%d) must be divisible by the number of \"", "\"attention heads (%d).\"", "%", "(", "total_value_depth", ",", "num_heads", ")", ")", "vars_3d_num_heads", "=", "num_heads", "if", "vars_3d", "else", "0", "if", "layer_collection", "is", "not", "None", ":", "if", "cache", "is", "not", "None", ":", "raise", "ValueError", "(", "\"KFAC implementation only supports cache is None.\"", ")", "if", "vars_3d", ":", "raise", "ValueError", "(", "\"KFAC implementation does not support 3d vars.\"", ")", "if", "recurrent_memory", "is", "not", "None", ":", "if", "memory_antecedent", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Recurrent memory requires memory_antecedent is None.\"", ")", "if", "cache", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cache is not supported when using recurrent memory.\"", ")", "if", "vars_3d", ":", "raise", "ValueError", "(", "\"3d vars are not supported when using recurrent memory.\"", ")", "if", "layer_collection", "is", "not", "None", ":", "raise", "ValueError", "(", "\"KFAC is not supported when using recurrent memory.\"", ")", "if", "chunk_number", "is", "None", ":", "raise", "ValueError", "(", "\"chunk_number is required when using recurrent memory.\"", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"multihead_attention\"", ",", "values", "=", "[", "query_antecedent", ",", "memory_antecedent", "]", ")", ":", "if", "recurrent_memory", "is", "not", "None", ":", "(", "recurrent_memory_transaction", ",", "query_antecedent", ",", "memory_antecedent", ",", "bias", ",", ")", "=", "recurrent_memory", ".", "pre_attention", "(", "chunk_number", ",", "query_antecedent", ",", "memory_antecedent", ",", "bias", ",", ")", "if", "cache", "is", "None", "or", "memory_antecedent", "is", "None", ":", "q", ",", "k", ",", "v", "=", "compute_qkv", "(", "query_antecedent", ",", "memory_antecedent", ",", "total_key_depth", ",", 
"total_value_depth", ",", "q_filter_width", ",", "kv_filter_width", ",", "q_padding", ",", "kv_padding", ",", "vars_3d_num_heads", "=", "vars_3d_num_heads", ",", "layer_collection", "=", "layer_collection", ")", "if", "cache", "is", "not", "None", ":", "if", "attention_type", "not", "in", "[", "\"dot_product\"", ",", "\"dot_product_relative\"", "]", ":", "# TODO(petershaw): Support caching when using relative position", "# representations, i.e. \"dot_product_relative\" attention.", "raise", "NotImplementedError", "(", "\"Caching is not guaranteed to work with attention types other than\"", "\" dot_product.\"", ")", "if", "bias", "is", "None", ":", "raise", "ValueError", "(", "\"Bias required for caching. See function docstring \"", "\"for details.\"", ")", "if", "memory_antecedent", "is", "not", "None", ":", "# Encoder-Decoder Attention Cache", "q", "=", "compute_attention_component", "(", "query_antecedent", ",", "total_key_depth", ",", "q_filter_width", ",", "q_padding", ",", "\"q\"", ",", "vars_3d_num_heads", "=", "vars_3d_num_heads", ")", "k", "=", "cache", "[", "\"k_encdec\"", "]", "v", "=", "cache", "[", "\"v_encdec\"", "]", "else", ":", "k", "=", "split_heads", "(", "k", ",", "num_heads", ")", "v", "=", "split_heads", "(", "v", ",", "num_heads", ")", "decode_loop_step", "=", "kwargs", ".", "get", "(", "\"decode_loop_step\"", ")", "if", "decode_loop_step", "is", "None", ":", "k", "=", "cache", "[", "\"k\"", "]", "=", "tf", ".", "concat", "(", "[", "cache", "[", "\"k\"", "]", ",", "k", "]", ",", "axis", "=", "2", ")", "v", "=", "cache", "[", "\"v\"", "]", "=", "tf", ".", "concat", "(", "[", "cache", "[", "\"v\"", "]", ",", "v", "]", ",", "axis", "=", "2", ")", "else", ":", "# Inplace update is required for inference on TPU.", "# Inplace_ops only supports inplace_update on the first dimension.", "# The performance of current implementation is better than updating", "# the tensor by adding the result of matmul(one_hot,", "# update_in_current_step)", "tmp_k", "=", "tf", ".", "transpose", "(", "cache", "[", "\"k\"", "]", ",", "perm", "=", "[", "2", ",", "0", ",", "1", ",", "3", "]", ")", "tmp_k", "=", "inplace_ops", ".", "alias_inplace_update", "(", "tmp_k", ",", "decode_loop_step", ",", "tf", ".", "squeeze", "(", "k", ",", "axis", "=", "2", ")", ")", "k", "=", "cache", "[", "\"k\"", "]", "=", "tf", ".", "transpose", "(", "tmp_k", ",", "perm", "=", "[", "1", ",", "2", ",", "0", ",", "3", "]", ")", "tmp_v", "=", "tf", ".", "transpose", "(", "cache", "[", "\"v\"", "]", ",", "perm", "=", "[", "2", ",", "0", ",", "1", ",", "3", "]", ")", "tmp_v", "=", "inplace_ops", ".", "alias_inplace_update", "(", "tmp_v", ",", "decode_loop_step", ",", "tf", ".", "squeeze", "(", "v", ",", "axis", "=", "2", ")", ")", "v", "=", "cache", "[", "\"v\"", "]", "=", "tf", ".", "transpose", "(", "tmp_v", ",", "perm", "=", "[", "1", ",", "2", ",", "0", ",", "3", "]", ")", "q", "=", "split_heads", "(", "q", ",", "num_heads", ")", "if", "cache", "is", "None", ":", "k", "=", "split_heads", "(", "k", ",", "num_heads", ")", "v", "=", "split_heads", "(", "v", ",", "num_heads", ")", "key_depth_per_head", "=", "total_key_depth", "//", "num_heads", "if", "not", "vars_3d", ":", "q", "*=", "key_depth_per_head", "**", "-", "0.5", "additional_returned_value", "=", "None", "if", "callable", "(", "attention_type", ")", ":", "# Generic way to extend multihead_attention", "x", "=", "attention_type", "(", "q", ",", "k", ",", "v", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "x", ",", "tuple", ")", ":", "x", ",", 
"additional_returned_value", "=", "x", "# Unpack", "elif", "attention_type", "==", "\"dot_product\"", ":", "if", "max_area_width", ">", "1", "or", "max_area_height", ">", "1", ":", "x", "=", "area_attention", ".", "dot_product_area_attention", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "dropout_rate", ",", "image_shapes", ",", "save_weights_to", "=", "save_weights_to", ",", "dropout_broadcast_dims", "=", "dropout_broadcast_dims", ",", "max_area_width", "=", "max_area_width", ",", "max_area_height", "=", "max_area_height", ",", "memory_height", "=", "memory_height", ",", "area_key_mode", "=", "area_key_mode", ",", "area_value_mode", "=", "area_value_mode", ",", "training", "=", "training", ")", "else", ":", "x", "=", "dot_product_attention", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "dropout_rate", ",", "image_shapes", ",", "save_weights_to", "=", "save_weights_to", ",", "make_image_summary", "=", "make_image_summary", ",", "dropout_broadcast_dims", "=", "dropout_broadcast_dims", ",", "activation_dtype", "=", "kwargs", ".", "get", "(", "\"activation_dtype\"", ")", ",", "hard_attention_k", "=", "hard_attention_k", ")", "elif", "attention_type", "==", "\"dot_product_relative\"", ":", "x", "=", "dot_product_attention_relative", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "max_relative_position", ",", "dropout_rate", ",", "image_shapes", ",", "save_weights_to", "=", "save_weights_to", ",", "make_image_summary", "=", "make_image_summary", ",", "cache", "=", "cache", "is", "not", "None", ",", "allow_memory", "=", "recurrent_memory", "is", "not", "None", ",", "hard_attention_k", "=", "hard_attention_k", ")", "elif", "attention_type", "==", "\"dot_product_unmasked_relative_v2\"", ":", "x", "=", "dot_product_unmasked_self_attention_relative_v2", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "max_relative_position", ",", "dropout_rate", ",", "image_shapes", ",", "make_image_summary", "=", "make_image_summary", ",", "dropout_broadcast_dims", "=", "dropout_broadcast_dims", ",", "heads_share_relative_embedding", "=", "heads_share_relative_embedding", ",", "add_relative_to_values", "=", "add_relative_to_values", ")", "elif", "attention_type", "==", "\"dot_product_relative_v2\"", ":", "x", "=", "dot_product_self_attention_relative_v2", "(", "q", ",", "k", ",", "v", ",", "bias", ",", "max_relative_position", ",", "dropout_rate", ",", "image_shapes", ",", "make_image_summary", "=", "make_image_summary", ",", "dropout_broadcast_dims", "=", "dropout_broadcast_dims", ",", "heads_share_relative_embedding", "=", "heads_share_relative_embedding", ",", "add_relative_to_values", "=", "add_relative_to_values", ")", "elif", "attention_type", "==", "\"local_within_block_mask_right\"", ":", "x", "=", "masked_within_block_local_attention_1d", "(", "q", ",", "k", ",", "v", ",", "block_length", "=", "block_length", ")", "elif", "attention_type", "==", "\"local_relative_mask_right\"", ":", "x", "=", "masked_relative_local_attention_1d", "(", "q", ",", "k", ",", "v", ",", "block_length", "=", "block_length", ",", "make_image_summary", "=", "make_image_summary", ",", "dropout_rate", "=", "dropout_rate", ",", "heads_share_relative_embedding", "=", "heads_share_relative_embedding", ",", "add_relative_to_values", "=", "add_relative_to_values", ",", "name", "=", "\"masked_relative_local_attention_1d\"", ")", "elif", "attention_type", "==", "\"local_mask_right\"", ":", "x", "=", "masked_local_attention_1d", "(", "q", ",", "k", ",", "v", ",", "block_length", "=", "block_length", ",", "make_image_summary", "=", 
"make_image_summary", ")", "elif", "attention_type", "==", "\"local_unmasked\"", ":", "x", "=", "local_attention_1d", "(", "q", ",", "k", ",", "v", ",", "block_length", "=", "block_length", ",", "filter_width", "=", "block_width", ")", "elif", "attention_type", "==", "\"masked_dilated_1d\"", ":", "x", "=", "masked_dilated_self_attention_1d", "(", "q", ",", "k", ",", "v", ",", "block_length", ",", "block_width", ",", "gap_size", ",", "num_memory_blocks", ")", "else", ":", "assert", "attention_type", "==", "\"unmasked_dilated_1d\"", "x", "=", "dilated_self_attention_1d", "(", "q", ",", "k", ",", "v", ",", "block_length", ",", "block_width", ",", "gap_size", ",", "num_memory_blocks", ")", "x", "=", "combine_heads", "(", "x", ")", "# Set last dim specifically.", "x", ".", "set_shape", "(", "x", ".", "shape", ".", "as_list", "(", ")", "[", ":", "-", "1", "]", "+", "[", "total_value_depth", "]", ")", "if", "vars_3d", ":", "o_var", "=", "tf", ".", "get_variable", "(", "\"o\"", ",", "[", "num_heads", ",", "total_value_depth", "//", "num_heads", ",", "output_depth", "]", ")", "o_var", "=", "tf", ".", "cast", "(", "o_var", ",", "x", ".", "dtype", ")", "o_var", "=", "tf", ".", "reshape", "(", "o_var", ",", "[", "total_value_depth", ",", "output_depth", "]", ")", "x", "=", "tf", ".", "tensordot", "(", "x", ",", "o_var", ",", "axes", "=", "1", ")", "else", ":", "x", "=", "common_layers", ".", "dense", "(", "x", ",", "output_depth", ",", "use_bias", "=", "False", ",", "name", "=", "\"output_transform\"", ",", "layer_collection", "=", "layer_collection", ")", "if", "recurrent_memory", "is", "not", "None", ":", "x", "=", "recurrent_memory", ".", "post_attention", "(", "recurrent_memory_transaction", ",", "x", ")", "if", "additional_returned_value", "is", "not", "None", ":", "return", "x", ",", "additional_returned_value", "return", "x" ]
Multihead scaled-dot-product attention with input/output transformations. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] or None bias: bias Tensor (see attention_bias()) total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number attention_type: a string, either "dot_product", "dot_product_relative", "local_mask_right", "local_unmasked", "masked_dilated_1d", "unmasked_dilated_1d", graph, or any attention function with the signature (query, key, value, **kwargs) max_relative_position: Maximum distance between inputs to generate unique relation embeddings for. Only relevant when using "dot_product_relative" attention. heads_share_relative_embedding: boolean to share relative embeddings add_relative_to_values: a boolean for whether to add relative component to values. image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() block_length: an integer - relevant for "local_mask_right" block_width: an integer - relevant for "local_unmasked" q_filter_width: An integer specifying how wide you want the query to be. kv_filter_width: An integer specifying how wide you want the keys and values to be. q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID": no padding. cache: dict containing Tensors which are the results of previous attentions, used for fast decoding. Expects the dict to contrain two keys ('k' and 'v'), for the initial call the values for these keys should be empty Tensors of the appropriate shape. 'k' [batch_size, 0, key_channels] 'v' [batch_size, 0, value_channels] gap_size: Integer option for dilated attention to indicate spacing between memory blocks. num_memory_blocks: Integer option to indicate how many memory blocks to look at. name: an optional string. save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. vars_3d: use 3-dimensional variables for input/output transformations layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. recurrent_memory: An optional transformer_memory.RecurrentMemory, which retains state across chunks. Default is None. chunk_number: an optional integer Tensor with shape [batch] used to operate the recurrent_memory. hard_attention_k: integer, if > 0 triggers hard attention (picking top-k). max_area_width: the max width allowed for an area. max_area_height: the max height allowed for an area. memory_height: the height of the memory. area_key_mode: the mode for computing area keys, which can be "mean", "concat", "sum", "sample_concat", and "sample_sum". area_value_mode: the mode for computing area values, which can be either "mean", or "sum". training: indicating if it is in the training mode. **kwargs (dict): Parameters for the attention function. Caching: WARNING: For decoder self-attention, i.e. when memory_antecedent == None, the caching assumes that the bias contains future masking. 
The caching works by saving all the previous key and value values so that you are able to send just the last query location to this attention function. I.e. if the cache dict is provided it assumes the query is of the shape [batch_size, 1, hidden_dim] rather than the full memory. Returns: The result of the attention transformation. The output shape is [batch_size, length_q, hidden_dim] unless the cache dict is provided in which case only the last memory position is calculated and the output shape is [batch_size, 1, hidden_dim] Optionally returns an additional loss parameters (ex: load balance loss for the experts) returned by the attention_type function. Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads.
[ "Multihead", "scaled", "-", "dot", "-", "product", "attention", "with", "input", "/", "output", "transformations", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3964-L4299
train
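A minimal decoder-style self-attention sketch for multihead_attention, assuming the same module also exposes attention_bias_lower_triangle (used here only to build the causal bias); all sizes are illustrative.

import tensorflow as tf
from tensor2tensor.layers import common_attention

x = tf.random_normal([2, 10, 512])   # [batch, length_q, channels]
# Lower-triangular bias so each position attends only to earlier positions.
bias = common_attention.attention_bias_lower_triangle(10)

y = common_attention.multihead_attention(
    query_antecedent=x,
    memory_antecedent=None,   # self-attention
    bias=bias,
    total_key_depth=512,      # must be divisible by num_heads
    total_value_depth=512,
    output_depth=512,
    num_heads=8,
    dropout_rate=0.1)
# y has shape [2, 10, 512].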
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
multihead_attention_2d
def multihead_attention_2d(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, output_depth, num_heads, attention_type="local_attention_2d", query_shape=(8, 16), memory_flange=(8, 16), name=None): """2d Multihead scaled-dot-product attention with inp/output transformations. Args: query_antecedent: a Tensor with shape [batch, h, w, depth_k] memory_antecedent: a Tensor with shape [batch, h, w, depth_k] total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth attention_type: String, type of attention function to use. query_shape: an tuple indicating the height and width of each query block. memory_flange: an integer indicating how much to look in height and width name: an optional string Returns: A Tensor of shape [batch, h, w, output_depth] Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads. """ if total_key_depth % num_heads != 0: raise ValueError("Key depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_key_depth, num_heads)) if total_value_depth % num_heads != 0: raise ValueError("Value depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_value_depth, num_heads)) with tf.variable_scope( name, default_name="multihead_attention_2d", values=[query_antecedent, memory_antecedent]): q, k, v = compute_qkv(query_antecedent, memory_antecedent, total_key_depth, total_value_depth) # after splitting, shape is [batch, heads, h, w, depth] q = split_heads_2d(q, num_heads) k = split_heads_2d(k, num_heads) v = split_heads_2d(v, num_heads) key_depth_per_head = total_key_depth // num_heads q *= key_depth_per_head**-0.5 if attention_type == "local_attention_2d": x = local_attention_2d( q, k, v, query_shape=query_shape, memory_flange=memory_flange) elif attention_type == "masked_local_attention_2d": assert attention_type == "masked_local_attention_2d" x = masked_local_attention_2d( q, k, v, query_shape=query_shape, memory_flange=memory_flange) else: assert attention_type == "unmasked_local_attention_2d_tpu" x = dot_product_unmasked_attention_local_2d_tpu( q, k, v, None, max_relative_position=None, query_shape=query_shape) x = combine_heads_2d(x) x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") return x
python
def multihead_attention_2d(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, output_depth, num_heads, attention_type="local_attention_2d", query_shape=(8, 16), memory_flange=(8, 16), name=None): """2d Multihead scaled-dot-product attention with inp/output transformations. Args: query_antecedent: a Tensor with shape [batch, h, w, depth_k] memory_antecedent: a Tensor with shape [batch, h, w, depth_k] total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth attention_type: String, type of attention function to use. query_shape: an tuple indicating the height and width of each query block. memory_flange: an integer indicating how much to look in height and width name: an optional string Returns: A Tensor of shape [batch, h, w, output_depth] Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads. """ if total_key_depth % num_heads != 0: raise ValueError("Key depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_key_depth, num_heads)) if total_value_depth % num_heads != 0: raise ValueError("Value depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_value_depth, num_heads)) with tf.variable_scope( name, default_name="multihead_attention_2d", values=[query_antecedent, memory_antecedent]): q, k, v = compute_qkv(query_antecedent, memory_antecedent, total_key_depth, total_value_depth) # after splitting, shape is [batch, heads, h, w, depth] q = split_heads_2d(q, num_heads) k = split_heads_2d(k, num_heads) v = split_heads_2d(v, num_heads) key_depth_per_head = total_key_depth // num_heads q *= key_depth_per_head**-0.5 if attention_type == "local_attention_2d": x = local_attention_2d( q, k, v, query_shape=query_shape, memory_flange=memory_flange) elif attention_type == "masked_local_attention_2d": assert attention_type == "masked_local_attention_2d" x = masked_local_attention_2d( q, k, v, query_shape=query_shape, memory_flange=memory_flange) else: assert attention_type == "unmasked_local_attention_2d_tpu" x = dot_product_unmasked_attention_local_2d_tpu( q, k, v, None, max_relative_position=None, query_shape=query_shape) x = combine_heads_2d(x) x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") return x
[ "def", "multihead_attention_2d", "(", "query_antecedent", ",", "memory_antecedent", ",", "total_key_depth", ",", "total_value_depth", ",", "output_depth", ",", "num_heads", ",", "attention_type", "=", "\"local_attention_2d\"", ",", "query_shape", "=", "(", "8", ",", "16", ")", ",", "memory_flange", "=", "(", "8", ",", "16", ")", ",", "name", "=", "None", ")", ":", "if", "total_key_depth", "%", "num_heads", "!=", "0", ":", "raise", "ValueError", "(", "\"Key depth (%d) must be divisible by the number of \"", "\"attention heads (%d).\"", "%", "(", "total_key_depth", ",", "num_heads", ")", ")", "if", "total_value_depth", "%", "num_heads", "!=", "0", ":", "raise", "ValueError", "(", "\"Value depth (%d) must be divisible by the number of \"", "\"attention heads (%d).\"", "%", "(", "total_value_depth", ",", "num_heads", ")", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"multihead_attention_2d\"", ",", "values", "=", "[", "query_antecedent", ",", "memory_antecedent", "]", ")", ":", "q", ",", "k", ",", "v", "=", "compute_qkv", "(", "query_antecedent", ",", "memory_antecedent", ",", "total_key_depth", ",", "total_value_depth", ")", "# after splitting, shape is [batch, heads, h, w, depth]", "q", "=", "split_heads_2d", "(", "q", ",", "num_heads", ")", "k", "=", "split_heads_2d", "(", "k", ",", "num_heads", ")", "v", "=", "split_heads_2d", "(", "v", ",", "num_heads", ")", "key_depth_per_head", "=", "total_key_depth", "//", "num_heads", "q", "*=", "key_depth_per_head", "**", "-", "0.5", "if", "attention_type", "==", "\"local_attention_2d\"", ":", "x", "=", "local_attention_2d", "(", "q", ",", "k", ",", "v", ",", "query_shape", "=", "query_shape", ",", "memory_flange", "=", "memory_flange", ")", "elif", "attention_type", "==", "\"masked_local_attention_2d\"", ":", "assert", "attention_type", "==", "\"masked_local_attention_2d\"", "x", "=", "masked_local_attention_2d", "(", "q", ",", "k", ",", "v", ",", "query_shape", "=", "query_shape", ",", "memory_flange", "=", "memory_flange", ")", "else", ":", "assert", "attention_type", "==", "\"unmasked_local_attention_2d_tpu\"", "x", "=", "dot_product_unmasked_attention_local_2d_tpu", "(", "q", ",", "k", ",", "v", ",", "None", ",", "max_relative_position", "=", "None", ",", "query_shape", "=", "query_shape", ")", "x", "=", "combine_heads_2d", "(", "x", ")", "x", "=", "common_layers", ".", "dense", "(", "x", ",", "output_depth", ",", "use_bias", "=", "False", ",", "name", "=", "\"output_transform\"", ")", "return", "x" ]
2d Multihead scaled-dot-product attention with inp/output transformations. Args: query_antecedent: a Tensor with shape [batch, h, w, depth_k] memory_antecedent: a Tensor with shape [batch, h, w, depth_k] total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth attention_type: String, type of attention function to use. query_shape: a tuple indicating the height and width of each query block. memory_flange: a tuple indicating how much to look in height and width name: an optional string Returns: A Tensor of shape [batch, h, w, output_depth] Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads.
[ "2d", "Multihead", "scaled", "-", "dot", "-", "product", "attention", "with", "inp", "/", "output", "transformations", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4302-L4365
train
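A minimal usage sketch for the multihead_attention_2d record above. It assumes a TF1-style graph, that tensor2tensor.layers.common_attention is importable (per the path and url fields), and hypothetical sizes (batch=2, h=16, w=32, depth=64, 8 heads); variables created inside the layer would still need to be initialized before evaluation.

import tensorflow as tf
from tensor2tensor.layers import common_attention

# Hypothetical 2d feature map: [batch=2, h=16, w=32, depth=64].
x = tf.random_normal([2, 16, 32, 64])

# Self-attention over local 2d blocks: query and memory antecedents are the
# same tensor, so each (8, 16) query block attends to itself plus a flange.
y = common_attention.multihead_attention_2d(
    query_antecedent=x,
    memory_antecedent=x,
    total_key_depth=64,
    total_value_depth=64,
    output_depth=64,
    num_heads=8,
    attention_type="local_attention_2d")
# y has shape [2, 16, 32, 64].

The height and width here are chosen to be multiples of the default query_shape=(8, 16) so the blocking divides the feature map evenly.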
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
ffn_self_attention_layer
def ffn_self_attention_layer(x, filter_depth, output_depth, num_parts, dropout_rate, share_kv=False, name=None): """Self-attention feedforward layer. We use self-attention to do feedforward computations. We apply this function positionwise where for each position, we linearly transform the output to have depth filter_depth, and break up the result depth-wise into num_parts contiguous parts. The parts self-attend, we concatenate the results depth-wise, and we linearly transform to a depth of output_depth. The goal is to get multiplicative interactions between components of a representation. Args: x: a Tensor with shape [batch, length, channels] filter_depth: an integer output_depth: an integer num_parts: an integer dividing filter depth dropout_rate: a floating point number share_kv: Share the key value transform name: an optional string Returns: A Tensor with shape [batch, length, output_depth]. """ with tf.variable_scope( name, default_name="feedforward_self_attention", values=[x]): x_shape = common_layers.shape_list(x) part_depth = filter_depth // num_parts if not share_kv: combined = common_layers.dense( x, filter_depth * 3, use_bias=False, name="qkv_transform") combined = tf.expand_dims(combined, axis=2) q, k, v = tf.split(combined, 3, axis=3) else: q = tf.expand_dims( common_layers.dense( x, filter_depth, use_bias=False, name="q_transform"), axis=2) kv_combined = tf.expand_dims( common_layers.dense( tf.concat([x, x], axis=1), filter_depth, use_bias=False, name="kv_transform"), axis=2) k, v = tf.split(kv_combined, [x_shape[1], x_shape[1]], axis=1) batch_q = tf.reshape(q, [-1, 1, num_parts, part_depth]) batch_k = tf.reshape(k, [-1, 1, num_parts, part_depth]) batch_v = tf.reshape(v, [-1, 1, num_parts, part_depth]) batch_q *= part_depth**-0.5 # non-masked bias bias = None x = dot_product_attention(batch_q, batch_k, batch_v, bias, dropout_rate) x = tf.reshape(x, [x_shape[0], x_shape[1], filter_depth]) x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") return x
python
def ffn_self_attention_layer(x, filter_depth, output_depth, num_parts, dropout_rate, share_kv=False, name=None): """Self-attention feedforward layer. We use self-attention to do feedforward computations. We apply this function positionwise where for each position, we linearly transform the output to have depth filter_depth, and break up the result depth-wise into num_parts contiguous parts. The parts self-attend, we concatenate the results depth-wise, and we linearly transform to a depth of output_depth. The goal is to get multiplicative interactions between components of a representation. Args: x: a Tensor with shape [batch, length, channels] filter_depth: an integer output_depth: an integer num_parts: an integer dividing filter depth dropout_rate: a floating point number share_kv: Share the key value transform name: an optional string Returns: A Tensor with shape [batch, length, output_depth]. """ with tf.variable_scope( name, default_name="feedforward_self_attention", values=[x]): x_shape = common_layers.shape_list(x) part_depth = filter_depth // num_parts if not share_kv: combined = common_layers.dense( x, filter_depth * 3, use_bias=False, name="qkv_transform") combined = tf.expand_dims(combined, axis=2) q, k, v = tf.split(combined, 3, axis=3) else: q = tf.expand_dims( common_layers.dense( x, filter_depth, use_bias=False, name="q_transform"), axis=2) kv_combined = tf.expand_dims( common_layers.dense( tf.concat([x, x], axis=1), filter_depth, use_bias=False, name="kv_transform"), axis=2) k, v = tf.split(kv_combined, [x_shape[1], x_shape[1]], axis=1) batch_q = tf.reshape(q, [-1, 1, num_parts, part_depth]) batch_k = tf.reshape(k, [-1, 1, num_parts, part_depth]) batch_v = tf.reshape(v, [-1, 1, num_parts, part_depth]) batch_q *= part_depth**-0.5 # non-masked bias bias = None x = dot_product_attention(batch_q, batch_k, batch_v, bias, dropout_rate) x = tf.reshape(x, [x_shape[0], x_shape[1], filter_depth]) x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") return x
[ "def", "ffn_self_attention_layer", "(", "x", ",", "filter_depth", ",", "output_depth", ",", "num_parts", ",", "dropout_rate", ",", "share_kv", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"feedforward_self_attention\"", ",", "values", "=", "[", "x", "]", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "part_depth", "=", "filter_depth", "//", "num_parts", "if", "not", "share_kv", ":", "combined", "=", "common_layers", ".", "dense", "(", "x", ",", "filter_depth", "*", "3", ",", "use_bias", "=", "False", ",", "name", "=", "\"qkv_transform\"", ")", "combined", "=", "tf", ".", "expand_dims", "(", "combined", ",", "axis", "=", "2", ")", "q", ",", "k", ",", "v", "=", "tf", ".", "split", "(", "combined", ",", "3", ",", "axis", "=", "3", ")", "else", ":", "q", "=", "tf", ".", "expand_dims", "(", "common_layers", ".", "dense", "(", "x", ",", "filter_depth", ",", "use_bias", "=", "False", ",", "name", "=", "\"q_transform\"", ")", ",", "axis", "=", "2", ")", "kv_combined", "=", "tf", ".", "expand_dims", "(", "common_layers", ".", "dense", "(", "tf", ".", "concat", "(", "[", "x", ",", "x", "]", ",", "axis", "=", "1", ")", ",", "filter_depth", ",", "use_bias", "=", "False", ",", "name", "=", "\"kv_transform\"", ")", ",", "axis", "=", "2", ")", "k", ",", "v", "=", "tf", ".", "split", "(", "kv_combined", ",", "[", "x_shape", "[", "1", "]", ",", "x_shape", "[", "1", "]", "]", ",", "axis", "=", "1", ")", "batch_q", "=", "tf", ".", "reshape", "(", "q", ",", "[", "-", "1", ",", "1", ",", "num_parts", ",", "part_depth", "]", ")", "batch_k", "=", "tf", ".", "reshape", "(", "k", ",", "[", "-", "1", ",", "1", ",", "num_parts", ",", "part_depth", "]", ")", "batch_v", "=", "tf", ".", "reshape", "(", "v", ",", "[", "-", "1", ",", "1", ",", "num_parts", ",", "part_depth", "]", ")", "batch_q", "*=", "part_depth", "**", "-", "0.5", "# non-masked bias", "bias", "=", "None", "x", "=", "dot_product_attention", "(", "batch_q", ",", "batch_k", ",", "batch_v", ",", "bias", ",", "dropout_rate", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "x_shape", "[", "0", "]", ",", "x_shape", "[", "1", "]", ",", "filter_depth", "]", ")", "x", "=", "common_layers", ".", "dense", "(", "x", ",", "output_depth", ",", "use_bias", "=", "False", ",", "name", "=", "\"output_transform\"", ")", "return", "x" ]
Self-attention feedforward layer. We use self-attention to do feedforward computations. We apply this function positionwise where for each position, we linearly transform the input to have depth filter_depth, and break up the result depth-wise into num_parts contiguous parts. The parts self-attend; we concatenate the results depth-wise and linearly transform to a depth of output_depth. The goal is to get multiplicative interactions between components of a representation. Args: x: a Tensor with shape [batch, length, channels] filter_depth: an integer output_depth: an integer num_parts: an integer dividing filter_depth dropout_rate: a floating point number share_kv: whether to share the key and value transforms name: an optional string Returns: A Tensor with shape [batch, length, output_depth].
[ "Self", "-", "attention", "feedforward", "layer", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4368-L4430
train
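A small sketch of how the ffn_self_attention_layer record above might be called, under the same assumptions (TF1 graph mode, importable common_attention) and with hypothetical sizes: filter_depth=512 split into num_parts=8 slices of 64 that self-attend at every position.

import tensorflow as tf
from tensor2tensor.layers import common_attention

# Hypothetical sequence batch: [batch=2, length=10, channels=128].
x = tf.random_normal([2, 10, 128])

# Position-wise "feedforward" computed as self-attention between the
# num_parts depth slices of the filter_depth-sized hidden state.
y = common_attention.ffn_self_attention_layer(
    x,
    filter_depth=512,
    output_depth=128,
    num_parts=8,
    dropout_rate=0.0,
    share_kv=False)
# y has shape [2, 10, 128].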
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
parameter_attention
def parameter_attention(x, total_key_depth, total_value_depth, output_depth, memory_rows, num_heads, dropout_rate, name=None): """Attention over parameters. We use the same multi-headed attention as in the other layers, but the memory keys and values are model parameters. There are no linear transformation on the keys or values. We are also a bit more careful about memory usage, since the number of memory positions may be very large. Args: x: a Tensor with shape [batch, length_q, channels] total_key_depth: an integer total_value_depth: an integer output_depth: an integer memory_rows: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number name: an optional string Returns: A Tensor with shape [batch, length_q, output_depth]. """ with tf.variable_scope(name, default_name="parameter_attention", values=[x]): head_size_k = total_key_depth // num_heads head_size_v = total_value_depth // num_heads var_shape_k = [num_heads, memory_rows, head_size_k] var_shape_v = [num_heads, memory_rows, head_size_v] k = tf.get_variable( "k", var_shape_k, initializer=tf.random_normal_initializer( 0, output_depth**-0.5 * (num_heads**0.5))) v = tf.get_variable( "v", var_shape_v, initializer=tf.random_normal_initializer( 0, output_depth**-0.5 * (output_depth**0.5))) batch_size = common_layers.shape_list(x)[0] length = common_layers.shape_list(x)[1] q = common_layers.dense( x, total_key_depth, use_bias=False, name="q_transform") if dropout_rate: # This is a cheaper form of attention dropout where we use to use # the same dropout decisions across batch elements and query positions, # but different decisions across heads and memory positions. v = tf.nn.dropout( v, 1.0 - dropout_rate, noise_shape=[num_heads, memory_rows, 1]) # query is [batch, length, hidden_size] # reshape and transpose it to [heads, batch * length, head_size] q = tf.reshape(q, [batch_size, length, num_heads, head_size_k]) q = tf.transpose(q, [2, 0, 1, 3]) q = tf.reshape(q, [num_heads, batch_size * length, head_size_k]) weights = tf.matmul(q, k, transpose_b=True) weights = tf.nn.softmax(weights) y = tf.matmul(weights, v) y = tf.reshape(y, [num_heads, batch_size, length, head_size_v]) y = tf.transpose(y, [1, 2, 0, 3]) y = tf.reshape(y, [batch_size, length, total_value_depth]) y.set_shape([None, None, total_value_depth]) y = common_layers.dense( y, output_depth, use_bias=False, name="output_transform") return y
python
def parameter_attention(x, total_key_depth, total_value_depth, output_depth, memory_rows, num_heads, dropout_rate, name=None): """Attention over parameters. We use the same multi-headed attention as in the other layers, but the memory keys and values are model parameters. There are no linear transformation on the keys or values. We are also a bit more careful about memory usage, since the number of memory positions may be very large. Args: x: a Tensor with shape [batch, length_q, channels] total_key_depth: an integer total_value_depth: an integer output_depth: an integer memory_rows: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number name: an optional string Returns: A Tensor with shape [batch, length_q, output_depth]. """ with tf.variable_scope(name, default_name="parameter_attention", values=[x]): head_size_k = total_key_depth // num_heads head_size_v = total_value_depth // num_heads var_shape_k = [num_heads, memory_rows, head_size_k] var_shape_v = [num_heads, memory_rows, head_size_v] k = tf.get_variable( "k", var_shape_k, initializer=tf.random_normal_initializer( 0, output_depth**-0.5 * (num_heads**0.5))) v = tf.get_variable( "v", var_shape_v, initializer=tf.random_normal_initializer( 0, output_depth**-0.5 * (output_depth**0.5))) batch_size = common_layers.shape_list(x)[0] length = common_layers.shape_list(x)[1] q = common_layers.dense( x, total_key_depth, use_bias=False, name="q_transform") if dropout_rate: # This is a cheaper form of attention dropout where we use to use # the same dropout decisions across batch elements and query positions, # but different decisions across heads and memory positions. v = tf.nn.dropout( v, 1.0 - dropout_rate, noise_shape=[num_heads, memory_rows, 1]) # query is [batch, length, hidden_size] # reshape and transpose it to [heads, batch * length, head_size] q = tf.reshape(q, [batch_size, length, num_heads, head_size_k]) q = tf.transpose(q, [2, 0, 1, 3]) q = tf.reshape(q, [num_heads, batch_size * length, head_size_k]) weights = tf.matmul(q, k, transpose_b=True) weights = tf.nn.softmax(weights) y = tf.matmul(weights, v) y = tf.reshape(y, [num_heads, batch_size, length, head_size_v]) y = tf.transpose(y, [1, 2, 0, 3]) y = tf.reshape(y, [batch_size, length, total_value_depth]) y.set_shape([None, None, total_value_depth]) y = common_layers.dense( y, output_depth, use_bias=False, name="output_transform") return y
[ "def", "parameter_attention", "(", "x", ",", "total_key_depth", ",", "total_value_depth", ",", "output_depth", ",", "memory_rows", ",", "num_heads", ",", "dropout_rate", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"parameter_attention\"", ",", "values", "=", "[", "x", "]", ")", ":", "head_size_k", "=", "total_key_depth", "//", "num_heads", "head_size_v", "=", "total_value_depth", "//", "num_heads", "var_shape_k", "=", "[", "num_heads", ",", "memory_rows", ",", "head_size_k", "]", "var_shape_v", "=", "[", "num_heads", ",", "memory_rows", ",", "head_size_v", "]", "k", "=", "tf", ".", "get_variable", "(", "\"k\"", ",", "var_shape_k", ",", "initializer", "=", "tf", ".", "random_normal_initializer", "(", "0", ",", "output_depth", "**", "-", "0.5", "*", "(", "num_heads", "**", "0.5", ")", ")", ")", "v", "=", "tf", ".", "get_variable", "(", "\"v\"", ",", "var_shape_v", ",", "initializer", "=", "tf", ".", "random_normal_initializer", "(", "0", ",", "output_depth", "**", "-", "0.5", "*", "(", "output_depth", "**", "0.5", ")", ")", ")", "batch_size", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "0", "]", "length", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "1", "]", "q", "=", "common_layers", ".", "dense", "(", "x", ",", "total_key_depth", ",", "use_bias", "=", "False", ",", "name", "=", "\"q_transform\"", ")", "if", "dropout_rate", ":", "# This is a cheaper form of attention dropout where we use to use", "# the same dropout decisions across batch elements and query positions,", "# but different decisions across heads and memory positions.", "v", "=", "tf", ".", "nn", ".", "dropout", "(", "v", ",", "1.0", "-", "dropout_rate", ",", "noise_shape", "=", "[", "num_heads", ",", "memory_rows", ",", "1", "]", ")", "# query is [batch, length, hidden_size]", "# reshape and transpose it to [heads, batch * length, head_size]", "q", "=", "tf", ".", "reshape", "(", "q", ",", "[", "batch_size", ",", "length", ",", "num_heads", ",", "head_size_k", "]", ")", "q", "=", "tf", ".", "transpose", "(", "q", ",", "[", "2", ",", "0", ",", "1", ",", "3", "]", ")", "q", "=", "tf", ".", "reshape", "(", "q", ",", "[", "num_heads", ",", "batch_size", "*", "length", ",", "head_size_k", "]", ")", "weights", "=", "tf", ".", "matmul", "(", "q", ",", "k", ",", "transpose_b", "=", "True", ")", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "weights", ")", "y", "=", "tf", ".", "matmul", "(", "weights", ",", "v", ")", "y", "=", "tf", ".", "reshape", "(", "y", ",", "[", "num_heads", ",", "batch_size", ",", "length", ",", "head_size_v", "]", ")", "y", "=", "tf", ".", "transpose", "(", "y", ",", "[", "1", ",", "2", ",", "0", ",", "3", "]", ")", "y", "=", "tf", ".", "reshape", "(", "y", ",", "[", "batch_size", ",", "length", ",", "total_value_depth", "]", ")", "y", ".", "set_shape", "(", "[", "None", ",", "None", ",", "total_value_depth", "]", ")", "y", "=", "common_layers", ".", "dense", "(", "y", ",", "output_depth", ",", "use_bias", "=", "False", ",", "name", "=", "\"output_transform\"", ")", "return", "y" ]
Attention over parameters. We use the same multi-headed attention as in the other layers, but the memory keys and values are model parameters. There is no linear transformation on the keys or values. We are also a bit more careful about memory usage, since the number of memory positions may be very large. Args: x: a Tensor with shape [batch, length_q, channels] total_key_depth: an integer total_value_depth: an integer output_depth: an integer memory_rows: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number name: an optional string Returns: A Tensor with shape [batch, length_q, output_depth].
[ "Attention", "over", "parameters", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4433-L4502
train
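A sketch of the parameter_attention record above under the same assumptions; memory_rows=256 and the other sizes are hypothetical choices for illustration. The keys and values here are trainable variables, so they would need to be initialized before the output can be evaluated.

import tensorflow as tf
from tensor2tensor.layers import common_attention

# Hypothetical input: [batch=2, length_q=10, channels=128].
x = tf.random_normal([2, 10, 128])

# Multi-head attention over a fixed bank of 256 learned memory rows; the
# keys and values are model parameters rather than projections of a tensor.
y = common_attention.parameter_attention(
    x,
    total_key_depth=128,
    total_value_depth=128,
    output_depth=128,
    memory_rows=256,
    num_heads=8,
    dropout_rate=0.0)
# y has shape [2, 10, 128].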
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
coordinate_tensor
def coordinate_tensor(shape, axis): """Return a tensor with given shape containing coordinate along given axis. Args: shape: a Tensor representing the shape of the output Tensor axis: an integer Returns: A tensor with shape shape and type tf.int32, where each elements its coordinate along the given axis. """ if axis < 0: axis = tf.size(shape) + axis # Convert to positive for the one_hot indice r = tf.range(shape[axis]) r_shape = tf.one_hot( axis, tf.size(shape), on_value=-1, off_value=1, dtype=tf.int32) return tf.zeros(shape, dtype=tf.int32) + tf.reshape(r, r_shape)
python
def coordinate_tensor(shape, axis): """Return a tensor with given shape containing coordinate along given axis. Args: shape: a Tensor representing the shape of the output Tensor axis: an integer Returns: A tensor with shape shape and type tf.int32, where each elements its coordinate along the given axis. """ if axis < 0: axis = tf.size(shape) + axis # Convert to positive for the one_hot indice r = tf.range(shape[axis]) r_shape = tf.one_hot( axis, tf.size(shape), on_value=-1, off_value=1, dtype=tf.int32) return tf.zeros(shape, dtype=tf.int32) + tf.reshape(r, r_shape)
[ "def", "coordinate_tensor", "(", "shape", ",", "axis", ")", ":", "if", "axis", "<", "0", ":", "axis", "=", "tf", ".", "size", "(", "shape", ")", "+", "axis", "# Convert to positive for the one_hot indice", "r", "=", "tf", ".", "range", "(", "shape", "[", "axis", "]", ")", "r_shape", "=", "tf", ".", "one_hot", "(", "axis", ",", "tf", ".", "size", "(", "shape", ")", ",", "on_value", "=", "-", "1", ",", "off_value", "=", "1", ",", "dtype", "=", "tf", ".", "int32", ")", "return", "tf", ".", "zeros", "(", "shape", ",", "dtype", "=", "tf", ".", "int32", ")", "+", "tf", ".", "reshape", "(", "r", ",", "r_shape", ")" ]
Return a tensor with given shape containing coordinate along given axis. Args: shape: a Tensor representing the shape of the output Tensor axis: an integer Returns: A tensor with the given shape and type tf.int32, where each element is its coordinate along the given axis.
[ "Return", "a", "tensor", "with", "given", "shape", "containing", "coordinate", "along", "given", "axis", "." ]
272500b6efe353aeb638d2745ed56e519462ca31
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4506-L4523
train
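A sketch of the coordinate_tensor record above with a concrete, hypothetical shape, showing the value it produces when evaluated in a TF1 session (no variables are created, so no initialization is needed).

import tensorflow as tf
from tensor2tensor.layers import common_attention

# Coordinates along axis 1 of a [2, 3] shape.
coords = common_attention.coordinate_tensor(tf.constant([2, 3]), axis=1)

with tf.Session() as sess:
  print(sess.run(coords))
# [[0 1 2]
#  [0 1 2]]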