Code
stringlengths
103
85.9k
Summary
listlengths
0
94
Please provide a description of the function:def rlmb_long_stochastic_discrete_simulation_deterministic_starts(): hparams = rlmb_base_stochastic_discrete() hparams.generative_model_params = "next_frame_basic_stochastic_discrete_long" hparams.ppo_epochs_num = 1000 hparams.simulation_random_starts = False re...
[ "Long setting with stochastic discrete model & deterministic sim starts." ]
Please provide a description of the function:def rlmb_long_stochastic_discrete_100steps(): hparams = rlmb_long_stochastic_discrete() hparams.ppo_epoch_length = 100 hparams.simulated_rollout_length = 100 hparams.simulated_batch_size = 8 return hparams
[ "Long setting with stochastic discrete model, changed ppo steps." ]
Please provide a description of the function:def rlmb_long_stochastic_discrete_25steps(): hparams = rlmb_long_stochastic_discrete() hparams.ppo_epoch_length = 25 hparams.simulated_rollout_length = 25 hparams.simulated_batch_size = 32 return hparams
[ "Long setting with stochastic discrete model, changed ppo steps." ]
Please provide a description of the function:def rlmb_base_stochastic_discrete_noresize(): hparams = rlmb_base() hparams.generative_model = "next_frame_basic_stochastic_discrete" hparams.generative_model_params = "next_frame_basic_stochastic_discrete" hparams.resize_height_factor = 1 hparams.resize_width_f...
[ "Base setting with stochastic discrete model." ]
Please provide a description of the function:def rlmb_base_sv2p(): hparams = rlmb_base() hparams.learning_rate_bump = 1.0 hparams.generative_model = "next_frame_sv2p" hparams.generative_model_params = "next_frame_sv2p_atari" return hparams
[ "Base setting with sv2p as world model." ]
Please provide a description of the function:def _rlmb_tiny_overrides(): return dict( epochs=1, num_real_env_frames=128, model_train_steps=2, max_num_noops=1, eval_max_num_noops=1, generative_model_params="next_frame_tiny", stop_loop_early=True, resize_height_factor=...
[ "Parameters to override for tiny setting excluding agent-related hparams." ]
Please provide a description of the function:def rlmb_ppo_tiny(): hparams = rlmb_ppo_base() hparams = hparams.override_from_dict(_rlmb_tiny_overrides()) update_hparams(hparams, dict( ppo_epochs_num=2, ppo_epoch_length=10, real_ppo_epoch_length=36, real_ppo_effective_num_agents=2, ...
[ "Tiny set for testing." ]
Please provide a description of the function:def rlmb_dqn_tiny(): hparams = rlmb_dqn_base() hparams = hparams.override_from_dict(_rlmb_tiny_overrides()) update_hparams(hparams, dict( simulated_rollout_length=2, dqn_time_limit=2, dqn_num_frames=128, real_dqn_replay_buffer_replay_capacity...
[ "Tiny set for testing." ]
Please provide a description of the function:def rlmb_tiny_stochastic(): hparams = rlmb_ppo_tiny() hparams.epochs = 1 # Too slow with 2 for regular runs. hparams.generative_model = "next_frame_basic_stochastic" hparams.generative_model_params = "next_frame_basic_stochastic" return hparams
[ "Tiny setting with a stochastic next-frame model." ]
Please provide a description of the function:def rlmb_tiny_recurrent(): hparams = rlmb_ppo_tiny() hparams.epochs = 1 # Too slow with 2 for regular runs. hparams.generative_model = "next_frame_basic_recurrent" hparams.generative_model_params = "next_frame_basic_recurrent" return hparams
[ "Tiny setting with a recurrent next-frame model." ]
Please provide a description of the function:def rlmb_tiny_sv2p(): hparams = rlmb_ppo_tiny() hparams.generative_model = "next_frame_sv2p" hparams.generative_model_params = "next_frame_sv2p_tiny" hparams.grayscale = False return hparams
[ "Tiny setting with a tiny sv2p model." ]
Please provide a description of the function:def rlmb_grid(rhp): rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"]) base = 100000 medium = base // 2 small = medium // 2 rhp.set_discrete("loop.num_real_env_frames", [base, medium, small]) # Dummy parameter to get 5 runs for each configurati...
[ "Grid over games and frames, and 5 runs each for variance." ]
Please provide a description of the function:def merge_unscoped_hparams(scopes_and_hparams): merged_values = {} for (scope, hparams) in scopes_and_hparams: for key, value in six.iteritems(hparams.values()): scoped_key = "%s.%s" % (scope, key) merged_values[scoped_key] = value return hparam.HPa...
[ "Merge multiple HParams into one with scopes." ]
Please provide a description of the function:def split_scoped_hparams(scopes, merged_hparams): split_values = {scope: {} for scope in scopes} merged_values = merged_hparams.values() for scoped_key, value in six.iteritems(merged_values): scope = scoped_key.split(".")[0] key = scoped_key[len(scope) + 1:]...
[ "Split single HParams with scoped keys into multiple." ]
Please provide a description of the function:def training_loop_hparams_from_scoped_overrides(scoped_overrides, trial_id): trial_hp_overrides = scoped_overrides.values() # Create loop, model, and ppo base HParams loop_hp = create_loop_hparams() model_hp_name = trial_hp_overrides.get( "loop.generative_m...
[ "Create HParams suitable for training loop from scoped HParams.\n\n Args:\n scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These\n parameters are overrides for the base HParams created by\n create_loop_hparams.\n trial_id: str, trial identifier. This is used to register unique...
Please provide a description of the function:def get_keys_to_action(self): # Based on gym AtariEnv.get_keys_to_action() keyword_to_key = { "UP": ord("w"), "DOWN": ord("s"), "LEFT": ord("a"), "RIGHT": ord("d"), "FIRE": ord(" "), } keys_to_action = {} for...
[ "Get mapping from keyboard keys to actions.\n\n Required by gym.utils.play in environment or top level wrapper.\n\n Returns:\n {\n Unicode code point for keyboard key: action (formatted for step()),\n ...\n }\n " ]
Please provide a description of the function:def step(self, action): # Special codes if action in self._player_actions(): envs_step_tuples = self._player_actions()[action]() elif self._wait and action == self.name_to_action_num["NOOP"]: # Ignore no-op, do not pass to environment. envs...
[ "Pass action to underlying environment(s) or perform special action." ]
Please provide a description of the function:def _augment_observation(self, ob, reward, cumulative_reward): img = PIL_Image().new("RGB", (ob.shape[1], self.HEADER_HEIGHT,)) draw = PIL_ImageDraw().Draw(img) draw.text( (1, 0), "c:{:3}, r:{:3}".format(int(cumulative_rewar...
[ "\"Expand observation array with additional information header (top rows).\n\n Args:\n ob: observation\n reward: reward to be included in header.\n cumulative_reward: total cumulated reward to be included in header.\n\n Returns:\n Expanded observation array.\n " ]
Please provide a description of the function:def _player_step_tuple(self, envs_step_tuples): ob_real, reward_real, _, _ = envs_step_tuples["real_env"] ob_sim, reward_sim, _, _ = envs_step_tuples["sim_env"] ob_err = absolute_hinge_difference(ob_sim, ob_real) ob_real_aug = self._augment_observation(...
[ "Construct observation, return usual step tuple.\n\n Args:\n envs_step_tuples: tuples.\n\n Returns:\n Step tuple: ob, reward, done, info\n ob: concatenated images [simulated observation, real observation,\n difference], with additional informations in header.\n reward: real en...
Please provide a description of the function:def reset(self): self._frame_counter = 0 ob_real = self.real_env.reset() # Initialize simulated environment with frames from real one. self.sim_env.add_to_initial_stack(ob_real) for _ in range(3): ob_real, _, _, _ = self.real_env.step(self.name...
[ "Reset simulated and real environments." ]
Please provide a description of the function:def _step_envs(self, action): self._frame_counter += 1 real_env_step_tuple = self.real_env.step(action) sim_env_step_tuple = self.sim_env.step(action) self.sim_env.add_to_initial_stack(real_env_step_tuple[0]) return self._pack_step_tuples(real_env_st...
[ "Perform step(action) on environments and update initial_frame_stack." ]
Please provide a description of the function:def _player_step_tuple(self, envs_step_tuples): ob, reward, done, info = envs_step_tuples["env"] ob = self._augment_observation(ob, reward, self.cumulative_reward) return ob, reward, done, info
[ "Augment observation, return usual step tuple." ]
Please provide a description of the function:def add_delta_deltas(filterbanks, name=None): delta_filter = np.array([2, 1, 0, -1, -2]) delta_delta_filter = scipy.signal.convolve(delta_filter, delta_filter, "full") delta_filter_stack = np.array( [[0] * 4 + [1] + [0] * 4, [0] * 2 + list(delta_filter) + [0]...
[ "Compute time first and second-order derivative channels.\n\n Args:\n filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]\n name: scope name\n\n Returns:\n float32 tensor with shape [batch_size, len, num_bins, 3]\n " ]
Please provide a description of the function:def compute_mel_filterbank_features( waveforms, sample_rate=16000, dither=1.0 / np.iinfo(np.int16).max, preemphasis=0.97, frame_length=25, frame_step=10, fft_length=None, window_fn=functools.partial(tf.contrib.signal.hann_window, periodic=True), lower_edg...
[ "Implement mel-filterbank extraction using tf ops.\n\n Args:\n waveforms: float32 tensor with shape [batch_size, max_len]\n sample_rate: sampling rate of the waveform\n dither: stddev of Gaussian noise added to waveform to prevent quantization\n artefacts\n preemphasis: waveform high-pass filterin...
Please provide a description of the function:def play_env_problem_randomly(env_problem, num_steps): # Reset all environments. env_problem.reset() # Play all environments, sampling random actions each time. for _ in range(num_steps): # Sample batch_size actions from the acti...
[ "Plays the env problem by randomly sampling actions for `num_steps`." ]
Please provide a description of the function:def generate_plaintext_random(plain_vocab, distribution, train_samples, length): if distribution is not None: assert len(distribution) == len(plain_vocab) train_indices = np.random.choice( range(len(plain_vocab)), (train_sample...
[ "Generates samples of text from the provided vocabulary.\n\n Args:\n plain_vocab: vocabulary.\n distribution: distribution.\n train_samples: samples for training.\n length: length.\n\n Returns:\n train_indices (np.array of Integers): random integers for training.\n shape = [num_samples, length...
Please provide a description of the function:def encipher_shift(plaintext, plain_vocab, shift): ciphertext = [] cipher = ShiftEncryptionLayer(plain_vocab, shift) for _, sentence in enumerate(plaintext): cipher_sentence = [] for _, character in enumerate(sentence): encrypted_char = cipher.encrypt...
[ "Encrypt plain text with a single shift layer.\n\n Args:\n plaintext (list of list of Strings): a list of plain text to encrypt.\n plain_vocab (list of Integer): unique vocabularies being used.\n shift (Integer): number of shift, shift to the right if shift is positive.\n Returns:\n ciphertext (list o...
Please provide a description of the function:def encipher_vigenere(plaintext, plain_vocab, key): ciphertext = [] # generate Vigenere table layers = [ ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab)) ] for i, sentence in enumerate(plaintext): cipher_sentence = [] for j, c...
[ "Encrypt plain text with given key.\n\n Args:\n plaintext (list of list of Strings): a list of plain text to encrypt.\n plain_vocab (list of Integer): unique vocabularies being used.\n key (list of Integer): key to encrypt cipher using Vigenere table.\n\n Returns:\n ciphertext (list of Strings): encry...
Please provide a description of the function:def _super_stack(inputs, attention_bias, hparams, mp, padding="LEFT"): layers = hparams.layers.strip(",").split(",") moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")] if hpara...
[ "A stack of super_lm layers.\n\n Args:\n inputs: a list of Tensors\n attention_bias: list of bias Tensor for self-attention\n (see common_attention.attention_bias())\n hparams: hyperparameters for model\n mp: a Parallelism object\n padding: a string\n\n Returns:\n y: a list of Tensors\n ...
Please provide a description of the function:def super_lm_base(): hparams = common_hparams.basic_params1() hparams.hidden_size = 512 hparams.moe_hidden_sizes = "512" hparams.batch_size = 16384 hparams.max_length = 0 # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in tr...
[ "Set of hyperparameters." ]
Please provide a description of the function:def super_lm_moe(): hparams = super_lm_base() hparams.layers = ( ("n,att,m,d,a," "n,moe,m,d,a,") * 4 + "n,ffn,d") hparams.moe_num_experts = 32 hparams.moe_hidden_sizes = "1024" return hparams
[ "Add mixture of experts with ~1B params." ]
Please provide a description of the function:def xmoe_tr_dense_2k(): hparams = mtf_transformer2.mtf_bitransformer_base() hparams.encoder_layers = ["self_att", "drd"] * 4 hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 4 hparams.batch_size = 64 hparams.shared_embedding_and_softmax_weights = True ...
[ "Series of architectural experiments on Translation.\n\n # run on 8-core setup\n\n 119M params, einsum=0.95e13\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:def xmoe_tr_1d(): hparams = xmoe_tr_dense_2k() hparams.encoder_layers = ["self_att", "moe_1d"] * 4 hparams.decoder_layers = ["self_att", "enc_att", "moe_1d"] * 4 hparams.layout = "batch:batch;experts:batch" hparams.moe_hidden_size = 2048 hparams.moe_num_expert...
[ "Mixture of experts (16 experts).\n\n\n 623M Params, einsum=1.09e13\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:def xmoe_tr_2d(): hparams = xmoe_tr_dense_2k() hparams.mesh_shape = "b0:2;b1:4" hparams.outer_batch_size = 4 hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0" hparams.encoder_layers = ["self_att", "moe_2d"] * 4 hparams.decoder_layers = [...
[ "Mixture of experts (16 experts).\n\n 623M Params, einsum=1.09e13\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:def xmoe_dense_4k(): hparams = mtf_transformer.mtf_transformer_base_lm() hparams.attention_dropout = 0.0 hparams.relu_dropout = 0.0 hparams.layer_prepostprocess_dropout = 0.0 # The following hparams are constant across all these experiments. hparams.batch_siz...
[ "Series of architectural experiments on cheap language models.\n\n For all of these architectures, we run on languagemodel_lm1b8k_packed\n for 32000 steps.\n\n All log-perplexities are per-token - multiply by 1.298 for per-word\n\n Results:\n model params(M) einsum alltoall mxu-util log-ppl\n ...
Please provide a description of the function:def xmoe_top_2(): hparams = xmoe_dense_4k() moe.set_default_moe_hparams(hparams) hparams.mesh_shape = "all:8" hparams.layout = "batch:all;experts:all" return hparams
[ "Mixture of experts (16 experts)." ]
Please provide a description of the function:def xmoe_2d(): hparams = xmoe_top_2() hparams.decoder_layers = ["att", "hmoe"] * 4 hparams.mesh_shape = "b0:2;b1:4" hparams.outer_batch_size = 4 hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0" hparams.moe_num_experts = [4, 4] return ...
[ "Two-dimensional hierarchical mixture of 16 experts." ]
Please provide a description of the function:def xmoe2_dense(sz): hparams = mtf_transformer.mtf_transformer_paper_lm(sz) hparams.attention_dropout = 0.0 hparams.relu_dropout = 0.0 hparams.layer_prepostprocess_dropout = 0.0 hparams.max_length = 1024 hparams.batch_size = 128 hparams.learning_rate_schedul...
[ "Series of architectural experiments on language modeling.\n\n Larger models than the ones above.\n\n All models are trained on sequences of 1024 tokens.\n\n We assume infinite training data, so no dropout necessary.\n We process 2^36 tokens in training = 524288 steps at batch size 128\n\n TODO(noam): find a l...
Please provide a description of the function:def xmoe2_v1(): hparams = xmoe2_dense(0) moe.set_default_moe_hparams(hparams) hparams.decoder_layers = ( ["local_att", "local_att", "drd", "att", "drd", "local_att", "local_att", "hmoe"] * 4)[:-1] hparams.d_ff = 2048 hparams.d_kv = 128 hparams.moe...
[ "Model incorporating mixture-of-experts and local-attention.\n\n ~6B parameters\n\n 32 experts in 3 hierarchichal moe layers.\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:def xmoe2_v1_x128(): hparams = xmoe2_v1() hparams.moe_num_experts = [16, 8] hparams.outer_batch_size = 8 hparams.mesh_shape = "b0:8;b1:16" hparams.batch_size = 512 hparams.learning_rate_decay_steps = 16384 return hparams
[ "128 experts, ~25B params - Train for 131072 steps on 8x8." ]
Please provide a description of the function:def xmoe2_tiny(): hparams = xmoe2_v1() hparams.decoder_layers = [ "local_att", "att", "compressed_att", "drd", "hmoe"] hparams.d_model = 128 hparams.moe_hidden_size = 512 hparams.outer_batch_size = 0 hparams.batch_size = 2 hparams.mesh_shape = "" hpa...
[ "Test on local cpu." ]
Please provide a description of the function:def xmoe2_v1_l4k(): hparams = xmoe2_v1() hparams.batch_size = 32 hparams.max_length = 4096 hparams.split_to_length = 4096 hparams.reshape_logits_hack = True return hparams
[ "With sequence length 4096." ]
Please provide a description of the function:def xmoe2_v1_l4k_local_only(): hparams = xmoe2_v1_l4k() hparams.decoder_layers = [ "local_att" if l == "att" else l for l in hparams.decoder_layers] return hparams
[ "With sequence length 4096." ]
Please provide a description of the function:def xmoe2_v1_l4k_global_only(): hparams = xmoe2_v1_l4k() hparams.decoder_layers = [ "att" if l == "local_att" else l for l in hparams.decoder_layers] return hparams
[ "With sequence length 4096." ]
Please provide a description of the function:def xmoe2_v1_l4k_compressed_c4(): hparams = xmoe2_v1_l4k() hparams.decoder_layers = [ "compressed_att" if l == "att" else l for l in hparams.decoder_layers] hparams.compression_factor = 4 return hparams
[ "With compressed attention." ]
Please provide a description of the function:def wiki_2x2_base(): hparams = mtf_transformer.mtf_transformer_base_lm() hparams.shared_embedding_and_softmax_weights = False # no dropout - dataset is big enough to avoid overfitting. hparams.attention_dropout = 0.0 hparams.relu_dropout = 0.0 hparams.layer_pr...
[ "Set of architectural experiments - language model on wikipedia on a 2x2.\n\n 1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:def denoise_z15(): hparams = xmoe2_dense_0() hparams.decoder_type = "denoising" hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15} hparams.noising_use_eval_during_train = 0.25 return hparams
[ "Replace tokens instead of masking." ]
Please provide a description of the function:def denoise_v1_m15(): hparams = xmoe2_v1() # no local attention # TODO(noam): non-masked version of local-attention hparams.decoder_layers = [ "att" if l == "local_att" else l for l in hparams.decoder_layers] hparams.decoder_type = "denoising" hparams.no...
[ "Denoising experiment." ]
Please provide a description of the function:def _download_mlu_data(tmp_dir, data_dir): if not tf.gfile.Exists(data_dir): tf.gfile.MakeDirs(data_dir) filename = os.path.basename(_URL) file_path = os.path.join(tmp_dir, filename) headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) " ...
[ "Downloads and extracts the dataset.\n\n Args:\n tmp_dir: temp directory to download and extract the dataset\n data_dir: The base directory where data and vocab files are stored.\n\n Returns:\n tmp_dir: temp directory containing the raw data.\n " ]
Please provide a description of the function:def _get_ngram_counter(ids, n): # Remove zero IDs used to pad the sequence. ids = [token_id for token_id in ids if token_id != 0] ngram_list = [tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n)] ngrams = set(ngram_list) counts = collections.Counter() for ng...
[ "Get a Counter with the ngrams of the given ID list.\n\n Args:\n ids: np.array or a list corresponding to a single sentence\n n: n-gram size\n\n Returns:\n collections.Counter with ID tuples as keys and 1s as values.\n " ]
Please provide a description of the function:def _get_fbeta_score(true_positives, selected, relevant, beta=1): precision = 1 if selected > 0: precision = true_positives / selected if beta == 0: return precision recall = 1 if relevant > 0: recall = true_positives / relevant if precision > 0 an...
[ "Compute Fbeta score.\n\n Args:\n true_positives: Number of true positive ngrams.\n selected: Number of selected ngrams.\n relevant: Number of relevant ngrams.\n beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only.\n\n Returns:\n Fbeta score.\n " ]
Please provide a description of the function:def get_addition_score(source_counts, prediction_counts, target_counts): added_to_prediction_counts = prediction_counts - source_counts true_positives = sum((added_to_prediction_counts & target_counts).values()) selected = sum(added_to_prediction_counts.values()) ...
[ "Compute the addition score (Equation 4 in the paper)." ]
Please provide a description of the function:def get_keep_score(source_counts, prediction_counts, target_counts): source_and_prediction_counts = source_counts & prediction_counts source_and_target_counts = source_counts & target_counts true_positives = sum((source_and_prediction_counts & ...
[ "Compute the keep score (Equation 5 in the paper)." ]
Please provide a description of the function:def get_deletion_score(source_counts, prediction_counts, target_counts, beta=0): source_not_prediction_counts = source_counts - prediction_counts source_not_target_counts = source_counts - target_counts true_positives = sum((source_not_prediction_counts & ...
[ "Compute the deletion score (Equation 6 in the paper)." ]
Please provide a description of the function:def get_sari_score(source_ids, prediction_ids, list_of_targets, max_gram_size=4, beta_for_deletion=0): addition_scores = [] keep_scores = [] deletion_scores = [] for n in range(1, max_gram_size + 1): source_counts = _get_ngram_counter(source...
[ "Compute the SARI score for a single prediction and one or more targets.\n\n Args:\n source_ids: a list / np.array of SentencePiece IDs\n prediction_ids: a list / np.array of SentencePiece IDs\n list_of_targets: a list of target ID lists / np.arrays\n max_gram_size: int. largest n-gram size we care abo...
Please provide a description of the function:def get_sari(source_ids, prediction_ids, target_ids, max_gram_size=4): def get_sari_numpy(source_ids, prediction_ids, target_ids): sari_scores = [] keep_scores = [] add_scores = [] deletion_scores = [] # Iterate over elements in the batch. ...
[ "Computes the SARI scores from the given source, prediction and targets.\n\n Args:\n source_ids: A 2D tf.Tensor of size (batch_size , sequence_length)\n prediction_ids: A 2D tf.Tensor of size (batch_size, sequence_length)\n target_ids: A 3D tf.Tensor of size (batch_size, number_of_targets,\n sequen...
Please provide a description of the function:def sari_score(predictions, labels, features, **unused_kwargs): if "inputs" not in features: raise ValueError("sari_score requires inputs feature") # Convert the inputs and outputs to a [batch_size, sequence_length] tensor. inputs = tf.squeeze(features["inputs"...
[ "Computes the SARI scores from the given source, prediction and targets.\n\n An approximate SARI scoring method since we do not glue word pieces or\n decode the ids and tokenize the output. By default, we use ngram order of 4.\n Also, this does not have beam search.\n\n Args:\n predictions: tensor, model pre...
Please provide a description of the function:def _get_mnist(directory): for filename in [ _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME, _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME ]: generator_utils.maybe_download(directory, filename, _MNIST_URL + filename)
[ "Download all MNIST files to directory unless they are there." ]
Please provide a description of the function:def _extract_mnist_images(filename, num_images): with gzip.open(filename) as bytestream: bytestream.read(16) buf = bytestream.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images) data = np.frombuffer(buf, dtype=np.uint8) data = data.reshape(num_image...
[ "Extract images from an MNIST file into a numpy array.\n\n Args:\n filename: The path to an MNIST images file.\n num_images: The number of images in the file.\n\n Returns:\n A numpy array of shape [number_of_images, height, width, channels].\n " ]
Please provide a description of the function:def _extract_mnist_labels(filename, num_labels): with gzip.open(filename) as bytestream: bytestream.read(8) buf = bytestream.read(num_labels) labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) return labels
[ "Extract labels from an MNIST file into integers.\n\n Args:\n filename: The path to an MNIST labels file.\n num_labels: The number of labels in the file.\n\n Returns:\n A int64 numpy array of shape [num_labels]\n " ]
Please provide a description of the function:def mnist_common_generator(tmp_dir, training, how_many, data_filename, label_filename, start_from=0): data_path = os.path.join(tmp_dir,...
[ "Image generator for MNIST.\n\n Args:\n tmp_dir: path to temporary storage directory.\n training: a Boolean; if true, we use the train set, otherwise the test set.\n how_many: how many images and labels to generate.\n data_filename: file that contains features data.\n label_filename: file that conta...
Please provide a description of the function:def mnist_generator(tmp_dir, training, how_many, start_from=0): _get_mnist(tmp_dir) d = _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME l = _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME return mnist_common_generato...
[ "Image generator for MNIST.\n\n Args:\n tmp_dir: path to temporary storage directory.\n training: a Boolean; if true, we use the train set, otherwise the test set.\n how_many: how many images and labels to generate.\n start_from: from which image to start.\n\n Returns:\n An instance of image_genera...
Please provide a description of the function:def _get_fashion_mnist(directory): # Fashion mnist files have the same names as MNIST. # We must choose a separate name (by adding 'fashion-' prefix) in the tmp_dir. for filename in [ _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME, _MNIST_TEST_...
[ "Download all FashionMNIST files to directory unless they are there." ]
Please provide a description of the function:def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0): _get_fashion_mnist(tmp_dir) d = _FASHION_MNIST_LOCAL_FILE_PREFIX + ( _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME) l = _FASHION_MNIST_LOCAL_FILE_PREFIX + ( _...
[ "Image generator for FashionMNIST.\n\n Args:\n tmp_dir: path to temporary storage directory.\n training: a Boolean; if true, we use the train set, otherwise the test set.\n how_many: how many images and labels to generate.\n start_from: from which image to start.\n\n Returns:\n An instance of image...
Please provide a description of the function:def generate_data(timeseries_length, timeseries_params): x = range(timeseries_length) multi_timeseries = [] for p in timeseries_params: # Trend y1 = [p["m"] * i + p["b"] for i in x] # Period y2 = [p["A"] * p["fn"](i / p["freqcoeff"]) for i in x] ...
[ "Generates synthetic timeseries using input parameters.\n\n Each generated timeseries has timeseries_length data points.\n Parameters for each timeseries are specified by timeseries_params.\n\n Args:\n timeseries_length: Number of data points to generate for each timeseries.\n timeseries_params: Parameters...
Please provide a description of the function:def next_frame_basic_stochastic(): hparams = basic_deterministic_params.next_frame_basic_deterministic() hparams.stochastic_model = True hparams.add_hparam("latent_channels", 1) hparams.add_hparam("latent_std_min", -5.0) hparams.add_hparam("num_iterations_1st_st...
[ "Basic 2-frame conv model with stochastic tower." ]
Please provide a description of the function:def next_frame_sampling_stochastic(): hparams = basic_deterministic_params.next_frame_sampling() hparams.stochastic_model = True hparams.add_hparam("latent_channels", 1) hparams.add_hparam("latent_std_min", -5.0) hparams.add_hparam("num_iterations_1st_stage", 15...
[ "Basic 2-frame conv model with stochastic tower." ]
Please provide a description of the function:def next_frame_basic_stochastic_discrete(): hparams = basic_deterministic_params.next_frame_sampling() hparams.batch_size = 4 hparams.video_num_target_frames = 6 hparams.scheduled_sampling_mode = "prob_inverse_lin" hparams.scheduled_sampling_decay_steps = 40000 ...
[ "Basic 2-frame conv model with stochastic discrete latent." ]
Please provide a description of the function:def next_frame_stochastic_discrete_range(rhp): rhp.set_float("learning_rate_constant", 0.001, 0.01) rhp.set_float("dropout", 0.2, 0.6) rhp.set_int("filter_double_steps", 3, 5) rhp.set_discrete("hidden_size", [64, 96, 128]) rhp.set_discrete("bottleneck_bits", [32...
[ "Next frame stochastic discrete tuning grid." ]
Please provide a description of the function:def nested_map(x, f): if isinstance(x, list): return [nested_map(y, f) for y in x] if isinstance(x, tuple): return tuple([nested_map(y, f) for y in x]) if isinstance(x, dict): return {k: nested_map(x[k], f) for k in x} return f(x)
[ "Map the function f to the nested structure x (dicts, tuples, lists)." ]
Please provide a description of the function:def shapes(x): def shape(x): try: return x.shape except Exception: # pylint: disable=broad-except return [] return nested_map(x, shape)
[ "Get a structure of shapes for a structure of nested arrays." ]
Please provide a description of the function:def sizes(x): def size(x): try: return x.size except Exception: # pylint: disable=broad-except return 0 return nested_map(x, size)
[ "Get a structure of sizes for a structure of nested arrays." ]
Please provide a description of the function:def _find_frame(stack, start=0): # We want to find the first place where the layer was called # that is *not* an __init__ function of an inheriting layer. frame = inspect.getframeinfo(stack[start][0]) # If we are in an init, move on. if frame.function == '__init...
[ "Find the frame with the caller on the stack." ]
Please provide a description of the function:def _shorten_file_path(line): start = line.lower().find('file') if start < 0: return line first_quote = line.find('"', start) if first_quote < 0: return line second_quote = line.find('"', first_quote + 1) if second_quote < 0: return line path = l...
[ "Shorten file path in error lines for more readable tracebacks." ]
Please provide a description of the function:def _short_traceback(skip=3): counter, res = 0, [] # Skipping 3 lines by default: the top (useless) and self-call. lines = traceback.format_exc().splitlines()[skip:] for l in lines: res.append(_shorten_file_path(l)) if counter % 2 == 1: res.append(''...
[ "Cleaned-up form of traceback." ]
Please provide a description of the function:def layer(output_shape=None, new_parameters=None): def layer_decorator(call): def output_shape_fun(self, input_shape): if output_shape is None: return input_shape kwargs = self._init_kwargs # pylint: disable=protected-access return ou...
[ "Create a layer class from a function.", "Decorating the call function.", "The call function of the created class, derived from call." ]
Please provide a description of the function:def initialize(self, input_shape, rng): try: # Re-using this layer, no new parameters. if not self._first_init: return () # First call of this layer, create parameters. self._first_init = False self._params = self.new_parameter...
[ "Initialize the layer given an input shape and rng.\n\n Returns new_parameters(input_shape, rng) on the first call and () on any\n subsequent call, as the layer is already initialized. This is used for\n networks that share parameters, so the layer only produces them once.\n\n Note that all arguments an...
Please provide a description of the function:def _references_content(ref_files): example_spec = { "url": tf.FixedLenFeature([], tf.string), "content": tf.FixedLenFeature([], tf.string), } data = {} for ex in generator_utils.tfrecord_iterator( ref_files, gzipped=True, example_spec=example_sp...
[ "Returns dict<str ref_url, str ref_content>." ]
Please provide a description of the function:def _wiki_urls_for_shard(shard_id, urls_dir=None): urls_dir = urls_dir or WIKI_URLS_DIR urls_filepath = os.path.join(urls_dir, WIKI_URLS_FILE % shard_id) with tf.gfile.GFile(urls_filepath) as f: return json.loads(f.read())
[ "Urls for chunk: dict<str wiki_url, list<str> ref_urls>." ]
Please provide a description of the function:def _wiki_articles(shard_id, wikis_dir=None): if not wikis_dir: wikis_dir = WIKI_CONTENT_DIR with tf.Graph().as_default(): dataset = tf.data.TFRecordDataset( cc_utils.readahead( os.path.join(wikis_dir, WIKI_CONTENT_FILE % shard_id)), ...
[ "Generates WikipediaArticles from GCS that are part of shard shard_id.", "Parse serialized Example containing Wikipedia article content." ]
Please provide a description of the function:def rank_reference_paragraphs(wiki_title, references_content, normalize=True): normalized_title = _normalize_text(wiki_title) title_tokens = _tokens_to_score( set(tokenizer.encode(text_encoder.native_to_unicode(normalized_title)))) ref_paragraph_info = [] do...
[ "Rank and return reference paragraphs by tf-idf score on title tokens." ]
Please provide a description of the function:def produce_examples(shard_ids, wikis_dir, refs_dir, urls_dir, vocab_path, out_filepaths): # * Join the Wikipedia articles with their references # * Run Tf-idf to sort reference paragraphs # * Encode the Wikipedia and reference text with the voc...
[ "Produce examples from shard_ids to out_filepaths.", "Generate Example dicts." ]
Please provide a description of the function:def _encode_wiki_sections(sections, vocab): ids = [] section_boundaries = [] for i, section in enumerate(sections): if i > 0: # Skip including article title ids.extend(vocab.encode(_format_title(_normalize_text(section.title)))) ids.extend(vocab....
[ "Encodes sections with vocab. Returns ids and section boundaries." ]
Please provide a description of the function:def extract_references_from_wets(wet_files, metadata_dir, out_dir, tmp_dir=None): # Setup output files shard_files = make_ref_shard_files(out_dir) num_refs = 0 for i, wet_file in enumerate(wet_files): num_refs_in_wet = 0 t...
[ "Extract references from WET files into sharded output files." ]
Please provide a description of the function:def _dump_to_pages(dump): pos = 0 ret = [] start_tag = u"<page>\n" end_tag = u"</page>\n" while True: start_pos = dump.find(start_tag, pos) if start_pos == -1: break start_pos += len(start_tag) end_pos = dump.find(end_tag, start_pos) if...
[ "Extract pages from an xml dump.\n\n Args:\n dump: a unicode string\n Returns:\n a list of unicode strings\n " ]
Please provide a description of the function:def _page_to_title(page): # print("page=%s" % page) start_tag = u"<title>" end_tag = u"</title>" start_pos = page.find(start_tag) end_pos = page.find(end_tag) assert start_pos != -1 assert end_pos != -1 start_pos += len(start_tag) return page[start_pos:e...
[ "Extract the title from a page.\n\n Args:\n page: a unicode string\n Returns:\n a unicode string\n " ]
Please provide a description of the function:def _page_to_text(page): # text start tag looks like "<text ..otherstuff>" start_pos = page.find(u"<text") assert start_pos != -1 end_tag_pos = page.find(u">", start_pos) assert end_tag_pos != -1 end_tag_pos += len(u">") end_pos = page.find(u"</text>") if ...
[ "Extract the text from a page.\n\n Args:\n page: a unicode string\n Returns:\n a unicode string\n " ]
Please provide a description of the function:def _find_and_replace(text, start_string, end_string, replace_fn): ret = u"" current_pos = 0 while True: start_pos = text.find(start_string, current_pos) if start_pos == -1: ret += text[current_pos:] break ret += text[current_pos:start_pos] ...
[ "Remove everything found between instances of start_string and end_string.\n\n Replace each such instance with replace_fn(removed_text)\n\n e.g. _find_and_replace(u\"the [[fat]] cat [[sat]]\", u\"[[\", u\"]]\", lambda x: x)\n = u\"the fat cat sat\"\n\n Args:\n text: a unicode string\n start_string: a un...
Please provide a description of the function:def _remove_double_brackets(text): def replacement_fn(s): if u":" in s: # this is probably a category or something like that. return "" # keep the part after the bar. bar_pos = s.find(u"|") if bar_pos == -1: return s return s[bar_po...
[ "Remove double brackets (internal links) but leave the viewable text.\n\n Args:\n text: a unicode string\n Returns:\n a unicode string\n " ]
Please provide a description of the function:def image_encoder(image_feat, hparams, name="image_encoder", save_weights_to=None, make_image_summary=True): x = image_feat image_hidden_size = hparams.image_hidden_size or hparams.hidden_size ...
[ "A stack of self attention layers." ]
Please provide a description of the function:def prepare_question_encoder(inputs, hparams): encoder_input = inputs # Usual case - not a packed dataset. encoder_padding = common_attention.embedding_to_padding(encoder_input) ignore_padding = common_attention.attention_bias_ignore_padding( encoder_padding...
[ "Prepare question encoder.\n\n Args:\n inputs: a Tensor.\n hparams: run hyperparameters\n\n Returns:\n encoder_input: a Tensor, bottom of encoder stack\n encoder_self_attention_bias: a bias tensor for use in encoder self-attention\n " ]
Please provide a description of the function:def question_encoder(question, question_self_attention_bias, hparams, name="question_encoder", save_weights_to=None, make_image_summary=True): x = question with tf...
[ "A stack of self attention layers." ]
Please provide a description of the function:def attn(image_feat, query, hparams, name="attn", save_weights_to=None, make_image_summary=True): with tf.variable_scope(name, "attn", values=[image_feat, query]): total_key_depth = hparams.attention_key_channels or hpara...
[ "Attention on image feature with question as query." ]
Please provide a description of the function:def mlp(feature, hparams, name="mlp"): with tf.variable_scope(name, "mlp", values=[feature]): num_mlp_layers = hparams.num_mlp_layers mlp_size = hparams.mlp_size for _ in range(num_mlp_layers): feature = common_layers.dense(feature, mlp_size, activatio...
[ "Multi-layer perceptron with dropout and ReLU activation." ]
Please provide a description of the function:def prepare_image_question_encoder(image_feat, question, hparams): encoder_input = tf.concat([image_feat, question], axis=1) encoder_padding = common_attention.embedding_to_padding(encoder_input) ignore_padding = common_attention.attention_bias_ignore_padding( ...
[ "Prepare encoder.\n\n Args:\n image_feat: a Tensor.\n question: a Tensor.\n hparams: run hyperparameters\n\n Returns:\n encoder_input: a Tensor, bottom of encoder stack\n encoder_self_attention_bias: a bias tensor for use in encoder self-attention\n " ]
Please provide a description of the function:def image_question_encoder(encoder_inputs, encoder_self_attention_bias, hparams, query=None, name="image_question_encoder", save_weights_to=...
[ "A stack of self attention layers." ]
Please provide a description of the function:def decoder(decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, name="decoder", save_weights_to=None, make_image_summary=True,): x = deco...
[ "A stack of transformer layers.\n\n Args:\n decoder_input: a Tensor\n encoder_output: a Tensor\n decoder_self_attention_bias: bias Tensor for self-attention\n (see common_attention.attention_bias())\n encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention\n (see common_atte...
Please provide a description of the function:def iterative_encoder_decoder(encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias, query, hparams): for _ in range(hparams.num_...
[ "Iterative encoder decoder." ]
Please provide a description of the function:def vqa_self_attention_base(): hparams = common_hparams.basic_params1() hparams.batch_size = 128 hparams.use_fixed_batch_size = True, hparams.optimizer = "adam" hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.997 hparams.optimizer_adam_ep...
[ "VQA attention baseline hparams." ]
Please provide a description of the function:def vqa_self_attention_feature_batch1024_big(): hparams = vqa_self_attention_feature_batch1024() hparams.learning_rate_constant = 7e-4 hparams.batch_size = 256 hparams.hidden_size = 1024 hparams.filter_size = 4096 hparams.num_heads = 16 hparams.layer_prepost...
[ "Big model." ]