id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
8,761
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
from albert import tokenization
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf

# NOTE(review): `flags` is used here but never imported in this excerpt —
# presumably `from absl import flags` at the top of the original file; confirm.
FLAGS = flags.FLAGS


def create_instances_from_document(
    all_documents, document_index, max_seq_length, short_seq_prob,
    masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
  """Creates `TrainingInstance`s for a single document.

  Args:
    all_documents: list of documents; each document is a list of "segments"
      (token lists), one per input sentence.
    document_index: index into `all_documents` of the document to process.
    max_seq_length: hard upper bound on tokens per instance (incl. specials).
    short_seq_prob: probability of deliberately producing a short sequence.
    masked_lm_prob: fraction of tokens to mask for the MLM task.
    max_predictions_per_seq: cap on masked positions per instance.
    vocab_words: list of vocabulary tokens (used for random replacement).
    rng: a `random.Random` instance driving all sampling.

  Returns:
    A list of `TrainingInstance` objects for this document.
  """
  document = all_documents[document_index]

  # Account for [CLS], [SEP], [SEP]
  max_num_tokens = max_seq_length - 3

  # We *usually* want to fill up the entire sequence since we are padding
  # to `max_seq_length` anyways, so short sequences are generally wasted
  # computation. However, we *sometimes*
  # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
  # sequences to minimize the mismatch between pre-training and fine-tuning.
  # The `target_seq_length` is just a rough target however, whereas
  # `max_seq_length` is a hard limit.
  target_seq_length = max_num_tokens
  if rng.random() < short_seq_prob:
    target_seq_length = rng.randint(2, max_num_tokens)

  # We DON'T just concatenate all of the tokens from a document into a long
  # sequence and choose an arbitrary split point because this would make the
  # next sentence prediction task too easy. Instead, we split the input into
  # segments "A" and "B" based on the actual "sentences" provided by the user
  # input.
  instances = []
  current_chunk = []
  current_length = 0
  i = 0
  while i < len(document):
    segment = document[i]
    current_chunk.append(segment)
    current_length += len(segment)
    if i == len(document) - 1 or current_length >= target_seq_length:
      if current_chunk:
        # `a_end` is how many segments from `current_chunk` go into the `A`
        # (first) sentence.
        a_end = 1
        if len(current_chunk) >= 2:
          a_end = rng.randint(1, len(current_chunk) - 1)

        tokens_a = []
        for j in range(a_end):
          tokens_a.extend(current_chunk[j])

        tokens_b = []
        # Random next
        is_random_next = False
        if len(current_chunk) == 1 or \
            (FLAGS.random_next_sentence and rng.random() < 0.5):
          is_random_next = True
          target_b_length = target_seq_length - len(tokens_a)

          # This should rarely go for more than one iteration for large
          # corpora. However, just to be careful, we try to make sure that
          # the random document is not the same as the document
          # we're processing.
          for _ in range(10):
            random_document_index = rng.randint(0, len(all_documents) - 1)
            if random_document_index != document_index:
              break

          random_document = all_documents[random_document_index]
          random_start = rng.randint(0, len(random_document) - 1)
          for j in range(random_start, len(random_document)):
            tokens_b.extend(random_document[j])
            if len(tokens_b) >= target_b_length:
              break
          # We didn't actually use these segments so we "put them back" so
          # they don't go to waste.
          num_unused_segments = len(current_chunk) - a_end
          i -= num_unused_segments
        elif not FLAGS.random_next_sentence and rng.random() < 0.5:
          is_random_next = True
          for j in range(a_end, len(current_chunk)):
            tokens_b.extend(current_chunk[j])
          # Note(mingdachen): in this case, we just swap tokens_a and tokens_b
          tokens_a, tokens_b = tokens_b, tokens_a
        # Actual next
        else:
          is_random_next = False
          for j in range(a_end, len(current_chunk)):
            tokens_b.extend(current_chunk[j])
        # `truncate_seq_pair` and `create_masked_lm_predictions` /
        # `TrainingInstance` below are defined elsewhere in the original file.
        truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)

        assert len(tokens_a) >= 1
        assert len(tokens_b) >= 1

        tokens = []
        segment_ids = []
        tokens.append("[CLS]")
        segment_ids.append(0)
        for token in tokens_a:
          tokens.append(token)
          segment_ids.append(0)

        tokens.append("[SEP]")
        segment_ids.append(0)

        for token in tokens_b:
          tokens.append(token)
          segment_ids.append(1)
        tokens.append("[SEP]")
        segment_ids.append(1)

        (tokens, masked_lm_positions,
         masked_lm_labels, token_boundary) = create_masked_lm_predictions(
             tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
        instance = TrainingInstance(
            tokens=tokens,
            segment_ids=segment_ids,
            is_random_next=is_random_next,
            token_boundary=token_boundary,
            masked_lm_positions=masked_lm_positions,
            masked_lm_labels=masked_lm_labels)
        instances.append(instance)
      current_chunk = []
      current_length = 0
    i += 1

  return instances


def create_training_instances(input_files, tokenizer, max_seq_length,
                              dupe_factor, short_seq_prob, masked_lm_prob,
                              max_predictions_per_seq, rng):
  """Create `TrainingInstance`s from raw text.

  Args:
    input_files: paths of text files to read (one sentence per line, blank
      line between documents).
    tokenizer: tokenizer exposing `tokenize` and a `vocab` mapping.
    max_seq_length: hard upper bound on tokens per instance.
    dupe_factor: how many times to re-process the corpus with different masks.
    short_seq_prob: probability of deliberately producing a short sequence.
    masked_lm_prob: fraction of tokens to mask for the MLM task.
    max_predictions_per_seq: cap on masked positions per instance.
    rng: a `random.Random` instance driving all sampling/shuffling.

  Returns:
    A shuffled list of `TrainingInstance` objects.
  """
  all_documents = [[]]

  # Input file format:
  # (1) One sentence per line. These should ideally be actual sentences, not
  # entire paragraphs or arbitrary spans of text. (Because we use the
  # sentence boundaries for the "next sentence prediction" task).
  # (2) Blank lines between documents. Document boundaries are needed so
  # that the "next sentence prediction" task doesn't span between documents.
  for input_file in input_files:
    with tf.gfile.GFile(input_file, FLAGS.input_file_mode) as reader:
      while True:
        line = reader.readline()
        if not FLAGS.spm_model_file:
          line = tokenization.convert_to_unicode(line)
        if not line:
          break
        if FLAGS.spm_model_file:
          line = tokenization.preprocess_text(line, lower=FLAGS.do_lower_case)
        else:
          line = line.strip()

        # Empty lines are used as document delimiters
        if not line:
          all_documents.append([])
        tokens = tokenizer.tokenize(line)
        if tokens:
          all_documents[-1].append(tokens)

  # Remove empty documents
  all_documents = [x for x in all_documents if x]
  rng.shuffle(all_documents)

  vocab_words = list(tokenizer.vocab.keys())
  instances = []
  for _ in range(dupe_factor):
    for document_index in range(len(all_documents)):
      instances.extend(
          create_instances_from_document(
              all_documents, document_index, max_seq_length, short_seq_prob,
              masked_lm_prob, max_predictions_per_seq, vocab_words, rng))

  rng.shuffle(instances)
  return instances
Create `TrainingInstance`s from raw text.
8,762
from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import random import time from albert import fine_tuning_utils from albert import modeling from albert import squad_utils import six import tensorflow.compat.v1 as tf from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver from tensorflow.contrib import tpu as contrib_tpu FLAGS = flags.FLAGS The provided code snippet includes necessary dependencies for implementing the `validate_flags_or_throw` function. Write a Python function `def validate_flags_or_throw(albert_config)` to solve the following problem: Validate the input FLAGS or throw an exception. Here is the function: def validate_flags_or_throw(albert_config): """Validate the input FLAGS or throw an exception.""" if not FLAGS.do_train and not FLAGS.do_predict: raise ValueError("At least one of `do_train` or `do_predict` must be True.") if FLAGS.do_train: if not FLAGS.train_file: raise ValueError( "If `do_train` is True, then `train_file` must be specified.") if FLAGS.do_predict: if not FLAGS.predict_file: raise ValueError( "If `do_predict` is True, then `predict_file` must be specified.") if not FLAGS.predict_feature_file: raise ValueError( "If `do_predict` is True, then `predict_feature_file` must be " "specified.") if not FLAGS.predict_feature_left_file: raise ValueError( "If `do_predict` is True, then `predict_feature_left_file` must be " "specified.") if FLAGS.max_seq_length > albert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the ALBERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, albert_config.max_position_embeddings)) if FLAGS.max_seq_length <= FLAGS.max_query_length + 3: raise ValueError( "The max_seq_length (%d) must be greater than max_query_length " "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
Validate the input FLAGS or throw an exception.
8,763
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import time
from albert import classifier_utils
from albert import fine_tuning_utils
from albert import modeling
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu

# NOTE(review): `flags` is used here but never imported in this excerpt —
# presumably `from absl import flags` at the top of the original file; confirm.
FLAGS = flags.FLAGS

tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")


def _serving_input_receiver_fn():
  """Creates an input function for serving."""
  sequence_length = FLAGS.max_seq_length
  serialized_example = tf.placeholder(
      dtype=tf.string, shape=[None], name="serialized_example")
  # All three model inputs share the same fixed-length int64 spec.
  feature_spec = {
      feature_name: tf.FixedLenFeature([sequence_length], dtype=tf.int64)
      for feature_name in ("input_ids", "input_mask", "segment_ids")
  }
  feature_map = tf.parse_example(serialized_example, features=feature_spec)
  # Serving requests carry no labels; supply fixed placeholders so the
  # model_fn's feature contract is satisfied.
  feature_map["is_real_example"] = tf.constant(1, dtype=tf.int32)
  feature_map["label_ids"] = tf.constant(0, dtype=tf.int32)

  # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
  # So cast all int64 to int32.
  feature_map = {
      name: tf.to_int32(tensor) if tensor.dtype == tf.int64 else tensor
      for name, tensor in feature_map.items()
  }

  return tf_estimator.export.ServingInputReceiver(
      features=feature_map, receiver_tensors=serialized_example)
Creates an input function for serving.
8,764
from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os import time from albert import classifier_utils from albert import fine_tuning_utils from albert import modeling import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver from tensorflow.contrib import tpu as contrib_tpu tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") The provided code snippet includes necessary dependencies for implementing the `_add_threshold_to_model_fn` function. Write a Python function `def _add_threshold_to_model_fn(model_fn, threshold)` to solve the following problem: Adds the classifier threshold to the given model_fn. Here is the function: def _add_threshold_to_model_fn(model_fn, threshold): """Adds the classifier threshold to the given model_fn.""" def new_model_fn(features, labels, mode, params): spec = model_fn(features, labels, mode, params) threshold_tensor = tf.constant(threshold, dtype=tf.float32) default_serving_export = spec.export_outputs[ tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] default_serving_export.outputs["threshold"] = threshold_tensor return spec return new_model_fn
Adds the classifier threshold to the given model_fn.
8,765
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
from albert import classifier_utils
from albert import fine_tuning_utils
from albert import modeling
from albert import optimization
from albert import tokenization
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import tpu as contrib_tpu


def convert_single_example(example_index, example, label_size, max_seq_length,
                           tokenizer, max_qa_length):
  """Loads a data file into a list of `InputBatch`s.

  Args:
    example_index: index of this example; the first 5 are logged verbosely.
    example: a RACE example with `context_sentence`, `start_ending`,
      `endings`, `label`, `example_id` — or a `PaddingInputExample`.
    label_size: number of answer choices (rows of zero padding for pad
      examples).
    max_seq_length: fixed per-choice sequence length after padding.
    tokenizer: tokenizer exposing `tokenize` and `convert_tokens_to_ids`.
    max_qa_length: keep only the last `max_qa_length` ending tokens.

  Returns:
    A `classifier_utils.InputFeatures` holding one row per answer choice.
  """
  # RACE is a multiple choice task. To perform this task using AlBERT,
  # we will use the formatting proposed in "Improving Language
  # Understanding by Generative Pre-Training" and suggested by
  # @jacobdevlin-google in this issue
  # https://github.com/google-research/bert/issues/38.
  #
  # Each choice will correspond to a sample on which we run the
  # inference. For a given RACE example, we will create the 4
  # following inputs:
  # - [CLS] context [SEP] choice_1 [SEP]
  # - [CLS] context [SEP] choice_2 [SEP]
  # - [CLS] context [SEP] choice_3 [SEP]
  # - [CLS] context [SEP] choice_4 [SEP]
  # The model will output a single value for each input. To get the
  # final decision of the model, we will run a softmax over these 4
  # outputs.
  if isinstance(example, classifier_utils.PaddingInputExample):
    # All-zero features keep TPU batch shapes fixed; flagged not-real so
    # metrics ignore them.
    return classifier_utils.InputFeatures(
        example_id=0,
        input_ids=[[0] * max_seq_length] * label_size,
        input_mask=[[0] * max_seq_length] * label_size,
        segment_ids=[[0] * max_seq_length] * label_size,
        label_id=0,
        is_real_example=False)
  else:
    context_tokens = tokenizer.tokenize(example.context_sentence)
    if example.start_ending is not None:
      start_ending_tokens = tokenizer.tokenize(example.start_ending)

    all_input_tokens = []
    all_input_ids = []
    all_input_mask = []
    all_segment_ids = []
    for ending in example.endings:
      # We create a copy of the context tokens in order to be
      # able to shrink it according to ending_tokens
      context_tokens_choice = context_tokens[:]
      if example.start_ending is not None:
        ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
      else:
        ending_tokens = tokenizer.tokenize(ending)
      # Modifies `context_tokens_choice` and `ending_tokens` in
      # place so that the total length is less than the
      # specified length. Account for [CLS], [SEP], [SEP] with
      # "- 3"
      ending_tokens = ending_tokens[- max_qa_length:]

      if len(context_tokens_choice) + len(ending_tokens) > max_seq_length - 3:
        # Truncate the context (from the right) before touching the ending.
        context_tokens_choice = context_tokens_choice[: (
            max_seq_length - 3 - len(ending_tokens))]
      tokens = ["[CLS]"] + context_tokens_choice + (
          ["[SEP]"] + ending_tokens + ["[SEP]"])
      segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (
          len(ending_tokens) + 1)

      input_ids = tokenizer.convert_tokens_to_ids(tokens)
      input_mask = [1] * len(input_ids)

      # Zero-pad up to the sequence length.
      padding = [0] * (max_seq_length - len(input_ids))
      input_ids += padding
      input_mask += padding
      segment_ids += padding

      assert len(input_ids) == max_seq_length
      assert len(input_mask) == max_seq_length
      assert len(segment_ids) == max_seq_length

      all_input_tokens.append(tokens)
      all_input_ids.append(input_ids)
      all_input_mask.append(input_mask)
      all_segment_ids.append(segment_ids)

    label = example.label
    if example_index < 5:
      # Verbose logging of the first few examples for eyeballing conversions.
      tf.logging.info("*** Example ***")
      tf.logging.info("id: {}".format(example.example_id))
      for choice_idx, (tokens, input_ids, input_mask, segment_ids) in \
          enumerate(zip(all_input_tokens, all_input_ids, all_input_mask,
                        all_segment_ids)):
        tf.logging.info("choice: {}".format(choice_idx))
        tf.logging.info("tokens: {}".format(" ".join(tokens)))
        tf.logging.info(
            "input_ids: {}".format(" ".join(map(str, input_ids))))
        tf.logging.info(
            "input_mask: {}".format(" ".join(map(str, input_mask))))
        tf.logging.info(
            "segment_ids: {}".format(" ".join(map(str, segment_ids))))
      tf.logging.info("label: {}".format(label))

    return classifier_utils.InputFeatures(
        example_id=example.example_id,
        input_ids=all_input_ids,
        input_mask=all_input_mask,
        segment_ids=all_segment_ids,
        label_id=label
    )
Here is the function: def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file, max_qa_length): """Convert a set of `InputExample`s to a TFRecord file.""" writer = tf.python_io.TFRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, len(label_list), max_seq_length, tokenizer, max_qa_length) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(sum(feature.input_ids, [])) features["input_mask"] = create_int_feature(sum(feature.input_mask, [])) features["segment_ids"] = create_int_feature(sum(feature.segment_ids, [])) features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close()
Convert a set of `InputExample`s to a TFRecord file.
8,766
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
from albert import classifier_utils
from albert import fine_tuning_utils
from albert import modeling
from albert import optimization
from albert import tokenization
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import tpu as contrib_tpu


def create_model(albert_config, is_training, input_ids, input_mask,
                 segment_ids, labels, num_labels, use_one_hot_embeddings,
                 max_seq_length, dropout_prob, hub_module):
  """Creates a classification model.

  Builds the ALBERT multiple-choice scoring graph: each of the `num_labels`
  choices is scored independently with a shared 1-unit output head, then a
  softmax over choices gives per-example probabilities.

  Returns:
    Tuple `(loss, per_example_loss, probabilities, logits, predictions)`.
  """
  bsz_per_core = tf.shape(input_ids)[0]

  # Fold the choice dimension into the batch so every choice is encoded
  # independently: (batch, num_labels, seq) -> (batch * num_labels, seq).
  input_ids = tf.reshape(input_ids,
                         [bsz_per_core * num_labels, max_seq_length])
  input_mask = tf.reshape(input_mask,
                          [bsz_per_core * num_labels, max_seq_length])
  token_type_ids = tf.reshape(segment_ids,
                              [bsz_per_core * num_labels, max_seq_length])

  (output_layer, _) = fine_tuning_utils.create_albert(
      albert_config=albert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=token_type_ids,
      use_one_hot_embeddings=use_one_hot_embeddings,
      use_einsum=True,
      hub_module=hub_module)

  hidden_size = output_layer.shape[-1].value

  # Single-unit head: one scalar score per (example, choice) row.
  output_weights = tf.get_variable(
      "output_weights", [1, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "output_bias", [1], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=1 - dropout_prob)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    # Unfold back to (batch, num_labels) and softmax over the choices.
    logits = tf.reshape(logits, [bsz_per_core, num_labels])
    probabilities = tf.nn.softmax(logits, axis=-1)
    predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(
        labels, depth=tf.cast(num_labels, dtype=tf.int32), dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, probabilities, logits, predictions)


def model_fn_builder(albert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings, max_seq_length, dropout_prob,
                     hub_module):
  """Returns `model_fn` closure for TPUEstimator."""

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      # Older input pipelines may lack the flag; treat everything as real.
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf_estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, probabilities, logits, predictions) = \
        create_model(albert_config, is_training, input_ids, input_mask,
                     segment_ids, label_ids, num_labels,
                     use_one_hot_embeddings, max_seq_length, dropout_prob,
                     hub_module)

    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:

        # On TPU, checkpoint restoration must happen inside a Scaffold so it
        # runs on the worker rather than the coordinator.
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf_estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(total_loss, learning_rate,
                                               num_train_steps,
                                               num_warmup_steps, use_tpu)
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf_estimator.ModeKeys.EVAL:

      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        # Padding examples carry weight 0 and so do not affect the metrics.
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions,
            weights=is_real_example)
        loss = tf.metrics.mean(
            values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities,
                       "predictions": predictions},
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn
Returns `model_fn` closure for TPUEstimator.
8,767
from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from albert import lamb_optimizer import six from six.moves import zip import tensorflow.compat.v1 as tf from tensorflow.contrib import tpu as contrib_tpu class AdamWeightDecayOptimizer(tf.train.Optimizer): """A basic Adam optimizer that includes "correct" L2 weight decay.""" def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-6, exclude_from_weight_decay=None, name="AdamWeightDecayOptimizer"): """Constructs a AdamWeightDecayOptimizer.""" super(AdamWeightDecayOptimizer, self).__init__(False, name) self.learning_rate = learning_rate self.weight_decay_rate = weight_decay_rate self.beta_1 = beta_1 self.beta_2 = beta_2 self.epsilon = epsilon self.exclude_from_weight_decay = exclude_from_weight_decay def apply_gradients(self, grads_and_vars, global_step=None, name=None): """See base class.""" assignments = [] for (grad, param) in grads_and_vars: if grad is None or param is None: continue param_name = self._get_variable_name(param.name) m = tf.get_variable( name=six.ensure_str(param_name) + "/adam_m", shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer()) v = tf.get_variable( name=six.ensure_str(param_name) + "/adam_v", shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer()) # Standard Adam update. next_m = ( tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)) next_v = ( tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))) update = next_m / (tf.sqrt(next_v) + self.epsilon) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want ot decay the weights in a manner that doesn't interact # with the m/v parameters. 
This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. if self._do_use_weight_decay(param_name): update += self.weight_decay_rate * param update_with_lr = self.learning_rate * update next_param = param - update_with_lr assignments.extend( [param.assign(next_param), m.assign(next_m), v.assign(next_v)]) return tf.group(*assignments, name=name) def _do_use_weight_decay(self, param_name): """Whether to use L2 weight decay for `param_name`.""" if not self.weight_decay_rate: return False if self.exclude_from_weight_decay: for r in self.exclude_from_weight_decay: if re.search(r, param_name) is not None: return False return True def _get_variable_name(self, param_name): """Get the variable name from the tensor name.""" m = re.match("^(.*):\\d+$", six.ensure_str(param_name)) if m is not None: param_name = m.group(1) return param_name The provided code snippet includes necessary dependencies for implementing the `create_optimizer` function. Write a Python function `def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu, optimizer="adamw", poly_power=1.0, start_warmup_step=0, colocate_gradients_with_ops=False, excluded_tvars=None)` to solve the following problem: Creates an optimizer training op. Here is the function: def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu, optimizer="adamw", poly_power=1.0, start_warmup_step=0, colocate_gradients_with_ops=False, excluded_tvars=None): """Creates an optimizer training op.""" global_step = tf.train.get_or_create_global_step() learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32) # Implements linear decay of the learning rate. learning_rate = tf.train.polynomial_decay( learning_rate, global_step, num_train_steps, end_learning_rate=0.0, power=poly_power, cycle=False) # Implements linear warmup. 
I.e., if global_step - start_warmup_step < # num_warmup_steps, the learning rate will be # `(global_step - start_warmup_step)/num_warmup_steps * init_lr`. if num_warmup_steps: tf.logging.info("++++++ warmup starts at step " + str(start_warmup_step) + ", for " + str(num_warmup_steps) + " steps ++++++") global_steps_int = tf.cast(global_step, tf.int32) start_warm_int = tf.constant(start_warmup_step, dtype=tf.int32) global_steps_int = global_steps_int - start_warm_int warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32) global_steps_float = tf.cast(global_steps_int, tf.float32) warmup_steps_float = tf.cast(warmup_steps_int, tf.float32) warmup_percent_done = global_steps_float / warmup_steps_float warmup_learning_rate = init_lr * warmup_percent_done is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32) learning_rate = ( (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate) # It is OK that you use this optimizer for finetuning, since this # is how the model was trained (note that the Adam m/v variables are NOT # loaded from init_checkpoint.) # It is OK to use AdamW in the finetuning even the model is trained by LAMB. # As report in the Bert pulic github, the learning rate for SQuAD 1.1 finetune # is 3e-5, 4e-5 or 5e-5. For LAMB, the users can use 3e-4, 4e-4,or 5e-4 for a # batch size of 64 in the finetune. 
if optimizer == "adamw": tf.logging.info("using adamw") optimizer = AdamWeightDecayOptimizer( learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-6, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]) elif optimizer == "lamb": tf.logging.info("using lamb") optimizer = lamb_optimizer.LAMBOptimizer( learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-6, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]) else: raise ValueError("Not supported optimizer: ", optimizer) if use_tpu: optimizer = contrib_tpu.CrossShardOptimizer(optimizer) tvars = tf.trainable_variables() for tvar in tvars: if excluded_tvars and tvar.name in excluded_tvars: tvars.remove(tvar) grads = tf.gradients( loss, tvars, colocate_gradients_with_ops=colocate_gradients_with_ops) # This is how the model was pre-trained. (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0) train_op = optimizer.apply_gradients( list(zip(grads, tvars)), global_step=global_step) # Normally the global step update is done inside of `apply_gradients`. # However, neither `AdamWeightDecayOptimizer` nor `LAMBOptimizer` do this. # But if you use a different optimizer, you should probably take this line # out. new_global_step = global_step + 1 train_op = tf.group(train_op, [global_step.assign(new_global_step)]) return train_op
Creates an optimizer training op.
8,768
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time from albert import modeling from albert import optimization from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver from tensorflow.contrib import tpu as contrib_tpu FLAGS = flags.FLAGS tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") def get_masked_lm_output(albert_config, input_tensor, output_weights, positions, label_ids, label_weights): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope("transform"): input_tensor = tf.layers.dense( input_tensor, units=albert_config.embedding_size, activation=modeling.get_activation(albert_config.hidden_act), kernel_initializer=modeling.create_initializer( albert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
output_bias = tf.get_variable( "output_bias", shape=[albert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=albert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_sentence_order_output(albert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. 
with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, albert_config.hidden_size], initializer=modeling.create_initializer( albert_config.initializer_range)) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) The provided code snippet includes necessary dependencies for implementing the `model_fn_builder` function. Write a Python function `def model_fn_builder(albert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings, optimizer, poly_power, start_warmup_step)` to solve the following problem: Returns `model_fn` closure for TPUEstimator. Here is the function: def model_fn_builder(albert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings, optimizer, poly_power, start_warmup_step): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] # Note: We keep this feature name `next_sentence_labels` to be compatible # with the original data created by lanzhzh@. 
However, in the ALBERT case # it does represent sentence_order_labels. sentence_order_labels = features["next_sentence_labels"] is_training = (mode == tf_estimator.ModeKeys.TRAIN) model = modeling.AlbertModel( config=albert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(albert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights) (sentence_order_loss, sentence_order_example_loss, sentence_order_log_probs) = get_sentence_order_output( albert_config, model.get_pooled_output(), sentence_order_labels) total_loss = masked_lm_loss + sentence_order_loss tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: tf.logging.info("number of hidden group %d to initialize", albert_config.num_hidden_groups) num_of_initialize_group = 1 if FLAGS.init_from_group0: num_of_initialize_group = albert_config.num_hidden_groups if albert_config.net_structure_type > 0: num_of_initialize_group = albert_config.num_hidden_layers (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint( tvars, init_checkpoint, num_of_initialize_group) if use_tpu: def tpu_scaffold(): for gid in range(num_of_initialize_group): tf.logging.info("initialize the %dth layer", gid) tf.logging.info(assignment_map[gid]) tf.train.init_from_checkpoint(init_checkpoint, assignment_map[gid]) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: for gid in range(num_of_initialize_group): tf.logging.info("initialize the %dth layer", gid) tf.logging.info(assignment_map[gid]) tf.train.init_from_checkpoint(init_checkpoint, assignment_map[gid]) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", 
*INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf_estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, optimizer, poly_power, start_warmup_step) output_spec = contrib_tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf_estimator.ModeKeys.EVAL: def metric_fn(*args): """Computes the loss and accuracy of the model.""" (masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, sentence_order_example_loss, sentence_order_log_probs, sentence_order_labels) = args[:7] masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]) masked_lm_predictions = tf.argmax( masked_lm_log_probs, axis=-1, output_type=tf.int32) masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1]) masked_lm_ids = tf.reshape(masked_lm_ids, [-1]) masked_lm_weights = tf.reshape(masked_lm_weights, [-1]) masked_lm_accuracy = tf.metrics.accuracy( labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights) masked_lm_mean_loss = tf.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights) metrics = { "masked_lm_accuracy": masked_lm_accuracy, "masked_lm_loss": masked_lm_mean_loss, } sentence_order_log_probs = tf.reshape( sentence_order_log_probs, [-1, sentence_order_log_probs.shape[-1]]) sentence_order_predictions = tf.argmax( sentence_order_log_probs, axis=-1, output_type=tf.int32) sentence_order_labels = tf.reshape(sentence_order_labels, [-1]) sentence_order_accuracy = tf.metrics.accuracy( labels=sentence_order_labels, predictions=sentence_order_predictions) sentence_order_mean_loss = tf.metrics.mean( values=sentence_order_example_loss) metrics.update({ "sentence_order_accuracy": sentence_order_accuracy, "sentence_order_loss": sentence_order_mean_loss }) return metrics metric_values = [ 
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, sentence_order_example_loss, sentence_order_log_probs, sentence_order_labels ] eval_metrics = (metric_fn, metric_values) output_spec = contrib_tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) return output_spec return model_fn
Returns `model_fn` closure for TPUEstimator.
8,769
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time from albert import modeling from albert import optimization from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver from tensorflow.contrib import tpu as contrib_tpu FLAGS = flags.FLAGS tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example The provided code snippet includes necessary dependencies for implementing the `input_fn_builder` function. Write a Python function `def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4)` to solve the following problem: Creates an `input_fn` closure to be passed to TPUEstimator. 
Here is the function: def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), # Note: We keep this feature name `next_sentence_labels` to be # compatible with the original data created by lanzhzh@. However, in # the ALBERT case it does represent sentence_order_labels. "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), } if FLAGS.masked_lm_budget: name_to_features.update({ "token_boundary": tf.FixedLenFeature([max_seq_length], tf.int64)}) else: name_to_features.update({ "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32)}) # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( tf.data.experimental.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length)) d = d.shuffle(buffer_size=100) else: d = tf.data.TFRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. 
d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don't* want to drop the remainder, otherwise we wont cover # every sample. d = d.apply( tf.data.experimental.map_and_batch_with_legacy_function( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True)) tf.logging.info(d) return d return input_fn
Creates an `input_fn` closure to be passed to TPUEstimator.
8,770
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os from albert import fine_tuning_utils from albert import modeling from albert import optimization from albert import tokenization import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import metrics as contrib_metrics from tensorflow.contrib import tpu as contrib_tpu def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer, task_name): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False) if task_name != "sts-b": label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] # The convention in ALBERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the # embedding vector (and position vector). 
This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if task_name != "sts-b": label_id = label_map[example.label] else: label_id = example.label if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature The provided code snippet includes necessary dependencies for implementing the `file_based_convert_examples_to_features` function. 
Write a Python function `def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file, task_name)` to solve the following problem: Convert a set of `InputExample`s to a TFRecord file. Here is the function: def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file, task_name): """Convert a set of `InputExample`s to a TFRecord file.""" writer = tf.python_io.TFRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer, task_name) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f def create_float_feature(values): f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_float_feature([feature.label_id])\ if task_name == "sts-b" else create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close()
Convert a set of `InputExample`s to a TFRecord file.
8,771
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os from albert import fine_tuning_utils from albert import modeling from albert import optimization from albert import tokenization import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import metrics as contrib_metrics from tensorflow.contrib import tpu as contrib_tpu The provided code snippet includes necessary dependencies for implementing the `file_based_input_fn_builder` function. Write a Python function `def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder, task_name, use_tpu, bsz, multiple=1)` to solve the following problem: Creates an `input_fn` closure to be passed to TPUEstimator. Here is the function: def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder, task_name, use_tpu, bsz, multiple=1): """Creates an `input_fn` closure to be passed to TPUEstimator.""" labeltype = tf.float32 if task_name == "sts-b" else tf.int64 name_to_features = { "input_ids": tf.FixedLenFeature([seq_length * multiple], tf.int64), "input_mask": tf.FixedLenFeature([seq_length * multiple], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length * multiple], tf.int64), "label_ids": tf.FixedLenFeature([], labeltype), "is_real_example": tf.FixedLenFeature([], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. 
for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" if use_tpu: batch_size = params["batch_size"] else: batch_size = bsz # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = tf.data.TFRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( tf.data.experimental.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn
Creates an `input_fn` closure to be passed to TPUEstimator.
8,772
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os from albert import fine_tuning_utils from albert import modeling from albert import optimization from albert import tokenization import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import metrics as contrib_metrics from tensorflow.contrib import tpu as contrib_tpu def create_model(albert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings, task_name, hub_module): """Creates a classification model.""" (output_layer, _) = fine_tuning_utils.create_albert( albert_config=albert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings, use_einsum=True, hub_module=hub_module) hidden_size = output_layer.shape[-1] output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) if task_name != "sts-b": probabilities = tf.nn.softmax(logits, axis=-1) predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) else: probabilities = logits logits = tf.squeeze(logits, [-1]) predictions = logits per_example_loss = tf.square(logits - labels) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, probabilities, logits, 
predictions) The provided code snippet includes necessary dependencies for implementing the `model_fn_builder` function. Write a Python function `def model_fn_builder(albert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings, task_name, hub_module=None, optimizer="adamw")` to solve the following problem: Returns `model_fn` closure for TPUEstimator. Here is the function: def model_fn_builder(albert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings, task_name, hub_module=None, optimizer="adamw"): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf_estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, probabilities, logits, predictions) = \ create_model(albert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings, task_name, hub_module) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, 
assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf_estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, optimizer) output_spec = contrib_tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf_estimator.ModeKeys.EVAL: if task_name not in ["sts-b", "cola"]: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean( values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } elif task_name == "sts-b": def metric_fn(per_example_loss, label_ids, logits, is_real_example): """Compute Pearson correlations for STS-B.""" # Display labels and predictions concat1 = contrib_metrics.streaming_concat(logits) concat2 = contrib_metrics.streaming_concat(label_ids) # Compute Pearson correlation pearson = contrib_metrics.streaming_pearson_correlation( logits, label_ids, weights=is_real_example) # Compute MSE # mse = tf.metrics.mean(per_example_loss) mse = tf.metrics.mean_squared_error( label_ids, logits, weights=is_real_example) loss = tf.metrics.mean( values=per_example_loss, weights=is_real_example) return {"pred": concat1, "label_ids": concat2, "pearson": pearson, "MSE": mse, "eval_loss": loss,} elif task_name == "cola": def metric_fn(per_example_loss, label_ids, logits, is_real_example): """Compute Matthew's correlations for COLA.""" predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) # https://en.wikipedia.org/wiki/Matthews_correlation_coefficient tp, tp_op 
= tf.metrics.true_positives( labels=label_ids, predictions=predictions, weights=is_real_example) tn, tn_op = tf.metrics.true_negatives( labels=label_ids, predictions=predictions, weights=is_real_example) fp, fp_op = tf.metrics.false_positives( labels=label_ids, predictions=predictions, weights=is_real_example) fn, fn_op = tf.metrics.false_negatives( labels=label_ids, predictions=predictions, weights=is_real_example) # Compute Matthew's correlation mcc = tf.div_no_nan( tp * tn - fp * fn, tf.pow((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5)) # Compute accuracy accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean( values=per_example_loss, weights=is_real_example) return {"matthew_corr": (mcc, tf.group(tp_op, tn_op, fp_op, fn_op)), "eval_accuracy": accuracy, "eval_loss": loss,} eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = contrib_tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = contrib_tpu.TPUEstimatorSpec( mode=mode, predictions={ "probabilities": probabilities, "predictions": predictions }, scaffold_fn=scaffold_fn) return output_spec return model_fn
Returns `model_fn` closure for TPUEstimator.
8,773
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os from albert import fine_tuning_utils from albert import modeling from albert import optimization from albert import tokenization import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import metrics as contrib_metrics from tensorflow.contrib import tpu as contrib_tpu The provided code snippet includes necessary dependencies for implementing the `input_fn_builder` function. Write a Python function `def input_fn_builder(features, seq_length, is_training, drop_remainder)` to solve the following problem: Creates an `input_fn` closure to be passed to TPUEstimator. Here is the function: def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. 
d = tf.data.Dataset.from_tensor_slices({ "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "label_ids": tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn
Creates an `input_fn` closure to be passed to TPUEstimator.
8,774
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer, task_name):
  """Converts a single `InputExample` into a single `InputFeatures`."""
  if isinstance(example, PaddingInputExample):
    # A fake example used only to pad a batch to full size; it is marked
    # is_real_example=False so evaluation can ignore it.
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_id=0,
        is_real_example=False)

  # STS-B is a regression task and keeps its raw (float) label; every other
  # task maps its string label to an integer index.
  if task_name != "sts-b":
    label_map = {label: i for (i, label) in enumerate(label_list)}

  tokens_a = tokenizer.tokenize(example.text_a)
  tokens_b = tokenizer.tokenize(example.text_b) if example.text_b else None

  if tokens_b:
    # Truncates `tokens_a` and `tokens_b` in place so that the combined
    # length fits once [CLS], [SEP], [SEP] ("- 3") are added.
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  elif len(tokens_a) > max_seq_length - 2:
    # Single sequence: only [CLS] and [SEP] ("- 2") are added.
    tokens_a = tokens_a[0:(max_seq_length - 2)]

  # ALBERT input convention:
  #   pair:   [CLS] A ... [SEP] B ... [SEP]  with type_ids 0...0 1...1
  #   single: [CLS] A ... [SEP]              with type_ids 0...0
  # The type_id embeddings were learned in pre-training; for classification
  # the [CLS] vector is used as the sentence representation (which only
  # makes sense because the whole model is fine-tuned).
  tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
  segment_ids = [0] * len(tokens)
  if tokens_b:
    tokens = tokens + tokens_b + ["[SEP]"]
    segment_ids = segment_ids + [1] * (len(tokens_b) + 1)

  input_ids = tokenizer.convert_tokens_to_ids(tokens)

  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)

  # Zero-pad every list up to the sequence length.
  pad_len = max_seq_length - len(input_ids)
  input_ids = input_ids + [0] * pad_len
  input_mask = input_mask + [0] * pad_len
  segment_ids = segment_ids + [0] * pad_len

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length

  if task_name != "sts-b":
    label_id = label_map[example.label]
  else:
    label_id = example.label

  if ex_index < 5:
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

  return InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      is_real_example=True)


def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer, task_name):
  """Convert a set of `InputExample`s to a list of `InputFeatures`."""
  features = []
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    features.append(
        convert_single_example(ex_index, example, label_list, max_seq_length,
                               tokenizer, task_name))
  return features
Convert a set of `InputExample`s to a list of `InputFeatures`.
8,775
def get_mlm_logits(model, albert_config, mlm_positions):
  """From run_pretraining.py: masked-LM logits over the vocabulary."""
  masked_hidden = gather_indexes(model.get_sequence_output(), mlm_positions)
  with tf.variable_scope("cls/predictions"):
    # One extra non-linear transform before the output layer. This matrix is
    # not used after pre-training.
    with tf.variable_scope("transform"):
      masked_hidden = tf.layers.dense(
          masked_hidden,
          units=albert_config.embedding_size,
          activation=modeling.get_activation(albert_config.hidden_act),
          kernel_initializer=modeling.create_initializer(
              albert_config.initializer_range))
      masked_hidden = modeling.layer_norm(masked_hidden)

    # The output weights are tied to the input embedding table; only a
    # per-token output bias is trained here.
    output_bias = tf.get_variable(
        "output_bias",
        shape=[albert_config.vocab_size],
        initializer=tf.zeros_initializer())
    logits = tf.matmul(
        masked_hidden, model.get_embedding_table(), transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
  return logits


def get_sop_log_probs(model, albert_config):
  """Get loss and log probs for the next sentence prediction."""
  pooled = model.get_pooled_output()
  # Simple binary classification: 0 means "next sentence", 1 means "random
  # sentence". This weight matrix is not used after pre-training.
  with tf.variable_scope("cls/seq_relationship"):
    output_weights = tf.get_variable(
        "output_weights",
        shape=[2, albert_config.hidden_size],
        initializer=modeling.create_initializer(
            albert_config.initializer_range))
    output_bias = tf.get_variable(
        "output_bias", shape=[2], initializer=tf.zeros_initializer())

    logits = tf.matmul(pooled, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    return tf.nn.log_softmax(logits, axis=-1)


def module_fn(is_training):
  """Module function."""
  input_ids = tf.placeholder(tf.int32, [None, None], "input_ids")
  input_mask = tf.placeholder(tf.int32, [None, None], "input_mask")
  segment_ids = tf.placeholder(tf.int32, [None, None], "segment_ids")
  mlm_positions = tf.placeholder(tf.int32, [None, None], "mlm_positions")

  albert_config_path = os.path.join(
      FLAGS.albert_directory, "albert_config.json")
  albert_config = modeling.AlbertConfig.from_json_file(albert_config_path)
  model = modeling.AlbertModel(
      config=albert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=False,
      use_einsum=FLAGS.use_einsum)

  mlm_logits = get_mlm_logits(model, albert_config, mlm_positions)
  sop_log_probs = get_sop_log_probs(model, albert_config)

  vocab_model_path = os.path.join(FLAGS.albert_directory, "30k-clean.model")
  vocab_file_path = os.path.join(FLAGS.albert_directory, "30k-clean.vocab")

  config_file = tf.constant(
      value=albert_config_path, dtype=tf.string, name="config_file")
  vocab_model = tf.constant(
      value=vocab_model_path, dtype=tf.string, name="vocab_model")
  # The raw .vocab file is exported only so humans can inspect the vocabulary.
  vocab_file = tf.constant(
      value=vocab_file_path, dtype=tf.string, name="vocab_file")

  # Registering these paths in the ASSET_FILEPATHS collection lets TF-Hub
  # rewrite the tensors so the exported module is portable.
  for asset in (config_file, vocab_model, vocab_file):
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, asset)

  token_inputs = dict(
      input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids)
  base_outputs = dict(
      sequence_output=model.get_sequence_output(),
      pooled_output=model.get_pooled_output())

  hub.add_signature(
      name="tokens", inputs=token_inputs, outputs=dict(base_outputs))
  hub.add_signature(
      name="sop",
      inputs=token_inputs,
      outputs=dict(base_outputs, sop_log_probs=sop_log_probs))
  hub.add_signature(
      name="mlm",
      inputs=dict(token_inputs, mlm_positions=mlm_positions),
      outputs=dict(base_outputs, mlm_logits=mlm_logits))
  # NOTE: tokenization_info intentionally exposes the SentencePiece *model*
  # under the "vocab_file" key, matching the original export.
  hub.add_signature(
      name="tokenization_info",
      inputs={},
      outputs=dict(
          vocab_file=vocab_model,
          do_lower_case=tf.constant(FLAGS.do_lower_case)))
Module function.
8,776
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import math import re import string import sys from albert import fine_tuning_utils from albert import modeling from albert import optimization from albert import tokenization import numpy as np import six from six.moves import map from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import layers as contrib_layers from tensorflow.contrib import tpu as contrib_tpu class SquadExample(object): """A single training/test example for simple sequence classification. For examples without an answer, the start and end position are -1. """ def __init__(self, qas_id, question_text, paragraph_text, orig_answer_text=None, start_position=None, end_position=None, is_impossible=False): self.qas_id = qas_id self.question_text = question_text self.paragraph_text = paragraph_text self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) s += ", question_text: %s" % ( tokenization.printable_text(self.question_text)) s += ", paragraph_text: [%s]" % (" ".join(self.paragraph_text)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.start_position: s += ", end_position: %d" % (self.end_position) if self.start_position: s += ", is_impossible: %r" % (self.is_impossible) return s The provided code snippet includes necessary dependencies for implementing the `read_squad_examples` function. Write a Python function `def read_squad_examples(input_file, is_training)` to solve the following problem: Read a SQuAD json file into a list of SquadExample. 
def read_squad_examples(input_file, is_training):
  """Read a SQuAD json file into a list of SquadExample."""
  with tf.gfile.Open(input_file, "r") as reader:
    input_data = json.load(reader)["data"]

  examples = []
  for entry in input_data:
    for paragraph in entry["paragraphs"]:
      paragraph_text = paragraph["context"]
      for qa in paragraph["qas"]:
        start_position = None
        orig_answer_text = None
        is_impossible = False

        if is_training:
          is_impossible = qa.get("is_impossible", False)
          if len(qa["answers"]) != 1 and not is_impossible:
            raise ValueError(
                "For training, each question should have exactly 1 answer.")
          if is_impossible:
            # Unanswerable questions carry a sentinel position and an empty
            # answer string.
            start_position = -1
            orig_answer_text = ""
          else:
            answer = qa["answers"][0]
            orig_answer_text = answer["text"]
            start_position = answer["answer_start"]

        examples.append(
            SquadExample(
                qas_id=qa["id"],
                question_text=qa["question"],
                paragraph_text=paragraph_text,
                orig_answer_text=orig_answer_text,
                start_position=start_position,
                is_impossible=is_impossible))

  return examples
8,777
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import math import re import string import sys from albert import fine_tuning_utils from albert import modeling from albert import optimization from albert import tokenization import numpy as np import six from six.moves import map from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import layers as contrib_layers from tensorflow.contrib import tpu as contrib_tpu class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tok_start_to_orig_index, tok_end_to_orig_index, token_is_max_context, tokens, input_ids, input_mask, segment_ids, paragraph_len, p_mask=None, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tok_start_to_orig_index = tok_start_to_orig_index self.tok_end_to_orig_index = tok_end_to_orig_index self.token_is_max_context = token_is_max_context self.tokens = tokens self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.paragraph_len = paragraph_len self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible self.p_mask = p_mask def _convert_index(index, pos, m=None, is_start=True): """Converts index.""" if index[pos] is not None: return index[pos] n = len(index) rear = pos while rear < n - 1 and index[rear] is None: rear += 1 front = pos while front > 0 and index[front] is None: front -= 1 assert index[front] is not None or index[rear] is not None if index[front] is None: if index[rear] >= 1: if is_start: return 0 else: return index[rear] - 1 return index[rear] if index[rear] is None: if m is not None and index[front] < m - 1: if is_start: return index[front] + 
1 else: return m - 1 return index[front] if is_start: if index[rear] > index[front] + 1: return index[front] + 1 else: return index[rear] else: if index[rear] > index[front] + 1: return index[rear] - 1 else: return index[front] def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index The provided code snippet includes necessary dependencies for implementing the `convert_examples_to_features` function. Write a Python function `def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, output_fn, do_lower_case)` to solve the following problem: Loads a data file into a list of `InputBatch`s. 
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length, is_training,
                                 output_fn, do_lower_case):
  """Loads a data file into a list of `InputBatch`s.

  Aligns SentencePiece tokens back to the original paragraph text with a
  band-limited LCS match, slides a window of `doc_stride` tokens over long
  documents, and emits one `InputFeatures` per document span through the
  `output_fn` callback.
  """

  cnt_pos, cnt_neg = 0, 0
  unique_id = 1000000000
  # Reusable DP buffer for the LCS match; re-allocated only when a longer
  # paragraph is encountered.
  max_n, max_m = 1024, 1024
  f = np.zeros((max_n, max_m), dtype=np.float32)

  for (example_index, example) in enumerate(examples):

    if example_index % 100 == 0:
      tf.logging.info("Converting {}/{} pos {} neg {}".format(
          example_index, len(examples), cnt_pos, cnt_neg))

    query_tokens = tokenization.encode_ids(
        tokenizer.sp_model,
        tokenization.preprocess_text(
            example.question_text, lower=do_lower_case))

    if len(query_tokens) > max_query_length:
      query_tokens = query_tokens[0:max_query_length]

    paragraph_text = example.paragraph_text
    para_tokens = tokenization.encode_pieces(
        tokenizer.sp_model,
        tokenization.preprocess_text(
            example.paragraph_text, lower=do_lower_case),
        return_unicode=False)

    # Build char<->token offset maps over the concatenated piece text, with
    # the SentencePiece underline rendered back as a space.
    chartok_to_tok_index = []
    tok_start_to_chartok_index = []
    tok_end_to_chartok_index = []
    char_cnt = 0
    para_tokens = [six.ensure_text(token, "utf-8") for token in para_tokens]
    for i, token in enumerate(para_tokens):
      new_token = six.ensure_text(token).replace(
          tokenization.SPIECE_UNDERLINE.decode("utf-8"), " ")
      chartok_to_tok_index.extend([i] * len(new_token))
      tok_start_to_chartok_index.append(char_cnt)
      char_cnt += len(new_token)
      tok_end_to_chartok_index.append(char_cnt - 1)
    tok_cat_text = "".join(para_tokens).replace(
        tokenization.SPIECE_UNDERLINE.decode("utf-8"), " ")
    n, m = len(paragraph_text), len(tok_cat_text)

    if n > max_n or m > max_m:
      max_n = max(n, max_n)
      max_m = max(m, max_m)
      f = np.zeros((max_n, max_m), dtype=np.float32)
    g = {}

    def _lcs_match(max_dist, n=n, m=m):
      """Longest-common-substring algorithm."""
      f.fill(0)
      g.clear()

      ### longest common sub sequence
      # f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j))
      for i in range(n):

        # note(zhiliny):
        # unlike standard LCS, this is specifically optimized for the setting
        # because the mismatch between sentence pieces and original text will
        # be small
        for j in range(i - max_dist, i + max_dist):
          if j >= m or j < 0:
            continue

          if i > 0:
            g[(i, j)] = 0
            f[i, j] = f[i - 1, j]

          if j > 0 and f[i, j - 1] > f[i, j]:
            g[(i, j)] = 1
            f[i, j] = f[i, j - 1]

          f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0
          if (tokenization.preprocess_text(
              paragraph_text[i], lower=do_lower_case,
              remove_space=False) == tok_cat_text[j]
              and f_prev + 1 > f[i, j]):
            g[(i, j)] = 2
            f[i, j] = f_prev + 1

    # Retry once with a doubled band if the first match covers < 80% of the
    # paragraph.
    max_dist = abs(n - m) + 5
    for _ in range(2):
      _lcs_match(max_dist)
      if f[n - 1, m - 1] > 0.8 * n:
        break
      max_dist *= 2

    # Backtrace the DP table into char-level alignment maps.
    orig_to_chartok_index = [None] * n
    chartok_to_orig_index = [None] * m
    i, j = n - 1, m - 1
    while i >= 0 and j >= 0:
      if (i, j) not in g:
        break
      if g[(i, j)] == 2:
        orig_to_chartok_index[i] = j
        chartok_to_orig_index[j] = i
        i, j = i - 1, j - 1
      elif g[(i, j)] == 1:
        j = j - 1
      else:
        i = i - 1

    if (all(v is None for v in orig_to_chartok_index) or
        f[n - 1, m - 1] < 0.8 * n):
      # Alignment failed; the whole example is skipped.
      tf.logging.info("MISMATCH DETECTED!")
      continue

    tok_start_to_orig_index = []
    tok_end_to_orig_index = []
    for i in range(len(para_tokens)):
      start_chartok_pos = tok_start_to_chartok_index[i]
      end_chartok_pos = tok_end_to_chartok_index[i]
      start_orig_pos = _convert_index(
          chartok_to_orig_index, start_chartok_pos, n, is_start=True)
      end_orig_pos = _convert_index(
          chartok_to_orig_index, end_chartok_pos, n, is_start=False)

      tok_start_to_orig_index.append(start_orig_pos)
      tok_end_to_orig_index.append(end_orig_pos)

    if not is_training:
      tok_start_position = tok_end_position = None

    if is_training and example.is_impossible:
      tok_start_position = 0
      tok_end_position = 0

    if is_training and not example.is_impossible:
      start_position = example.start_position
      end_position = start_position + len(example.orig_answer_text) - 1

      start_chartok_pos = _convert_index(
          orig_to_chartok_index, start_position, is_start=True)
      tok_start_position = chartok_to_tok_index[start_chartok_pos]

      end_chartok_pos = _convert_index(
          orig_to_chartok_index, end_position, is_start=False)
      tok_end_position = chartok_to_tok_index[end_chartok_pos]
      assert tok_start_position <= tok_end_position

    def _piece_to_id(x):
      # PY2 SentencePiece expects bytes, not unicode.
      if six.PY2 and isinstance(x, six.text_type):
        x = six.ensure_binary(x, "utf-8")
      return tokenizer.sp_model.PieceToId(x)

    all_doc_tokens = list(map(_piece_to_id, para_tokens))

    # The -3 accounts for [CLS], [SEP] and [SEP]
    max_tokens_for_doc = max_seq_length - len(query_tokens) - 3

    # We can have documents that are longer than the maximum sequence length.
    # To deal with this we do a sliding window approach, where we take chunks
    # of the up to our max length with a stride of `doc_stride`.
    _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
        "DocSpan", ["start", "length"])
    doc_spans = []
    start_offset = 0
    while start_offset < len(all_doc_tokens):
      length = len(all_doc_tokens) - start_offset
      if length > max_tokens_for_doc:
        length = max_tokens_for_doc
      doc_spans.append(_DocSpan(start=start_offset, length=length))
      if start_offset + length == len(all_doc_tokens):
        break
      start_offset += min(length, doc_stride)

    for (doc_span_index, doc_span) in enumerate(doc_spans):
      tokens = []
      token_is_max_context = {}
      segment_ids = []
      # p_mask: 0 for tokens that can be part of an answer ([CLS], doc
      # tokens), 1 otherwise.
      p_mask = []

      cur_tok_start_to_orig_index = []
      cur_tok_end_to_orig_index = []

      tokens.append(tokenizer.sp_model.PieceToId("[CLS]"))
      segment_ids.append(0)
      p_mask.append(0)
      for token in query_tokens:
        tokens.append(token)
        segment_ids.append(0)
        p_mask.append(1)
      tokens.append(tokenizer.sp_model.PieceToId("[SEP]"))
      segment_ids.append(0)
      p_mask.append(1)

      for i in range(doc_span.length):
        split_token_index = doc_span.start + i

        cur_tok_start_to_orig_index.append(
            tok_start_to_orig_index[split_token_index])
        cur_tok_end_to_orig_index.append(
            tok_end_to_orig_index[split_token_index])

        is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                               split_token_index)
        token_is_max_context[len(tokens)] = is_max_context
        tokens.append(all_doc_tokens[split_token_index])
        segment_ids.append(1)
        p_mask.append(0)
      tokens.append(tokenizer.sp_model.PieceToId("[SEP]"))
      segment_ids.append(1)
      p_mask.append(1)

      paragraph_len = len(tokens)
      input_ids = tokens

      # The mask has 1 for real tokens and 0 for padding tokens. Only real
      # tokens are attended to.
      input_mask = [1] * len(input_ids)

      # Zero-pad up to the sequence length.
      while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
        p_mask.append(1)

      assert len(input_ids) == max_seq_length
      assert len(input_mask) == max_seq_length
      assert len(segment_ids) == max_seq_length

      span_is_impossible = example.is_impossible
      start_position = None
      end_position = None
      if is_training and not span_is_impossible:
        # For training, if our document chunk does not contain an annotation
        # we throw it out, since there is nothing to predict.
        doc_start = doc_span.start
        doc_end = doc_span.start + doc_span.length - 1
        out_of_span = False
        if not (tok_start_position >= doc_start and
                tok_end_position <= doc_end):
          out_of_span = True
        if out_of_span:
          # continue
          start_position = 0
          end_position = 0
          span_is_impossible = True
        else:
          doc_offset = len(query_tokens) + 2
          start_position = tok_start_position - doc_start + doc_offset
          end_position = tok_end_position - doc_start + doc_offset

      if is_training and span_is_impossible:
        start_position = 0
        end_position = 0

      if example_index < 20:
        tf.logging.info("*** Example ***")
        tf.logging.info("unique_id: %s" % (unique_id))
        tf.logging.info("example_index: %s" % (example_index))
        tf.logging.info("doc_span_index: %s" % (doc_span_index))
        tf.logging.info("tok_start_to_orig_index: %s" % " ".join(
            [str(x) for x in cur_tok_start_to_orig_index]))
        tf.logging.info("tok_end_to_orig_index: %s" % " ".join(
            [str(x) for x in cur_tok_end_to_orig_index]))
        tf.logging.info("token_is_max_context: %s" % " ".join([
            "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
        ]))
        tf.logging.info("input_pieces: %s" % " ".join(
            [tokenizer.sp_model.IdToPiece(x) for x in tokens]))
        tf.logging.info("input_ids: %s" % " ".join(
            [str(x) for x in input_ids]))
        tf.logging.info(
            "input_mask: %s" % " ".join([str(x) for x in input_mask]))
        tf.logging.info(
            "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))

        if is_training and span_is_impossible:
          tf.logging.info("impossible example span")

        if is_training and not span_is_impossible:
          pieces = [tokenizer.sp_model.IdToPiece(token) for token in
                    tokens[start_position: (end_position + 1)]]
          answer_text = tokenizer.sp_model.DecodePieces(pieces)
          tf.logging.info("start_position: %d" % (start_position))
          tf.logging.info("end_position: %d" % (end_position))
          tf.logging.info(
              "answer: %s" % (tokenization.printable_text(answer_text)))

      # note(zhiliny): With multi processing,
      # the example_index is actually the index within the current process
      # therefore we use example_index=None to avoid being used in the future.
      # The current code does not use example_index of training data.
      if is_training:
        feat_example_index = None
      else:
        feat_example_index = example_index

      feature = InputFeatures(
          unique_id=unique_id,
          example_index=feat_example_index,
          doc_span_index=doc_span_index,
          tok_start_to_orig_index=cur_tok_start_to_orig_index,
          tok_end_to_orig_index=cur_tok_end_to_orig_index,
          token_is_max_context=token_is_max_context,
          tokens=[tokenizer.sp_model.IdToPiece(x) for x in tokens],
          input_ids=input_ids,
          input_mask=input_mask,
          segment_ids=segment_ids,
          paragraph_len=paragraph_len,
          start_position=start_position,
          end_position=end_position,
          is_impossible=span_is_impossible,
          p_mask=p_mask)

      # Run callback
      output_fn(feature)

      unique_id += 1
      if span_is_impossible:
        cnt_neg += 1
      else:
        cnt_pos += 1

  tf.logging.info("Total number of instances: {} = pos {} neg {}".format(
      cnt_pos + cnt_neg, cnt_pos, cnt_neg))
Loads a data file into a list of `InputBatch`s.
8,778
def input_fn_builder(input_file, seq_length, is_training, drop_remainder,
                     use_tpu, bsz, is_v2):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Args:
    input_file: path to a TFRecord file of serialized examples.
    seq_length: fixed length of the id/mask/segment features.
    is_training: if True, the dataset repeats, shuffles, and carries the
      answer-position features.
    drop_remainder: whether to drop the final partial batch.
    use_tpu: if True, the batch size comes from `params["batch_size"]`.
    bsz: batch size used when not running on TPU.
    is_v2: if True, also parse the `p_mask` feature (SQuAD v2.0).

  Returns:
    A callable `input_fn(params)` that builds a `tf.data.Dataset`.
  """
  name_to_features = {
      "unique_ids": tf.FixedLenFeature([], tf.int64),
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
  }

  # p_mask is not required for SQuAD v1.1
  if is_v2:
    name_to_features["p_mask"] = tf.FixedLenFeature([seq_length], tf.int64)

  if is_training:
    for key in ("start_positions", "end_positions", "is_impossible"):
      name_to_features[key] = tf.FixedLenFeature([], tf.int64)

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32,
    # so every int64 feature is downcast here.
    for name in list(example.keys()):
      tensor = example[name]
      if tensor.dtype == tf.int64:
        example[name] = tf.to_int32(tensor)

    return example

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"] if use_tpu else bsz

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    return d.apply(
        tf.data.experimental.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))

  return input_fn
Creates an `input_fn` closure to be passed to TPUEstimator.
8,779
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import re
import string
import sys
from albert import fine_tuning_utils
from albert import modeling
from albert import optimization
from albert import tokenization
import numpy as np
import six
from six.moves import map
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import tpu as contrib_tpu


def create_v1_model(albert_config, is_training, input_ids, input_mask,
                    segment_ids, use_one_hot_embeddings, use_einsum,
                    hub_module):
  """Creates a classification model.

  Runs the ALBERT encoder and adds a single linear span-prediction head
  ("cls/squad") that produces per-token start and end logits for SQuAD v1.
  """
  (_, final_hidden) = fine_tuning_utils.create_albert(
      albert_config=albert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings,
      use_einsum=use_einsum,
      hub_module=hub_module)

  # final_hidden is the sequence output; expected_rank=3 enforces
  # [batch, seq_len, hidden].
  final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
  batch_size = final_hidden_shape[0]
  seq_length = final_hidden_shape[1]
  hidden_size = final_hidden_shape[2]

  # One weight row per output (start, end) -> 2 logits per token.
  output_weights = tf.get_variable(
      "cls/squad/output_weights", [2, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "cls/squad/output_bias", [2], initializer=tf.zeros_initializer())

  # Flatten to a matmul over all tokens at once, then reshape back.
  final_hidden_matrix = tf.reshape(final_hidden,
                                   [batch_size * seq_length, hidden_size])
  logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
  logits = tf.nn.bias_add(logits, output_bias)

  logits = tf.reshape(logits, [batch_size, seq_length, 2])
  # Move the start/end axis first so it can be unstacked into two tensors.
  logits = tf.transpose(logits, [2, 0, 1])

  unstacked_logits = tf.unstack(logits, axis=0)

  (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])

  return (start_logits, end_logits)


def v1_model_fn_builder(albert_config, init_checkpoint, learning_rate,
                        num_train_steps, num_warmup_steps, use_tpu,
                        use_one_hot_embeddings, use_einsum, hub_module):
  """Returns `model_fn` closure for TPUEstimator."""

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))

    # unique_ids is optional (e.g. absent for some export/serving paths).
    if "unique_ids" in features:
      unique_ids = features["unique_ids"]
    else:
      unique_ids = None

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]

    is_training = (mode == tf_estimator.ModeKeys.TRAIN)

    (start_logits, end_logits) = create_v1_model(
        albert_config=albert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings,
        use_einsum=use_einsum,
        hub_module=hub_module)

    # Assign names to the logits so that we can refer to them as output
    # tensors.
    start_logits = tf.identity(start_logits, name="start_logits")
    end_logits = tf.identity(end_logits, name="end_logits")

    tvars = tf.trainable_variables()

    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        # On TPU, checkpoint restoration must happen inside a Scaffold so it
        # runs on the worker rather than the coordinator.
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf_estimator.ModeKeys.TRAIN:
      seq_length = modeling.get_shape_list(input_ids)[1]

      def compute_loss(logits, positions):
        # Cross-entropy of the gold position against the per-token softmax.
        one_hot_positions = tf.one_hot(
            positions, depth=seq_length, dtype=tf.float32)
        log_probs = tf.nn.log_softmax(logits, axis=-1)
        loss = -tf.reduce_mean(
            tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
        return loss

      start_positions = features["start_positions"]
      end_positions = features["end_positions"]

      start_loss = compute_loss(start_logits, start_positions)
      end_loss = compute_loss(end_logits, end_positions)

      total_loss = (start_loss + end_loss) / 2.0

      train_op = optimization.create_optimizer(total_loss, learning_rate,
                                               num_train_steps,
                                               num_warmup_steps, use_tpu)

      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf_estimator.ModeKeys.PREDICT:
      predictions = {
          "start_log_prob": start_logits,
          "end_log_prob": end_logits,
      }
      if unique_ids is not None:
        predictions["unique_ids"] = unique_ids
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
    else:
      raise ValueError("Only TRAIN and PREDICT modes are supported: %s" %
                       (mode))

    return output_spec

  return model_fn
Returns `model_fn` closure for TPUEstimator.
8,780
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import math import re import string import sys from albert import fine_tuning_utils from albert import modeling from albert import optimization from albert import tokenization import numpy as np import six from six.moves import map from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import layers as contrib_layers from tensorflow.contrib import tpu as contrib_tpu def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes The provided code snippet includes necessary dependencies for implementing the `accumulate_predictions_v1` function. Write a Python function `def accumulate_predictions_v1(result_dict, all_examples, all_features, all_results, n_best_size, max_answer_length)` to solve the following problem: accumulate predictions for each positions in a dictionary. 
Here is the function: def accumulate_predictions_v1(result_dict, all_examples, all_features, all_results, n_best_size, max_answer_length): """accumulate predictions for each positions in a dictionary.""" example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): if example_index not in result_dict: result_dict[example_index] = {} features = example_index_to_features[example_index] prelim_predictions = [] min_null_feature_index = 0 # the paragraph slice with min mull score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): if feature.unique_id not in result_dict[example_index]: result_dict[example_index][feature.unique_id] = {} result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_log_prob, n_best_size) end_indexes = _get_best_indexes(result.end_log_prob, n_best_size) for start_index in start_indexes: for end_index in end_indexes: doc_offset = feature.tokens.index("[SEP]") + 1 # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. 
if start_index - doc_offset >= len(feature.tok_start_to_orig_index): continue if end_index - doc_offset >= len(feature.tok_end_to_orig_index): continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue start_log_prob = result.start_log_prob[start_index] end_log_prob = result.end_log_prob[end_index] start_idx = start_index - doc_offset end_idx = end_index - doc_offset if (start_idx, end_idx) not in result_dict[example_index][feature.unique_id]: result_dict[example_index][feature.unique_id][(start_idx, end_idx)] = [] result_dict[example_index][feature.unique_id][(start_idx, end_idx)].append((start_log_prob, end_log_prob))
accumulate predictions for each positions in a dictionary.
8,781
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import math import re import string import sys from albert import fine_tuning_utils from albert import modeling from albert import optimization from albert import tokenization import numpy as np import six from six.moves import map from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import layers as contrib_layers from tensorflow.contrib import tpu as contrib_tpu _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]) def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs The provided code snippet includes necessary dependencies for implementing the `write_predictions_v1` function. Write a Python function `def write_predictions_v1(result_dict, all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file)` to solve the following problem: Write final predictions to the json file and log-odds of null if needed. 
Here is the function: def write_predictions_v1(result_dict, all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file): """Write final predictions to the json file and log-odds of null if needed.""" tf.logging.info("Writing predictions to: %s" % (output_prediction_file)) tf.logging.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min mull score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): for ((start_idx, end_idx), logprobs) in \ result_dict[example_index][feature.unique_id].items(): start_log_prob = 0 end_log_prob = 0 for logprob in logprobs: start_log_prob += logprob[0] end_log_prob += logprob[1] prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_idx, end_index=end_idx, start_log_prob=start_log_prob / len(logprobs), end_log_prob=end_log_prob / len(logprobs))) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index >= 0: # this is a non-null 
prediction tok_start_to_orig_index = feature.tok_start_to_orig_index tok_end_to_orig_index = feature.tok_end_to_orig_index start_orig_pos = tok_start_to_orig_index[pred.start_index] end_orig_pos = tok_end_to_orig_index[pred.end_index] paragraph_text = example.paragraph_text final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. if not nbest: nbest.append( _NbestPrediction(text="empty", start_log_prob=0.0, end_log_prob=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_log_prob + entry.end_log_prob) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_log_prob"] = entry.start_log_prob output["end_log_prob"] = entry.end_log_prob nbest_json.append(output) assert len(nbest_json) >= 1 all_predictions[example.qas_id] = nbest_json[0]["text"] all_nbest_json[example.qas_id] = nbest_json with tf.gfile.GFile(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with tf.gfile.GFile(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") return all_predictions
Write final predictions to the json file and log-odds of null if needed.
8,782
def f1_score(prediction, ground_truth):
  """Token-overlap F1 between a prediction and one gold answer."""
  pred_tokens = normalize_answer_v1(prediction).split()
  gold_tokens = normalize_answer_v1(ground_truth).split()
  overlap = (
      collections.Counter(pred_tokens) & collections.Counter(gold_tokens))
  num_same = sum(overlap.values())
  if num_same == 0:
    return 0
  precision = 1.0 * num_same / len(pred_tokens)
  recall = 1.0 * num_same / len(gold_tokens)
  return (2 * precision * recall) / (precision + recall)


def exact_match_score(prediction, ground_truth):
  """True iff prediction equals the gold answer after normalization."""
  return normalize_answer_v1(prediction) == normalize_answer_v1(ground_truth)


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
  """Best `metric_fn(prediction, gt)` score over all gold answers."""
  return max(
      metric_fn(prediction, ground_truth) for ground_truth in ground_truths)


def evaluate_v1(dataset, predictions):
  """Computes SQuAD v1.1 exact-match and F1 percentages over `dataset`."""
  f1 = exact_match = total = 0
  for article in dataset:
    for paragraph in article["paragraphs"]:
      for qa in paragraph["qas"]:
        total += 1
        if qa["id"] not in predictions:
          message = ("Unanswered question " + six.ensure_str(qa["id"]) +
                     " will receive score 0.")
          print(message, file=sys.stderr)
          continue
        ground_truths = [x["text"] for x in qa["answers"]]
        prediction = predictions[qa["id"]]
        exact_match += metric_max_over_ground_truths(
            exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(
            f1_score, prediction, ground_truths)

  exact_match = 100.0 * exact_match / total
  f1 = 100.0 * f1 / total

  return {"exact_match": exact_match, "f1": f1}
null
8,783
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import math import re import string import sys from albert import fine_tuning_utils from albert import modeling from albert import optimization from albert import tokenization import numpy as np import six from six.moves import map from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import layers as contrib_layers from tensorflow.contrib import tpu as contrib_tpu def merge_eval(main_eval, new_eval, prefix): for k in new_eval: main_eval['%s_%s' % (prefix, k)] = new_eval[k]
null
8,784
def accumulate_predictions_v2(result_dict, cls_dict, all_examples,
                              all_features, all_results, n_best_size,
                              max_answer_length, start_n_top, end_n_top):
  """accumulate predictions for each positions in a dictionary.

  Mutates `result_dict` and `cls_dict` in place:
  result_dict[example_index][feature.unique_id][(start_idx, end_idx)] holds
  (start_log_prob, end_log_prob) tuples for valid candidate spans (indices
  relative to the document); cls_dict[example_index] collects the minimum
  "no-answer" (cls) score over the example's features.

  Args:
    result_dict: dict to accumulate span scores into (mutated).
    cls_dict: dict to accumulate null scores into (mutated).
    all_examples: list of examples (only enumerated here).
    all_features: features with `example_index`, `unique_id`, `tokens`,
      `tok_start_to_orig_index`, `tok_end_to_orig_index`,
      `token_is_max_context`.
    all_results: results with `unique_id`, `cls_logits`,
      `start_top_log_probs`/`start_top_index` (length start_n_top) and
      `end_top_log_probs`/`end_top_index` (length start_n_top * end_n_top).
    n_best_size: unused here; kept for interface compatibility with the v1
      accumulator.
    max_answer_length: maximum span length to keep.
    start_n_top: beam width over start positions.
    end_n_top: beam width over end positions per start.
  """
  example_index_to_features = collections.defaultdict(list)
  for feature in all_features:
    example_index_to_features[feature.example_index].append(feature)

  unique_id_to_result = {}
  for result in all_results:
    unique_id_to_result[result.unique_id] = result

  for example_index, _ in enumerate(all_examples):
    if example_index not in result_dict:
      result_dict[example_index] = {}
    features = example_index_to_features[example_index]

    # keep track of the minimum score of null start+end of position 0
    score_null = 1000000  # large and positive

    for feature in features:
      if feature.unique_id not in result_dict[example_index]:
        result_dict[example_index][feature.unique_id] = {}
      result = unique_id_to_result[feature.unique_id]
      cur_null_score = result.cls_logits

      # if we could have irrelevant answers, get the min score of irrelevant
      score_null = min(score_null, cur_null_score)

      # Document tokens start right after the first [SEP].
      doc_offset = feature.tokens.index("[SEP]") + 1
      for i in range(start_n_top):
        for j in range(end_n_top):
          start_log_prob = result.start_top_log_probs[i]
          start_index = result.start_top_index[i]
          j_index = i * end_n_top + j
          end_log_prob = result.end_top_log_probs[j_index]
          end_index = result.end_top_index[j_index]
          # We could hypothetically create invalid predictions, e.g., predict
          # that the start of the span is in the question. We throw out all
          # invalid predictions.
          if start_index - doc_offset >= len(feature.tok_start_to_orig_index):
            continue
          if start_index - doc_offset < 0:
            continue
          if end_index - doc_offset >= len(feature.tok_end_to_orig_index):
            continue
          if not feature.token_is_max_context.get(start_index, False):
            continue
          if end_index < start_index:
            continue
          length = end_index - start_index + 1
          if length > max_answer_length:
            continue
          span = (start_index - doc_offset, end_index - doc_offset)
          if span not in result_dict[example_index][feature.unique_id]:
            result_dict[example_index][feature.unique_id][span] = []
          result_dict[example_index][feature.unique_id][span].append(
              (start_log_prob, end_log_prob))

    if example_index not in cls_dict:
      cls_dict[example_index] = []
    cls_dict[example_index].append(score_null)
accumulate predictions for each positions in a dictionary.
8,785
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import re
import string
import sys
from albert import fine_tuning_utils
from albert import modeling
from albert import optimization
from albert import tokenization
import numpy as np
import six
from six.moves import map
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import tpu as contrib_tpu


def create_v2_model(albert_config, is_training, input_ids, input_mask,
                    segment_ids, use_one_hot_embeddings, features,
                    max_seq_length, start_n_top, end_n_top, dropout_prob,
                    hub_module):
  """Creates a classification model.

  SQuAD v2 head on top of ALBERT: a start-position softmax, a conditional
  end-position head (teacher-forced on the gold start during training, beam
  search over top start candidates at inference), and an answerability
  ("cls_logits") head. Internally works in [seq_len, batch, hidden] layout.
  """
  (_, output) = fine_tuning_utils.create_albert(
      albert_config=albert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings,
      use_einsum=True,
      hub_module=hub_module)

  bsz = tf.shape(output)[0]
  return_dict = {}
  # Switch to time-major [seq_len, batch, hidden] for the einsum gathers
  # below.
  output = tf.transpose(output, [1, 0, 2])

  # invalid position mask such as query and special symbols (PAD, SEP, CLS)
  p_mask = tf.cast(features["p_mask"], dtype=tf.float32)

  # logit of the start position
  with tf.variable_scope("start_logits"):
    start_logits = tf.layers.dense(
        output,
        1,
        kernel_initializer=modeling.create_initializer(
            albert_config.initializer_range))
    start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0])
    # Masked positions get a large negative logit so softmax ignores them.
    start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask
    start_log_probs = tf.nn.log_softmax(start_logits_masked, -1)

  # logit of the end position
  with tf.variable_scope("end_logits"):
    if is_training:
      # during training, compute the end logits based on the
      # ground truth of the start position
      start_positions = tf.reshape(features["start_positions"], [-1])
      start_index = tf.one_hot(start_positions, depth=max_seq_length,
                               axis=-1, dtype=tf.float32)
      # Gather the hidden state at the gold start position for each example.
      start_features = tf.einsum("lbh,bl->bh", output, start_index)
      start_features = tf.tile(start_features[None], [max_seq_length, 1, 1])
      end_logits = tf.layers.dense(
          tf.concat([output, start_features], axis=-1),
          albert_config.hidden_size,
          kernel_initializer=modeling.create_initializer(
              albert_config.initializer_range),
          activation=tf.tanh,
          name="dense_0")
      end_logits = contrib_layers.layer_norm(end_logits, begin_norm_axis=-1)

      end_logits = tf.layers.dense(
          end_logits,
          1,
          kernel_initializer=modeling.create_initializer(
              albert_config.initializer_range),
          name="dense_1")
      end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0])
      end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask
      end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)
    else:
      # during inference, compute the end logits based on beam search
      start_top_log_probs, start_top_index = tf.nn.top_k(
          start_log_probs, k=start_n_top)
      start_index = tf.one_hot(start_top_index, depth=max_seq_length,
                               axis=-1, dtype=tf.float32)
      # One gathered start feature per beam candidate (k = start_n_top).
      start_features = tf.einsum("lbh,bkl->bkh", output, start_index)
      end_input = tf.tile(output[:, :, None], [1, 1, start_n_top, 1])
      start_features = tf.tile(start_features[None],
                               [max_seq_length, 1, 1, 1])
      end_input = tf.concat([end_input, start_features], axis=-1)
      end_logits = tf.layers.dense(
          end_input,
          albert_config.hidden_size,
          kernel_initializer=modeling.create_initializer(
              albert_config.initializer_range),
          activation=tf.tanh,
          name="dense_0")
      end_logits = contrib_layers.layer_norm(end_logits, begin_norm_axis=-1)
      end_logits = tf.layers.dense(
          end_logits,
          1,
          kernel_initializer=modeling.create_initializer(
              albert_config.initializer_range),
          name="dense_1")
      end_logits = tf.reshape(end_logits, [max_seq_length, -1, start_n_top])
      end_logits = tf.transpose(end_logits, [1, 2, 0])
      end_logits_masked = end_logits * (
          1 - p_mask[:, None]) - 1e30 * p_mask[:, None]
      end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)
      end_top_log_probs, end_top_index = tf.nn.top_k(
          end_log_probs, k=end_n_top)
      # Flatten the (start beam x end beam) grid to one axis per example.
      end_top_log_probs = tf.reshape(
          end_top_log_probs,
          [-1, start_n_top * end_n_top])
      end_top_index = tf.reshape(
          end_top_index,
          [-1, start_n_top * end_n_top])

  if is_training:
    return_dict["start_log_probs"] = start_log_probs
    return_dict["end_log_probs"] = end_log_probs
  else:
    return_dict["start_top_log_probs"] = start_top_log_probs
    return_dict["start_top_index"] = start_top_index
    return_dict["end_top_log_probs"] = end_top_log_probs
    return_dict["end_top_index"] = end_top_index

  # an additional layer to predict answerability
  with tf.variable_scope("answer_class"):
    # get the representation of CLS
    cls_index = tf.one_hot(tf.zeros([bsz], dtype=tf.int32),
                           max_seq_length,
                           axis=-1, dtype=tf.float32)
    cls_feature = tf.einsum("lbh,bl->bh", output, cls_index)

    # get the representation of START
    start_p = tf.nn.softmax(start_logits_masked, axis=-1,
                            name="softmax_start")
    start_feature = tf.einsum("lbh,bl->bh", output, start_p)

    # note(zhiliny): no dependency on end_feature so that we can obtain
    # one single `cls_logits` for each sample
    ans_feature = tf.concat([start_feature, cls_feature], -1)
    ans_feature = tf.layers.dense(
        ans_feature,
        albert_config.hidden_size,
        activation=tf.tanh,
        kernel_initializer=modeling.create_initializer(
            albert_config.initializer_range),
        name="dense_0")
    ans_feature = tf.layers.dropout(ans_feature, dropout_prob,
                                    training=is_training)
    cls_logits = tf.layers.dense(
        ans_feature,
        1,
        kernel_initializer=modeling.create_initializer(
            albert_config.initializer_range),
        name="dense_1",
        use_bias=False)
    cls_logits = tf.squeeze(cls_logits, -1)

    return_dict["cls_logits"] = cls_logits

  return return_dict


def v2_model_fn_builder(albert_config, init_checkpoint, learning_rate,
                        num_train_steps, num_warmup_steps, use_tpu,
                        use_one_hot_embeddings, max_seq_length, start_n_top,
                        end_n_top, dropout_prob, hub_module):
  """Returns `model_fn` closure for TPUEstimator."""

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))

    # unique_ids = features["unique_ids"]
    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]

    is_training = (mode == tf_estimator.ModeKeys.TRAIN)

    outputs = create_v2_model(
        albert_config=albert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings,
        features=features,
        max_seq_length=max_seq_length,
        start_n_top=start_n_top,
        end_n_top=end_n_top,
        dropout_prob=dropout_prob,
        hub_module=hub_module)

    tvars = tf.trainable_variables()

    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        # On TPU, checkpoint restoration must happen inside a Scaffold so it
        # runs on the worker rather than the coordinator.
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf_estimator.ModeKeys.TRAIN:
      seq_length = modeling.get_shape_list(input_ids)[1]

      def compute_loss(log_probs, positions):
        # Negative log-likelihood of the gold position (inputs are already
        # log-softmaxed).
        one_hot_positions = tf.one_hot(
            positions, depth=seq_length, dtype=tf.float32)

        loss = - tf.reduce_sum(one_hot_positions * log_probs, axis=-1)
        loss = tf.reduce_mean(loss)
        return loss

      start_loss = compute_loss(
          outputs["start_log_probs"], features["start_positions"])
      end_loss = compute_loss(
          outputs["end_log_probs"], features["end_positions"])

      total_loss = (start_loss + end_loss) * 0.5

      cls_logits = outputs["cls_logits"]
      is_impossible = tf.reshape(features["is_impossible"], [-1])
      regression_loss = tf.nn.sigmoid_cross_entropy_with_logits(
          labels=tf.cast(is_impossible, dtype=tf.float32),
          logits=cls_logits)
      regression_loss = tf.reduce_mean(regression_loss)

      # note(zhiliny): by default multiply the loss by 0.5 so that the scale
      # is comparable to start_loss and end_loss
      total_loss += regression_loss * 0.5

      train_op = optimization.create_optimizer(total_loss, learning_rate,
                                               num_train_steps,
                                               num_warmup_steps, use_tpu)

      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf_estimator.ModeKeys.PREDICT:
      predictions = {
          "unique_ids": features["unique_ids"],
          "start_top_index": outputs["start_top_index"],
          "start_top_log_probs": outputs["start_top_log_probs"],
          "end_top_index": outputs["end_top_index"],
          "end_top_log_probs": outputs["end_top_log_probs"],
          "cls_logits": outputs["cls_logits"]
      }
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
    else:
      raise ValueError("Only TRAIN and PREDICT modes are supported: %s" %
                       (mode))

    return output_spec

  return model_fn
Returns `model_fn` closure for TPUEstimator.
8,786
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import math import re import string import sys from albert import fine_tuning_utils from albert import modeling from albert import optimization from albert import tokenization import numpy as np import six from six.moves import map from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import estimator as tf_estimator from tensorflow.contrib import layers as contrib_layers from tensorflow.contrib import tpu as contrib_tpu def make_qid_to_has_ans(dataset): qid_to_has_ans = {} for article in dataset: for p in article['paragraphs']: for qa in p['qas']: qid_to_has_ans[qa['id']] = bool(qa['answers']) return qid_to_has_ans def get_raw_scores(dataset, preds): exact_scores = {} f1_scores = {} for article in dataset: for p in article['paragraphs']: for qa in p['qas']: qid = qa['id'] gold_answers = [a['text'] for a in qa['answers'] if normalize_answer_v2(a['text'])] if not gold_answers: # For unanswerable questions, only correct answer is empty string gold_answers = [''] if qid not in preds: print('Missing prediction for %s' % qid) continue a_pred = preds[qid] # Take max over all gold answers exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) return exact_scores, f1_scores def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): new_scores = {} for qid, s in scores.items(): pred_na = na_probs[qid] > na_prob_thresh if pred_na: new_scores[qid] = float(not qid_to_has_ans[qid]) else: new_scores[qid] = s return new_scores def make_eval_dict(exact_scores, f1_scores, qid_list=None): if not qid_list: total = len(exact_scores) return collections.OrderedDict([ ('exact', 100.0 * sum(exact_scores.values()) / total), ('f1', 100.0 * sum(f1_scores.values()) / total), ('total', total), ]) else: total = 
len(qid_list) return collections.OrderedDict([ ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total), ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total), ('total', total), ]) def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) main_eval['best_exact'] = best_exact main_eval['best_exact_thresh'] = exact_thresh main_eval['best_f1'] = best_f1 main_eval['best_f1_thresh'] = f1_thresh def write_predictions_v2(result_dict, cls_dict, all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, null_score_diff_threshold): """Write final predictions to the json file and log-odds of null if needed.""" tf.logging.info("Writing predictions to: %s" % (output_prediction_file)) tf.logging.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 # score_null = 1000000 # large and positive for (feature_index, feature) in enumerate(features): for ((start_idx, end_idx), logprobs) in \ result_dict[example_index][feature.unique_id].items(): start_log_prob = 0 end_log_prob = 0 for logprob in logprobs: start_log_prob += logprob[0] end_log_prob += logprob[1] prelim_predictions.append( _PrelimPrediction( 
feature_index=feature_index, start_index=start_idx, end_index=end_idx, start_log_prob=start_log_prob / len(logprobs), end_log_prob=end_log_prob / len(logprobs))) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] tok_start_to_orig_index = feature.tok_start_to_orig_index tok_end_to_orig_index = feature.tok_end_to_orig_index start_orig_pos = tok_start_to_orig_index[pred.start_index] end_orig_pos = tok_end_to_orig_index[pred.end_index] paragraph_text = example.paragraph_text final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() if final_text in seen_predictions: continue seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction( text="", start_log_prob=-1e6, end_log_prob=-1e6)) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_log_prob + entry.end_log_prob) if not best_non_null_entry: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_log_prob"] = entry.start_log_prob output["end_log_prob"] = entry.end_log_prob nbest_json.append(output) assert len(nbest_json) >= 1 assert best_non_null_entry is not None score_diff = sum(cls_dict[example_index]) / len(cls_dict[example_index]) scores_diff_json[example.qas_id] = score_diff # predict null answers when null threshold is provided if null_score_diff_threshold is None or score_diff < null_score_diff_threshold: all_predictions[example.qas_id] = best_non_null_entry.text else: all_predictions[example.qas_id] = "" all_nbest_json[example.qas_id] = nbest_json assert len(nbest_json) >= 1 with tf.gfile.GFile(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with tf.gfile.GFile(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") with tf.gfile.GFile(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions, scores_diff_json def evaluate_v2(result_dict, cls_dict, prediction_json, eval_examples, eval_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file): null_score_diff_threshold = None predictions, na_probs = write_predictions_v2( result_dict, cls_dict, eval_examples, eval_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, null_score_diff_threshold) na_prob_thresh = 1.0 # default value taken from 
the eval script qid_to_has_ans = make_qid_to_has_ans(prediction_json) # maps qid to True/False has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw = get_raw_scores(prediction_json, predictions) exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, na_prob_thresh) f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, na_prob_thresh) out_eval = make_eval_dict(exact_thresh, f1_thresh) find_all_best_thresh(out_eval, predictions, exact_raw, f1_raw, na_probs, qid_to_has_ans) null_score_diff_threshold = out_eval["best_f1_thresh"] predictions, na_probs = write_predictions_v2( result_dict, cls_dict,eval_examples, eval_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, null_score_diff_threshold) qid_to_has_ans = make_qid_to_has_ans(prediction_json) # maps qid to True/False has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw = get_raw_scores(prediction_json, predictions) exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, na_prob_thresh) f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, na_prob_thresh) out_eval = make_eval_dict(exact_thresh, f1_thresh) out_eval["null_score_diff_threshold"] = null_score_diff_threshold return out_eval
null
8,787
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl import app from absl import flags from albert import modeling import tensorflow.compat.v1 as tf FLAGS = flags.FLAGS def get_mlm_logits(input_tensor, albert_config, mlm_positions, output_weights): """From run_pretraining.py.""" input_tensor = gather_indexes(input_tensor, mlm_positions) with tf.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope("transform"): input_tensor = tf.layers.dense( input_tensor, units=albert_config.embedding_size, activation=modeling.get_activation(albert_config.hidden_act), kernel_initializer=modeling.create_initializer( albert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[albert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul( input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) return logits def get_sentence_order_logits(input_tensor, albert_config): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. 
with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, albert_config.hidden_size], initializer=modeling.create_initializer( albert_config.initializer_range)) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) return logits The provided code snippet includes necessary dependencies for implementing the `build_model` function. Write a Python function `def build_model(sess)` to solve the following problem: Module function. Here is the function: def build_model(sess): """Module function.""" input_ids = tf.placeholder(tf.int32, [None, None], "input_ids") input_mask = tf.placeholder(tf.int32, [None, None], "input_mask") segment_ids = tf.placeholder(tf.int32, [None, None], "segment_ids") mlm_positions = tf.placeholder(tf.int32, [None, None], "mlm_positions") albert_config_path = os.path.join( FLAGS.albert_directory, "albert_config.json") albert_config = modeling.AlbertConfig.from_json_file(albert_config_path) model = modeling.AlbertModel( config=albert_config, is_training=False, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=False) get_mlm_logits(model.get_sequence_output(), albert_config, mlm_positions, model.get_embedding_table()) get_sentence_order_logits(model.get_pooled_output(), albert_config) checkpoint_path = os.path.join(FLAGS.albert_directory, FLAGS.checkpoint_name) tvars = tf.trainable_variables() (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, checkpoint_path) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) tf.train.init_from_checkpoint(checkpoint_path, assignment_map) 
init = tf.global_variables_initializer() sess.run(init) return sess
Module function.
8,788
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import six from six.moves import range import tensorflow.compat.v1 as tf import tensorflow_hub as hub import sentencepiece as spm The provided code snippet includes necessary dependencies for implementing the `preprocess_text` function. Write a Python function `def preprocess_text(inputs, remove_space=True, lower=False)` to solve the following problem: preprocess data by removing extra space and normalize data. Here is the function: def preprocess_text(inputs, remove_space=True, lower=False): """preprocess data by removing extra space and normalize data.""" outputs = inputs if remove_space: outputs = " ".join(inputs.strip().split()) if six.PY2 and isinstance(outputs, str): try: outputs = six.ensure_text(outputs, "utf-8") except UnicodeDecodeError: outputs = six.ensure_text(outputs, "latin-1") outputs = unicodedata.normalize("NFKD", outputs) outputs = "".join([c for c in outputs if not unicodedata.combining(c)]) if lower: outputs = outputs.lower() return outputs
preprocess data by removing extra space and normalize data.
8,789
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import six from six.moves import range import tensorflow.compat.v1 as tf import tensorflow_hub as hub import sentencepiece as spm def encode_pieces(sp_model, text, return_unicode=True, sample=False): """turn sentences into word pieces.""" if six.PY2 and isinstance(text, six.text_type): text = six.ensure_binary(text, "utf-8") if not sample: pieces = sp_model.EncodeAsPieces(text) else: pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1) new_pieces = [] for piece in pieces: piece = printable_text(piece) if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit(): cur_pieces = sp_model.EncodeAsPieces( six.ensure_binary(piece[:-1]).replace(SPIECE_UNDERLINE, b"")) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: cur_pieces = cur_pieces[1:] else: cur_pieces[0] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(cur_pieces) else: new_pieces.append(piece) # note(zhiliny): convert back to unicode for py2 if six.PY2 and return_unicode: ret_pieces = [] for piece in new_pieces: if isinstance(piece, str): piece = six.ensure_text(piece, "utf-8") ret_pieces.append(piece) new_pieces = ret_pieces return new_pieces def encode_ids(sp_model, text, sample=False): pieces = encode_pieces(sp_model, text, return_unicode=False, sample=sample) ids = [sp_model.PieceToId(piece) for piece in pieces] return ids
null
8,790
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import six from six.moves import range import tensorflow.compat.v1 as tf import tensorflow_hub as hub import sentencepiece as spm def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return six.ensure_text(text, "utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return six.ensure_text(text, "utf-8", "ignore") elif isinstance(text, six.text_type): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem: Loads a vocabulary file into a dictionary. Here is the function: def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with tf.gfile.GFile(vocab_file, "r") as reader: while True: token = convert_to_unicode(reader.readline()) if not token: break token = token.strip().split()[0] if token.strip() else " " if token not in vocab: vocab[token] = len(vocab) return vocab
Loads a vocabulary file into a dictionary.
8,791
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import six from six.moves import range import tensorflow.compat.v1 as tf import tensorflow_hub as hub import sentencepiece as spm def convert_by_vocab(vocab, items): """Converts a sequence of [tokens|ids] using the vocab.""" output = [] for item in items: output.append(vocab[item]) return output def convert_tokens_to_ids(vocab, tokens): return convert_by_vocab(vocab, tokens)
null
8,792
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import six from six.moves import range import tensorflow.compat.v1 as tf import tensorflow_hub as hub import sentencepiece as spm def convert_by_vocab(vocab, items): """Converts a sequence of [tokens|ids] using the vocab.""" output = [] for item in items: output.append(vocab[item]) return output def convert_ids_to_tokens(inv_vocab, ids): return convert_by_vocab(inv_vocab, ids)
null
8,793
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import six from six.moves import range import tensorflow.compat.v1 as tf import tensorflow_hub as hub import sentencepiece as spm The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem: Runs basic whitespace cleaning and splitting on a piece of text. Here is the function: def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens
Runs basic whitespace cleaning and splitting on a piece of text.
8,794
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import six from six.moves import range import tensorflow.compat.v1 as tf import tensorflow_hub as hub import sentencepiece as spm The provided code snippet includes necessary dependencies for implementing the `_is_whitespace` function. Write a Python function `def _is_whitespace(char)` to solve the following problem: Checks whether `chars` is a whitespace character. Here is the function: def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False
Checks whether `chars` is a whitespace character.
8,795
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import six from six.moves import range import tensorflow.compat.v1 as tf import tensorflow_hub as hub import sentencepiece as spm The provided code snippet includes necessary dependencies for implementing the `_is_control` function. Write a Python function `def _is_control(char)` to solve the following problem: Checks whether `chars` is a control character. Here is the function: def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat in ("Cc", "Cf"): return True return False
Checks whether `chars` is a control character.
8,796
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import six from six.moves import range import tensorflow.compat.v1 as tf import tensorflow_hub as hub import sentencepiece as spm The provided code snippet includes necessary dependencies for implementing the `_is_punctuation` function. Write a Python function `def _is_punctuation(char)` to solve the following problem: Checks whether `chars` is a punctuation character. Here is the function: def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
Checks whether `chars` is a punctuation character.
8,797
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import numpy as np import six from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers def gelu(x): """Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. """ cdf = 0.5 * (1.0 + tf.tanh( (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) return x * cdf The provided code snippet includes necessary dependencies for implementing the `get_activation` function. Write a Python function `def get_activation(activation_string)` to solve the following problem: Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`. Raises: ValueError: The `activation_string` does not correspond to a known activation. Here is the function: def get_activation(activation_string): """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`. Raises: ValueError: The `activation_string` does not correspond to a known activation. """ # We assume that anything that"s not a string is already an activation # function, so we just return it. 
if not isinstance(activation_string, six.string_types): return activation_string if not activation_string: return None act = activation_string.lower() if act == "linear": return None elif act == "relu": return tf.nn.relu elif act == "gelu": return gelu elif act == "tanh": return tf.tanh else: raise ValueError("Unsupported activation: %s" % act)
Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`. Raises: ValueError: The `activation_string` does not correspond to a known activation.
8,798
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import numpy as np import six from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers The provided code snippet includes necessary dependencies for implementing the `get_assignment_map_from_checkpoint` function. Write a Python function `def get_assignment_map_from_checkpoint(tvars, init_checkpoint, num_of_group=0)` to solve the following problem: Compute the union of the current variables and checkpoint variables. Here is the function: def get_assignment_map_from_checkpoint(tvars, init_checkpoint, num_of_group=0): """Compute the union of the current variables and checkpoint variables.""" assignment_map = {} initialized_variable_names = {} name_to_variable = collections.OrderedDict() for var in tvars: name = var.name m = re.match("^(.*):\\d+$", name) if m is not None: name = m.group(1) name_to_variable[name] = var init_vars = tf.train.list_variables(init_checkpoint) init_vars_name = [name for (name, _) in init_vars] if num_of_group > 0: assignment_map = [] for gid in range(num_of_group): assignment_map.append(collections.OrderedDict()) else: assignment_map = collections.OrderedDict() for name in name_to_variable: if name in init_vars_name: tvar_name = name elif (re.sub(r"/group_\d+/", "/group_0/", six.ensure_str(name)) in init_vars_name and num_of_group > 1): tvar_name = re.sub(r"/group_\d+/", "/group_0/", six.ensure_str(name)) elif (re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name)) in init_vars_name and num_of_group > 1): tvar_name = re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name)) elif (re.sub(r"/attention_\d+/", "/attention_1/", six.ensure_str(name)) in init_vars_name and num_of_group > 1): tvar_name = re.sub(r"/attention_\d+/", "/attention_1/", six.ensure_str(name)) else: tf.logging.info("name %s does not get matched", 
name) continue tf.logging.info("name %s match to %s", name, tvar_name) if num_of_group > 0: group_matched = False for gid in range(1, num_of_group): if (("/group_" + str(gid) + "/" in name) or ("/ffn_" + str(gid) + "/" in name) or ("/attention_" + str(gid) + "/" in name)): group_matched = True tf.logging.info("%s belongs to %dth", name, gid) assignment_map[gid][tvar_name] = name if not group_matched: assignment_map[0][tvar_name] = name else: assignment_map[tvar_name] = name initialized_variable_names[name] = 1 initialized_variable_names[six.ensure_str(name) + ":0"] = 1 return (assignment_map, initialized_variable_names)
Compute the union of the current variables and checkpoint variables.
8,799
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import numpy as np import six from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers The provided code snippet includes necessary dependencies for implementing the `get_timing_signal_1d_given_position` function. Write a Python function `def get_timing_signal_1d_given_position(channels, position, min_timescale=1.0, max_timescale=1.0e4)` to solve the following problem: Get sinusoids of diff frequencies, with timing position given. Adapted from add_timing_signal_1d_given_position in //third_party/py/tensor2tensor/layers/common_attention.py Args: channels: scalar, size of timing embeddings to create. The number of different timescales is equal to channels / 2. position: a Tensor with shape [batch, seq_len] min_timescale: a float max_timescale: a float Returns: a Tensor of timing signals [batch, seq_len, channels] Here is the function: def get_timing_signal_1d_given_position(channels, position, min_timescale=1.0, max_timescale=1.0e4): """Get sinusoids of diff frequencies, with timing position given. Adapted from add_timing_signal_1d_given_position in //third_party/py/tensor2tensor/layers/common_attention.py Args: channels: scalar, size of timing embeddings to create. The number of different timescales is equal to channels / 2. 
position: a Tensor with shape [batch, seq_len] min_timescale: a float max_timescale: a float Returns: a Tensor of timing signals [batch, seq_len, channels] """ num_timescales = channels // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.to_float(num_timescales) - 1)) inv_timescales = min_timescale * tf.exp( tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) scaled_time = ( tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims( tf.expand_dims(inv_timescales, 0), 0)) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2) signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]]) return signal
Get sinusoids of diff frequencies, with timing position given. Adapted from add_timing_signal_1d_given_position in //third_party/py/tensor2tensor/layers/common_attention.py Args: channels: scalar, size of timing embeddings to create. The number of different timescales is equal to channels / 2. position: a Tensor with shape [batch, seq_len] min_timescale: a float max_timescale: a float Returns: a Tensor of timing signals [batch, seq_len, channels]
8,800
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import numpy as np import six from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers def layer_norm_and_dropout(input_tensor, dropout_prob, name=None): """Runs layer normalization followed by dropout.""" output_tensor = layer_norm(input_tensor, name) output_tensor = dropout(output_tensor, dropout_prob) return output_tensor def create_initializer(initializer_range=0.02): """Creates a `truncated_normal_initializer` with the given range.""" return tf.truncated_normal_initializer(stddev=initializer_range) def embedding_lookup(input_ids, vocab_size, embedding_size=128, initializer_range=0.02, word_embedding_name="word_embeddings", use_one_hot_embeddings=False): """Looks up words embeddings for id tensor. Args: input_ids: int32 Tensor of shape [batch_size, seq_length] containing word ids. vocab_size: int. Size of the embedding vocabulary. embedding_size: int. Width of the word embeddings. initializer_range: float. Embedding initialization range. word_embedding_name: string. Name of the embedding table. use_one_hot_embeddings: bool. If True, use one-hot method for word embeddings. If False, use `tf.nn.embedding_lookup()`. Returns: float Tensor of shape [batch_size, seq_length, embedding_size]. """ # This function assumes that the input is of shape [batch_size, seq_length, # num_inputs]. # # If the input is a 2D tensor of shape [batch_size, seq_length], we # reshape to [batch_size, seq_length, 1]. 
if input_ids.shape.ndims == 2: input_ids = tf.expand_dims(input_ids, axis=[-1]) embedding_table = tf.get_variable( name=word_embedding_name, shape=[vocab_size, embedding_size], initializer=create_initializer(initializer_range)) if use_one_hot_embeddings: flat_input_ids = tf.reshape(input_ids, [-1]) one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size) output = tf.matmul(one_hot_input_ids, embedding_table) else: output = tf.nn.embedding_lookup(embedding_table, input_ids) input_shape = get_shape_list(input_ids) output = tf.reshape(output, input_shape[0:-1] + [input_shape[-1] * embedding_size]) return (output, embedding_table) def get_shape_list(tensor, expected_rank=None, name=None): """Returns a list of the shape of tensor, preferring static dimensions. Args: tensor: A tf.Tensor object to find the shape of. expected_rank: (optional) int. The expected rank of `tensor`. If this is specified and the `tensor` has a different rank, and exception will be thrown. name: Optional name of the tensor for the error message. Returns: A list of dimensions of the shape of tensor. All static dimensions will be returned as python integers, and dynamic dimensions will be returned as tf.Tensor scalars. """ if name is None: name = tensor.name if expected_rank is not None: assert_rank(tensor, expected_rank, name) shape = tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dyn_shape = tf.shape(tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] return shape The provided code snippet includes necessary dependencies for implementing the `embedding_postprocessor` function. 
Write a Python function `def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1, use_one_hot_embeddings=True)` to solve the following problem: Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. use_one_hot_embeddings: bool. If True, use one-hot method for word embeddings. If False, use `tf.nn.embedding_lookup()`. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid. 
Here is the function: def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1, use_one_hot_embeddings=True): """Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. use_one_hot_embeddings: bool. If True, use one-hot method for word embeddings. If False, use `tf.nn.embedding_lookup()`. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid. 
""" input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = input_tensor if use_token_type: if token_type_ids is None: raise ValueError("`token_type_ids` must be specified if" "`use_token_type` is True.") token_type_table = tf.get_variable( name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range)) # This vocab will be small so we always do one-hot here, since it is always # faster for a small vocabulary, unless converting to tflite model. if use_one_hot_embeddings: flat_token_type_ids = tf.reshape(token_type_ids, [-1]) one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) else: token_type_embeddings = tf.nn.embedding_lookup(token_type_table, token_type_ids) output += token_type_embeddings if use_position_embeddings: assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) with tf.control_dependencies([assert_op]): full_position_embeddings = tf.get_variable( name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range)) # Since the position embedding table is a learned variable, we create it # using a (long) sequence length `max_position_embeddings`. The actual # sequence length might be shorter than this, for faster training of # tasks that do not have long sequences. # # So `full_position_embeddings` is effectively an embedding table # for position [0, 1, 2, ..., max_position_embeddings-1], and the current # sequence has positions [0, 1, 2, ... seq_length-1], so we can just # perform a slice. 
position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1]) num_dims = len(output.shape.as_list()) # Only the last two dimensions are relevant (`seq_length` and `width`), so # we broadcast among the first dimensions, which is typically just # the batch size. position_broadcast_shape = [] for _ in range(num_dims - 2): position_broadcast_shape.append(1) position_broadcast_shape.extend([seq_length, width]) position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape) output += position_embeddings output = layer_norm_and_dropout(output, dropout_prob) return output
Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. use_one_hot_embeddings: bool. If True, use one-hot method for word embeddings. If False, use `tf.nn.embedding_lookup()`. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid.
8,801
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import numpy as np import six from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers def create_initializer(initializer_range=0.02): """Creates a `truncated_normal_initializer` with the given range.""" return tf.truncated_normal_initializer(stddev=initializer_range) def dense_layer_2d(input_tensor, output_size, initializer, activation, use_einsum, num_attention_heads=1, name=None): """A dense layer with 2D kernel. Args: input_tensor: Float tensor with rank 3. output_size: The size of output dimension. initializer: Kernel initializer. activation: Activation function. use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers. num_attention_heads: number of attention head in attention layer. name: The name scope of this layer. Returns: float logits Tensor. """ del num_attention_heads # unused input_shape = get_shape_list(input_tensor) hidden_size = input_shape[2] with tf.variable_scope(name): w = tf.get_variable( name="kernel", shape=[hidden_size, output_size], initializer=initializer) b = tf.get_variable( name="bias", shape=[output_size], initializer=tf.zeros_initializer) if use_einsum: ret = tf.einsum("BFH,HO->BFO", input_tensor, w) else: ret = tf.matmul(input_tensor, w) ret += b if activation is not None: return activation(ret) else: return ret def attention_ffn_block(layer_input, hidden_size=768, attention_mask=None, num_attention_heads=1, attention_head_size=64, attention_probs_dropout_prob=0.0, intermediate_size=3072, intermediate_act_fn=None, initializer_range=0.02, hidden_dropout_prob=0.0, use_einsum=True): """A network with attention-ffn as sub-block. Args: layer_input: float Tensor of shape [batch_size, from_seq_length, from_width]. hidden_size: (optional) int, size of hidden layer. 
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length]. The values should be 1 or 0. The attention scores will effectively be set to -infinity for any positions in the mask that are 0, and will be unchanged for positions that are 1. num_attention_heads: int. Number of attention heads. attention_head_size: int. Size of attention head. attention_probs_dropout_prob: float. dropout probability for attention_layer intermediate_size: int. Size of intermediate hidden layer. intermediate_act_fn: (optional) Activation function for the intermediate layer. initializer_range: float. Range of the weight initializer. hidden_dropout_prob: (optional) float. Dropout probability of the hidden layer. use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers Returns: layer output """ with tf.variable_scope("attention_1"): with tf.variable_scope("self"): attention_output = attention_layer( from_tensor=layer_input, to_tensor=layer_input, attention_mask=attention_mask, num_attention_heads=num_attention_heads, attention_probs_dropout_prob=attention_probs_dropout_prob, initializer_range=initializer_range, use_einsum=use_einsum) # Run a linear projection of `hidden_size` then add a residual # with `layer_input`. 
with tf.variable_scope("output"): attention_output = dense_layer_3d_proj( attention_output, hidden_size, attention_head_size, create_initializer(initializer_range), None, use_einsum=use_einsum, name="dense") attention_output = dropout(attention_output, hidden_dropout_prob) attention_output = layer_norm(attention_output + layer_input) with tf.variable_scope("ffn_1"): with tf.variable_scope("intermediate"): intermediate_output = dense_layer_2d( attention_output, intermediate_size, create_initializer(initializer_range), intermediate_act_fn, use_einsum=use_einsum, num_attention_heads=num_attention_heads, name="dense") with tf.variable_scope("output"): ffn_output = dense_layer_2d( intermediate_output, hidden_size, create_initializer(initializer_range), None, use_einsum=use_einsum, num_attention_heads=num_attention_heads, name="dense") ffn_output = dropout(ffn_output, hidden_dropout_prob) ffn_output = layer_norm(ffn_output + attention_output) return ffn_output def get_shape_list(tensor, expected_rank=None, name=None): """Returns a list of the shape of tensor, preferring static dimensions. Args: tensor: A tf.Tensor object to find the shape of. expected_rank: (optional) int. The expected rank of `tensor`. If this is specified and the `tensor` has a different rank, and exception will be thrown. name: Optional name of the tensor for the error message. Returns: A list of dimensions of the shape of tensor. All static dimensions will be returned as python integers, and dynamic dimensions will be returned as tf.Tensor scalars. 
""" if name is None: name = tensor.name if expected_rank is not None: assert_rank(tensor, expected_rank, name) shape = tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dyn_shape = tf.shape(tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] return shape The provided code snippet includes necessary dependencies for implementing the `transformer_model` function. Write a Python function `def transformer_model(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_hidden_groups=12, num_attention_heads=12, intermediate_size=3072, inner_group_num=1, intermediate_act_fn="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_return_all_layers=False, use_einsum=True)` to solve the following problem: Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_hidden_groups: int. Number of group for the hidden layers, parameters in the same group are shared. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. inner_group_num: int, number of inner repetition of attention and ffn. intermediate_act_fn: function. 
The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid. Here is the function: def transformer_model(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_hidden_groups=12, num_attention_heads=12, intermediate_size=3072, inner_group_num=1, intermediate_act_fn="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_return_all_layers=False, use_einsum=True): """Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_hidden_groups: int. Number of group for the hidden layers, parameters in the same group are shared. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. 
inner_group_num: int, number of inner repetition of attention and ffn. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid. """ if hidden_size % num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, num_attention_heads)) attention_head_size = hidden_size // num_attention_heads input_shape = get_shape_list(input_tensor, expected_rank=3) input_width = input_shape[2] all_layer_outputs = [] if input_width != hidden_size: prev_output = dense_layer_2d( input_tensor, hidden_size, create_initializer(initializer_range), None, use_einsum=use_einsum, name="embedding_hidden_mapping_in") else: prev_output = input_tensor with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE): for layer_idx in range(num_hidden_layers): group_idx = int(layer_idx / num_hidden_layers * num_hidden_groups) with tf.variable_scope("group_%d" % group_idx): with tf.name_scope("layer_%d" % layer_idx): layer_output = prev_output for inner_group_idx in range(inner_group_num): with tf.variable_scope("inner_group_%d" % inner_group_idx): layer_output = attention_ffn_block( layer_input=layer_output, hidden_size=hidden_size, attention_mask=attention_mask, num_attention_heads=num_attention_heads, attention_head_size=attention_head_size, 
attention_probs_dropout_prob=attention_probs_dropout_prob, intermediate_size=intermediate_size, intermediate_act_fn=intermediate_act_fn, initializer_range=initializer_range, hidden_dropout_prob=hidden_dropout_prob, use_einsum=use_einsum) prev_output = layer_output all_layer_outputs.append(layer_output) if do_return_all_layers: return all_layer_outputs else: return all_layer_outputs[-1]
Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_hidden_groups: int. Number of group for the hidden layers, parameters in the same group are shared. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. inner_group_num: int, number of inner repetition of attention and ffn. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid.
8,802
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import numpy as np import six from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers The provided code snippet includes necessary dependencies for implementing the `reshape_to_matrix` function. Write a Python function `def reshape_to_matrix(input_tensor)` to solve the following problem: Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix). Here is the function: def reshape_to_matrix(input_tensor): """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).""" ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError("Input tensor must have at least rank 2. Shape = %s" % (input_tensor.shape)) if ndims == 2: return input_tensor width = input_tensor.shape[-1] output_tensor = tf.reshape(input_tensor, [-1, width]) return output_tensor
Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).
8,803
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import numpy as np import six from six.moves import range import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers def get_shape_list(tensor, expected_rank=None, name=None): """Returns a list of the shape of tensor, preferring static dimensions. Args: tensor: A tf.Tensor object to find the shape of. expected_rank: (optional) int. The expected rank of `tensor`. If this is specified and the `tensor` has a different rank, and exception will be thrown. name: Optional name of the tensor for the error message. Returns: A list of dimensions of the shape of tensor. All static dimensions will be returned as python integers, and dynamic dimensions will be returned as tf.Tensor scalars. """ if name is None: name = tensor.name if expected_rank is not None: assert_rank(tensor, expected_rank, name) shape = tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dyn_shape = tf.shape(tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] return shape The provided code snippet includes necessary dependencies for implementing the `reshape_from_matrix` function. Write a Python function `def reshape_from_matrix(output_tensor, orig_shape_list)` to solve the following problem: Reshapes a rank 2 tensor back to its original rank >= 2 tensor. Here is the function: def reshape_from_matrix(output_tensor, orig_shape_list): """Reshapes a rank 2 tensor back to its original rank >= 2 tensor.""" if len(orig_shape_list) == 2: return output_tensor output_shape = get_shape_list(output_tensor) orig_dims = orig_shape_list[0:-1] width = output_shape[-1] return tf.reshape(output_tensor, orig_dims + [width])
Reshapes a rank 2 tensor back to its original rank >= 2 tensor.
8,804
import re import sys import codecs import socket import hashlib from subprocess import Popen from calibre.utils.logging import Log from ..lib.cssselect import GenericTranslator, SelectorError def css(seletor): try: return GenericTranslator().css_to_xpath(seletor, prefix='self::x:') except SelectorError: return None
null
8,805
import re import sys import codecs import socket import hashlib from subprocess import Popen from calibre.utils.logging import Log from ..lib.cssselect import GenericTranslator, SelectorError def chunk(items, length=0): if length < 1: for item in items: yield [item] return item_length = len(items) length = item_length if length > item_length else length chunk_size = item_length / length for i in range(length): yield items[int(chunk_size*i):int(chunk_size*(i+1))]
null
8,806
import re import sys import codecs import socket import hashlib from subprocess import Popen from calibre.utils.logging import Log from ..lib.cssselect import GenericTranslator, SelectorError def group(numbers): ranges = [] current_range = [] numbers = sorted(numbers) for number in numbers: if not current_range: current_range = [number, number] elif number - current_range[-1] == 1: current_range[-1] = number else: ranges.append(tuple(current_range)) current_range = [number, number] ranges.append(tuple(current_range)) return ranges
null
8,807
import re import sys import codecs import socket import hashlib from subprocess import Popen from calibre.utils.logging import Log from ..lib.cssselect import GenericTranslator, SelectorError def sorted_mixed_keys(s): # https://docs.python.org/3/reference/expressions.html#value-comparisons return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', s)]
null
8,808
import re import sys import codecs import socket import hashlib from subprocess import Popen from calibre.utils.logging import Log from ..lib.cssselect import GenericTranslator, SelectorError def is_proxy_availiable(host, port, timeout=1): try: host = host.replace('http://', '') socket.create_connection((host, int(port)), timeout).close() except Exception: return False return True
null
8,809
import re import sys import codecs import socket import hashlib from subprocess import Popen from calibre.utils.logging import Log from ..lib.cssselect import GenericTranslator, SelectorError def size_by_unit(number, unit='KB'): unit = unit.upper() multiple = {'KB': 1, 'MB': 2} if unit not in multiple: unit = 'KB' return round(float(number) / (1000 ** multiple[unit]), 2)
null
8,810
import re import sys import codecs import socket import hashlib from subprocess import Popen from calibre.utils.logging import Log from ..lib.cssselect import GenericTranslator, SelectorError def open_path(path): cmd = 'open' if sys.platform.startswith('win32'): cmd = 'explorer' if sys.platform.startswith('linux'): cmd = 'xdg-open' Popen([cmd, path])
null
8,811
import re import sys import codecs import socket import hashlib from subprocess import Popen from calibre.utils.logging import Log from ..lib.cssselect import GenericTranslator, SelectorError def dummy(*args, **kwargs): pass
null
8,812
import os import shutil import os.path from types import MethodType from tempfile import gettempdir from calibre.gui2 import Dispatcher from calibre.constants import DEBUG, __version__ from calibre.ebooks.conversion.plumber import Plumber from calibre.ptempfile import PersistentTemporaryFile from calibre.ebooks.metadata.meta import get_metadata, set_metadata from .. import EbookTranslator from .config import get_config from .utils import log, sep, uid, open_path, open_file from .cache import get_cache, TranslationCache from .element import ( Extraction, get_element_handler, get_srt_elements, get_toc_elements, get_page_elements, get_metadata_elements, get_pgn_elements) from .translation import get_translator, get_translation def extract_book(input_path): elements = [] output_path = os.path.join(gettempdir(), 'temp.epub') plumber = Plumber(input_path, output_path, log=log) def convert(self, oeb, output_path, input_plugin, opts, log): # for item in oeb.manifest.items: # if item.media_type == 'text/css': # for rule in item.data.cssRules: # print('='*20) # # CSSStyleRule or CSSPageRule # print(type(rule)) # # CSSStyleDeclaration # print(rule.style.keys()) elements.extend(get_metadata_elements(oeb.metadata)) elements.extend(get_toc_elements(oeb.toc.nodes, [])) elements.extend(get_page_elements(oeb.manifest.items)) plumber.output_plugin.convert = MethodType(convert, plumber.output_plugin) plumber.run() return elements def get_srt_elements(path): elements = [] content = open_file(path) for section in content.split('\n\n'): lines = section.split('\n') number = lines.pop(0) time = lines.pop(0) content = '\n'.join(lines) elements.append(SrtElement([number, time, content])) return elements def get_pgn_elements(path): pattern = re.compile(r'\{[^}]*[a-zA-z][^}]*\}') originals = pattern.findall(open_file(path)) return [PgnElement([original, None]) for original in originals] def extract_item(input_path, input_format): extractors = { 'srt': get_srt_elements, 'pgn': 
get_pgn_elements, } extractor = extractors.get(input_format) or extract_book return extractor(input_path)
null
8,813
import os import shutil import os.path from types import MethodType from tempfile import gettempdir from calibre.gui2 import Dispatcher from calibre.constants import DEBUG, __version__ from calibre.ebooks.conversion.plumber import Plumber from calibre.ptempfile import PersistentTemporaryFile from calibre.ebooks.metadata.meta import get_metadata, set_metadata from .. import EbookTranslator from .config import get_config from .utils import log, sep, uid, open_path, open_file from .cache import get_cache, TranslationCache from .element import ( Extraction, get_element_handler, get_srt_elements, get_toc_elements, get_page_elements, get_metadata_elements, get_pgn_elements) from .translation import get_translator, get_translation def convert_book(input_path, output_path, translation, element_handler, cache, debug_info, notification): """Process ebooks that Calibre supported.""" plumber = Plumber( input_path, output_path, log=log, report_progress=notification) _convert = plumber.output_plugin.convert elements = [] def convert(self, oeb, output_path, input_plugin, opts, log): log.info('Translating ebook content... (this will take a while)') log.info(debug_info) translation.set_progress(self.report_progress) elements.extend(get_metadata_elements(oeb.metadata)) # The number of elements may vary with format conversion. elements.extend(get_toc_elements(oeb.toc.nodes, [])) elements.extend(get_page_elements(oeb.manifest.items)) original_group = element_handler.prepare_original(elements) cache.save(original_group) paragraphs = cache.all_paragraphs() translation.handle(paragraphs) element_handler.add_translations(paragraphs) log(sep()) log(_('Start to convert ebook format...')) log(sep()) _convert(oeb, output_path, input_plugin, opts, log) plumber.output_plugin.convert = MethodType(convert, plumber.output_plugin) plumber.run() def convert_srt(input_path, output_path, translation, element_handler, cache, debug_info, notification): log.info('Translating subtitles content... 
(this will take a while)') log.info(debug_info) elements = get_srt_elements(input_path) original_group = element_handler.prepare_original(elements) cache.save(original_group) paragraphs = cache.all_paragraphs() translation.set_progress(notification) translation.handle(paragraphs) element_handler.add_translations(paragraphs) log(sep()) log(_('Starting to output subtitles file...')) log(sep()) with open(output_path, 'w') as file: file.write('\n\n'.join([e.get_translation() for e in elements])) log(_('The translation of the subtitles file was completed.')) def convert_pgn(input_path, output_path, translation, element_handler, cache, debug_info, notification): log.info('Translating PGN content... (this may be take a while)') log.info(debug_info) elements = get_pgn_elements(input_path) original_group = element_handler.prepare_original(elements) cache.save(original_group) paragraphs = cache.all_paragraphs() translation.set_progress(notification) translation.handle(paragraphs) element_handler.add_translations(paragraphs) log(sep()) log(_('Starting to output PGN file...')) log(sep()) pgn_content = open_file(input_path) for element in elements: pgn_content = pgn_content.replace( element.get_raw(), element.get_translation(), 1) with open(output_path, 'w') as file: file.write(pgn_content) log(_('The translation of the PGN file was completed.')) class EbookTranslator(InterfaceActionBase): name = _z('Ebook Translator') title = _(name) supported_platforms = ['windows', 'osx', 'linux'] identifier = 'ebook-translator' author = 'bookfere.com' version = (2, 3, 2) __version__ = 'v' + '.'.join(map(str, version)) description = _('A Calibre plugin to translate ebook into a specified ' 'language (optionally keeping the original content).') # see: https://www.mobileread.com/forums/showthread.php?t=242223 minimum_calibre_version = (2, 0, 0) actual_plugin = 'calibre_plugins.ebook_translator.ui:EbookTranslatorGui' # The DEBUG constant cannot be shared with new worker processes. 
# To ensure that it is available, add it to the OS environment. DEBUG and os.environ.update(CALIBRE_DEBUG=str(DEBUG)) def is_customizable(self): return False log = Log() def sep(char='═', count=38): return char * count def uid(*args): md5 = hashlib.md5() for arg in args: md5.update(arg if isinstance(arg, bytes) else arg.encode('utf-8')) return md5.hexdigest() class TranslationCache: __version__ = '20230608' fresh = True dir_path = cache_path() cache_path = os.path.join(dir_path, 'cache') temp_path = os.path.join(dir_path, 'temp') def __init__(self, identity, persistence=True): """:persistence: We use two types of cache, one is used temporarily for communication, and another one is used to cache translations, which avoids the need for retranslation. """ self.persistence = persistence self.file_path = self._path(identity) # An interruption may occur, resulting in the cache size being less # than 50,000 bytes. Therefore, we need to resave it again. if os.path.exists(self.file_path) and self.size() > 50000: self.fresh = False self.cache_only = False self.connection = sqlite3.connect( self.file_path, check_same_thread=False) self.cursor = self.connection.cursor() self.cursor.execute( 'CREATE TABLE IF NOT EXISTS cache(' 'id UNIQUE, md5 UNIQUE, raw, original, ignored, ' 'attributes DEFAULT NULL, page DEFAULT NULL,' 'translation DEFAULT NULL, engine_name DEFAULT NULL, ' 'target_lang DEFAULT NULL)') self.cursor.execute( 'CREATE TABLE IF NOT EXISTS info(key UNIQUE, value)') def move(cls, dest): for dir_path in glob(os.path.join(cls.dir_path, '*')): os.path.exists(dir_path) and shutil.move(dir_path, dest) cls.dir_path = dest cls.cache_path = os.path.join(dest, 'cache') cls.temp_path = os.path.join(dest, 'temp') def count(cls): total = 0 for file_path in glob(os.path.join(cls.cache_path, '*.db')): total += os.path.getsize(file_path) return size_by_unit(total, 'MB') def remove(cls, filename): file_path = os.path.join(cls.cache_path, filename) os.path.exists(file_path) and 
os.remove(file_path) def clean(cls): for filename in os.listdir(cls.cache_path): cls.remove(filename) def get_list(cls): names = [] for file_path in glob(os.path.join(cls.cache_path, '*.db')): name = os.path.basename(file_path) cache = cls(os.path.splitext(name)[0]) title = cache.get_info('title') or '[%s]' % _('Unknown') engine = cache.get_info('engine_name') lang = cache.get_info('target_lang') merge = int(cache.get_info('merge_length') or 0) or 'N/A' size = size_by_unit(os.path.getsize(file_path), 'MB') names.append((title, engine, lang, merge, size, name)) return names def _path(self, name): if not os.path.exists(self.dir_path): os.mkdir(self.dir_path) cache_dir = self.cache_path if not self.is_persistence(): cache_dir = self.temp_path if not os.path.exists(cache_dir): os.mkdir(cache_dir) return os.path.join(cache_dir, '%s.db' % name) def size(self): return os.path.getsize(self.file_path) def is_fresh(self): return self.fresh def is_persistence(self): return self.persistence def set_cache_only(self, cache_only): self.cache_only = cache_only def set_info(self, key, value): self.cursor.execute( 'INSERT INTO info VALUES (?1, ?2) ' 'ON CONFLICT (KEY) DO UPDATE SET value=excluded.value', (key, value)) self.connection.commit() def get_info(self, key): resource = self.cursor.execute( 'SELECT value FROM info WHERE key=?', (key,)) result = resource.fetchone() return result[0] if result else None def save(self, original_group): if self.is_fresh(): for original_unit in original_group: self.add(*original_unit) self.connection.commit() def all(self): resource = self.cursor.execute('SELECT * FROM cache WHERE NOT ignored') return resource.fetchall() def get(self, ids): placeholders = ', '.join(['?'] * len(ids)) resource = self.cursor.execute( 'SELECT * FROM cache WHERE id IN (%s) ' % placeholders, tuple(ids)) return resource.fetchall() def first(self, **kwargs): if kwargs: data = ' AND '.join(['%s=?' 
% column for column in kwargs]) resource = self.cursor.execute( 'SELECT * FROM cache WHERE %s' % data, tuple(kwargs.values())) else: resource = self.cursor.execute('SELECT * FROM cache LIMIT 1') return resource.fetchone() def add(self, id, md5, raw, original, ignored=False, attributes=None, page=None): self.cursor.execute( 'INSERT INTO cache VALUES (' '?1, ?2, ?3, ?4, ?5, ?6, ?7, NULL, NULL, NULL' ') ON CONFLICT DO NOTHING', (id, md5, raw, original, ignored, attributes, page)) # self.connection.commit() def update(self, ids, **kwargs): ids = ids if isinstance(ids, list) else [ids] data = ', '.join(['%s=?' % column for column in kwargs.keys()]) placeholders = ', '.join(['?'] * len(ids)) self.cursor.execute( 'UPDATE cache SET %s WHERE id IN (%s)' % (data, placeholders), tuple(list(kwargs.values()) + ids)) self.connection.commit() def ignore(self, ids): self.update(ids, ignored=True) def delete(self, ids): placeholders = ', '.join(['?'] * len(ids)) self.cursor.execute( 'DELETE FROM cache WHERE id IN (%s)' % placeholders, tuple(ids)) self.connection.commit() def close(self): self.cursor.close() self.connection.commit() self.connection.close() def destroy(self): self.close() os.path.exists(self.file_path) and os.remove(self.file_path) def done(self): self.persistence or self.destroy() def paragraph(self, id=None): return Paragraph(*self.first(id=id)) def get_paragraphs(self, ids): return [Paragraph(*item) for item in self.get(ids)] def all_paragraphs(self): paragraphs = [] for item in self.all(): paragraph = Paragraph(*item) if self.cache_only and not paragraph.translation: continue paragraphs.append(paragraph) return paragraphs def update_paragraph(self, paragraph): self.update( paragraph.id, translation=paragraph.translation, engine_name=paragraph.engine_name, target_lang=paragraph.target_lang) def delete_paragraphs(self, paragraphs): self.delete([paragraph.id for paragraph in paragraphs]) def ignore_paragraphs(self, paragraphs): self.ignore([paragraph.id for 
paragraph in paragraphs]) def get_cache(uid): return TranslationCache(uid, get_config().get('cache_enabled')) class Extraction: __version__ = '20230608' def __init__(self, pages, rule_mode, filter_scope, filter_rules, element_rules): self.pages = pages self.rule_mode = rule_mode self.filter_scope = filter_scope self.filter_rules = filter_rules self.element_rules = element_rules self.filter_patterns = [] self.element_patterns = [] self.load_filter_patterns() self.load_element_patterns() def load_filter_patterns(self): default_rules = [ r'^[-\d\s\.\'\\"‘’“”,=~!@#$%^&º*|<>?/`—…+:_(){}[\]]+$'] patterns = [re.compile(rule) for rule in default_rules] for rule in self.filter_rules: if self.rule_mode == 'normal': rule = re.compile(re.escape(rule), re.I) elif self.rule_mode == 'case': rule = re.compile(re.escape(rule)) else: rule = re.compile(rule) patterns.append(rule) self.filter_patterns = patterns def load_element_patterns(self): rules = ['pre', 'code'] rules.extend(self.element_rules) patterns = [] for selector in rules: rule = css(selector) rule and patterns.append(rule) self.element_patterns = patterns def get_sorted_pages(self): pages = [] pattern = re.compile(r'\.(xhtml|html|htm|xml|xht)$') for page in self.pages: if isinstance(page.data, etree._Element) \ and pattern.search(page.href): pages.append(page) return sorted(pages, key=lambda page: sorted_mixed_keys(page.href)) def get_elements(self): elements = [] for page in self.get_sorted_pages(): body = page.data.find('./x:body', namespaces=ns) elements.extend(self.extract_elements(page.id, body, [])) return filter(self.filter_content, elements) def need_ignore(self, element): for pattern in self.element_patterns: if element.xpath(pattern, namespaces=ns): return True return False def extract_elements(self, page_id, root, elements=[]): priority_elements = ['p', 'pre', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6'] for element in root.findall('./*'): # if self.need_ignore(element): # continue element_has_content = False if 
element.text is not None and trim(element.text) != '': element_has_content = True else: children = element.findall('./*') if children and get_name(element) in priority_elements: element_has_content = True else: for child in children: if child.tail is not None and trim(child.tail) != '': element_has_content = True break if element_has_content: page_element = PageElement(element, page_id) page_element.set_ignored(self.need_ignore(element)) elements.append(page_element) else: self.extract_elements(page_id, element, elements) # Return root if all children have no content page_element = PageElement(root, page_id) page_element.set_ignored(self.need_ignore(root)) return elements if elements else [page_element] def filter_content(self, element): # Ignore the element contains empty content content = element.get_text() if content == '': return False for entity in ('&lt;', '&gt;'): content = content.replace(entity, '') for pattern in self.filter_patterns: if pattern.search(content): element.set_ignored(True) # Filter HTML according to the rules if self.filter_scope == 'html': markup = element.get_raw() for pattern in self.filter_patterns: if pattern.search(markup): element.set_ignored(True) return True def get_element_handler(placeholder, separator): config = get_config() position_alias = {'before': 'above', 'after': 'below'} position = config.get('translation_position', 'below') position = position_alias.get(position) or position handler = ElementHandler(placeholder, separator, position) if config.get('merge_enabled'): handler = ElementHandlerMerge( placeholder, separator, position, config.get('merge_length')) column_gap = config.get('column_gap') gap_type = column_gap.get('_type') if gap_type is not None and gap_type in column_gap.keys(): handler.set_column_gap((gap_type, column_gap.get(gap_type))) handler.set_original_color(config.get('original_color')) handler.set_translation_color(config.get('translation_color')) return handler def get_translator(engine_class=None): 
config = get_config() engine_class = engine_class or get_engine_class() translator = engine_class() translator.set_search_paths(config.get('search_paths')) if config.get('proxy_enabled'): translator.set_proxy(config.get('proxy_setting')) translator.set_merge_enabled(config.get('merge_enabled')) return translator def get_translation(translator, log=None): config = get_config() glossary = Glossary(translator.placeholder) if config.get('glossary_enabled'): glossary.load_from_file(config.get('glossary_path')) translation = Translation(translator, glossary) if get_config().get('log_translation'): translation.set_logging(log) return translation The provided code snippet includes necessary dependencies for implementing the `convert_item` function. Write a Python function `def convert_item(ebook_title, input_path, output_path, source_lang, target_lang, cache_only, is_batch, format, notification)` to solve the following problem: The following parameters need attention: :cache_only: Only use the translation which exists in the cache. :notification: It is automatically added by arbitrary_n. Here is the function: def convert_item(ebook_title, input_path, output_path, source_lang, target_lang, cache_only, is_batch, format, notification): """The following parameters need attention: :cache_only: Only use the translation which exists in the cache. :notification: It is automatically added by arbitrary_n. 
""" translator = get_translator() translator.set_source_lang(source_lang) translator.set_target_lang(target_lang) element_handler = get_element_handler( translator.placeholder, translator.separator) element_handler.set_translation_lang( translator.get_iso639_target_code(target_lang)) merge_length = str(element_handler.get_merge_length()) cache_id = uid( input_path + translator.name + target_lang + merge_length + TranslationCache.__version__ + Extraction.__version__) cache = get_cache(cache_id) cache.set_cache_only(cache_only) cache.set_info('title', ebook_title) cache.set_info('engine_name', translator.name) cache.set_info('target_lang', target_lang) cache.set_info('merge_length', merge_length) cache.set_info('plugin_version', EbookTranslator.__version__) cache.set_info('calibre_version', __version__) translation = get_translation( translator, lambda text, error=False: log.info(text)) translation.set_batch(is_batch) translation.set_callback(cache.update_paragraph) debug_info = '{0}\n| Diagnosis Information\n{0}'.format(sep()) debug_info += '\n| Calibre Version: %s\n' % __version__ debug_info += '| Plugin Version: %s\n' % EbookTranslator.__version__ debug_info += '| Translation Engine: %s\n' % translator.name debug_info += '| Source Language: %s\n' % source_lang debug_info += '| Target Language: %s\n' % target_lang debug_info += '| Cache Enabled: %s\n' % cache.is_persistence() debug_info += '| Merging Length: %s\n' % element_handler.merge_length debug_info += '| Concurrent requests: %s\n' % translator.concurrency_limit debug_info += '| Request Interval: %s\n' % translator.request_interval debug_info += '| Request Attempt: %s\n' % translator.request_attempt debug_info += '| Request Timeout: %s\n' % translator.request_timeout debug_info += '| Input Path: %s\n' % input_path debug_info += '| Output Path: %s' % output_path convertors = {'srt': convert_srt, 'pgn': convert_pgn} convertor = convertors.get(format) or convert_book convertor(input_path, output_path, 
translation, element_handler, cache, debug_info, notification) cache.done()
The following parameters need attention: :cache_only: Only use translations that already exist in the cache. :notification: It is automatically added by arbitrary_n.
8,814
from calibre.utils.config import JSONConfig from .. import EbookTranslator from ..engines import ( GoogleFreeTranslate, ChatgptTranslate, AzureChatgptTranslate) def get_config(): preferences = JSONConfig('plugins/ebook_translator') preferences.defaults = defaults return Configuration(preferences) def ver200_upgrade(config): """Upgrade to 2.0.0""" if config.get('engine_preferences'): return engine_preferences = {} def get_engine_preference(engine_name): if engine_name not in engine_preferences: engine_preferences.update({engine_name: {}}) return engine_preferences.get(engine_name) chatgpt_prompt = config.get('chatgpt_prompt') if chatgpt_prompt is not None: if len(chatgpt_prompt) > 0: preference = get_engine_preference(ChatgptTranslate.name) prompts = config.get('chatgpt_prompt') if 'lang' in chatgpt_prompt: preference.update(prompt=prompts.get('lang')) config.delete('chatgpt_prompt') languages = config.get('preferred_language') if languages is not None: for engine_name, language in languages.items(): preference = get_engine_preference(engine_name) preference.update(target_lang=language) config.delete('preferred_language') api_keys = config.get('api_key') if api_keys is not None: for engine_name, api_key in api_keys.items(): preference = get_engine_preference(engine_name) preference.update(api_keys=[api_key]) config.delete('api_key') if len(engine_preferences) > 0: config.update(engine_preferences=engine_preferences) config.commit() def ver203_upgrade(config): """Upgrade to 2.0.3""" engine_config = config.get('engine_preferences') azure_chatgpt = engine_config.get('ChatGPT(Azure)') if azure_chatgpt and 'model' in azure_chatgpt: model = azure_chatgpt.get('model') if model not in AzureChatgptTranslate.models: del azure_chatgpt['model'] if len(engine_config) < 1: engine_config.update({GoogleFreeTranslate.name: {}}) old_concurrency_limit = config.get('concurrency_limit') old_request_attempt = config.get('request_attempt') old_request_interval = 
config.get('request_interval') old_request_timeout = config.get('request_timeout') for data in engine_config.values(): if old_concurrency_limit is not None and old_concurrency_limit != 1: data.update(concurrency_limit=old_concurrency_limit) if old_request_attempt is not None and old_request_attempt != 3: data.update(request_attempt=old_request_attempt) if old_request_interval is not None and old_request_interval != 5: data.update(request_interval=old_request_interval) if old_request_timeout is not None and old_request_timeout != 10: data.update(request_timeout=old_request_timeout) config.delete('concurrency_limit') config.delete('request_attempt') config.delete('request_interval') config.delete('request_timeout') config.commit() class EbookTranslator(InterfaceActionBase): name = _z('Ebook Translator') title = _(name) supported_platforms = ['windows', 'osx', 'linux'] identifier = 'ebook-translator' author = 'bookfere.com' version = (2, 3, 2) __version__ = 'v' + '.'.join(map(str, version)) description = _('A Calibre plugin to translate ebook into a specified ' 'language (optionally keeping the original content).') # see: https://www.mobileread.com/forums/showthread.php?t=242223 minimum_calibre_version = (2, 0, 0) actual_plugin = 'calibre_plugins.ebook_translator.ui:EbookTranslatorGui' # The DEBUG constant cannot be shared with new worker processes. # To ensure that it is available, add it to the OS environment. DEBUG and os.environ.update(CALIBRE_DEBUG=str(DEBUG)) def is_customizable(self): return False def upgrade_config(): config = get_config() version = EbookTranslator.version version >= (2, 0, 0) and ver200_upgrade(config) version >= (2, 0, 3) and ver203_upgrade(config)
null
8,815
import re import json import copy from lxml import etree from calibre import prepare_string_for_xml as xml_escape from .utils import ns, css, uid, trim, sorted_mixed_keys, open_file from .config import get_config def trim(text): def get_string(element, remove_ns=False): element.text = element.text or '' # prevent auto-closing empty elements markup = trim(etree.tostring( element, encoding='utf-8', with_tail=False).decode('utf-8')) return re.sub(r'\sxmlns([^"]+"){2}', '', markup) if remove_ns else markup
null
8,816
import re import json import copy from lxml import etree from calibre import prepare_string_for_xml as xml_escape from .utils import ns, css, uid, trim, sorted_mixed_keys, open_file from .config import get_config def get_name(element): return etree.QName(element).localname
null
8,817
import os import re import json import shutil import sqlite3 import os.path import tempfile from glob import glob from .utils import size_by_unit from .config import get_config def default_cache_path(): path = os.path.join( tempfile.gettempdir(), 'com.bookfere.Calibre.EbookTranslator') not os.path.exists(path) and os.mkdir(path) return path def get_config(): preferences = JSONConfig('plugins/ebook_translator') preferences.defaults = defaults return Configuration(preferences) def cache_path(): config = get_config() path = config.get('cache_path') if path and os.path.exists(path): return path return default_cache_path()
null
8,818
import sys import re import operator The provided code snippet includes necessary dependencies for implementing the `ascii_lower` function. Write a Python function `def ascii_lower(string)` to solve the following problem: Lower-case, but only in the ASCII range. Here is the function: def ascii_lower(string): """Lower-case, but only in the ASCII range.""" return string.encode('utf8').lower().decode('utf8')
Lower-case, but only in the ASCII range.
8,819
import sys import re import operator class Selector(object): """ Represents a parsed selector. :meth:`~GenericTranslator.selector_to_xpath` accepts this object, but ignores :attr:`pseudo_element`. It is the user’s responsibility to account for pseudo-elements and reject selectors with unknown or unsupported pseudo-elements. """ def __init__(self, tree, pseudo_element=None): self.parsed_tree = tree if pseudo_element is not None and not isinstance( pseudo_element, FunctionalPseudoElement): pseudo_element = ascii_lower(pseudo_element) #: A :class:`FunctionalPseudoElement`, #: or the identifier for the pseudo-element as a string, # or ``None``. #: #: +-------------------------+----------------+--------------------------------+ #: | | Selector | Pseudo-element | #: +=========================+================+================================+ #: | CSS3 syntax | ``a::before`` | ``'before'`` | #: +-------------------------+----------------+--------------------------------+ #: | Older syntax | ``a:before`` | ``'before'`` | #: +-------------------------+----------------+--------------------------------+ #: | From the Lists3_ draft, | ``li::marker`` | ``'marker'`` | #: | not in Selectors3 | | | #: +-------------------------+----------------+--------------------------------+ #: | Invalid pseudo-class | ``li:marker`` | ``None`` | #: +-------------------------+----------------+--------------------------------+ #: | Functional | ``a::foo(2)`` | ``FunctionalPseudoElement(…)`` | #: +-------------------------+----------------+--------------------------------+ #: #: .. 
_Lists3: http://www.w3.org/TR/2011/WD-css3-lists-20110524/#marker-pseudoelement self.pseudo_element = pseudo_element def __repr__(self): if isinstance(self.pseudo_element, FunctionalPseudoElement): pseudo_element = repr(self.pseudo_element) elif self.pseudo_element: pseudo_element = '::%s' % self.pseudo_element else: pseudo_element = '' return '%s[%r%s]' % ( self.__class__.__name__, self.parsed_tree, pseudo_element) def canonical(self): """Return a CSS representation for this selector (a string) """ if isinstance(self.pseudo_element, FunctionalPseudoElement): pseudo_element = '::%s' % self.pseudo_element.canonical() elif self.pseudo_element: pseudo_element = '::%s' % self.pseudo_element else: pseudo_element = '' res = '%s%s' % (self.parsed_tree.canonical(), pseudo_element) if len(res) > 1: res = res.lstrip('*') return res def specificity(self): """Return the specificity_ of this selector as a tuple of 3 integers. .. _specificity: http://www.w3.org/TR/selectors/#specificity """ a, b, c = self.parsed_tree.specificity() if self.pseudo_element: c += 1 return a, b, c class Class(object): """ Represents selector.class_name """ def __init__(self, selector, class_name): self.selector = selector self.class_name = class_name def __repr__(self): return '%s[%r.%s]' % ( self.__class__.__name__, self.selector, self.class_name) def canonical(self): return '%s.%s' % (self.selector.canonical(), self.class_name) def specificity(self): a, b, c = self.selector.specificity() b += 1 return a, b, c class Element(object): """ Represents namespace|element `None` is for the universal selector '*' """ def __init__(self, namespace=None, element=None): self.namespace = namespace self.element = element def __repr__(self): return '%s[%s]' % (self.__class__.__name__, self.canonical()) def canonical(self): element = self.element or '*' if self.namespace: element = '%s|%s' % (self.namespace, element) return element def specificity(self): if self.element: return 0, 0, 1 else: return 0, 0, 0 class 
Hash(object): """ Represents selector#id """ def __init__(self, selector, id): self.selector = selector self.id = id def __repr__(self): return '%s[%r#%s]' % ( self.__class__.__name__, self.selector, self.id) def canonical(self): return '%s#%s' % (self.selector.canonical(), self.id) def specificity(self): a, b, c = self.selector.specificity() a += 1 return a, b, c _el_re = re.compile(r'^[ \t\r\n\f]*([a-zA-Z]+)[ \t\r\n\f]*$') _id_re = re.compile(r'^[ \t\r\n\f]*([a-zA-Z]*)#([a-zA-Z0-9_-]+)[ \t\r\n\f]*$') _class_re = re.compile( r'^[ \t\r\n\f]*([a-zA-Z]*)\.([a-zA-Z][a-zA-Z0-9_-]*)[ \t\r\n\f]*$') def parse_selector_group(stream): stream.skip_whitespace() while 1: yield Selector(*parse_selector(stream)) if stream.peek() == ('DELIM', ','): stream.next() stream.skip_whitespace() else: break def tokenize(s): pos = 0 len_s = len(s) while pos < len_s: match = _match_whitespace(s, pos=pos) if match: yield Token('S', ' ', pos) pos = match.end() continue match = _match_ident(s, pos=pos) if match: value = _sub_simple_escape(_replace_simple, _sub_unicode_escape(_replace_unicode, match.group())) yield Token('IDENT', value, pos) pos = match.end() continue match = _match_hash(s, pos=pos) if match: value = _sub_simple_escape(_replace_simple, _sub_unicode_escape(_replace_unicode, match.group()[1:])) yield Token('HASH', value, pos) pos = match.end() continue quote = s[pos] if quote in _match_string_by_quote: match = _match_string_by_quote[quote](s, pos=pos + 1) assert match, 'Should have found at least an empty match' end_pos = match.end() if end_pos == len_s: raise SelectorSyntaxError('Unclosed string at %s' % pos) if s[end_pos] != quote: raise SelectorSyntaxError('Invalid string at %s' % pos) value = _sub_simple_escape(_replace_simple, _sub_unicode_escape(_replace_unicode, _sub_newline_escape('', match.group()))) yield Token('STRING', value, pos) pos = end_pos + 1 continue match = _match_number(s, pos=pos) if match: value = match.group() yield Token('NUMBER', value, pos) pos = 
match.end() continue pos2 = pos + 2 if s[pos:pos2] == '/*': pos = s.find('*/', pos2) if pos == -1: pos = len_s else: pos += 2 continue yield Token('DELIM', s[pos], pos) pos += 1 assert pos == len_s yield EOFToken(pos) class TokenStream(object): def __init__(self, tokens, source=None): self.used = [] self.tokens = iter(tokens) self.source = source self.peeked = None self._peeking = False try: self.next_token = self.tokens.next except AttributeError: # Python 3 self.next_token = self.tokens.__next__ def next(self): if self._peeking: self._peeking = False self.used.append(self.peeked) return self.peeked else: next = self.next_token() self.used.append(next) return next def peek(self): if not self._peeking: self.peeked = self.next_token() self._peeking = True return self.peeked def next_ident(self): next = self.next() if next.type != 'IDENT': raise SelectorSyntaxError('Expected ident, got %s' % (next,)) return next.value def next_ident_or_star(self): next = self.next() if next.type == 'IDENT': return next.value elif next == ('DELIM', '*'): return None else: raise SelectorSyntaxError( "Expected ident or '*', got %s" % (next,)) def skip_whitespace(self): peek = self.peek() if peek.type == 'S': self.next() The provided code snippet includes necessary dependencies for implementing the `parse` function. Write a Python function `def parse(css)` to solve the following problem: Parse a CSS *group of selectors*. If you don't care about pseudo-elements or selector specificity, you can skip this and use :meth:`~GenericTranslator.css_to_xpath`. :param css: A *group of selectors* as an Unicode string. :raises: :class:`SelectorSyntaxError` on invalid selectors. :returns: A list of parsed :class:`Selector` objects, one for each selector in the comma-separated group. Here is the function: def parse(css): """Parse a CSS *group of selectors*. If you don't care about pseudo-elements or selector specificity, you can skip this and use :meth:`~GenericTranslator.css_to_xpath`. 
:param css: A *group of selectors* as an Unicode string. :raises: :class:`SelectorSyntaxError` on invalid selectors. :returns: A list of parsed :class:`Selector` objects, one for each selector in the comma-separated group. """ # Fast path for simple cases match = _el_re.match(css) if match: return [Selector(Element(element=match.group(1)))] match = _id_re.match(css) if match is not None: return [Selector(Hash(Element(element=match.group(1) or None), match.group(2)))] match = _class_re.match(css) if match is not None: return [Selector(Class(Element(element=match.group(1) or None), match.group(2)))] stream = TokenStream(tokenize(css)) stream.source = css return list(parse_selector_group(stream))
Parse a CSS *group of selectors*. If you don't care about pseudo-elements or selector specificity, you can skip this and use :meth:`~GenericTranslator.css_to_xpath`. :param css: A *group of selectors* as an Unicode string. :raises: :class:`SelectorSyntaxError` on invalid selectors. :returns: A list of parsed :class:`Selector` objects, one for each selector in the comma-separated group.
8,820
import sys import re import operator The provided code snippet includes necessary dependencies for implementing the `parse_series` function. Write a Python function `def parse_series(tokens)` to solve the following problem: Parses the arguments for :nth-child() and friends. :raises: A list of tokens :returns: :``(a, b)`` Here is the function: def parse_series(tokens): """ Parses the arguments for :nth-child() and friends. :raises: A list of tokens :returns: :``(a, b)`` """ for token in tokens: if token.type == 'STRING': raise ValueError('String tokens not allowed in series.') s = ''.join(token.value for token in tokens).strip() if s == 'odd': return 2, 1 elif s == 'even': return 2, 0 elif s == 'n': return 1, 0 if 'n' not in s: # Just b return 0, int(s) a, b = s.split('n', 1) if not a: a = 1 elif a == '-' or a == '+': a = int(a+'1') else: a = int(a) if not b: b = 0 else: b = int(b) return a, b
Parses the arguments for :nth-child() and friends. :raises: A list of tokens :returns: :``(a, b)``
8,821
import sys import re import operator class TokenMacros: unicode_escape = r'\\([0-9a-f]{1,6})(?:\r\n|[ \n\r\t\f])?' escape = unicode_escape + r'|\\[^\n\r\f0-9a-f]' string_escape = r'\\(?:\n|\r\n|\r|\f)|' + escape nonascii = r'[^\0-\177]' nmchar = '[_a-z0-9-]|%s|%s' % (escape, nonascii) nmstart = '[_a-z]|%s|%s' % (escape, nonascii) def _compile(pattern): return re.compile(pattern % vars(TokenMacros), re.IGNORECASE).match
null
8,822
import sys import re import operator _sub_simple_escape = re.compile(r'\\(.)').sub _sub_unicode_escape = re.compile(TokenMacros.unicode_escape, re.I).sub _replace_simple = operator.methodcaller('group', 1) def _replace_unicode(match): codepoint = int(match.group(1), 16) if codepoint > sys.maxunicode: codepoint = 0xFFFD return _unichr(codepoint) def unescape_ident(value): value = _sub_unicode_escape(_replace_unicode, value) value = _sub_simple_escape(_replace_simple, value) return value
null
8,823
import sys import re from .parser import parse, parse_series, SelectorError def _unicode_safe_getattr(obj, name, default=None): # getattr() with a non-ASCII name fails on Python 2.x name = name.encode('ascii', 'replace').decode('ascii') return getattr(obj, name, default)
null
8,824
import json from lxml import etree from ..lib.utils import is_str from . import builtin_engines from .base import Base def create_engine_template(name): return """{ "name": "%s", "languages": { "source": { "Source Language": "code" }, "target": { "Target Language": "code" } }, "request": { "url": "https://example.api", "method": "POST", "headers": { "Content-Type": "application/json" }, "data": { "source": "<source>", "target": "<target>", "text": "<text>" } }, "response": "response" }""" % name
null
8,825
import json from lxml import etree from ..lib.utils import is_str from . import builtin_engines from .base import Base def is_str(data): return type(data).__name__ in ('str', 'unicode') def load_engine_data(text): # json format try: json_data = json.loads(text) except Exception: return (False, _('Engine data must be in valid JSON format.')) # validate data if not isinstance(json_data, dict): return (False, _('Invalid engine data.')) # engine name name = json_data.get('name') if not name: return (False, _('Engine name is required.')) if name.lower() in [engine.name.lower() for engine in builtin_engines]: return (False, _( 'Engine name must be different from builtin engine name.')) # language codes languages = json_data.get('languages') if not languages: return (False, _('Language codes are required.')) has_source = 'source' in languages has_target = 'target' in languages if (has_source and not has_target) or (has_target and not has_source): return (False, _('Source and target must be added in pair.')) # request info request = json_data.get('request') if not request: return (False, _('Request information is required.')) if 'url' not in request: return (False, _('API URL is required.')) # request data data = request.get('data') if data is not None and '<text>' not in str(data): return (False, _('Placeholder <text> is required.')) # request headers headers = request.get('headers') or {} if headers and not isinstance(headers, dict): return (False, _('Request headers must be an JSON object.')) has_content_type = 'content-type' in [i.lower() for i in headers] if is_str(data) and not has_content_type: return (False, _('A appropriate Content-Type in headers is required.')) # response parser response = json_data.get('response') if not response or 'response' not in response: return (False, _('Expression to parse response is required.')) return (True, json_data)
null
8,826
from calibre.utils.localization import get_lang from calibre_plugins.ebook_translator import EbookTranslator def layout_info(): widget = QWidget() widget.setStyleSheet('color:grey') layout = QHBoxLayout(widget) layout.setContentsMargins(0, 0, 0, 0) app_author = EbookTranslator.author site = QLabel( '<span style="color:crimson;">♥</span> by <a href="https://{0}">{0}</a>' .format(app_author)) site.setOpenExternalLinks(True) layout.addWidget(site) layout.addStretch(1) github = 'https://github.com/bookfere/Ebook-Translator-Calibre-Plugin' if 'zh' in get_lang(): feedback = 'https://{}/post/1057.html'.format(app_author) donate = 'https://{}/donate'.format(app_author) else: feedback = '{}/issues'.format(github) donate = 'https://www.paypal.com/paypalme/bookfere' link = QLabel(( '<a href="{0}">GitHub</a> | <a href="{1}">{3}</a>' ' | <a href="{2}">{4}</a>') .format(github, feedback, donate, _('Feedback'), _('Donate'))) link.setOpenExternalLinks(True) layout.addWidget(link) return widget
null
8,827
import argparse import os import subprocess import sys from pathlib import Path from typing import List from setuptools import find_packages, setup ROOT_DIR = Path(__file__).parent.resolve() def _get_version(): try: cmd = ["git", "rev-parse", "HEAD"] sha = subprocess.check_output(cmd, cwd=str(ROOT_DIR)).decode("ascii").strip() except Exception: sha = None if "BUILD_VERSION" in os.environ: version = os.environ["BUILD_VERSION"] else: with open(os.path.join(ROOT_DIR, "version.txt"), "r") as f: version = f.readline().strip() if sha is not None and "OFFICIAL_RELEASE" not in os.environ: version += "+" + sha[:7] if sha is None: sha = "Unknown" return version, sha
null
8,828
import argparse import os import subprocess import sys from pathlib import Path from typing import List from setuptools import find_packages, setup ROOT_DIR = Path(__file__).parent.resolve() def _export_version(version, sha): version_path = ROOT_DIR / "torchrec" / "version.py" with open(version_path, "w") as fileobj: fileobj.write("__version__ = '{}'\n".format(version)) fileobj.write("git_version = {}\n".format(repr(sha)))
null
8,829
import argparse import os import subprocess import sys from pathlib import Path from typing import List from setuptools import find_packages, setup def parse_args(argv: List[str]) -> argparse.Namespace: parser = argparse.ArgumentParser(description="torchrec setup") return parser.parse_known_args(argv)
null
8,830
import queue import threading from typing import Dict, List, Union import torch from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL from torchrec import EmbeddingBagConfig, EmbeddingConfig from torchrec.distributed.model_parallel import DistributedModelParallel from torchrec.sparse.jagged_tensor import KeyedJaggedTensor from .id_transformer_group import IDTransformerGroup def transform_loop(dataloader, transform_fn, out_queue, done_event): # This setting is thread local, and prevents the copy in pin_memory from # consuming all CPU cores. torch.set_num_threads(1) for data in dataloader: if done_event.is_set(): break transformed_data = transform_fn(data) while not done_event.is_set(): try: out_queue.put(transformed_data, timeout=MP_STATUS_CHECK_INTERVAL) break except queue.Full: continue # save memory del transformed_data if not done_event.is_set(): done_event.set()
null
8,831
import queue import threading from typing import Dict, List, Union import torch from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL from torchrec import EmbeddingBagConfig, EmbeddingConfig from torchrec.distributed.model_parallel import DistributedModelParallel from torchrec.sparse.jagged_tensor import KeyedJaggedTensor from .id_transformer_group import IDTransformerGroup class DataLoader: def __init__( self, id_transformer_group: IDTransformerGroup, dataloader, *, data_info: Dict[int, str] = None, paths: List[str] = None, num_prefetch=0, ): self._id_transformer_group = id_transformer_group if data_info is not None: for _, path in data_info.items(): if path not in self._id_transformer_group: raise ValueError( f"invalid path `{path}` data_info. No id transformer for this path." ) else: self._paths = paths self._data_info = data_info self._data_queue = queue.Queue(maxsize=num_prefetch) self._done_event = threading.Event() self._dataloader = dataloader self._num_prefetch = num_prefetch def _transform_fn(self, data): """ transform data with `data_info` """ if self._data_info is None: data_info = {} path_idx = 0 for i in range(len(data)): if isinstance(data[i], KeyedJaggedTensor): if path_idx >= len(self._paths): raise ValueError( "Has more KJT in a data sample than the number of modules, " "could not infer data_info, please set data_info manually" ) data_info[i] = self._paths[path_idx] path_idx += 1 else: data_info = self._data_info global_kjts = {path: data[idx] for idx, path in data_info.items()} cache_kjts, fetch_handles = self._id_transformer_group.transform(global_kjts) data = list(data) for idx, path in data_info.items(): data[idx] = cache_kjts[path] return tuple(data), fetch_handles def __iter__(self): return DataLoaderIter( self._dataloader, self._transform_fn, num_prefetch=self._num_prefetch ) def __len__(self): return len(self._dataloader) class DistributedModelParallel(nn.Module, FusedOptimizerModule): """ Entry point to model parallelism. 
Args: module (nn.Module): module to wrap. env (Optional[ShardingEnv]): sharding environment that has the process group. device (Optional[torch.device]): compute device, defaults to cpu. plan (Optional[ShardingPlan]): plan to use when sharding, defaults to `EmbeddingShardingPlanner.collective_plan()`. sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available to shard with, defaults to `EmbeddingBagCollectionSharder()`. init_data_parallel (bool): data-parallel modules can be lazy, i.e. they delay parameter initialization until the first forward pass. Pass `True` to delay initialization of data parallel modules. Do first forward pass and then call DistributedModelParallel.init_data_parallel(). init_parameters (bool): initialize parameters for modules still on meta device. data_parallel_wrapper (Optional[DataParallelWrapper]): custom wrapper for data parallel modules. Example:: def init_weights(m): if isinstance(m, nn.Linear): m.weight.fill_(1.0) elif isinstance(m, EmbeddingBagCollection): for param in m.parameters(): init.kaiming_normal_(param) m = MyModel(device='meta') m = DistributedModelParallel(m) m.apply(init_weights) """ def __init__( self, module: nn.Module, env: Optional[ShardingEnv] = None, device: Optional[torch.device] = None, plan: Optional[ShardingPlan] = None, sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None, init_data_parallel: bool = True, init_parameters: bool = True, data_parallel_wrapper: Optional[DataParallelWrapper] = None, ) -> None: super().__init__() torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}") self.init_parameters = init_parameters self._ddp_wrapped: bool = False if env is None: pg = dist.GroupMember.WORLD assert pg is not None, "Process group is not initialized" env = ShardingEnv.from_process_group(pg) self._env: ShardingEnv = env if device is None: device = torch.device("cpu") self.device: torch.device = device if sharders is None: sharders = get_default_sharders() 
self._sharder_map: Dict[Type[nn.Module], ModuleSharder[nn.Module]] = { sharder.module_type: sharder for sharder in sharders } if data_parallel_wrapper is None: data_parallel_wrapper = DefaultDataParallelWrapper() self._data_parallel_wrapper: DataParallelWrapper = data_parallel_wrapper if plan is None: planner = EmbeddingShardingPlanner( topology=Topology( local_world_size=get_local_size(self._env.world_size), world_size=self._env.world_size, compute_device=self.device.type, ) ) pg = self._env.process_group if pg is not None: plan = planner.collective_plan(module, sharders, pg) else: plan = planner.plan(module, sharders) self._plan: ShardingPlan = plan self._dmp_wrapped_module: nn.Module = self._init_dmp(module) self._optim: CombinedOptimizer = self._init_optim(self._dmp_wrapped_module) if init_parameters: self._init_parameters(self.module) if init_data_parallel: self.init_data_parallel() def module(self) -> nn.Module: """ Property to directly access sharded module, which will not be wrapped in DDP, FSDP, DMP, or any other parallelism wrappers. """ return get_unwrapped_module(self) def module(self, value: nn.Module) -> None: if isinstance(self.module, DistributedDataParallel) or isinstance( self.module, FullyShardedDataParallel ): raise RuntimeError( "module can't be set after calling init_data_parallel(...)" ) else: self._dmp_wrapped_module = value # pyre-ignore [2, 3] def forward(self, *args, **kwargs) -> Any: return self._dmp_wrapped_module(*args, **kwargs) def init_data_parallel(self) -> None: """ See init_data_parallel c-tor argument for usage. It's safe to call this method multiple times. 
""" if not self._ddp_wrapped: # Allocate any 'meta' tensors if self.init_parameters: self._init_parameters(self._dmp_wrapped_module) self._data_parallel_wrapper.wrap(self, self._env, self.device) self._ddp_wrapped = True def copy( self, device: torch.device, ) -> "DistributedModelParallel": """ Recursively copy submodules to new device by calling per-module customized copy process, since some modules needs to use the original references (like `ShardedModule` for inference). """ assert isinstance(device, torch.device) # dmp code deep copy with sharded_model_copy(device=None): copy_dmp = copy.deepcopy(self) # tensor resident module deep copy copy_dmp_wrapped_module = copy_to_device( self._dmp_wrapped_module, self.device, device ) copy_dmp._dmp_wrapped_module = copy_dmp_wrapped_module return copy_dmp def _init_dmp(self, module: nn.Module) -> nn.Module: return self._shard_modules_impl(module) def _init_optim(self, module: nn.Module) -> CombinedOptimizer: # pyre-ignore [6] return CombinedOptimizer(self._fused_optim_impl(module, [])) def _fused_optim_impl( self, module: nn.Module, fused_optims: List[Tuple[str, KeyedOptimizer]], path: str = "", ) -> List[Tuple[str, KeyedOptimizer]]: if isinstance(module, FusedOptimizerModule): fused_optims.append((path, module.fused_optimizer)) return fused_optims for name, child in module.named_children(): self._fused_optim_impl( child, fused_optims, path + "." + name if path else name, ) return fused_optims def _shard_modules_impl( self, module: nn.Module, path: str = "", ) -> nn.Module: # pre-sharded module if isinstance(module, ShardedModule): return module # shardable module module_sharding_plan = self._plan.get_plan_for_module(path) if module_sharding_plan: sharder_key = type(module) module = self._sharder_map[sharder_key].shard( module, module_sharding_plan, self._env, self.device, ) return module for name, child in module.named_children(): child = self._shard_modules_impl( child, path + "." 
+ name if path else name, ) setattr(module, name, child) return module def _init_parameters(self, module: nn.Module) -> None: def init_parameters(module: nn.Module) -> None: # Allocate parameters and buffers if over 'meta' device. has_meta_param = False for name, param in module._parameters.items(): if isinstance(param, torch.Tensor) and param.device.type == "meta": module._parameters[name] = nn.Parameter( torch.empty_like(param, device=self.device), requires_grad=param.requires_grad, ) has_meta_param = True for name, buffer in module._buffers.items(): if isinstance(buffer, torch.Tensor) and buffer.device.type == "meta": module._buffers[name] = torch.zeros_like(buffer, device=self.device) # Init parameters if at least one parameter is over 'meta' device. if has_meta_param and hasattr(module, "reset_parameters"): module.reset_parameters() module.apply(init_parameters) def sparse_grad_parameter_names( self, destination: Optional[List[str]] = None, prefix: str = "" ) -> List[str]: destination = [] if destination is None else destination return self._sparse_grad_parameter_names(self.module, destination, prefix) def _sparse_grad_parameter_names( self, module: nn.Module, destination: List[str], prefix: str = "" ) -> List[str]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): pass elif isinstance(module, nn.Embedding): if module.sparse: destination.append(append_prefix(prefix, "weight")) elif isinstance(module, nn.EmbeddingBag): if module.sparse: destination.append(append_prefix(prefix, "weight")) else: for name, child in module.named_children(): self._sparse_grad_parameter_names( child, destination, append_prefix(prefix, name) ) return destination # pyre-ignore [14] def state_dict( self, destination: Optional[Dict[str, Any]] = None, prefix: str = "", keep_vars: bool = False, ) -> Dict[str, Any]: state_dict = get_module(self).state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) 
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( state_dict, prefix + _DDP_STATE_DICT_PREFIX ) add_prefix_to_state_dict(state_dict, prefix) return state_dict # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module` # inconsistently. def load_state_dict( self, state_dict: "OrderedDict[str, torch.Tensor]", prefix: str = "", strict: bool = True, ) -> _IncompatibleKeys: return self._load_state_dict(self, state_dict, prefix, strict) def _load_state_dict( self, module: nn.Module, state_dict: "OrderedDict[str, torch.Tensor]", prefix: str = "", strict: bool = True, ) -> _IncompatibleKeys: missing_keys = [] unexpected_keys = [] module = get_module(module) if isinstance(module, DistributedDataParallel): torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( state_dict, prefix ) add_prefix_to_state_dict(state_dict, prefix + _DDP_STATE_DICT_PREFIX) if isinstance(module, ShardedModule): return module.load_state_dict(state_dict, strict=strict) else: module._load_from_state_dict( state_dict, prefix, {}, strict, missing_keys, unexpected_keys, [] ) for name, child in module.named_children(): m_keys, u_keys = self._load_state_dict( child, filter_state_dict(state_dict, prefix + name), "", strict, ) missing_keys.extend(m_keys) unexpected_keys.extend(u_keys) return _IncompatibleKeys( missing_keys=missing_keys, unexpected_keys=unexpected_keys ) def _named_parameters( self, module: nn.Module, prefix: str = "", recurse: bool = True, strip_ddp: bool = True, ) -> Iterator[Tuple[str, torch.nn.Parameter]]: if strip_ddp: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.named_parameters(prefix, recurse) else: yield from module.named_parameters(prefix, recurse=False) for name, child in module.named_children(): yield from self._named_parameters( child, append_prefix(prefix, name), recurse, strip_ddp, ) def named_parameters( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True, ) -> 
Iterator[Tuple[str, torch.nn.Parameter]]: gen = self._named_parameters( self.module, prefix, recurse, ) memo = set() for key, param in gen: if param in memo: continue if remove_duplicate: memo.add(param) yield key, param def bare_named_parameters( self, prefix: str = "", recurse: bool = True, ) -> Iterator[Tuple[str, torch.nn.Parameter]]: gen = self._named_parameters( self.module, prefix, recurse, ) memo = set() for key, param in gen: if param in memo: continue memo.add(param) yield key, param def _sharded_parameter_names(module: nn.Module, prefix: str = "") -> Iterator[str]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.sharded_parameter_names(prefix) else: for name, child in module.named_children(): yield from DistributedModelParallel._sharded_parameter_names( child, append_prefix(prefix, name) ) def _named_buffers( self, module: nn.Module, prefix: str = "", recurse: bool = True ) -> Iterator[Tuple[str, torch.Tensor]]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.named_buffers(prefix, recurse) else: yield from module.named_buffers(prefix, recurse=False) for name, child in module.named_children(): yield from self._named_buffers( child, append_prefix(prefix, name), recurse ) def named_buffers( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True ) -> Iterator[Tuple[str, torch.Tensor]]: gen = self._named_buffers(self.module, prefix, recurse) memo = set() for key, param in gen: if param in memo: continue if remove_duplicate: memo.add(param) yield key, param def fused_optimizer(self) -> KeyedOptimizer: return self._optim def plan(self) -> ShardingPlan: return self._plan def _reset_parameters(module: nn.Module) -> None: for _, m in module.named_modules(): if hasattr(m, "reset_parameters"): m.reset_parameters() class IDTransformerGroup: def __init__( self, url, module: DistributedModelParallel, configs_dict: Dict[str, Union[List[EmbeddingBagConfig], 
List[EmbeddingConfig]]], *, eviction_config=None, transform_config=None, ps_config=None, parallel=True, ): """ IDTransformerGroup stores the IDTransformer for all sharded modules in a DMP module. Args: url: configuration for PS, e.g. redis://127.0.0.1:6379/?prefix=model. module: DMP module that need dynamic embedding. configs_dict: a dictionary that maps the module path of the sharded module to its embedding configs or embeddingbag configs. The plan of `module` should contain the module path in `configs_dict`. eviction_config: configuration for eviction policy. Default is `{"type": "mixed_lru_lfu"}` transform_config: configuration for the transformer. Default is `{"type": "naive"}` parallel: Whether the IDTransformerCollections will run parallel. When set to True, IDTransformerGroup will start a thread for each IDTransformerCollection. Example: class Model(nn.Module): def __init__(self, config1, config2): super().__init__() self.emb1 = EmbeddingCollection(tables=config1, device=torch.device("meta")) self.emb2 = EmbeddingCollection(tables=config2, device=torch.device("meta")) ... def forward(self, kjt1, kjt2): ... m = Model(config1, config2) m = DistributedModelParallel(m) transformers = IDTransformerGroup( "redis://127.0.0.1:6379/?prefix=model", m, { "emb1": config1, "emb2": config2 }) for label, kjt1, kjt2 in dataset: kjts = transformers.transform({ "emb1": kjt1, "emb2": kjt2 }) kjt1, kjt2 = kjts["emb1"], kjts["emb2"] output = m(kjt1, kjt2) ... """ self._parallel = parallel # get all sharded_modules from plan plan = module.plan sharded_modules = _get_sharded_modules_recursive(module.module, "", plan) self._id_transformer_collections: Dict[str, IDTransformerCollection] = {} for path, configs in configs_dict.items(): if path not in sharded_modules: raise ValueError( f"`{path}` in configs dooes not match any sharded modules. " f"Paths for current sharded modules are: {list(sharded_modules.keys())}." 
) sharded_module, params_plan = sharded_modules[path] ps_collection = PSCollection.fromModule( path, sharded_module, params_plan, url, ps_config ) id_transformer_collection = IDTransformerCollection( configs, eviction_config, transform_config, ps_collection ) self._id_transformer_collections[path] = id_transformer_collection if self._parallel: self._threads = {} self._input_queues = {} self._output_queues = {} for path, transformer in self._id_transformer_collections.items(): thread, input_queue, output_queue = _create_transformer_thread( transformer ) self._threads[path] = thread self._input_queues[path] = input_queue self._output_queues[path] = output_queue def transform(self, kjt_dict: Dict[str, KeyedJaggedTensor]): """ Transform global `KeyedJaggedTensor`s to local ones. Args: kjt_dict: dict keyed by module path of global kjts. Return: Dict[str, KeyedJaggedTensor] List[torch.classes.tde.FetchHandle]: list of fetch handles to wait. """ result = {} fetch_handles = [] if self._parallel: for path, kjt in kjt_dict.items(): if path not in self._id_transformer_collections: raise ValueError( f"kjt_dict contain invalid path {path}. " f"should be one of {self._id_transformer_collections.keys()}" ) self._input_queues[path].put(kjt) for path in kjt_dict: kjt, handles = self._output_queues[path].get() result[path] = kjt fetch_handles.extend(handles) else: for path, kjt in kjt_dict.items(): if path not in self._id_transformer_collections: raise ValueError( f"kjt_dict contain invalid path {path}. " f"should be one of {self._id_transformer_collections.keys()}" ) kjt, handles = self._id_transformer_collections[path].transform(kjt) result[path] = kjt fetch_handles.extend(handles) return result, fetch_handles def save(self): for _, id_transformer_collection in self._id_transformer_collections.items(): id_transformer_collection.save() def __contains__(self, path): """ Check if there is transformer for the path. 
""" return path in self._id_transformer_collections def __del__(self): """ Stop the parallel threads """ if self._parallel: # stop the threads for _, input_queue in self._input_queues.items(): input_queue.put(None) The provided code snippet includes necessary dependencies for implementing the `wrap` function. Write a Python function `def wrap( url: str, dataloader, module: DistributedModelParallel, configs_dict: Dict[str, Union[List[EmbeddingBagConfig], List[EmbeddingConfig]]], *, data_info: Dict[int, str] = None, eviction_config=None, transform_config=None, ps_config=None, parallel=True, num_prefetch=0, )` to solve the following problem: DataLoader to transform data from global id to cache id. Args: url: configuration for PS, e.g. redis://127.0.0.1:6379/?prefix=model. dataloader: dataloader to transform. module: DMP module that need dynamic embedding. configs_dict: a dictionary that maps the module path of the sharded module to its embedding configs or embeddingbag configs. The plan of `module` should contain the module path in `configs_dict`. data_info: dict keyed by int index of module path. For example, if the dataloader produces `label, kjt1, kjt2` each iteration and `kjt1` and `kjt2` are inputs to modules of path `emb1` and `emb2` respectively, then `data_info` should be `{ 1: "emb1", 2: "emb2" }`. eviction_config: configuration for eviction policy. Default is `{"type": "mixed_lru_lfu"}` transform_config: configuration for the transformer. Default is `{"type": "naive"}` transform_config: configuration for the ps. Default is `{"chunk_size": 8 * 1024 * 1024} parallel: Whether the IDTransformerCollections will run paralell. When set to True, IDTransformerGroup will start a thread for each IDTransformerCollection. num_prefetch: number of samples to prefetch. Return: DataLoader: the dataloader to transform data. DistributedModelParallel: model with id_transformer_group attached. 
Example: class Model(nn.Module): def __init__(self, config1, config2): super().__init__() self.emb1 = EmbeddingCollection(tables=config1, device=torch.device("meta")) self.emb2 = EmbeddingCollection(tables=config2, device=torch.device("meta")) ... def forward(self, kjt1, kjt2): ... m = Model(config1, config2) m = DistributedModelParallel(m) dataloader, m = tde.wrap("redis://127.0.0.1:6379/", dataloader, m, { "emb1": config1, "emb2": config2 }) for label, kjt1, kjt2 in dataloader: output = m(kjt1, kjt2) ... Here is the function: def wrap( url: str, dataloader, module: DistributedModelParallel, configs_dict: Dict[str, Union[List[EmbeddingBagConfig], List[EmbeddingConfig]]], *, data_info: Dict[int, str] = None, eviction_config=None, transform_config=None, ps_config=None, parallel=True, num_prefetch=0, ): """ DataLoader to transform data from global id to cache id. Args: url: configuration for PS, e.g. redis://127.0.0.1:6379/?prefix=model. dataloader: dataloader to transform. module: DMP module that need dynamic embedding. configs_dict: a dictionary that maps the module path of the sharded module to its embedding configs or embeddingbag configs. The plan of `module` should contain the module path in `configs_dict`. data_info: dict keyed by int index of module path. For example, if the dataloader produces `label, kjt1, kjt2` each iteration and `kjt1` and `kjt2` are inputs to modules of path `emb1` and `emb2` respectively, then `data_info` should be `{ 1: "emb1", 2: "emb2" }`. eviction_config: configuration for eviction policy. Default is `{"type": "mixed_lru_lfu"}` transform_config: configuration for the transformer. Default is `{"type": "naive"}` transform_config: configuration for the ps. Default is `{"chunk_size": 8 * 1024 * 1024} parallel: Whether the IDTransformerCollections will run paralell. When set to True, IDTransformerGroup will start a thread for each IDTransformerCollection. num_prefetch: number of samples to prefetch. 
Return: DataLoader: the dataloader to transform data. DistributedModelParallel: model with id_transformer_group attached. Example: class Model(nn.Module): def __init__(self, config1, config2): super().__init__() self.emb1 = EmbeddingCollection(tables=config1, device=torch.device("meta")) self.emb2 = EmbeddingCollection(tables=config2, device=torch.device("meta")) ... def forward(self, kjt1, kjt2): ... m = Model(config1, config2) m = DistributedModelParallel(m) dataloader, m = tde.wrap("redis://127.0.0.1:6379/", dataloader, m, { "emb1": config1, "emb2": config2 }) for label, kjt1, kjt2 in dataloader: output = m(kjt1, kjt2) ... """ id_transformer_group = IDTransformerGroup( url, module, configs_dict, eviction_config=eviction_config, transform_config=transform_config, ps_config=ps_config, parallel=parallel, ) paths = list(configs_dict.keys()) # Attach the id transformer group to module for saving. module._id_transformer_group = id_transformer_group return ( DataLoader( id_transformer_group=id_transformer_group, dataloader=dataloader, data_info=data_info, paths=paths, num_prefetch=num_prefetch, ), module, )
DataLoader to transform data from global id to cache id. Args: url: configuration for PS, e.g. redis://127.0.0.1:6379/?prefix=model. dataloader: dataloader to transform. module: DMP module that need dynamic embedding. configs_dict: a dictionary that maps the module path of the sharded module to its embedding configs or embeddingbag configs. The plan of `module` should contain the module path in `configs_dict`. data_info: dict keyed by int index of module path. For example, if the dataloader produces `label, kjt1, kjt2` each iteration and `kjt1` and `kjt2` are inputs to modules of path `emb1` and `emb2` respectively, then `data_info` should be `{ 1: "emb1", 2: "emb2" }`. eviction_config: configuration for eviction policy. Default is `{"type": "mixed_lru_lfu"}` transform_config: configuration for the transformer. Default is `{"type": "naive"}` transform_config: configuration for the ps. Default is `{"chunk_size": 8 * 1024 * 1024} parallel: Whether the IDTransformerCollections will run paralell. When set to True, IDTransformerGroup will start a thread for each IDTransformerCollection. num_prefetch: number of samples to prefetch. Return: DataLoader: the dataloader to transform data. DistributedModelParallel: model with id_transformer_group attached. Example: class Model(nn.Module): def __init__(self, config1, config2): super().__init__() self.emb1 = EmbeddingCollection(tables=config1, device=torch.device("meta")) self.emb2 = EmbeddingCollection(tables=config2, device=torch.device("meta")) ... def forward(self, kjt1, kjt2): ... m = Model(config1, config2) m = DistributedModelParallel(m) dataloader, m = tde.wrap("redis://127.0.0.1:6379/", dataloader, m, { "emb1": config1, "emb2": config2 }) for label, kjt1, kjt2 in dataloader: output = m(kjt1, kjt2) ...
8,832
import queue import threading from typing import Dict, List, Union import torch from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL from torchrec import EmbeddingBagConfig, EmbeddingConfig from torchrec.distributed.model_parallel import DistributedModelParallel from torchrec.sparse.jagged_tensor import KeyedJaggedTensor from .id_transformer_group import IDTransformerGroup class DistributedModelParallel(nn.Module, FusedOptimizerModule): """ Entry point to model parallelism. Args: module (nn.Module): module to wrap. env (Optional[ShardingEnv]): sharding environment that has the process group. device (Optional[torch.device]): compute device, defaults to cpu. plan (Optional[ShardingPlan]): plan to use when sharding, defaults to `EmbeddingShardingPlanner.collective_plan()`. sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available to shard with, defaults to `EmbeddingBagCollectionSharder()`. init_data_parallel (bool): data-parallel modules can be lazy, i.e. they delay parameter initialization until the first forward pass. Pass `True` to delay initialization of data parallel modules. Do first forward pass and then call DistributedModelParallel.init_data_parallel(). init_parameters (bool): initialize parameters for modules still on meta device. data_parallel_wrapper (Optional[DataParallelWrapper]): custom wrapper for data parallel modules. 
Example:: def init_weights(m): if isinstance(m, nn.Linear): m.weight.fill_(1.0) elif isinstance(m, EmbeddingBagCollection): for param in m.parameters(): init.kaiming_normal_(param) m = MyModel(device='meta') m = DistributedModelParallel(m) m.apply(init_weights) """ def __init__( self, module: nn.Module, env: Optional[ShardingEnv] = None, device: Optional[torch.device] = None, plan: Optional[ShardingPlan] = None, sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None, init_data_parallel: bool = True, init_parameters: bool = True, data_parallel_wrapper: Optional[DataParallelWrapper] = None, ) -> None: super().__init__() torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}") self.init_parameters = init_parameters self._ddp_wrapped: bool = False if env is None: pg = dist.GroupMember.WORLD assert pg is not None, "Process group is not initialized" env = ShardingEnv.from_process_group(pg) self._env: ShardingEnv = env if device is None: device = torch.device("cpu") self.device: torch.device = device if sharders is None: sharders = get_default_sharders() self._sharder_map: Dict[Type[nn.Module], ModuleSharder[nn.Module]] = { sharder.module_type: sharder for sharder in sharders } if data_parallel_wrapper is None: data_parallel_wrapper = DefaultDataParallelWrapper() self._data_parallel_wrapper: DataParallelWrapper = data_parallel_wrapper if plan is None: planner = EmbeddingShardingPlanner( topology=Topology( local_world_size=get_local_size(self._env.world_size), world_size=self._env.world_size, compute_device=self.device.type, ) ) pg = self._env.process_group if pg is not None: plan = planner.collective_plan(module, sharders, pg) else: plan = planner.plan(module, sharders) self._plan: ShardingPlan = plan self._dmp_wrapped_module: nn.Module = self._init_dmp(module) self._optim: CombinedOptimizer = self._init_optim(self._dmp_wrapped_module) if init_parameters: self._init_parameters(self.module) if init_data_parallel: self.init_data_parallel() 
def module(self) -> nn.Module: """ Property to directly access sharded module, which will not be wrapped in DDP, FSDP, DMP, or any other parallelism wrappers. """ return get_unwrapped_module(self) def module(self, value: nn.Module) -> None: if isinstance(self.module, DistributedDataParallel) or isinstance( self.module, FullyShardedDataParallel ): raise RuntimeError( "module can't be set after calling init_data_parallel(...)" ) else: self._dmp_wrapped_module = value # pyre-ignore [2, 3] def forward(self, *args, **kwargs) -> Any: return self._dmp_wrapped_module(*args, **kwargs) def init_data_parallel(self) -> None: """ See init_data_parallel c-tor argument for usage. It's safe to call this method multiple times. """ if not self._ddp_wrapped: # Allocate any 'meta' tensors if self.init_parameters: self._init_parameters(self._dmp_wrapped_module) self._data_parallel_wrapper.wrap(self, self._env, self.device) self._ddp_wrapped = True def copy( self, device: torch.device, ) -> "DistributedModelParallel": """ Recursively copy submodules to new device by calling per-module customized copy process, since some modules needs to use the original references (like `ShardedModule` for inference). 
""" assert isinstance(device, torch.device) # dmp code deep copy with sharded_model_copy(device=None): copy_dmp = copy.deepcopy(self) # tensor resident module deep copy copy_dmp_wrapped_module = copy_to_device( self._dmp_wrapped_module, self.device, device ) copy_dmp._dmp_wrapped_module = copy_dmp_wrapped_module return copy_dmp def _init_dmp(self, module: nn.Module) -> nn.Module: return self._shard_modules_impl(module) def _init_optim(self, module: nn.Module) -> CombinedOptimizer: # pyre-ignore [6] return CombinedOptimizer(self._fused_optim_impl(module, [])) def _fused_optim_impl( self, module: nn.Module, fused_optims: List[Tuple[str, KeyedOptimizer]], path: str = "", ) -> List[Tuple[str, KeyedOptimizer]]: if isinstance(module, FusedOptimizerModule): fused_optims.append((path, module.fused_optimizer)) return fused_optims for name, child in module.named_children(): self._fused_optim_impl( child, fused_optims, path + "." + name if path else name, ) return fused_optims def _shard_modules_impl( self, module: nn.Module, path: str = "", ) -> nn.Module: # pre-sharded module if isinstance(module, ShardedModule): return module # shardable module module_sharding_plan = self._plan.get_plan_for_module(path) if module_sharding_plan: sharder_key = type(module) module = self._sharder_map[sharder_key].shard( module, module_sharding_plan, self._env, self.device, ) return module for name, child in module.named_children(): child = self._shard_modules_impl( child, path + "." + name if path else name, ) setattr(module, name, child) return module def _init_parameters(self, module: nn.Module) -> None: def init_parameters(module: nn.Module) -> None: # Allocate parameters and buffers if over 'meta' device. 
has_meta_param = False for name, param in module._parameters.items(): if isinstance(param, torch.Tensor) and param.device.type == "meta": module._parameters[name] = nn.Parameter( torch.empty_like(param, device=self.device), requires_grad=param.requires_grad, ) has_meta_param = True for name, buffer in module._buffers.items(): if isinstance(buffer, torch.Tensor) and buffer.device.type == "meta": module._buffers[name] = torch.zeros_like(buffer, device=self.device) # Init parameters if at least one parameter is over 'meta' device. if has_meta_param and hasattr(module, "reset_parameters"): module.reset_parameters() module.apply(init_parameters) def sparse_grad_parameter_names( self, destination: Optional[List[str]] = None, prefix: str = "" ) -> List[str]: destination = [] if destination is None else destination return self._sparse_grad_parameter_names(self.module, destination, prefix) def _sparse_grad_parameter_names( self, module: nn.Module, destination: List[str], prefix: str = "" ) -> List[str]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): pass elif isinstance(module, nn.Embedding): if module.sparse: destination.append(append_prefix(prefix, "weight")) elif isinstance(module, nn.EmbeddingBag): if module.sparse: destination.append(append_prefix(prefix, "weight")) else: for name, child in module.named_children(): self._sparse_grad_parameter_names( child, destination, append_prefix(prefix, name) ) return destination # pyre-ignore [14] def state_dict( self, destination: Optional[Dict[str, Any]] = None, prefix: str = "", keep_vars: bool = False, ) -> Dict[str, Any]: state_dict = get_module(self).state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( state_dict, prefix + _DDP_STATE_DICT_PREFIX ) add_prefix_to_state_dict(state_dict, prefix) return state_dict # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module` # inconsistently. 
def load_state_dict( self, state_dict: "OrderedDict[str, torch.Tensor]", prefix: str = "", strict: bool = True, ) -> _IncompatibleKeys: return self._load_state_dict(self, state_dict, prefix, strict) def _load_state_dict( self, module: nn.Module, state_dict: "OrderedDict[str, torch.Tensor]", prefix: str = "", strict: bool = True, ) -> _IncompatibleKeys: missing_keys = [] unexpected_keys = [] module = get_module(module) if isinstance(module, DistributedDataParallel): torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( state_dict, prefix ) add_prefix_to_state_dict(state_dict, prefix + _DDP_STATE_DICT_PREFIX) if isinstance(module, ShardedModule): return module.load_state_dict(state_dict, strict=strict) else: module._load_from_state_dict( state_dict, prefix, {}, strict, missing_keys, unexpected_keys, [] ) for name, child in module.named_children(): m_keys, u_keys = self._load_state_dict( child, filter_state_dict(state_dict, prefix + name), "", strict, ) missing_keys.extend(m_keys) unexpected_keys.extend(u_keys) return _IncompatibleKeys( missing_keys=missing_keys, unexpected_keys=unexpected_keys ) def _named_parameters( self, module: nn.Module, prefix: str = "", recurse: bool = True, strip_ddp: bool = True, ) -> Iterator[Tuple[str, torch.nn.Parameter]]: if strip_ddp: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.named_parameters(prefix, recurse) else: yield from module.named_parameters(prefix, recurse=False) for name, child in module.named_children(): yield from self._named_parameters( child, append_prefix(prefix, name), recurse, strip_ddp, ) def named_parameters( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True, ) -> Iterator[Tuple[str, torch.nn.Parameter]]: gen = self._named_parameters( self.module, prefix, recurse, ) memo = set() for key, param in gen: if param in memo: continue if remove_duplicate: memo.add(param) yield key, param def bare_named_parameters( self, prefix: str = "", 
recurse: bool = True, ) -> Iterator[Tuple[str, torch.nn.Parameter]]: gen = self._named_parameters( self.module, prefix, recurse, ) memo = set() for key, param in gen: if param in memo: continue memo.add(param) yield key, param def _sharded_parameter_names(module: nn.Module, prefix: str = "") -> Iterator[str]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.sharded_parameter_names(prefix) else: for name, child in module.named_children(): yield from DistributedModelParallel._sharded_parameter_names( child, append_prefix(prefix, name) ) def _named_buffers( self, module: nn.Module, prefix: str = "", recurse: bool = True ) -> Iterator[Tuple[str, torch.Tensor]]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.named_buffers(prefix, recurse) else: yield from module.named_buffers(prefix, recurse=False) for name, child in module.named_children(): yield from self._named_buffers( child, append_prefix(prefix, name), recurse ) def named_buffers( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True ) -> Iterator[Tuple[str, torch.Tensor]]: gen = self._named_buffers(self.module, prefix, recurse) memo = set() for key, param in gen: if param in memo: continue if remove_duplicate: memo.add(param) yield key, param def fused_optimizer(self) -> KeyedOptimizer: return self._optim def plan(self) -> ShardingPlan: return self._plan def _reset_parameters(module: nn.Module) -> None: for _, m in module.named_modules(): if hasattr(m, "reset_parameters"): m.reset_parameters() class IDTransformerGroup: def __init__( self, url, module: DistributedModelParallel, configs_dict: Dict[str, Union[List[EmbeddingBagConfig], List[EmbeddingConfig]]], *, eviction_config=None, transform_config=None, ps_config=None, parallel=True, ): """ IDTransformerGroup stores the IDTransformer for all sharded modules in a DMP module. Args: url: configuration for PS, e.g. redis://127.0.0.1:6379/?prefix=model. 
module: DMP module that need dynamic embedding. configs_dict: a dictionary that maps the module path of the sharded module to its embedding configs or embeddingbag configs. The plan of `module` should contain the module path in `configs_dict`. eviction_config: configuration for eviction policy. Default is `{"type": "mixed_lru_lfu"}` transform_config: configuration for the transformer. Default is `{"type": "naive"}` parallel: Whether the IDTransformerCollections will run parallel. When set to True, IDTransformerGroup will start a thread for each IDTransformerCollection. Example: class Model(nn.Module): def __init__(self, config1, config2): super().__init__() self.emb1 = EmbeddingCollection(tables=config1, device=torch.device("meta")) self.emb2 = EmbeddingCollection(tables=config2, device=torch.device("meta")) ... def forward(self, kjt1, kjt2): ... m = Model(config1, config2) m = DistributedModelParallel(m) transformers = IDTransformerGroup( "redis://127.0.0.1:6379/?prefix=model", m, { "emb1": config1, "emb2": config2 }) for label, kjt1, kjt2 in dataset: kjts = transformers.transform({ "emb1": kjt1, "emb2": kjt2 }) kjt1, kjt2 = kjts["emb1"], kjts["emb2"] output = m(kjt1, kjt2) ... """ self._parallel = parallel # get all sharded_modules from plan plan = module.plan sharded_modules = _get_sharded_modules_recursive(module.module, "", plan) self._id_transformer_collections: Dict[str, IDTransformerCollection] = {} for path, configs in configs_dict.items(): if path not in sharded_modules: raise ValueError( f"`{path}` in configs dooes not match any sharded modules. " f"Paths for current sharded modules are: {list(sharded_modules.keys())}." 
) sharded_module, params_plan = sharded_modules[path] ps_collection = PSCollection.fromModule( path, sharded_module, params_plan, url, ps_config ) id_transformer_collection = IDTransformerCollection( configs, eviction_config, transform_config, ps_collection ) self._id_transformer_collections[path] = id_transformer_collection if self._parallel: self._threads = {} self._input_queues = {} self._output_queues = {} for path, transformer in self._id_transformer_collections.items(): thread, input_queue, output_queue = _create_transformer_thread( transformer ) self._threads[path] = thread self._input_queues[path] = input_queue self._output_queues[path] = output_queue def transform(self, kjt_dict: Dict[str, KeyedJaggedTensor]): """ Transform global `KeyedJaggedTensor`s to local ones. Args: kjt_dict: dict keyed by module path of global kjts. Return: Dict[str, KeyedJaggedTensor] List[torch.classes.tde.FetchHandle]: list of fetch handles to wait. """ result = {} fetch_handles = [] if self._parallel: for path, kjt in kjt_dict.items(): if path not in self._id_transformer_collections: raise ValueError( f"kjt_dict contain invalid path {path}. " f"should be one of {self._id_transformer_collections.keys()}" ) self._input_queues[path].put(kjt) for path in kjt_dict: kjt, handles = self._output_queues[path].get() result[path] = kjt fetch_handles.extend(handles) else: for path, kjt in kjt_dict.items(): if path not in self._id_transformer_collections: raise ValueError( f"kjt_dict contain invalid path {path}. " f"should be one of {self._id_transformer_collections.keys()}" ) kjt, handles = self._id_transformer_collections[path].transform(kjt) result[path] = kjt fetch_handles.extend(handles) return result, fetch_handles def save(self): for _, id_transformer_collection in self._id_transformer_collections.items(): id_transformer_collection.save() def __contains__(self, path): """ Check if there is transformer for the path. 
""" return path in self._id_transformer_collections def __del__(self): """ Stop the parallel threads """ if self._parallel: # stop the threads for _, input_queue in self._input_queues.items(): input_queue.put(None) The provided code snippet includes necessary dependencies for implementing the `save` function. Write a Python function `def save(module: DistributedModelParallel)` to solve the following problem: Save the dynamic embedding part of the model. Here is the function: def save(module: DistributedModelParallel): """ Save the dynamic embedding part of the model. """ if not hasattr(module, "_id_transformer_group"): raise ValueError( "No _id_transformer_group property for module, is this a module with dynamic embeding?" ) if not isinstance(module._id_transformer_group, IDTransformerGroup): raise ValueError( "module._id_transformer_group property is not IDTransformerGroup, is this a module with dynamic embeding?" ) module._id_transformer_group.save()
Save the dynamic embedding part of the model.
8,833
from typing import Dict
import torch.nn as nn
from torchrec.distributed.types import ShardingPlan
class ShardingPlan:
    """
    Representation of sharding plan. This uses the FQN of the larger wrapped model
    (i.e the model that is wrapped using `DistributedModelParallel`)
    EmbeddingModuleShardingPlan should be used when TorchRec composability is desired.

    Attributes:
        plan (Dict[str, EmbeddingModuleShardingPlan]): dict keyed by module path of
            dict of parameter sharding specs keyed by parameter name.
    """
    # NOTE(review): `ModuleShardingPlan` and `Optional` are not imported in this
    # excerpt -- presumably provided by the real torchrec module; confirm upstream.
    plan: Dict[str, ModuleShardingPlan]
    def get_plan_for_module(self, module_path: str) -> Optional[ModuleShardingPlan]:
        """
        Look up the sharding specs registered for one module path.

        Args:
            module_path (str):

        Returns:
            Optional[ModuleShardingPlan]: dict of parameter sharding specs keyed by
                parameter name. None if sharding specs do not exist for given module_path.
        """
        return self.plan.get(module_path, None)
    def __str__(self) -> str:
        # Render each module's plan, separated by blank lines.
        out = ""
        for i, (module_path, module_plan) in enumerate(self.plan.items()):
            if i > 0:
                out += "\n\n"
            out += "module: " + module_path
            out += str(module_plan)
        return out
The provided code snippet includes necessary dependencies for implementing the `_get_sharded_modules_recursive` function. Write a Python function `def _get_sharded_modules_recursive( module: nn.Module, path: str, plan: ShardingPlan, ) -> Dict[str, nn.Module]` to solve the following problem:
Get all sharded modules of module from `plan`.
Here is the function:
def _get_sharded_modules_recursive(
    module: nn.Module,
    path: str,
    plan: ShardingPlan,
) -> Dict[str, nn.Module]:
    """
    Get all sharded modules of module from `plan`.
    """
    # NOTE(review): despite the `Dict[str, nn.Module]` annotation, each value is
    # actually a `(module, params_plan)` tuple -- callers (IDTransformerGroup)
    # unpack it as such.
    params_plan = plan.get_plan_for_module(path)
    if params_plan:
        # This module itself is sharded; do not recurse further into it.
        return {path: (module, params_plan)}
    res = {}
    for name, child in module.named_children():
        # Build the child's fully-qualified path; the root module's path is "".
        new_path = f"{path}.{name}" if path else name
        res.update(_get_sharded_modules_recursive(child, new_path, plan))
    return res
Get all sharded modules of module from `plan`.
8,834
import queue import threading from typing import Dict, List, Union from torchrec import EmbeddingBagConfig, EmbeddingConfig, KeyedJaggedTensor from torchrec.distributed.model_parallel import DistributedModelParallel from .id_transformer_collection import IDTransformerCollection from .ps import PSCollection from .utils import _get_sharded_modules_recursive class IDTransformerCollection: def __init__( self, tables: Union[List[EmbeddingConfig], List[EmbeddingBagConfig]], eviction_config=None, transform_config=None, ps_collection: PSCollection = None, ): """ IDTransformerCollection could transform the input of a `Embedding(Bag)Collection`. It contains the `IDTransformer` of tables in the `Embedding(Bag)Collection`. Args: tables: list of `Embedding(Bag)Config` or `EmbeddingBagConfig` one passed to `Embedding(Bag)Collection`. eviction_config: config of the eviction strategy for IDTransformers. transform_config: config of the transform strategy for IDTransformers. ps_collection: `PSCollection` of the collection, if `None`, won't do eviction or fetch. By default, IDTransformerCollection will evict half the ids when full. """ self._configs = tables self._ps_collection = ps_collection self._transformers = [] self._table_names = [] feature_names = set() for config in tables: if config.name in self._table_names: raise ValueError(f"Duplicate table name {config.name}") if not config.feature_names: config.feature_names = [config.name] self._table_names.append(config.name) for feature_name in config.feature_names: if feature_name in feature_names: raise ValueError(f"Shared feature not allowed yet.") # only rank 0 will have the id transformer # and other ranks will gather their to rank 0. 
if dist.get_rank() == 0: transformer = IDTransformer( num_embedding=config.num_embeddings, eviction_config=eviction_config, transform_config=transform_config, ) else: transformer = None self._transformers.append(transformer) self._feature_names: List[List[str]] = [ config.feature_names for config in tables ] self._ever_evicted = False self._time = 0 if dist.get_world_size() > 1: self._pg = dist.new_group(backend="gloo") self._stream = torch.cuda.Stream() def _transform( self, transformer, global_ids: List[torch.Tensor], cache_ids: List[torch.Tensor] ): with torch.cuda.stream(self._stream): total_numel = sum([tensor.numel() for tensor in global_ids]) if total_numel > 1e6: all_tensor = torch.cat(global_ids).to("cuda:0") unique_all_tensor, index = torch.unique(all_tensor, return_inverse=True) unique_all_tensor = unique_all_tensor.to("cpu") all_cache = torch.empty_like(unique_all_tensor) success, ids_to_fetch = transformer.transform( TensorList([unique_all_tensor]), TensorList([all_cache]), self._time, ) del all_tensor all_tensor = torch.take(all_cache.to("cuda:0"), index) offset = 0 for tensor in cache_ids: numel = tensor.numel() tensor.copy_(all_tensor[offset : offset + numel]) offset += numel assert ( total_numel == offset ), f"total_numel not equal offset, {total_numel} vs {offset}" else: # broadcast result success, ids_to_fetch = transformer.transform( TensorList(global_ids), TensorList(cache_ids), self._time, ) return success, ids_to_fetch def transform( self, global_features: KeyedJaggedTensor ) -> Tuple[KeyedJaggedTensor, List[torch.classes.tde.FetchHandle]]: """ Transform global kjts into local kjts. Return: KeyedJaggedTensor: the transformed kjt. List[torch.classes.tde.FetchHandle]: list of fetch handles to wait. 
""" global_values = global_features.values() cache_values = torch.empty_like(global_values) global_feature_indices = { feature_name: i for i, feature_name in enumerate(global_features.keys()) } offset_per_key = global_features.offset_per_key() fetch_handles = [] for i, transformer in enumerate(self._transformers): feature_names = self._feature_names[i] feature_indices = [ global_feature_indices[feature_name] for feature_name in feature_names ] global_ids = [ global_values[offset_per_key[idx] : offset_per_key[idx + 1]] for idx in feature_indices ] cache_ids = [ cache_values[offset_per_key[idx] : offset_per_key[idx + 1]] for idx in feature_indices ] if dist.get_world_size() > 1: concat_global_ids, concat_numel_list = gather_global_ids( global_ids, self._pg ) if dist.get_rank() == 0: global_ids = global_ids + concat_global_ids[1:] cache_ids = cache_ids + [ torch.empty_like(tensor) for tensor in concat_global_ids[1:] ] success, ids_to_fetch = self._transform( transformer, global_ids, cache_ids ) else: success, ids_to_fetch = True, None success, ids_to_fetch = broadcast_transform_result( success, ids_to_fetch, self._pg ) if self._ps_collection is not None: table_name = self._table_names[i] ps = self._ps_collection[table_name] if ids_to_fetch.numel() > 0: handle = ps.fetch( ids_to_fetch, self._time, self._ever_evicted, self._configs[i].get_weight_init_min(), self._configs[i].get_weight_init_max(), ) fetch_handles.append(handle) if not success: # TODO(zilinzhu): make this configurable # broadcast ids_to_evict if dist.get_rank() == 0: ids_to_evict = transformer.evict( transformer._num_embedding // 2 ) else: ids_to_evict = None ids_to_evict = broadcast_ids_to_evict(ids_to_evict, self._pg) ps.evict(ids_to_evict) self._ever_evicted = True # retry after eviction. 
# broadcast result if dist.get_rank() == 0: success, ids_to_fetch = transformer.transform( TensorList(global_ids), TensorList(cache_ids), self._time, ) else: success, ids_to_fetch = True, None success, ids_to_fetch = broadcast_transform_result( success, ids_to_fetch, self._pg ) if not success: raise RuntimeError( "Failed to transform global ids after eviction. " f"Maybe the num_embedding of table {table_name} is too small?" ) if ids_to_fetch.numel() > 0: fetch_handles.append( ps.fetch( ids_to_fetch, self._time, self._ever_evicted, self._configs[i].get_weight_init_min(), self._configs[i].get_weight_init_max(), ) ) scatter_cache_ids(cache_ids, concat_numel_list, self._pg) else: success, ids_to_fetch = self._transform( transformer, global_ids, cache_ids ) if self._ps_collection is not None: table_name = self._table_names[i] ps = self._ps_collection[table_name] if ids_to_fetch.numel() > 0: handle = ps.fetch( ids_to_fetch, self._time, self._ever_evicted, self._configs[i].get_weight_init_min(), self._configs[i].get_weight_init_max(), ) fetch_handles.append(handle) if not success: # TODO(zilinzhu): make this configurable ids_to_evict = transformer.evict( transformer._num_embedding // 2 ) ps.evict(ids_to_evict) self._ever_evicted = True # retry after eviction. success, ids_to_fetch = transformer.transform( TensorList(global_ids), TensorList(cache_ids), self._time, ) if not success: raise RuntimeError( "Failed to transform global ids after eviction. " f"Maybe the num_embedding of table {table_name} is too small?" 
) if ids_to_fetch is not None: fetch_handles.append( ps.fetch( ids_to_fetch, self._time, self._ever_evicted, self._configs[i].get_weight_init_min(), self._configs[i].get_weight_init_max(), ) ) cache_values = KeyedJaggedTensor( keys=global_features.keys(), values=cache_values, lengths=global_features.lengths(), weights=global_features.weights_or_none(), ) self._time += 1 return cache_values, fetch_handles def save(self): if self._ps_collection is None: return for i, transformer in enumerate(self._transformers): table_name = self._table_names[i] if dist.get_world_size() > 1: if dist.get_rank() == 0: ids = transformer.save() numel = torch.tensor(ids.numel()) dist.broadcast(numel, src=0, group=self._pg) dist.broadcast(ids, src=0, group=self._pg) else: numel = torch.tensor(0) dist.broadcast(numel, src=0, group=self._pg) ids = torch.empty((numel // 2, 2), dtype=torch.int64) dist.broadcast(ids, src=0, group=self._pg) else: ids = transformer.save() self._ps_collection[table_name].evict(ids) The provided code snippet includes necessary dependencies for implementing the `_create_transformer_thread` function. Write a Python function `def _create_transformer_thread(transformer: IDTransformerCollection)` to solve the following problem: Create a thread for transformer. Here is the function: def _create_transformer_thread(transformer: IDTransformerCollection): """ Create a thread for transformer. """ def loop(transformer, input_queue, output_queue): while True: global_kjt = input_queue.get() if global_kjt is None: break cache_kjt = transformer.transform(global_kjt) output_queue.put(cache_kjt) input_queue = queue.Queue() output_queue = queue.Queue() thread = threading.Thread( target=loop, args=(transformer, input_queue, output_queue) ) thread.start() return thread, input_queue, output_queue
Create a thread for transformer.
8,835
from typing import List, Optional
import torch
import torch.distributed as dist
def gather_global_ids(global_ids: List[torch.Tensor], group):
    """
    Gather each rank's concatenated global ids onto rank 0.

    Args:
        global_ids: list of id tensors local to this rank; they are concatenated
            into one flat tensor before gathering.
        group: process group to gather over (callers create a gloo group).

    Return:
        On rank 0: (list of per-rank gathered id tensors, each trimmed back to
        its true length, list of per-rank element-count tensors).
        On other ranks: (None, list of per-rank element-count tensors).
    """
    world_size = dist.get_world_size()
    rank = dist.get_rank()
    concat_global_ids = torch.cat(global_ids)
    concat_numel = torch.tensor(concat_global_ids.numel(), dtype=torch.int64)
    # Exchange payload sizes first: dist.gather requires identically shaped
    # tensors on every rank, so all ranks pad up to the largest payload below.
    concat_numel_list = [torch.tensor(0, dtype=torch.int64) for _ in range(world_size)]
    dist.all_gather(concat_numel_list, concat_numel, group=group, async_op=False)
    max_numel = max(concat_numel_list)
    # Pad this rank's payload up to the common size. NOTE(review): resize_ may
    # grow the tensor; the tail beyond this rank's true numel is uninitialized
    # padding, which rank 0 trims off again below.
    concat_global_ids.resize_(max_numel)
    if rank == 0:
        concat_global_ids_list = [
            torch.empty_like(concat_global_ids) for _ in range(world_size)
        ]
        dist.gather(concat_global_ids, concat_global_ids_list, 0, group, async_op=False)
        # Strip the padding using the exact sizes exchanged above.
        return [
            concat_global_ids_list[i][: concat_numel_list[i]] for i in range(world_size)
        ], concat_numel_list
    else:
        # Non-destination ranks pass gather_list=None and receive nothing back.
        dist.gather(concat_global_ids, None, 0, group, async_op=False)
        return None, concat_numel_list
null
8,836
from typing import List, Optional
import torch
import torch.distributed as dist
def scatter_cache_ids(
    cache_ids_list: Optional[List[torch.Tensor]], concat_numel_list: List[int], group
):
    """
    Scatter transformed cache ids from rank 0 back to their source ranks.

    Inverse of `gather_global_ids`: on rank 0, `cache_ids_list` ends with one
    padded per-remote-rank tensor (appended by the caller); each rank receives
    its own flat chunk and unpacks it into its local tensors in place.

    Args:
        cache_ids_list: on rank 0, local cache-id tensors followed by the
            per-remote-rank tensors; on other ranks, the local tensors to fill.
        concat_numel_list: per-rank element counts from `gather_global_ids`,
            used to size the padded scatter buffers.
        group: process group to scatter over.
    """
    world_size = dist.get_world_size()
    rank = dist.get_rank()
    max_numel = max(concat_numel_list)
    # Receive buffer for this rank's chunk (uninitialized until the scatter).
    concat_cache_ids = torch.empty(max_numel, dtype=torch.int64)
    if rank == 0:
        # dist.scatter needs equally shaped chunks, so pad each remote-rank
        # tensor up to max_numel. NOTE(review): resize_ mutates the caller's
        # tensors in place -- confirm callers do not reuse them afterwards.
        # NOTE(review): chunk 0 (rank 0's own slot) is the uninitialized
        # receive buffer itself, and the unpack loop below walks ALL of
        # cache_ids_list with offsets into a max_numel-sized buffer -- this
        # looks inconsistent for rank 0's local tensors; verify against the
        # caller in IDTransformerCollection.transform.
        concat_cache_ids_list = [concat_cache_ids] + [
            cache_ids.resize_(max_numel)
            for cache_ids in cache_ids_list[-world_size + 1 :]
        ]
        assert len(concat_cache_ids_list) == world_size
        dist.scatter(concat_cache_ids, concat_cache_ids_list, group=group)
    else:
        dist.scatter(concat_cache_ids, None, group=group)
    # Unpack the received flat chunk into this rank's local tensors.
    offset = 0
    for cache_ids in cache_ids_list:
        cache_ids[:] = concat_cache_ids[offset : offset + cache_ids.numel()]
        offset += cache_ids.numel()
null
8,837
from typing import List, Optional
import torch
import torch.distributed as dist
def broadcast_transform_result(
    success: bool, ids_to_fetch: Optional[torch.Tensor], group
):
    """
    Broadcast rank 0's transform result to every rank in `group`.

    Args:
        success: whether the transform succeeded (meaningful on rank 0 only).
        ids_to_fetch: on rank 0, tensor of (global id, cache id) pairs of shape
            (n, 2) and dtype int64; ignored (may be None) on other ranks.
        group: process group to broadcast over (callers create a gloo group).

    Return:
        Tuple[bool, torch.Tensor]: rank 0's success flag and ids_to_fetch,
        now available on every rank.
    """
    # Broadcast a small fixed-size header first so non-zero ranks can size
    # their receive buffer for the variable-length payload.
    if dist.get_rank() == 0:
        success_and_numel = torch.tensor(
            [1 if success else 0, ids_to_fetch.numel()], dtype=torch.int64
        )
    else:
        success_and_numel = torch.tensor([0, 0], dtype=torch.int64)
    dist.broadcast(success_and_numel, src=0, group=group)
    success, numel = success_and_numel.tolist()
    success = success != 0
    # Bug fix: only non-zero ranks allocate a fresh receive buffer. Previously
    # rank 0 also rebound `ids_to_fetch` to an uninitialized `torch.empty`
    # tensor right before broadcasting from src=0, so every rank (including
    # rank 0 itself) ended up with garbage instead of the real ids. This
    # mirrors the correct pattern used in IDTransformerCollection.save().
    if dist.get_rank() != 0:
        # Ids are (global id, cache id) pairs, hence the (numel // 2, 2) shape.
        ids_to_fetch = torch.empty((numel // 2, 2), dtype=torch.int64)
    if numel > 0:
        dist.broadcast(ids_to_fetch, src=0, group=group)
    return success, ids_to_fetch
null
8,838
from typing import List, Optional
import torch
import torch.distributed as dist
def broadcast_ids_to_evict(ids, group):
    """
    Broadcast rank 0's ids-to-evict tensor to every rank in `group`.

    Args:
        ids: on rank 0, tensor of (global id, cache id) pairs of shape (n, 2)
            and dtype int64; ignored (may be None) on other ranks.
        group: process group to broadcast over (callers create a gloo group).

    Return:
        torch.Tensor: rank 0's ids tensor, now available on every rank.
    """
    # Broadcast the payload size first so non-zero ranks can size their buffer.
    if dist.get_rank() == 0:
        numel = torch.tensor(ids.numel(), dtype=torch.int64)
    else:
        numel = torch.tensor(0, dtype=torch.int64)
    dist.broadcast(numel, src=0, group=group)
    numel = numel.item()
    # Bug fix: only non-zero ranks allocate a fresh receive buffer. Previously
    # rank 0 also rebound `ids` to an uninitialized `torch.empty` tensor right
    # before broadcasting from src=0, so every rank received garbage. This
    # mirrors the correct pattern used in IDTransformerCollection.save().
    if dist.get_rank() != 0:
        # Ids are (global id, cache id) pairs, hence the (numel // 2, 2) shape.
        ids = torch.empty((numel // 2, 2), dtype=torch.int64)
    if numel > 0:
        dist.broadcast(ids, src=0, group=group)
    return ids
null
8,839
import argparse
import itertools
import logging
import sys
from dataclasses import dataclass
from typing import cast, Iterator, List, Optional

import grpc
import torch
from torch.utils.data import DataLoader, IterableDataset
from torchrec.datasets.criteo import (
    CAT_FEATURE_COUNT,
    DEFAULT_CAT_NAMES,
    DEFAULT_INT_NAMES,
    INT_FEATURE_COUNT,
)
# _RandomRecBatch is torchrec-internal but is required by the local
# RandomRecDataset re-definition below.
from torchrec.datasets.random import RandomRecDataset, _RandomRecBatch
from torchrec.datasets.utils import Batch, Pipelineable
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor

from gen.torchrec.inference import predictor_pb2, predictor_pb2_grpc

# Re-built from the same Criteo feature counts; these intentionally shadow the
# names imported from torchrec.datasets.criteo above (dump keeps both).
DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)]
DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)]


class RandomRecDataset(IterableDataset[Batch]):
    """
    Random iterable dataset used to generate batches for recommender systems
    (RecSys). Currently produces unweighted sparse features only.
    TODO: Add weighted sparse features.

    Args:
        keys (List[str]): List of feature names for sparse features.
        batch_size (int): batch size.
        hash_size (Optional[int]): Max sparse id value. All sparse IDs will be
            taken modulo this value.
        hash_sizes (Optional[List[int]]): Max sparse id value per feature in
            keys. Each sparse ID will be taken modulo the corresponding value
            from this argument. Note, if this is used, hash_size will be
            ignored.
        ids_per_feature (int): Number of IDs per sparse feature.
        ids_per_features (List[int]): Number of IDs per sparse feature in each
            key. Note, if this is used, ids_per_feature will be ignored.
        num_dense (int): Number of dense features.
        manual_seed (int): Seed for deterministic behavior.
        num_batches (Optional[int]): Num batches to generate before raising
            StopIteration.
        num_generated_batches (int): Num batches to cache. If num_batches >
            num_generated_batches, then we will cycle to the first generated
            batch. If this value is negative, batches will be generated on
            the fly.
        min_ids_per_feature (int): Minimum number of IDs per features.

    Example::

        dataset = RandomRecDataset(
            keys=["feat1", "feat2"],
            batch_size=16,
            hash_size=100_000,
            ids_per_feature=1,
            num_dense=13,
        ),
        example = next(iter(dataset))
    """

    def __init__(
        self,
        keys: List[str],
        batch_size: int,
        hash_size: Optional[int] = 100,
        hash_sizes: Optional[List[int]] = None,
        ids_per_feature: Optional[int] = 2,
        ids_per_features: Optional[List[int]] = None,
        num_dense: int = 50,
        manual_seed: Optional[int] = None,
        num_batches: Optional[int] = None,
        num_generated_batches: int = 10,
        min_ids_per_feature: Optional[int] = None,
        min_ids_per_features: Optional[List[int]] = None,
    ) -> None:
        super().__init__()

        # Per-feature hash sizes: fall back to a single shared hash_size.
        if hash_sizes is None:
            hash_size = hash_size or 100
            hash_sizes = [hash_size] * len(keys)

        assert hash_sizes is not None
        assert len(hash_sizes) == len(
            keys
        ), "length of hash_sizes must be equal to the number of keys"

        # Per-feature id counts: fall back to a single shared ids_per_feature.
        if ids_per_features is None:
            ids_per_feature = ids_per_feature or 2
            ids_per_features = [ids_per_feature] * len(keys)

        assert ids_per_features is not None

        # Per-feature minimum id counts: default to the (fixed) id count so
        # each feature yields exactly ids_per_feature ids.
        if min_ids_per_features is None:
            min_ids_per_feature = (
                min_ids_per_feature
                if min_ids_per_feature is not None
                else ids_per_feature
            )
            assert min_ids_per_feature is not None
            min_ids_per_features = [min_ids_per_feature] * len(keys)

        assert len(ids_per_features) == len(
            keys
        ), "length of ids_per_features must be equal to the number of keys"

        # The generator itself is unbounded (num_batches=None); the iteration
        # limit is applied by islice in __iter__.
        self.batch_generator = _RandomRecBatch(
            keys=keys,
            batch_size=batch_size,
            hash_sizes=hash_sizes,
            ids_per_features=ids_per_features,
            num_dense=num_dense,
            manual_seed=manual_seed,
            num_batches=None,
            num_generated_batches=num_generated_batches,
            min_ids_per_features=min_ids_per_features,
        )
        # BUGFIX: was `num_batches if not None else sys.maxsize` — the
        # condition `not None` is always True, so None was stored verbatim.
        self.num_batches: int = cast(
            int, num_batches if num_batches is not None else sys.maxsize
        )

    def __iter__(self) -> Iterator[Batch]:
        # Bound the (cycling) generator to exactly num_batches batches.
        return itertools.islice(iter(self.batch_generator), self.num_batches)

    def __len__(self) -> int:
        return self.num_batches


# BUGFIX: the field-keyword constructor used in to()/pin_memory() requires a
# generated __init__, so the @dataclass decorator (present upstream) is added.
@dataclass
class Batch(Pipelineable):
    # Dense float features, sparse id features, and labels for one batch.
    dense_features: torch.Tensor
    sparse_features: KeyedJaggedTensor
    labels: torch.Tensor

    def to(self, device: torch.device, non_blocking: bool = False) -> "Batch":
        """Return a copy of this batch with every tensor moved to `device`."""
        return Batch(
            dense_features=self.dense_features.to(
                device=device, non_blocking=non_blocking
            ),
            sparse_features=self.sparse_features.to(
                device=device, non_blocking=non_blocking
            ),
            labels=self.labels.to(device=device, non_blocking=non_blocking),
        )

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        """Mark all tensors as used by `stream` (CUDA stream safety)."""
        self.dense_features.record_stream(stream)
        self.sparse_features.record_stream(stream)
        self.labels.record_stream(stream)

    def pin_memory(self) -> "Batch":
        """Return a copy of this batch with all tensors in pinned host memory."""
        return Batch(
            dense_features=self.dense_features.pin_memory(),
            sparse_features=self.sparse_features.pin_memory(),
            labels=self.labels.pin_memory(),
        )


def create_training_batch(args: argparse.Namespace) -> Batch:
    """Build one random Criteo-shaped training batch.

    Args:
        args: CLI namespace providing `batch_size` and
            `num_embedding_features` (used as the sparse-id hash size).

    Returns:
        The first batch produced by a DataLoader over RandomRecDataset.
    """
    return next(
        iter(
            DataLoader(
                RandomRecDataset(
                    keys=DEFAULT_CAT_NAMES,
                    batch_size=args.batch_size,
                    hash_size=args.num_embedding_features,
                    ids_per_feature=1,
                    num_dense=len(DEFAULT_INT_NAMES),
                ),
                batch_sampler=None,
                pin_memory=False,
                num_workers=0,
            )
        )
    )
null
8,840
import argparse
import logging
import sys

import grpc
import torch
from torch.utils.data import DataLoader
from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES
from torchrec.datasets.random import RandomRecDataset
# Pipelineable is needed as the base of the interface stub below.
from torchrec.datasets.utils import Batch, Pipelineable
from gen.torchrec.inference import predictor_pb2, predictor_pb2_grpc


class Batch(Pipelineable):
    # NOTE(review): interface-only stub (shadows the imported Batch). The
    # original dump had bodiless defs, which is a SyntaxError; `...` bodies
    # were added so the module parses. The real implementations live in
    # torchrec.datasets.utils.Batch — confirm whether this stub should exist
    # at all or the import alone suffices.
    def to(self, device: torch.device, non_blocking: bool = False) -> "Batch":
        ...

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        ...

    def pin_memory(self) -> "Batch":
        ...


def create_request(
    batch: Batch, args: argparse.Namespace
) -> predictor_pb2.PredictionRequest:
    """Serialize a training batch into a gRPC PredictionRequest.

    Dense values and sparse values/lengths are shipped as raw little-endian
    byte blobs (numpy `tobytes`); unused feature groups are sent empty.

    Args:
        batch: batch holding `dense_features` (tensor) and `sparse_features`
            (jagged tensor exposing `values()` / `lengths()`).
        args: CLI namespace providing `num_float_features`,
            `num_id_list_features`, and `batch_size`.

    Returns:
        A fully-populated PredictionRequest protobuf.
    """

    def to_bytes(tensor: torch.Tensor) -> bytes:
        # Move to host memory before extracting the raw buffer.
        return tensor.cpu().numpy().tobytes()

    float_features = predictor_pb2.FloatFeatures(
        num_features=args.num_float_features,
        values=to_bytes(batch.dense_features),
    )
    id_list_features = predictor_pb2.SparseFeatures(
        num_features=args.num_id_list_features,
        values=to_bytes(batch.sparse_features.values()),
        lengths=to_bytes(batch.sparse_features.lengths()),
    )
    # Feature groups this client does not populate are sent with zero counts.
    id_score_list_features = predictor_pb2.SparseFeatures(num_features=0)
    embedding_features = predictor_pb2.FloatFeatures(num_features=0)
    unary_features = predictor_pb2.SparseFeatures(num_features=0)

    return predictor_pb2.PredictionRequest(
        batch_size=args.batch_size,
        float_features=float_features,
        id_list_features=id_list_features,
        id_score_list_features=id_score_list_features,
        embedding_features=embedding_features,
        unary_features=unary_features,
    )
null
8,841
import argparse
import sys
from typing import List

from dlrm_predict import DLRMModelConfig, DLRMPredictFactory
from torch.package import PackageExporter
# BUGFIX: INT_FEATURE_COUNT / CAT_FEATURE_COUNT were referenced below but
# never defined or imported, raising NameError at import time.
from torchrec.datasets.criteo import (
    CAT_FEATURE_COUNT,
    DEFAULT_CAT_NAMES,
    DEFAULT_INT_NAMES,
    INT_FEATURE_COUNT,
)
from torchrec.inference.model_packager import PredictFactoryPackager

# Re-built from the same Criteo feature counts; these intentionally shadow the
# names imported above.
DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)]
DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)]


def parse_args(argv: List[str]) -> argparse.Namespace:
    """Parse command-line flags for the torchrec DLRM model packager.

    Args:
        argv: argument list (e.g. ``sys.argv[1:]``).

    Returns:
        Parsed namespace with embedding-table sizes, arch layer sizes,
        feature names, and the output package path.
    """
    parser = argparse.ArgumentParser(description="torchrec dlrm model packager")
    parser.add_argument(
        "--num_embeddings",
        type=int,
        default=100_000,
        help="max_ind_size. The number of embeddings in each embedding table. Defaults"
        " to 100_000 if num_embeddings_per_feature is not supplied.",
    )
    parser.add_argument(
        "--num_embeddings_per_feature",
        type=str,
        default="45833188,36746,17245,7413,20243,3,7114,1441,62,29275261,1572176,345138,"
        "10,2209,11267,128,4,974,14,48937457,11316796,40094537,452104,12606,104,35",
        help="Comma separated max_ind_size per sparse feature. The number of embeddings"
        " in each embedding table. 26 values are expected for the Criteo dataset.",
    )
    parser.add_argument(
        "--sparse_feature_names",
        type=str,
        default=",".join(DEFAULT_CAT_NAMES),
        help="Comma separated names of the sparse features.",
    )
    parser.add_argument(
        "--dense_arch_layer_sizes",
        type=str,
        default="512,256,64",
        help="Comma separated layer sizes for dense arch.",
    )
    parser.add_argument(
        "--over_arch_layer_sizes",
        type=str,
        default="512,512,256,1",
        help="Comma separated layer sizes for over arch.",
    )
    parser.add_argument(
        "--embedding_dim",
        type=int,
        default=64,
        help="Size of each embedding.",
    )
    parser.add_argument(
        "--num_dense_features",
        type=int,
        default=len(DEFAULT_INT_NAMES),
        help="Number of dense features.",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        help="Output path of model package.",
    )
    return parser.parse_args(argv)
null
8,842
import os from typing import List, Optional import torch from torch import distributed as dist from torch.distributed.elastic.multiprocessing.errors import record from torch.distributed.optim import ( _apply_optimizer_in_backward as apply_optimizer_in_backward, ) from torch.utils.data import IterableDataset from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES from torchrec.datasets.random import RandomRecDataset from torchrec.distributed import TrainPipelineSparseDist from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder from torchrec.distributed.fbgemm_qcomm_codec import ( CommType, get_qcomm_codecs_registry, QCommsConfig, ) from torchrec.distributed.model_parallel import DistributedModelParallel from torchrec.models.dlrm import DLRM, DLRMTrain from torchrec.modules.embedding_configs import EmbeddingBagConfig from torchrec.modules.embedding_modules import EmbeddingBagCollection from torchrec.optim.keyed import KeyedOptimizerWrapper from torchrec.optim.optimizers import in_backward_optimizer_filter from torchrec.optim.rowwise_adagrad import RowWiseAdagrad from tqdm import tqdm def _get_random_dataset( num_embeddings: int, batch_size: int = 32, ) -> IterableDataset: return RandomRecDataset( keys=DEFAULT_CAT_NAMES, batch_size=batch_size, hash_size=num_embeddings, ids_per_feature=1, num_dense=len(DEFAULT_INT_NAMES), ) DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)] DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)] class EmbeddingBagCollectionSharder(BaseEmbeddingSharder[EmbeddingBagCollection]): """ This implementation uses non-fused `EmbeddingBagCollection` """ def shard( self, module: EmbeddingBagCollection, params: Dict[str, ParameterSharding], env: ShardingEnv, device: Optional[torch.device] = None, ) -> ShardedEmbeddingBagCollection: return ShardedEmbeddingBagCollection( module=module, table_name_to_parameter_sharding=params, env=env, 
fused_params=self.fused_params, device=device, qcomm_codecs_registry=self.qcomm_codecs_registry, ) def shardable_parameters( self, module: EmbeddingBagCollection ) -> Dict[str, nn.Parameter]: return { name.split(".")[0]: param for name, param in module.embedding_bags.named_parameters() } def module_type(self) -> Type[EmbeddingBagCollection]: return EmbeddingBagCollection class CommType(Enum): FP32 = "fp32" FP16 = "fp16" BF16 = "bf16" FP8 = "fp8" INT8 = "int8" def __str__(self) -> str: return self.value class QCommsConfig: """ Quantization configs for the AllToAll and ReduceScatter communication modules used in sharding. """ # Quantization of comm modules in the forward pass forward_precision: CommType = CommType.FP32 # Quantization of comm modules in the backward pass backward_precision: CommType = CommType.FP32 # For supported precisions (currently FP16), scale the gradient of the decoder and # divide the gradient of the encoder by this value. In some cases this can provide additional numerical stability. 
forward_loss_scale: Optional[float] = None backward_loss_scale: Optional[float] = None fp8_quantize_dim: Optional[int] = None fp8_quantize_dim_bwd: Optional[int] = None fp8_bwd_uses_143: Optional[bool] = False def __post_init__(self) -> None: if ( self.forward_precision != CommType.FP8 and self.backward_precision != CommType.FP8 and ( self.fp8_quantize_dim is not None or self.fp8_quantize_dim_bwd is not None ) ): raise ValueError( f"fp8_quantize_dim is set to {self.fp8_quantize_dim} and fp8_quantize_dim_bwd is set to {self.fp8_quantize_dim_bwd} but no FP8 precision is found in forward or backward precisions" ) if ( self.backward_precision == CommType.FP8 and self.fp8_quantize_dim_bwd is None ): self.fp8_quantize_dim_bwd = self.fp8_quantize_dim logger.warning( f"No override of FP8 bwd row dim, using general FP8 row dim for backward: {self.fp8_quantize_dim_bwd} " ) def get_qcomm_codecs_registry( qcomms_config: QCommsConfig, comm_ops: Optional[List[CommOp]] = None, device: Optional[torch.device] = None, ) -> Optional[Dict[str, QuantizedCommCodecs]]: """ This method constructs QuantizedCommCodecs from a given QCommConfig. It assumes that you want to use the same QComm configs for all comm-types passed in. Some quantization schemes are not supported for some backends (such as BF16 for gloo/cpu, and FP8 for reduce scatter on nccl). This scheme will provide some fallback logic and print a warning. Args: qcomms_config (QCommsConfig): QCommsConfig to construct FBGEMMQuantizedCommCodecs from comm_ops (Optional[List[CommOp]]): List of CommOps to enter into the registry device (torch.device): Backend comms will run on. 
Example:: qcomm_codces_registry = get_qcomm_codecs_registry( qcomms_config=QCommsConfig(forward_precision=FP16, backward_precision=BF16), device=torch.device("cuda")) """ if ( qcomms_config.forward_precision == CommType.FP32 and qcomms_config.backward_precision == CommType.FP32 ): return None if device is None: device = torch.device("cuda") qcomm_codecs_registry = {} if comm_ops is None: comm_ops = [ CommOp.POOLED_EMBEDDINGS_ALL_TO_ALL, CommOp.POOLED_EMBEDDINGS_REDUCE_SCATTER, CommOp.SEQUENCE_EMBEDDINGS_ALL_TO_ALL, ] for comm_op in comm_ops: qcomm_config_copy = copy.deepcopy(qcomms_config) # TODO: On H100, FP8 types might be natively supported, in which case we should check for that arch type and not fallback. if comm_op == CommOp.POOLED_EMBEDDINGS_REDUCE_SCATTER: if qcomm_config_copy.forward_precision == CommType.FP8: logger.warning( "FP8 is not supported for reduce scatter's forward - falling back to FP16" ) qcomm_config_copy.forward_precision = CommType.FP16 if qcomm_config_copy.backward_precision == CommType.FP8: logger.warning( "FP8 is not supported for reduce scatter's backward - falling back to BF16" ) qcomm_config_copy.backward_precision = CommType.BF16 if device.type == "cpu": if qcomm_config_copy.forward_precision == CommType.BF16: logger.warning( "BF16 is not for forward_precision is not supported on GLOO - falling back to FP16." ) qcomm_config_copy.forward_precision = CommType.FP16 if qcomm_config_copy.backward_precision == CommType.BF16: logger.warning( "BF16 is not for backward_precision is not supported on GLOO - falling back to FP16." ) qcomm_config_copy.backward_precision = CommType.FP16 qcomm_codecs_registry[comm_op.name] = get_qcomm_codecs(qcomm_config_copy) return qcomm_codecs_registry class DistributedModelParallel(nn.Module, FusedOptimizerModule): """ Entry point to model parallelism. Args: module (nn.Module): module to wrap. env (Optional[ShardingEnv]): sharding environment that has the process group. 
device (Optional[torch.device]): compute device, defaults to cpu. plan (Optional[ShardingPlan]): plan to use when sharding, defaults to `EmbeddingShardingPlanner.collective_plan()`. sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available to shard with, defaults to `EmbeddingBagCollectionSharder()`. init_data_parallel (bool): data-parallel modules can be lazy, i.e. they delay parameter initialization until the first forward pass. Pass `True` to delay initialization of data parallel modules. Do first forward pass and then call DistributedModelParallel.init_data_parallel(). init_parameters (bool): initialize parameters for modules still on meta device. data_parallel_wrapper (Optional[DataParallelWrapper]): custom wrapper for data parallel modules. Example:: def init_weights(m): if isinstance(m, nn.Linear): m.weight.fill_(1.0) elif isinstance(m, EmbeddingBagCollection): for param in m.parameters(): init.kaiming_normal_(param) m = MyModel(device='meta') m = DistributedModelParallel(m) m.apply(init_weights) """ def __init__( self, module: nn.Module, env: Optional[ShardingEnv] = None, device: Optional[torch.device] = None, plan: Optional[ShardingPlan] = None, sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None, init_data_parallel: bool = True, init_parameters: bool = True, data_parallel_wrapper: Optional[DataParallelWrapper] = None, ) -> None: super().__init__() torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}") self.init_parameters = init_parameters self._ddp_wrapped: bool = False if env is None: pg = dist.GroupMember.WORLD assert pg is not None, "Process group is not initialized" env = ShardingEnv.from_process_group(pg) self._env: ShardingEnv = env if device is None: device = torch.device("cpu") self.device: torch.device = device if sharders is None: sharders = get_default_sharders() self._sharder_map: Dict[Type[nn.Module], ModuleSharder[nn.Module]] = { sharder.module_type: sharder for sharder in sharders 
} if data_parallel_wrapper is None: data_parallel_wrapper = DefaultDataParallelWrapper() self._data_parallel_wrapper: DataParallelWrapper = data_parallel_wrapper if plan is None: planner = EmbeddingShardingPlanner( topology=Topology( local_world_size=get_local_size(self._env.world_size), world_size=self._env.world_size, compute_device=self.device.type, ) ) pg = self._env.process_group if pg is not None: plan = planner.collective_plan(module, sharders, pg) else: plan = planner.plan(module, sharders) self._plan: ShardingPlan = plan self._dmp_wrapped_module: nn.Module = self._init_dmp(module) self._optim: CombinedOptimizer = self._init_optim(self._dmp_wrapped_module) if init_parameters: self._init_parameters(self.module) if init_data_parallel: self.init_data_parallel() def module(self) -> nn.Module: """ Property to directly access sharded module, which will not be wrapped in DDP, FSDP, DMP, or any other parallelism wrappers. """ return get_unwrapped_module(self) def module(self, value: nn.Module) -> None: if isinstance(self.module, DistributedDataParallel) or isinstance( self.module, FullyShardedDataParallel ): raise RuntimeError( "module can't be set after calling init_data_parallel(...)" ) else: self._dmp_wrapped_module = value # pyre-ignore [2, 3] def forward(self, *args, **kwargs) -> Any: return self._dmp_wrapped_module(*args, **kwargs) def init_data_parallel(self) -> None: """ See init_data_parallel c-tor argument for usage. It's safe to call this method multiple times. """ if not self._ddp_wrapped: # Allocate any 'meta' tensors if self.init_parameters: self._init_parameters(self._dmp_wrapped_module) self._data_parallel_wrapper.wrap(self, self._env, self.device) self._ddp_wrapped = True def copy( self, device: torch.device, ) -> "DistributedModelParallel": """ Recursively copy submodules to new device by calling per-module customized copy process, since some modules needs to use the original references (like `ShardedModule` for inference). 
""" assert isinstance(device, torch.device) # dmp code deep copy with sharded_model_copy(device=None): copy_dmp = copy.deepcopy(self) # tensor resident module deep copy copy_dmp_wrapped_module = copy_to_device( self._dmp_wrapped_module, self.device, device ) copy_dmp._dmp_wrapped_module = copy_dmp_wrapped_module return copy_dmp def _init_dmp(self, module: nn.Module) -> nn.Module: return self._shard_modules_impl(module) def _init_optim(self, module: nn.Module) -> CombinedOptimizer: # pyre-ignore [6] return CombinedOptimizer(self._fused_optim_impl(module, [])) def _fused_optim_impl( self, module: nn.Module, fused_optims: List[Tuple[str, KeyedOptimizer]], path: str = "", ) -> List[Tuple[str, KeyedOptimizer]]: if isinstance(module, FusedOptimizerModule): fused_optims.append((path, module.fused_optimizer)) return fused_optims for name, child in module.named_children(): self._fused_optim_impl( child, fused_optims, path + "." + name if path else name, ) return fused_optims def _shard_modules_impl( self, module: nn.Module, path: str = "", ) -> nn.Module: # pre-sharded module if isinstance(module, ShardedModule): return module # shardable module module_sharding_plan = self._plan.get_plan_for_module(path) if module_sharding_plan: sharder_key = type(module) module = self._sharder_map[sharder_key].shard( module, module_sharding_plan, self._env, self.device, ) return module for name, child in module.named_children(): child = self._shard_modules_impl( child, path + "." + name if path else name, ) setattr(module, name, child) return module def _init_parameters(self, module: nn.Module) -> None: def init_parameters(module: nn.Module) -> None: # Allocate parameters and buffers if over 'meta' device. 
has_meta_param = False for name, param in module._parameters.items(): if isinstance(param, torch.Tensor) and param.device.type == "meta": module._parameters[name] = nn.Parameter( torch.empty_like(param, device=self.device), requires_grad=param.requires_grad, ) has_meta_param = True for name, buffer in module._buffers.items(): if isinstance(buffer, torch.Tensor) and buffer.device.type == "meta": module._buffers[name] = torch.zeros_like(buffer, device=self.device) # Init parameters if at least one parameter is over 'meta' device. if has_meta_param and hasattr(module, "reset_parameters"): module.reset_parameters() module.apply(init_parameters) def sparse_grad_parameter_names( self, destination: Optional[List[str]] = None, prefix: str = "" ) -> List[str]: destination = [] if destination is None else destination return self._sparse_grad_parameter_names(self.module, destination, prefix) def _sparse_grad_parameter_names( self, module: nn.Module, destination: List[str], prefix: str = "" ) -> List[str]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): pass elif isinstance(module, nn.Embedding): if module.sparse: destination.append(append_prefix(prefix, "weight")) elif isinstance(module, nn.EmbeddingBag): if module.sparse: destination.append(append_prefix(prefix, "weight")) else: for name, child in module.named_children(): self._sparse_grad_parameter_names( child, destination, append_prefix(prefix, name) ) return destination # pyre-ignore [14] def state_dict( self, destination: Optional[Dict[str, Any]] = None, prefix: str = "", keep_vars: bool = False, ) -> Dict[str, Any]: state_dict = get_module(self).state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( state_dict, prefix + _DDP_STATE_DICT_PREFIX ) add_prefix_to_state_dict(state_dict, prefix) return state_dict # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module` # inconsistently. 
def load_state_dict( self, state_dict: "OrderedDict[str, torch.Tensor]", prefix: str = "", strict: bool = True, ) -> _IncompatibleKeys: return self._load_state_dict(self, state_dict, prefix, strict) def _load_state_dict( self, module: nn.Module, state_dict: "OrderedDict[str, torch.Tensor]", prefix: str = "", strict: bool = True, ) -> _IncompatibleKeys: missing_keys = [] unexpected_keys = [] module = get_module(module) if isinstance(module, DistributedDataParallel): torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( state_dict, prefix ) add_prefix_to_state_dict(state_dict, prefix + _DDP_STATE_DICT_PREFIX) if isinstance(module, ShardedModule): return module.load_state_dict(state_dict, strict=strict) else: module._load_from_state_dict( state_dict, prefix, {}, strict, missing_keys, unexpected_keys, [] ) for name, child in module.named_children(): m_keys, u_keys = self._load_state_dict( child, filter_state_dict(state_dict, prefix + name), "", strict, ) missing_keys.extend(m_keys) unexpected_keys.extend(u_keys) return _IncompatibleKeys( missing_keys=missing_keys, unexpected_keys=unexpected_keys ) def _named_parameters( self, module: nn.Module, prefix: str = "", recurse: bool = True, strip_ddp: bool = True, ) -> Iterator[Tuple[str, torch.nn.Parameter]]: if strip_ddp: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.named_parameters(prefix, recurse) else: yield from module.named_parameters(prefix, recurse=False) for name, child in module.named_children(): yield from self._named_parameters( child, append_prefix(prefix, name), recurse, strip_ddp, ) def named_parameters( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True, ) -> Iterator[Tuple[str, torch.nn.Parameter]]: gen = self._named_parameters( self.module, prefix, recurse, ) memo = set() for key, param in gen: if param in memo: continue if remove_duplicate: memo.add(param) yield key, param def bare_named_parameters( self, prefix: str = "", 
recurse: bool = True, ) -> Iterator[Tuple[str, torch.nn.Parameter]]: gen = self._named_parameters( self.module, prefix, recurse, ) memo = set() for key, param in gen: if param in memo: continue memo.add(param) yield key, param def _sharded_parameter_names(module: nn.Module, prefix: str = "") -> Iterator[str]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.sharded_parameter_names(prefix) else: for name, child in module.named_children(): yield from DistributedModelParallel._sharded_parameter_names( child, append_prefix(prefix, name) ) def _named_buffers( self, module: nn.Module, prefix: str = "", recurse: bool = True ) -> Iterator[Tuple[str, torch.Tensor]]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.named_buffers(prefix, recurse) else: yield from module.named_buffers(prefix, recurse=False) for name, child in module.named_children(): yield from self._named_buffers( child, append_prefix(prefix, name), recurse ) def named_buffers( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True ) -> Iterator[Tuple[str, torch.Tensor]]: gen = self._named_buffers(self.module, prefix, recurse) memo = set() for key, param in gen: if param in memo: continue if remove_duplicate: memo.add(param) yield key, param def fused_optimizer(self) -> KeyedOptimizer: return self._optim def plan(self) -> ShardingPlan: return self._plan def _reset_parameters(module: nn.Module) -> None: for _, m in module.named_modules(): if hasattr(m, "reset_parameters"): m.reset_parameters() class DLRM(nn.Module): """ Recsys model from "Deep Learning Recommendation Model for Personalization and Recommendation Systems" (https://arxiv.org/abs/1906.00091). Processes sparse features by learning pooled embeddings for each feature. Learns the relationship between dense features and sparse features by projecting dense features into the same embedding space. 
Also, learns the pairwise relationships between sparse features. The module assumes all sparse features have the same embedding dimension (i.e. each EmbeddingBagConfig uses the same embedding_dim). The following notation is used throughout the documentation for the models: * F: number of sparse features * D: embedding_dimension of sparse features * B: batch size * num_features: number of dense features Args: embedding_bag_collection (EmbeddingBagCollection): collection of embedding bags used to define `SparseArch`. dense_in_features (int): the dimensionality of the dense input features. dense_arch_layer_sizes (List[int]): the layer sizes for the `DenseArch`. over_arch_layer_sizes (List[int]): the layer sizes for the `OverArch`. The output dimension of the `InteractionArch` should not be manually specified here. dense_device (Optional[torch.device]): default compute device. Example:: B = 2 D = 8 eb1_config = EmbeddingBagConfig( name="t1", embedding_dim=D, num_embeddings=100, feature_names=["f1"] ) eb2_config = EmbeddingBagConfig( name="t2", embedding_dim=D, num_embeddings=100, feature_names=["f2"], ) ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config]) model = DLRM( embedding_bag_collection=ebc, dense_in_features=100, dense_arch_layer_sizes=[20, D], over_arch_layer_sizes=[5, 1], ) features = torch.rand((B, 100)) # 0 1 # 0 [1,2] [4,5] # 1 [4,3] [2,9] # ^ # feature sparse_features = KeyedJaggedTensor.from_offsets_sync( keys=["f1", "f2"], values=torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]), offsets=torch.tensor([0, 2, 4, 6, 8]), ) logits = model( dense_features=features, sparse_features=sparse_features, ) """ def __init__( self, embedding_bag_collection: EmbeddingBagCollection, dense_in_features: int, dense_arch_layer_sizes: List[int], over_arch_layer_sizes: List[int], dense_device: Optional[torch.device] = None, ) -> None: super().__init__() assert ( len(embedding_bag_collection.embedding_bag_configs()) > 0 ), "At least one embedding bag is required" for i in 
range(1, len(embedding_bag_collection.embedding_bag_configs())): conf_prev = embedding_bag_collection.embedding_bag_configs()[i - 1] conf = embedding_bag_collection.embedding_bag_configs()[i] assert ( conf_prev.embedding_dim == conf.embedding_dim ), "All EmbeddingBagConfigs must have the same dimension" embedding_dim: int = embedding_bag_collection.embedding_bag_configs()[ 0 ].embedding_dim if dense_arch_layer_sizes[-1] != embedding_dim: raise ValueError( f"embedding_bag_collection dimension ({embedding_dim}) and final dense " "arch layer size ({dense_arch_layer_sizes[-1]}) must match." ) self.sparse_arch: SparseArch = SparseArch(embedding_bag_collection) num_sparse_features: int = len(self.sparse_arch.sparse_feature_names) self.dense_arch = DenseArch( in_features=dense_in_features, layer_sizes=dense_arch_layer_sizes, device=dense_device, ) self.inter_arch = InteractionArch( num_sparse_features=num_sparse_features, ) over_in_features: int = ( embedding_dim + choose(num_sparse_features, 2) + num_sparse_features ) self.over_arch = OverArch( in_features=over_in_features, layer_sizes=over_arch_layer_sizes, device=dense_device, ) def forward( self, dense_features: torch.Tensor, sparse_features: KeyedJaggedTensor, ) -> torch.Tensor: """ Args: dense_features (torch.Tensor): the dense features. sparse_features (KeyedJaggedTensor): the sparse features. Returns: torch.Tensor: logits. """ embedded_dense = self.dense_arch(dense_features) embedded_sparse = self.sparse_arch(sparse_features) concatenated_dense = self.inter_arch( dense_features=embedded_dense, sparse_features=embedded_sparse ) logits = self.over_arch(concatenated_dense) return logits class DLRMTrain(nn.Module): """ nn.Module to wrap DLRM model to use with train_pipeline. DLRM Recsys model from "Deep Learning Recommendation Model for Personalization and Recommendation Systems" (https://arxiv.org/abs/1906.00091). Processes sparse features by learning pooled embeddings for each feature. 
Learns the relationship between dense features and sparse features by projecting dense features into the same embedding space. Also, learns the pairwise relationships between sparse features. The module assumes all sparse features have the same embedding dimension (i.e, each EmbeddingBagConfig uses the same embedding_dim) Args: dlrm_module: DLRM module (DLRM or DLRM_Projection or DLRM_DCN) to be used in training Example:: ebc = EmbeddingBagCollection(config=ebc_config) dlrm_module = DLRM( embedding_bag_collection=ebc, dense_in_features=100, dense_arch_layer_sizes=[20], over_arch_layer_sizes=[5, 1], ) dlrm_model = DLRMTrain(dlrm_module) """ def __init__( self, dlrm_module: DLRM, ) -> None: super().__init__() self.model = dlrm_module self.loss_fn: nn.Module = nn.BCEWithLogitsLoss() def forward( self, batch: Batch ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: """ Args: batch: batch used with criteo and random data from torchrec.datasets Returns: Tuple[loss, Tuple[loss, logits, labels]] """ logits = self.model(batch.dense_features, batch.sparse_features) logits = logits.squeeze(-1) loss = self.loss_fn(logits, batch.labels.float()) return loss, (loss.detach(), logits.detach(), batch.labels.detach()) class EmbeddingBagConfig(BaseEmbeddingConfig): pooling: PoolingType = PoolingType.SUM class EmbeddingBagCollection(EmbeddingBagCollectionInterface): """ EmbeddingBagCollection represents a collection of pooled embeddings (`EmbeddingBags`). It processes sparse data in the form of `KeyedJaggedTensor` with values of the form [F X B X L] where: * F: features (keys) * B: batch size * L: length of sparse features (jagged) and outputs a `KeyedTensor` with values of the form [B * (F * D)] where: * F: features (keys) * D: each feature's (key's) embedding dimension * B: batch size Args: tables (List[EmbeddingBagConfig]): list of embedding tables. is_weighted (bool): whether input `KeyedJaggedTensor` is weighted. 
device (Optional[torch.device]): default compute device. Example:: table_0 = EmbeddingBagConfig( name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"] ) table_1 = EmbeddingBagConfig( name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"] ) ebc = EmbeddingBagCollection(tables=[table_0, table_1]) # 0 1 2 <-- batch # "f1" [0,1] None [2] # "f2" [3] [4] [5,6,7] # ^ # feature features = KeyedJaggedTensor( keys=["f1", "f2"], values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]), offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]), ) pooled_embeddings = ebc(features) print(pooled_embeddings.values()) tensor([[-0.8899, -0.1342, -1.9060, -0.0905, -0.2814, -0.9369, -0.7783], [ 0.0000, 0.0000, 0.0000, 0.1598, 0.0695, 1.3265, -0.1011], [-0.4256, -1.1846, -2.1648, -1.0893, 0.3590, -1.9784, -0.7681]], grad_fn=<CatBackward0>) print(pooled_embeddings.keys()) ['f1', 'f2'] print(pooled_embeddings.offset_per_key()) tensor([0, 3, 7]) """ def __init__( self, tables: List[EmbeddingBagConfig], is_weighted: bool = False, device: Optional[torch.device] = None, ) -> None: super().__init__() torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}") self._is_weighted = is_weighted self.embedding_bags: nn.ModuleDict = nn.ModuleDict() self._embedding_bag_configs = tables self._lengths_per_embedding: List[int] = [] self._dtypes: List[int] = [] table_names = set() for embedding_config in tables: if embedding_config.name in table_names: raise ValueError(f"Duplicate table name {embedding_config.name}") table_names.add(embedding_config.name) dtype = ( torch.float32 if embedding_config.data_type == DataType.FP32 else torch.float16 ) self.embedding_bags[embedding_config.name] = nn.EmbeddingBag( num_embeddings=embedding_config.num_embeddings, embedding_dim=embedding_config.embedding_dim, mode=pooling_type_to_str(embedding_config.pooling), device=device, include_last_offset=True, dtype=dtype, ) if device is None: device = 
self.embedding_bags[embedding_config.name].weight.device self._dtypes.append(embedding_config.data_type.value) if not embedding_config.feature_names: embedding_config.feature_names = [embedding_config.name] self._lengths_per_embedding.extend( len(embedding_config.feature_names) * [embedding_config.embedding_dim] ) self._device: torch.device = device or torch.device("cpu") self._embedding_names: List[str] = [ embedding for embeddings in get_embedding_names_by_table(tables) for embedding in embeddings ] self._feature_names: List[List[str]] = [table.feature_names for table in tables] self.reset_parameters() def forward(self, features: KeyedJaggedTensor) -> KeyedTensor: """ Args: features (KeyedJaggedTensor): KJT of form [F X B X L]. Returns: KeyedTensor """ flat_feature_names: List[str] = [] for names in self._feature_names: flat_feature_names.extend(names) inverse_indices = reorder_inverse_indices( inverse_indices=features.inverse_indices_or_none(), feature_names=flat_feature_names, ) pooled_embeddings: List[torch.Tensor] = [] feature_dict = features.to_dict() for i, embedding_bag in enumerate(self.embedding_bags.values()): for feature_name in self._feature_names[i]: f = feature_dict[feature_name] per_sample_weights: Optional[torch.Tensor] = None if self._is_weighted: per_sample_weights = ( f.weights().half() if self._dtypes[i] == DataType.FP16.value else f.weights() ) res = embedding_bag( input=f.values(), offsets=f.offsets(), per_sample_weights=( per_sample_weights if self._is_weighted else None ), ).float() pooled_embeddings.append(res) return KeyedTensor( keys=self._embedding_names, values=process_pooled_embeddings( pooled_embeddings=pooled_embeddings, inverse_indices=inverse_indices, ), length_per_key=self._lengths_per_embedding, ) def is_weighted(self) -> bool: return self._is_weighted def embedding_bag_configs(self) -> List[EmbeddingBagConfig]: return self._embedding_bag_configs def device(self) -> torch.device: return self._device def reset_parameters(self) 
-> None: if (isinstance(self.device, torch.device) and self.device.type == "meta") or ( isinstance(self.device, str) and self.device == "meta" ): return # Initialize embedding bags weights with init_fn for table_config in self._embedding_bag_configs: assert table_config.init_fn is not None param = self.embedding_bags[f"{table_config.name}"].weight # pyre-ignore table_config.init_fn(param) class KeyedOptimizerWrapper(KeyedOptimizer): """ Takes a dict of parameters and exposes state_dict by parameter key. Convenience wrapper to take in optim_factory callable to create KeyedOptimizer """ def __init__( self, params: Mapping[str, Union[torch.Tensor, ShardedTensor]], optim_factory: OptimizerFactory, ) -> None: self._optimizer: optim.Optimizer = optim_factory(list(params.values())) super().__init__(params, self._optimizer.state, self._optimizer.param_groups) def zero_grad(self, set_to_none: bool = False) -> None: self._optimizer.zero_grad() # pyre-ignore [2] def step(self, closure: Any = None) -> None: self._optimizer.step(closure=closure) def in_backward_optimizer_filter( named_parameters: Iterator[Tuple[str, nn.Parameter]], include: bool = False ) -> Iterator[Tuple[str, nn.Parameter]]: """ Filters named_parameters for whether they are or or not params that use the in_backward_optimizer. Note: This only supports the in_backward_optimizer from PT-D's API. The torchrec's equivalent API is deprecated and is not supported. Args: named_parameters(Iterator[Tuple[str, nn.Parameter]]): named_parameters include(bool): If true, only yields params with in_backward_optimizer. If false, returns the outside set Defaults to include params that are not in_backward (False) """ for fqn, param in named_parameters: if hasattr(param, "_in_backward_optimizers") == include: yield fqn, param class RowWiseAdagrad(Optimizer): r"""Implements Row wise Adagrad algorithm. 
This is an extension of the Adagrad algorithm https://github.com/pytorch/pytorch/blob/master/torch/optim/adagrad.py, for use with EmbeddingBag parameters, where we want the adaptive learning rate to be the same within an embedding row. Since we only need to store state for an embedding row, rather than every single parameter, we can have drastic memory savings (factor of embedding_dim). Note that this implementation does not currently support sparse gradients. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-2) lr_decay (float, optional): learning rate decay (default: 0) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-10) maximize (bool, optional): maximize the params based on the objective, instead of minimizing (default: False) """ def __init__( self, params: Iterable[torch.nn.Parameter], lr: float = 1e-2, lr_decay: float = 0.0, weight_decay: float = 0.0, initial_accumulator_value: float = 0.0, eps: float = 1e-10, *, maximize: bool = False, # pyre-ignore **unused, ) -> None: if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= lr_decay: raise ValueError("Invalid lr_decay value: {}".format(lr_decay)) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) if not 0.0 <= initial_accumulator_value: raise ValueError( "Invalid initial_accumulator_value value: {}".format( initial_accumulator_value ) ) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) defaults = dict( lr=lr, lr_decay=lr_decay, eps=eps, weight_decay=weight_decay, initial_accumulator_value=initial_accumulator_value, maximize=maximize, ) super().__init__(params, defaults) for group in self.param_groups: for p in group["params"]: state = self.state[p] state["step"] = torch.tensor(0.0) 
init_value = ( complex(initial_accumulator_value, initial_accumulator_value) if torch.is_complex(p) else initial_accumulator_value ) state["sum"] = ( # pyre-fixme[28]: Unexpected keyword argument `axis`. torch.full_like(p, init_value, memory_format=torch.preserve_format) .mean(axis=1) .view(-1, 1) ) def __setstate__(self, state: Dict[str, Any]) -> None: super().__setstate__(state) for group in self.param_groups: group.setdefault("maximize", False) state_values = list(self.state.values()) step_is_tensor = (len(state_values) != 0) and torch.is_tensor( state_values[0]["step"] ) if not step_is_tensor: for s in state_values: s["step"] = torch.tensor(float(s["step"])) def share_memory(self) -> None: for group in self.param_groups: for p in group["params"]: state = self.state[p] state["sum"].share_memory_() # pyre-ignore def step(self, closure=None) -> torch.Tensor: """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] state_sums = [] state_steps = [] for p in group["params"]: if p.grad is not None: params_with_grad.append(p) grads.append(p.grad) state = self.state[p] state_sums.append(state["sum"]) state_steps.append(state["step"]) adagrad( params_with_grad, grads, state_sums, state_steps, lr=group["lr"], weight_decay=group["weight_decay"], lr_decay=group["lr_decay"], eps=group["eps"], maximize=group["maximize"], ) return loss The provided code snippet includes necessary dependencies for implementing the `train` function. 
Write a Python function `def train( num_embeddings: int = 1024**2, embedding_dim: int = 128, dense_arch_layer_sizes: Optional[List[int]] = None, over_arch_layer_sizes: Optional[List[int]] = None, learning_rate: float = 0.1, num_iterations: int = 1000, qcomm_forward_precision: Optional[CommType] = CommType.FP16, qcomm_backward_precision: Optional[CommType] = CommType.BF16, ) -> None` to solve the following problem: Constructs and trains a DLRM model (using random dummy data). Each script is run on each process (rank) in SPMD fashion. The embedding layers will be sharded across available ranks qcomm_forward_precision: Compression used in forwards pass. FP16 is the recommended usage. INT8 and FP8 are in development, but feel free to try them out. qcomm_backward_precision: Compression used in backwards pass. We recommend using BF16 to ensure training stability. The effects of quantized comms will be most apparent in large training jobs across multiple nodes where inter host communication is expensive. Here is the function: def train( num_embeddings: int = 1024**2, embedding_dim: int = 128, dense_arch_layer_sizes: Optional[List[int]] = None, over_arch_layer_sizes: Optional[List[int]] = None, learning_rate: float = 0.1, num_iterations: int = 1000, qcomm_forward_precision: Optional[CommType] = CommType.FP16, qcomm_backward_precision: Optional[CommType] = CommType.BF16, ) -> None: """ Constructs and trains a DLRM model (using random dummy data). Each script is run on each process (rank) in SPMD fashion. The embedding layers will be sharded across available ranks qcomm_forward_precision: Compression used in forwards pass. FP16 is the recommended usage. INT8 and FP8 are in development, but feel free to try them out. qcomm_backward_precision: Compression used in backwards pass. We recommend using BF16 to ensure training stability. The effects of quantized comms will be most apparent in large training jobs across multiple nodes where inter host communication is expensive. 
""" if dense_arch_layer_sizes is None: dense_arch_layer_sizes = [64, embedding_dim] if over_arch_layer_sizes is None: over_arch_layer_sizes = [64, 1] # Init process_group , device, rank, backend rank = int(os.environ["LOCAL_RANK"]) if torch.cuda.is_available(): device: torch.device = torch.device(f"cuda:{rank}") backend = "nccl" torch.cuda.set_device(device) else: device: torch.device = torch.device("cpu") backend = "gloo" dist.init_process_group(backend=backend) # Construct DLRM module eb_configs = [ EmbeddingBagConfig( name=f"t_{feature_name}", embedding_dim=embedding_dim, num_embeddings=num_embeddings, feature_names=[feature_name], ) for feature_idx, feature_name in enumerate(DEFAULT_CAT_NAMES) ] dlrm_model = DLRM( embedding_bag_collection=EmbeddingBagCollection( tables=eb_configs, device=torch.device("meta") ), dense_in_features=len(DEFAULT_INT_NAMES), dense_arch_layer_sizes=dense_arch_layer_sizes, over_arch_layer_sizes=over_arch_layer_sizes, dense_device=device, ) train_model = DLRMTrain(dlrm_model) apply_optimizer_in_backward( RowWiseAdagrad, train_model.model.sparse_arch.parameters(), {"lr": learning_rate}, ) qcomm_codecs_registry = ( get_qcomm_codecs_registry( qcomms_config=QCommsConfig( # pyre-ignore forward_precision=qcomm_forward_precision, # pyre-ignore backward_precision=qcomm_backward_precision, ) ) if backend == "nccl" else None ) sharder = EmbeddingBagCollectionSharder(qcomm_codecs_registry=qcomm_codecs_registry) model = DistributedModelParallel( module=train_model, device=device, # pyre-ignore sharders=[sharder], ) non_fused_optimizer = KeyedOptimizerWrapper( dict(in_backward_optimizer_filter(model.named_parameters())), lambda params: torch.optim.Adagrad(params, lr=learning_rate), ) # Overlap comm/compute/device transfer during training through train_pipeline train_pipeline = TrainPipelineSparseDist( model, non_fused_optimizer, device, ) # train model train_iterator = iter( _get_random_dataset( num_embeddings=num_embeddings, ) ) for _ in 
tqdm(range(int(num_iterations)), mininterval=5.0): train_pipeline.progress(train_iterator)
Constructs and trains a DLRM model (using random dummy data). Each script is run on each process (rank) in SPMD fashion. The embedding layers will be sharded across available ranks qcomm_forward_precision: Compression used in forwards pass. FP16 is the recommended usage. INT8 and FP8 are in development, but feel free to try them out. qcomm_backward_precision: Compression used in backwards pass. We recommend using BF16 to ensure training stability. The effects of quantized comms will be most apparent in large training jobs across multiple nodes where inter host communication is expensive.
8,843
import os import torch import torch.nn.functional as F from torch.distributed import all_reduce, get_rank, get_world_size, init_process_group The provided code snippet includes necessary dependencies for implementing the `compute_world_size` function. Write a Python function `def compute_world_size() -> int` to solve the following problem: Dummy script to compute world_size. Meant to test if can run Ray + Pytorch DDP Here is the function: def compute_world_size() -> int: "Dummy script to compute world_size. Meant to test if can run Ray + Pytorch DDP" rank = int(os.getenv("RANK")) # pyre-ignore[6] world_size = int(os.getenv("WORLD_SIZE")) # pyre-ignore[6] master_port = int(os.getenv("MASTER_PORT")) # pyre-ignore[6] master_addr = os.getenv("MASTER_ADDR") backend = "gloo" print(f"initializing `{backend}` process group") init_process_group( # pyre-ignore[16] backend=backend, init_method=f"tcp://{master_addr}:{master_port}", rank=rank, world_size=world_size, ) print("successfully initialized process group") rank = get_rank() # pyre-ignore[16] world_size = get_world_size() # pyre-ignore[16] t = F.one_hot(torch.tensor(rank), num_classes=world_size) all_reduce(t) # pyre-ignore[16] computed_world_size = int(torch.sum(t).item()) print( f"rank: {rank}, actual world_size: {world_size}, computed world_size: {computed_world_size}" ) return computed_world_size
Dummy script to compute world_size. Meant to test if can run Ray + Pytorch DDP
8,844
import os from typing import cast, List, Optional import torch from fbgemm_gpu.split_embedding_configs import EmbOptimType as OptimType from torch import distributed as dist, nn from torch.utils.data import DataLoader from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES from torchrec.datasets.random import RandomRecDataset from torchrec.distributed import TrainPipelineSparseDist from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder from torchrec.distributed.model_parallel import DistributedModelParallel from torchrec.distributed.types import ModuleSharder from torchrec.models.dlrm import DLRM, DLRMTrain from torchrec.modules.embedding_configs import EmbeddingBagConfig from torchrec.modules.embedding_modules import EmbeddingBagCollection from torchrec.optim.keyed import KeyedOptimizerWrapper from torchrec.optim.optimizers import in_backward_optimizer_filter from tqdm import tqdm def _get_random_dataloader( num_embeddings: int, batch_size: int = 32, pin_memory: bool = False ) -> DataLoader: return DataLoader( RandomRecDataset( keys=DEFAULT_CAT_NAMES, batch_size=batch_size, hash_size=num_embeddings, ids_per_feature=1, num_dense=len(DEFAULT_INT_NAMES), ), batch_size=None, batch_sampler=None, pin_memory=pin_memory, num_workers=0, ) DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)] DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)] class EmbeddingBagCollectionSharder(BaseEmbeddingSharder[EmbeddingBagCollection]): """ This implementation uses non-fused `EmbeddingBagCollection` """ def shard( self, module: EmbeddingBagCollection, params: Dict[str, ParameterSharding], env: ShardingEnv, device: Optional[torch.device] = None, ) -> ShardedEmbeddingBagCollection: return ShardedEmbeddingBagCollection( module=module, table_name_to_parameter_sharding=params, env=env, fused_params=self.fused_params, device=device, qcomm_codecs_registry=self.qcomm_codecs_registry, ) def 
shardable_parameters( self, module: EmbeddingBagCollection ) -> Dict[str, nn.Parameter]: return { name.split(".")[0]: param for name, param in module.embedding_bags.named_parameters() } def module_type(self) -> Type[EmbeddingBagCollection]: return EmbeddingBagCollection class DistributedModelParallel(nn.Module, FusedOptimizerModule): """ Entry point to model parallelism. Args: module (nn.Module): module to wrap. env (Optional[ShardingEnv]): sharding environment that has the process group. device (Optional[torch.device]): compute device, defaults to cpu. plan (Optional[ShardingPlan]): plan to use when sharding, defaults to `EmbeddingShardingPlanner.collective_plan()`. sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available to shard with, defaults to `EmbeddingBagCollectionSharder()`. init_data_parallel (bool): data-parallel modules can be lazy, i.e. they delay parameter initialization until the first forward pass. Pass `True` to delay initialization of data parallel modules. Do first forward pass and then call DistributedModelParallel.init_data_parallel(). init_parameters (bool): initialize parameters for modules still on meta device. data_parallel_wrapper (Optional[DataParallelWrapper]): custom wrapper for data parallel modules. 
Example:: def init_weights(m): if isinstance(m, nn.Linear): m.weight.fill_(1.0) elif isinstance(m, EmbeddingBagCollection): for param in m.parameters(): init.kaiming_normal_(param) m = MyModel(device='meta') m = DistributedModelParallel(m) m.apply(init_weights) """ def __init__( self, module: nn.Module, env: Optional[ShardingEnv] = None, device: Optional[torch.device] = None, plan: Optional[ShardingPlan] = None, sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None, init_data_parallel: bool = True, init_parameters: bool = True, data_parallel_wrapper: Optional[DataParallelWrapper] = None, ) -> None: super().__init__() torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}") self.init_parameters = init_parameters self._ddp_wrapped: bool = False if env is None: pg = dist.GroupMember.WORLD assert pg is not None, "Process group is not initialized" env = ShardingEnv.from_process_group(pg) self._env: ShardingEnv = env if device is None: device = torch.device("cpu") self.device: torch.device = device if sharders is None: sharders = get_default_sharders() self._sharder_map: Dict[Type[nn.Module], ModuleSharder[nn.Module]] = { sharder.module_type: sharder for sharder in sharders } if data_parallel_wrapper is None: data_parallel_wrapper = DefaultDataParallelWrapper() self._data_parallel_wrapper: DataParallelWrapper = data_parallel_wrapper if plan is None: planner = EmbeddingShardingPlanner( topology=Topology( local_world_size=get_local_size(self._env.world_size), world_size=self._env.world_size, compute_device=self.device.type, ) ) pg = self._env.process_group if pg is not None: plan = planner.collective_plan(module, sharders, pg) else: plan = planner.plan(module, sharders) self._plan: ShardingPlan = plan self._dmp_wrapped_module: nn.Module = self._init_dmp(module) self._optim: CombinedOptimizer = self._init_optim(self._dmp_wrapped_module) if init_parameters: self._init_parameters(self.module) if init_data_parallel: self.init_data_parallel() 
def module(self) -> nn.Module: """ Property to directly access sharded module, which will not be wrapped in DDP, FSDP, DMP, or any other parallelism wrappers. """ return get_unwrapped_module(self) def module(self, value: nn.Module) -> None: if isinstance(self.module, DistributedDataParallel) or isinstance( self.module, FullyShardedDataParallel ): raise RuntimeError( "module can't be set after calling init_data_parallel(...)" ) else: self._dmp_wrapped_module = value # pyre-ignore [2, 3] def forward(self, *args, **kwargs) -> Any: return self._dmp_wrapped_module(*args, **kwargs) def init_data_parallel(self) -> None: """ See init_data_parallel c-tor argument for usage. It's safe to call this method multiple times. """ if not self._ddp_wrapped: # Allocate any 'meta' tensors if self.init_parameters: self._init_parameters(self._dmp_wrapped_module) self._data_parallel_wrapper.wrap(self, self._env, self.device) self._ddp_wrapped = True def copy( self, device: torch.device, ) -> "DistributedModelParallel": """ Recursively copy submodules to new device by calling per-module customized copy process, since some modules needs to use the original references (like `ShardedModule` for inference). 
""" assert isinstance(device, torch.device) # dmp code deep copy with sharded_model_copy(device=None): copy_dmp = copy.deepcopy(self) # tensor resident module deep copy copy_dmp_wrapped_module = copy_to_device( self._dmp_wrapped_module, self.device, device ) copy_dmp._dmp_wrapped_module = copy_dmp_wrapped_module return copy_dmp def _init_dmp(self, module: nn.Module) -> nn.Module: return self._shard_modules_impl(module) def _init_optim(self, module: nn.Module) -> CombinedOptimizer: # pyre-ignore [6] return CombinedOptimizer(self._fused_optim_impl(module, [])) def _fused_optim_impl( self, module: nn.Module, fused_optims: List[Tuple[str, KeyedOptimizer]], path: str = "", ) -> List[Tuple[str, KeyedOptimizer]]: if isinstance(module, FusedOptimizerModule): fused_optims.append((path, module.fused_optimizer)) return fused_optims for name, child in module.named_children(): self._fused_optim_impl( child, fused_optims, path + "." + name if path else name, ) return fused_optims def _shard_modules_impl( self, module: nn.Module, path: str = "", ) -> nn.Module: # pre-sharded module if isinstance(module, ShardedModule): return module # shardable module module_sharding_plan = self._plan.get_plan_for_module(path) if module_sharding_plan: sharder_key = type(module) module = self._sharder_map[sharder_key].shard( module, module_sharding_plan, self._env, self.device, ) return module for name, child in module.named_children(): child = self._shard_modules_impl( child, path + "." + name if path else name, ) setattr(module, name, child) return module def _init_parameters(self, module: nn.Module) -> None: def init_parameters(module: nn.Module) -> None: # Allocate parameters and buffers if over 'meta' device. 
has_meta_param = False for name, param in module._parameters.items(): if isinstance(param, torch.Tensor) and param.device.type == "meta": module._parameters[name] = nn.Parameter( torch.empty_like(param, device=self.device), requires_grad=param.requires_grad, ) has_meta_param = True for name, buffer in module._buffers.items(): if isinstance(buffer, torch.Tensor) and buffer.device.type == "meta": module._buffers[name] = torch.zeros_like(buffer, device=self.device) # Init parameters if at least one parameter is over 'meta' device. if has_meta_param and hasattr(module, "reset_parameters"): module.reset_parameters() module.apply(init_parameters) def sparse_grad_parameter_names( self, destination: Optional[List[str]] = None, prefix: str = "" ) -> List[str]: destination = [] if destination is None else destination return self._sparse_grad_parameter_names(self.module, destination, prefix) def _sparse_grad_parameter_names( self, module: nn.Module, destination: List[str], prefix: str = "" ) -> List[str]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): pass elif isinstance(module, nn.Embedding): if module.sparse: destination.append(append_prefix(prefix, "weight")) elif isinstance(module, nn.EmbeddingBag): if module.sparse: destination.append(append_prefix(prefix, "weight")) else: for name, child in module.named_children(): self._sparse_grad_parameter_names( child, destination, append_prefix(prefix, name) ) return destination # pyre-ignore [14] def state_dict( self, destination: Optional[Dict[str, Any]] = None, prefix: str = "", keep_vars: bool = False, ) -> Dict[str, Any]: state_dict = get_module(self).state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( state_dict, prefix + _DDP_STATE_DICT_PREFIX ) add_prefix_to_state_dict(state_dict, prefix) return state_dict # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module` # inconsistently. 
def load_state_dict( self, state_dict: "OrderedDict[str, torch.Tensor]", prefix: str = "", strict: bool = True, ) -> _IncompatibleKeys: return self._load_state_dict(self, state_dict, prefix, strict) def _load_state_dict( self, module: nn.Module, state_dict: "OrderedDict[str, torch.Tensor]", prefix: str = "", strict: bool = True, ) -> _IncompatibleKeys: missing_keys = [] unexpected_keys = [] module = get_module(module) if isinstance(module, DistributedDataParallel): torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( state_dict, prefix ) add_prefix_to_state_dict(state_dict, prefix + _DDP_STATE_DICT_PREFIX) if isinstance(module, ShardedModule): return module.load_state_dict(state_dict, strict=strict) else: module._load_from_state_dict( state_dict, prefix, {}, strict, missing_keys, unexpected_keys, [] ) for name, child in module.named_children(): m_keys, u_keys = self._load_state_dict( child, filter_state_dict(state_dict, prefix + name), "", strict, ) missing_keys.extend(m_keys) unexpected_keys.extend(u_keys) return _IncompatibleKeys( missing_keys=missing_keys, unexpected_keys=unexpected_keys ) def _named_parameters( self, module: nn.Module, prefix: str = "", recurse: bool = True, strip_ddp: bool = True, ) -> Iterator[Tuple[str, torch.nn.Parameter]]: if strip_ddp: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.named_parameters(prefix, recurse) else: yield from module.named_parameters(prefix, recurse=False) for name, child in module.named_children(): yield from self._named_parameters( child, append_prefix(prefix, name), recurse, strip_ddp, ) def named_parameters( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True, ) -> Iterator[Tuple[str, torch.nn.Parameter]]: gen = self._named_parameters( self.module, prefix, recurse, ) memo = set() for key, param in gen: if param in memo: continue if remove_duplicate: memo.add(param) yield key, param def bare_named_parameters( self, prefix: str = "", 
recurse: bool = True, ) -> Iterator[Tuple[str, torch.nn.Parameter]]: gen = self._named_parameters( self.module, prefix, recurse, ) memo = set() for key, param in gen: if param in memo: continue memo.add(param) yield key, param def _sharded_parameter_names(module: nn.Module, prefix: str = "") -> Iterator[str]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.sharded_parameter_names(prefix) else: for name, child in module.named_children(): yield from DistributedModelParallel._sharded_parameter_names( child, append_prefix(prefix, name) ) def _named_buffers( self, module: nn.Module, prefix: str = "", recurse: bool = True ) -> Iterator[Tuple[str, torch.Tensor]]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): yield from module.named_buffers(prefix, recurse) else: yield from module.named_buffers(prefix, recurse=False) for name, child in module.named_children(): yield from self._named_buffers( child, append_prefix(prefix, name), recurse ) def named_buffers( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True ) -> Iterator[Tuple[str, torch.Tensor]]: gen = self._named_buffers(self.module, prefix, recurse) memo = set() for key, param in gen: if param in memo: continue if remove_duplicate: memo.add(param) yield key, param def fused_optimizer(self) -> KeyedOptimizer: return self._optim def plan(self) -> ShardingPlan: return self._plan def _reset_parameters(module: nn.Module) -> None: for _, m in module.named_modules(): if hasattr(m, "reset_parameters"): m.reset_parameters() class ModuleSharder(abc.ABC, Generic[M]): """ `ModuleSharder` is per each module, which supports sharding, e.g. `EmbeddingBagCollection`. 
Args:: qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs """ def __init__( self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None ) -> None: torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}") self._qcomm_codecs_registry = qcomm_codecs_registry # pyre-ignore [3] def shard( self, module: M, params: EmbeddingModuleShardingPlan, env: ShardingEnv, device: Optional[torch.device] = None, ) -> ShardedModule[Any, Any, Any, Any]: """ Does the actual sharding. It will allocate parameters on the requested locations as specified by corresponding ParameterSharding. Default implementation is data-parallel replication. Args: module (M): module to shard. params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names (module path + parameter name, '.'-separated) to its sharding spec. env (ShardingEnv): sharding environment that has the process group. device (torch.device): compute device. Returns: ShardedModule[Any, Any, Any]: sharded module implementation. """ ... def module_type(self) -> Type[M]: ... def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]: return self._qcomm_codecs_registry def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]: """ List of parameters that can be sharded. """ return dict(module.named_parameters()) def sharding_types(self, compute_device_type: str) -> List[str]: """ List of supported sharding types. See `ShardingType` for well-known examples. """ return [ShardingType.DATA_PARALLEL.value] def compute_kernels( self, sharding_type: str, compute_device_type: str ) -> List[str]: """ List of supported compute kernels for a given sharding type and compute device. 
""" return [ComputeKernel.DEFAULT.value] def storage_usage( self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str ) -> Dict[str, int]: """ List of system resources and corresponding usage given a compute device and compute kernel. """ assert compute_device_type in {"cuda", "cpu", "mtia"} storage_map = { "cuda": ParameterStorage.HBM, "cpu": ParameterStorage.DDR, # TODO: Update it later. Setting for MTIA is same as CPU's for now. "mtia": ParameterStorage.DDR, } return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)} class DLRM(nn.Module): """ Recsys model from "Deep Learning Recommendation Model for Personalization and Recommendation Systems" (https://arxiv.org/abs/1906.00091). Processes sparse features by learning pooled embeddings for each feature. Learns the relationship between dense features and sparse features by projecting dense features into the same embedding space. Also, learns the pairwise relationships between sparse features. The module assumes all sparse features have the same embedding dimension (i.e. each EmbeddingBagConfig uses the same embedding_dim). The following notation is used throughout the documentation for the models: * F: number of sparse features * D: embedding_dimension of sparse features * B: batch size * num_features: number of dense features Args: embedding_bag_collection (EmbeddingBagCollection): collection of embedding bags used to define `SparseArch`. dense_in_features (int): the dimensionality of the dense input features. dense_arch_layer_sizes (List[int]): the layer sizes for the `DenseArch`. over_arch_layer_sizes (List[int]): the layer sizes for the `OverArch`. The output dimension of the `InteractionArch` should not be manually specified here. dense_device (Optional[torch.device]): default compute device. 
Example:: B = 2 D = 8 eb1_config = EmbeddingBagConfig( name="t1", embedding_dim=D, num_embeddings=100, feature_names=["f1"] ) eb2_config = EmbeddingBagConfig( name="t2", embedding_dim=D, num_embeddings=100, feature_names=["f2"], ) ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config]) model = DLRM( embedding_bag_collection=ebc, dense_in_features=100, dense_arch_layer_sizes=[20, D], over_arch_layer_sizes=[5, 1], ) features = torch.rand((B, 100)) # 0 1 # 0 [1,2] [4,5] # 1 [4,3] [2,9] # ^ # feature sparse_features = KeyedJaggedTensor.from_offsets_sync( keys=["f1", "f2"], values=torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]), offsets=torch.tensor([0, 2, 4, 6, 8]), ) logits = model( dense_features=features, sparse_features=sparse_features, ) """ def __init__( self, embedding_bag_collection: EmbeddingBagCollection, dense_in_features: int, dense_arch_layer_sizes: List[int], over_arch_layer_sizes: List[int], dense_device: Optional[torch.device] = None, ) -> None: super().__init__() assert ( len(embedding_bag_collection.embedding_bag_configs()) > 0 ), "At least one embedding bag is required" for i in range(1, len(embedding_bag_collection.embedding_bag_configs())): conf_prev = embedding_bag_collection.embedding_bag_configs()[i - 1] conf = embedding_bag_collection.embedding_bag_configs()[i] assert ( conf_prev.embedding_dim == conf.embedding_dim ), "All EmbeddingBagConfigs must have the same dimension" embedding_dim: int = embedding_bag_collection.embedding_bag_configs()[ 0 ].embedding_dim if dense_arch_layer_sizes[-1] != embedding_dim: raise ValueError( f"embedding_bag_collection dimension ({embedding_dim}) and final dense " "arch layer size ({dense_arch_layer_sizes[-1]}) must match." 
) self.sparse_arch: SparseArch = SparseArch(embedding_bag_collection) num_sparse_features: int = len(self.sparse_arch.sparse_feature_names) self.dense_arch = DenseArch( in_features=dense_in_features, layer_sizes=dense_arch_layer_sizes, device=dense_device, ) self.inter_arch = InteractionArch( num_sparse_features=num_sparse_features, ) over_in_features: int = ( embedding_dim + choose(num_sparse_features, 2) + num_sparse_features ) self.over_arch = OverArch( in_features=over_in_features, layer_sizes=over_arch_layer_sizes, device=dense_device, ) def forward( self, dense_features: torch.Tensor, sparse_features: KeyedJaggedTensor, ) -> torch.Tensor: """ Args: dense_features (torch.Tensor): the dense features. sparse_features (KeyedJaggedTensor): the sparse features. Returns: torch.Tensor: logits. """ embedded_dense = self.dense_arch(dense_features) embedded_sparse = self.sparse_arch(sparse_features) concatenated_dense = self.inter_arch( dense_features=embedded_dense, sparse_features=embedded_sparse ) logits = self.over_arch(concatenated_dense) return logits class DLRMTrain(nn.Module): """ nn.Module to wrap DLRM model to use with train_pipeline. DLRM Recsys model from "Deep Learning Recommendation Model for Personalization and Recommendation Systems" (https://arxiv.org/abs/1906.00091). Processes sparse features by learning pooled embeddings for each feature. Learns the relationship between dense features and sparse features by projecting dense features into the same embedding space. Also, learns the pairwise relationships between sparse features. 
The module assumes all sparse features have the same embedding dimension (i.e, each EmbeddingBagConfig uses the same embedding_dim) Args: dlrm_module: DLRM module (DLRM or DLRM_Projection or DLRM_DCN) to be used in training Example:: ebc = EmbeddingBagCollection(config=ebc_config) dlrm_module = DLRM( embedding_bag_collection=ebc, dense_in_features=100, dense_arch_layer_sizes=[20], over_arch_layer_sizes=[5, 1], ) dlrm_model = DLRMTrain(dlrm_module) """ def __init__( self, dlrm_module: DLRM, ) -> None: super().__init__() self.model = dlrm_module self.loss_fn: nn.Module = nn.BCEWithLogitsLoss() def forward( self, batch: Batch ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: """ Args: batch: batch used with criteo and random data from torchrec.datasets Returns: Tuple[loss, Tuple[loss, logits, labels]] """ logits = self.model(batch.dense_features, batch.sparse_features) logits = logits.squeeze(-1) loss = self.loss_fn(logits, batch.labels.float()) return loss, (loss.detach(), logits.detach(), batch.labels.detach()) class EmbeddingBagConfig(BaseEmbeddingConfig): pooling: PoolingType = PoolingType.SUM class EmbeddingBagCollection(EmbeddingBagCollectionInterface): """ EmbeddingBagCollection represents a collection of pooled embeddings (`EmbeddingBags`). It processes sparse data in the form of `KeyedJaggedTensor` with values of the form [F X B X L] where: * F: features (keys) * B: batch size * L: length of sparse features (jagged) and outputs a `KeyedTensor` with values of the form [B * (F * D)] where: * F: features (keys) * D: each feature's (key's) embedding dimension * B: batch size Args: tables (List[EmbeddingBagConfig]): list of embedding tables. is_weighted (bool): whether input `KeyedJaggedTensor` is weighted. device (Optional[torch.device]): default compute device. 
Example:: table_0 = EmbeddingBagConfig( name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"] ) table_1 = EmbeddingBagConfig( name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"] ) ebc = EmbeddingBagCollection(tables=[table_0, table_1]) # 0 1 2 <-- batch # "f1" [0,1] None [2] # "f2" [3] [4] [5,6,7] # ^ # feature features = KeyedJaggedTensor( keys=["f1", "f2"], values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]), offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]), ) pooled_embeddings = ebc(features) print(pooled_embeddings.values()) tensor([[-0.8899, -0.1342, -1.9060, -0.0905, -0.2814, -0.9369, -0.7783], [ 0.0000, 0.0000, 0.0000, 0.1598, 0.0695, 1.3265, -0.1011], [-0.4256, -1.1846, -2.1648, -1.0893, 0.3590, -1.9784, -0.7681]], grad_fn=<CatBackward0>) print(pooled_embeddings.keys()) ['f1', 'f2'] print(pooled_embeddings.offset_per_key()) tensor([0, 3, 7]) """ def __init__( self, tables: List[EmbeddingBagConfig], is_weighted: bool = False, device: Optional[torch.device] = None, ) -> None: super().__init__() torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}") self._is_weighted = is_weighted self.embedding_bags: nn.ModuleDict = nn.ModuleDict() self._embedding_bag_configs = tables self._lengths_per_embedding: List[int] = [] self._dtypes: List[int] = [] table_names = set() for embedding_config in tables: if embedding_config.name in table_names: raise ValueError(f"Duplicate table name {embedding_config.name}") table_names.add(embedding_config.name) dtype = ( torch.float32 if embedding_config.data_type == DataType.FP32 else torch.float16 ) self.embedding_bags[embedding_config.name] = nn.EmbeddingBag( num_embeddings=embedding_config.num_embeddings, embedding_dim=embedding_config.embedding_dim, mode=pooling_type_to_str(embedding_config.pooling), device=device, include_last_offset=True, dtype=dtype, ) if device is None: device = self.embedding_bags[embedding_config.name].weight.device self._dtypes.append(embedding_config.data_type.value) 
if not embedding_config.feature_names: embedding_config.feature_names = [embedding_config.name] self._lengths_per_embedding.extend( len(embedding_config.feature_names) * [embedding_config.embedding_dim] ) self._device: torch.device = device or torch.device("cpu") self._embedding_names: List[str] = [ embedding for embeddings in get_embedding_names_by_table(tables) for embedding in embeddings ] self._feature_names: List[List[str]] = [table.feature_names for table in tables] self.reset_parameters() def forward(self, features: KeyedJaggedTensor) -> KeyedTensor: """ Args: features (KeyedJaggedTensor): KJT of form [F X B X L]. Returns: KeyedTensor """ flat_feature_names: List[str] = [] for names in self._feature_names: flat_feature_names.extend(names) inverse_indices = reorder_inverse_indices( inverse_indices=features.inverse_indices_or_none(), feature_names=flat_feature_names, ) pooled_embeddings: List[torch.Tensor] = [] feature_dict = features.to_dict() for i, embedding_bag in enumerate(self.embedding_bags.values()): for feature_name in self._feature_names[i]: f = feature_dict[feature_name] per_sample_weights: Optional[torch.Tensor] = None if self._is_weighted: per_sample_weights = ( f.weights().half() if self._dtypes[i] == DataType.FP16.value else f.weights() ) res = embedding_bag( input=f.values(), offsets=f.offsets(), per_sample_weights=( per_sample_weights if self._is_weighted else None ), ).float() pooled_embeddings.append(res) return KeyedTensor( keys=self._embedding_names, values=process_pooled_embeddings( pooled_embeddings=pooled_embeddings, inverse_indices=inverse_indices, ), length_per_key=self._lengths_per_embedding, ) def is_weighted(self) -> bool: return self._is_weighted def embedding_bag_configs(self) -> List[EmbeddingBagConfig]: return self._embedding_bag_configs def device(self) -> torch.device: return self._device def reset_parameters(self) -> None: if (isinstance(self.device, torch.device) and self.device.type == "meta") or ( isinstance(self.device, 
str) and self.device == "meta" ): return # Initialize embedding bags weights with init_fn for table_config in self._embedding_bag_configs: assert table_config.init_fn is not None param = self.embedding_bags[f"{table_config.name}"].weight # pyre-ignore table_config.init_fn(param) class KeyedOptimizerWrapper(KeyedOptimizer): """ Takes a dict of parameters and exposes state_dict by parameter key. Convenience wrapper to take in optim_factory callable to create KeyedOptimizer """ def __init__( self, params: Mapping[str, Union[torch.Tensor, ShardedTensor]], optim_factory: OptimizerFactory, ) -> None: self._optimizer: optim.Optimizer = optim_factory(list(params.values())) super().__init__(params, self._optimizer.state, self._optimizer.param_groups) def zero_grad(self, set_to_none: bool = False) -> None: self._optimizer.zero_grad() # pyre-ignore [2] def step(self, closure: Any = None) -> None: self._optimizer.step(closure=closure) def in_backward_optimizer_filter( named_parameters: Iterator[Tuple[str, nn.Parameter]], include: bool = False ) -> Iterator[Tuple[str, nn.Parameter]]: """ Filters named_parameters for whether they are or or not params that use the in_backward_optimizer. Note: This only supports the in_backward_optimizer from PT-D's API. The torchrec's equivalent API is deprecated and is not supported. Args: named_parameters(Iterator[Tuple[str, nn.Parameter]]): named_parameters include(bool): If true, only yields params with in_backward_optimizer. If false, returns the outside set Defaults to include params that are not in_backward (False) """ for fqn, param in named_parameters: if hasattr(param, "_in_backward_optimizers") == include: yield fqn, param The provided code snippet includes necessary dependencies for implementing the `train` function. 
Write a Python function `def train( num_embeddings: int = 1024**2, embedding_dim: int = 128, dense_arch_layer_sizes: Optional[List[int]] = None, over_arch_layer_sizes: Optional[List[int]] = None, learning_rate: float = 0.1, ) -> None` to solve the following problem: Constructs and trains a DLRM model (using random dummy data). Each script is run on each process (rank) in SPMD fashion. The embedding layers will be sharded across available ranks Here is the function: def train( num_embeddings: int = 1024**2, embedding_dim: int = 128, dense_arch_layer_sizes: Optional[List[int]] = None, over_arch_layer_sizes: Optional[List[int]] = None, learning_rate: float = 0.1, ) -> None: """ Constructs and trains a DLRM model (using random dummy data). Each script is run on each process (rank) in SPMD fashion. The embedding layers will be sharded across available ranks """ if dense_arch_layer_sizes is None: dense_arch_layer_sizes = [64, 128] if over_arch_layer_sizes is None: over_arch_layer_sizes = [64, 1] # Init process_group , device, rank, backend rank = int(os.environ["LOCAL_RANK"]) if torch.cuda.is_available(): device: torch.device = torch.device(f"cuda:{rank}") backend = "nccl" torch.cuda.set_device(device) else: device: torch.device = torch.device("cpu") backend = "gloo" dist.init_process_group(backend=backend) # Construct DLRM module eb_configs = [ EmbeddingBagConfig( name=f"t_{feature_name}", embedding_dim=embedding_dim, num_embeddings=num_embeddings, feature_names=[feature_name], ) for feature_idx, feature_name in enumerate(DEFAULT_CAT_NAMES) ] dlrm_model = DLRM( embedding_bag_collection=EmbeddingBagCollection( tables=eb_configs, device=torch.device("meta") ), dense_in_features=len(DEFAULT_INT_NAMES), dense_arch_layer_sizes=dense_arch_layer_sizes, over_arch_layer_sizes=over_arch_layer_sizes, dense_device=device, ) train_model = DLRMTrain(dlrm_model) # Enable optimizer fusion fused_params = { "learning_rate": learning_rate, "optimizer": OptimType.EXACT_ROWWISE_ADAGRAD, } 
sharders = [ EmbeddingBagCollectionSharder(fused_params=fused_params), ] # Distribute model across devices model = DistributedModelParallel( module=train_model, device=device, sharders=cast(List[ModuleSharder[nn.Module]], sharders), ) # Overlap comm/compute/device transfer during training through train_pipeline non_fused_optimizer = KeyedOptimizerWrapper( dict(in_backward_optimizer_filter(model.named_parameters())), lambda params: torch.optim.Adagrad(params, lr=learning_rate), ) train_pipeline = TrainPipelineSparseDist( model, non_fused_optimizer, device, ) # train model train_iterator = iter( _get_random_dataloader( num_embeddings=num_embeddings, pin_memory=backend == "nccl" ) ) for _ in tqdm(range(int(1e4)), mininterval=5.0): train_pipeline.progress(train_iterator)
Constructs and trains a DLRM model (using random dummy data). Each script is run on each process (rank) in SPMD fashion. The embedding layers will be sharded across available ranks
8,845
import copyreg import io import os import pickle import uuid from typing import cast, List, Optional import torch import torch.distributed as dist import torch.distributed.launcher as pet import torchrec from fbgemm_gpu.split_embedding_configs import EmbOptimType from torch import nn from torch.multiprocessing.reductions import ( reduce_storage, reduce_typed_storage, reduce_typed_storage_child, ) from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder from torchrec.distributed.model_parallel import DistributedModelParallel from torchrec.distributed.planner import EmbeddingShardingPlanner, Topology from torchrec.distributed.planner.types import ParameterConstraints from torchrec.distributed.types import ModuleSharder, ShardingType from torchrec.modules.embedding_modules import EmbeddingBagCollection gloo_pg: Optional[dist.ProcessGroup] = None The provided code snippet includes necessary dependencies for implementing the `share_tensor_via_shm` function. Write a Python function `def share_tensor_via_shm( tensor: Optional[torch.Tensor], src_rank: int = 0 ) -> torch.Tensor` to solve the following problem: Share a tensor via shared memory with local peers. This is a collective function that must be called by all processes within the global process group. Rank `src_rank` must pass in the tensor it wants to share. NOTE: this is a simple implementation that only supports the single-host, multi-process environment. Multi-host support is possible but is slightly more complicated. See [`torch.multiprocessing`](https://pytorch.org/docs/stable/multiprocessing.html) and [best practices](https://pytorch.org/docs/1.6.0/notes/multiprocessing.html?highlight=multiprocessing) for more information on shared memory Args: tensor: The tensor to share. src_rank: The rank sharing the tensor. Returns: The tensor shared via shared memory. 
Example:: dist.init_process_group(backend="nccl") local_rank = int(os.environ["LOCAL_RANK"]) device = torch.device(f"cuda:{local_rank}") torch.cuda.set_device(device) dist.barrier() if dist.get_rank() == 0: # Pretend that we are loading the pretrained embedding weight from a parquet file on rank 0. emb = torch.rand(2000000, 64) # Share the tensor to local peers via shared memory emb = share_tensor_via_shm(tensor=emb) else: # Received the tensor shared by rank 0 via shared memory emb = share_tensor_via_shm(tensor=None) assert emb.is_shared() Here is the function: def share_tensor_via_shm( tensor: Optional[torch.Tensor], src_rank: int = 0 ) -> torch.Tensor: """ Share a tensor via shared memory with local peers. This is a collective function that must be called by all processes within the global process group. Rank `src_rank` must pass in the tensor it wants to share. NOTE: this is a simple implementation that only supports the single-host, multi-process environment. Multi-host support is possible but is slightly more complicated. See [`torch.multiprocessing`](https://pytorch.org/docs/stable/multiprocessing.html) and [best practices](https://pytorch.org/docs/1.6.0/notes/multiprocessing.html?highlight=multiprocessing) for more information on shared memory Args: tensor: The tensor to share. src_rank: The rank sharing the tensor. Returns: The tensor shared via shared memory. Example:: dist.init_process_group(backend="nccl") local_rank = int(os.environ["LOCAL_RANK"]) device = torch.device(f"cuda:{local_rank}") torch.cuda.set_device(device) dist.barrier() if dist.get_rank() == 0: # Pretend that we are loading the pretrained embedding weight from a parquet file on rank 0. 
emb = torch.rand(2000000, 64) # Share the tensor to local peers via shared memory emb = share_tensor_via_shm(tensor=emb) else: # Received the tensor shared by rank 0 via shared memory emb = share_tensor_via_shm(tensor=None) assert emb.is_shared() """ if not dist.is_initialized(): raise RuntimeError("Global process group is not initialized") global gloo_pg if gloo_pg is None: if dist.get_backend() == "gloo": gloo_pg = dist.group.WORLD else: gloo_pg = dist.new_group(backend="gloo") torch.multiprocessing.set_sharing_strategy("file_system") if dist.get_rank() == src_rank: assert tensor is not None, f"src_rank ({src_rank}) must provide a tensor" # Intialize a custom pickler buf = io.BytesIO() shm_pickler = pickle.Pickler(buf) shm_pickler.dispatch_table = copyreg.dispatch_table.copy() # Register reducers for moving the tensor storage to shared memory for t in torch._storage_classes: if t.__name__ == "_UntypedStorage": # pyre-ignore [16] shm_pickler.dispatch_table[t] = reduce_storage else: shm_pickler.dispatch_table[t] = reduce_typed_storage_child # pyre-fixme[16]: Module `storage` has no attribute `_TypedStorage`. shm_pickler.dispatch_table[torch.storage._TypedStorage] = reduce_typed_storage tensor.share_memory_() shm_pickler.dump(tensor) obj_list = [buf.getvalue()] dist.broadcast_object_list(obj_list, src=src_rank, group=gloo_pg) dist.barrier(group=gloo_pg) return tensor else: obj_list = [None] dist.broadcast_object_list(obj_list, src=src_rank, group=gloo_pg) obj = obj_list[0] assert obj is not None buf = io.BytesIO(obj) dist.barrier(group=gloo_pg) return pickle.load(buf)
Share a tensor via shared memory with local peers. This is a collective function that must be called by all processes within the global process group. Rank `src_rank` must pass in the tensor it wants to share. NOTE: this is a simple implementation that only supports the single-host, multi-process environment. Multi-host support is possible but is slightly more complicated. See [`torch.multiprocessing`](https://pytorch.org/docs/stable/multiprocessing.html) and [best practices](https://pytorch.org/docs/1.6.0/notes/multiprocessing.html?highlight=multiprocessing) for more information on shared memory Args: tensor: The tensor to share. src_rank: The rank sharing the tensor. Returns: The tensor shared via shared memory. Example:: dist.init_process_group(backend="nccl") local_rank = int(os.environ["LOCAL_RANK"]) device = torch.device(f"cuda:{local_rank}") torch.cuda.set_device(device) dist.barrier() if dist.get_rank() == 0: # Pretend that we are loading the pretrained embedding weight from a parquet file on rank 0. emb = torch.rand(2000000, 64) # Share the tensor to local peers via shared memory emb = share_tensor_via_shm(tensor=emb) else: # Received the tensor shared by rank 0 via shared memory emb = share_tensor_via_shm(tensor=None) assert emb.is_shared()
8,846
import copyreg import io import os import pickle import uuid from typing import cast, List, Optional import torch import torch.distributed as dist import torch.distributed.launcher as pet import torchrec from fbgemm_gpu.split_embedding_configs import EmbOptimType from torch import nn from torch.multiprocessing.reductions import ( reduce_storage, reduce_typed_storage, reduce_typed_storage_child, ) from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder from torchrec.distributed.model_parallel import DistributedModelParallel from torchrec.distributed.planner import EmbeddingShardingPlanner, Topology from torchrec.distributed.planner.types import ParameterConstraints from torchrec.distributed.types import ModuleSharder, ShardingType from torchrec.modules.embedding_modules import EmbeddingBagCollection def main() -> None: """ An example for initializing a torchrec sharded embedding bag with a pretrained embedding weight. Environment assumptions: - The embedding weight fits in the RAM of a single host, but may OOM if all processes on the host load the embedding weight simultaneously. - For simplicity, the demo assumes a single-host, multi-process environment. """ dist.init_process_group(backend="nccl") local_rank = int(os.environ["LOCAL_RANK"]) device = torch.device(f"cuda:{local_rank}") torch.cuda.set_device(device) pg = dist.group.WORLD assert pg is not None dist.barrier() if dist.get_rank() == 0: # Pretend that we are loading the pretrained embedding weight from a parquet file on rank 0. emb = torch.rand(2000000, 64) # Share the tensor to local peers via shared memory emb = share_tensor_via_shm(tensor=emb) else: # Received the tensor shared by rank 0 via shared memory emb = share_tensor_via_shm(tensor=None) assert emb.is_shared() # For demo purpose, the entire model is an embedding bag collection with a # single embedding bag. 
ebc = EmbeddingBagCollection( device=torch.device("meta"), tables=[ torchrec.EmbeddingBagConfig( name="emb", embedding_dim=64, num_embeddings=2000000, feature_names=["f"], pooling=torchrec.PoolingType.SUM, ) ], ) # Create a rowwise sharding plan sharders = cast( List[ModuleSharder[nn.Module]], [ EmbeddingBagCollectionSharder( fused_params={ "optimizer": EmbOptimType.EXACT_ROWWISE_ADAGRAD, "learning_rate": 0.01, "eps": 0.01, } ) ], ) plan = EmbeddingShardingPlanner( topology=Topology(world_size=dist.get_world_size(), compute_device=device.type), constraints={ "emb": ParameterConstraints(sharding_types=[ShardingType.ROW_WISE.value]) }, ).collective_plan( ebc, sharders, pg, ) print(plan) # Initialize dmp which shards the embedding bag dmp = DistributedModelParallel( module=ebc, device=device, plan=plan, sharders=sharders, ) print( "Finished initializing DistributedModelParallel. " f"Current device utilization: {torch.cuda.memory_allocated() / 1_000_000} MB" ) # For each shard in sharded tensors, load from the corresponding slice from # the pretrained weights in shared memory. for rank in range(dist.get_world_size()): if dist.get_rank() == rank: for _, t in dmp.state_dict().items(): for shard in t.local_shards(): offsets = shard.metadata.shard_offsets lengths = shard.metadata.shard_sizes src = emb[ offsets[0] : offsets[0] + lengths[0], offsets[1] : offsets[1] + lengths[1], ] shard.tensor.copy_(src) dist.barrier() else: dist.barrier() def invoke_main() -> None: lc = pet.LaunchConfig( min_nodes=1, max_nodes=1, nproc_per_node=8, run_id=str(uuid.uuid4()), rdzv_backend="c10d", rdzv_endpoint="localhost:0", max_restarts=0, monitor_interval=1, ) pet.elastic_launch(lc, entrypoint=main)()
null
8,847
import os

import torchx.specs as specs
from torchx.components.dist import ddp


def run_dlrm_main(num_trainers: int = 8, *script_args: str) -> specs.AppDef:
    """
    Build a TorchX AppDef that launches ``train_torchrec.py`` via the ``ddp``
    component.

    Args:
        num_trainers: The number of trainers to use.
        script_args: A variable number of parameters to provide dlrm_main.py.

    Returns:
        A ``specs.AppDef`` describing the distributed training job.

    Raises:
        ValueError: if ``num_trainers`` is not positive, or if a multi-host job
            (``num_trainers > 8``) is not a multiple of 8.
    """
    # BUG FIX: num_trainers <= 0 previously slipped through and produced an
    # invalid process topology such as j="1x0"; fail fast instead.
    if num_trainers <= 0:
        raise ValueError("num_trainers must be a positive integer.")
    cwd = os.getcwd()
    entrypoint = os.path.join(cwd, "train_torchrec.py")
    # NOTE(review): assumes $USER is set and /data/home/<user> is a valid image
    # root on the target cluster — confirm for the deployment environment.
    user = os.environ.get("USER")
    image = f"/data/home/{user}"
    if num_trainers > 8 and num_trainers % 8 != 0:
        raise ValueError(
            "Trainer jobs spanning multiple hosts must be in multiples of 8."
        )
    # Single host: all trainers share one node; multi-host: 8 trainers per node.
    nproc_per_node = 8 if num_trainers >= 8 else num_trainers
    num_replicas = max(num_trainers // 8, 1)
    return ddp(
        *script_args,
        name="train_dlrm",
        image=image,
        # AWS p4d instance (https://aws.amazon.com/ec2/instance-types/p4/).
        cpu=96,
        gpu=8,
        memMB=-1,
        script=entrypoint,
        j=f"{num_replicas}x{nproc_per_node}",
    )
Args: num_trainers: The number of trainers to use. script_args: A variable number of parameters to provide dlrm_main.py.
8,848
import argparse
import os
import sys
import time
from typing import cast, Iterator, List, Tuple

import torch
import torch.distributed as dist
import torch.nn as nn
import torchmetrics as metrics
import torchrec
import torchrec.distributed as trec_dist
import torchrec.optim as trec_optim
from nvt_binary_dataloader import NvtBinaryDataloader
from pyre_extensions import none_throws
from torchrec import EmbeddingBagCollection
from torchrec.datasets.criteo import (
    DEFAULT_CAT_NAMES,
    DEFAULT_INT_NAMES,
    TOTAL_TRAINING_SAMPLES,
)
from torchrec.datasets.utils import Batch
from torchrec.distributed import TrainPipelineSparseDist
from torchrec.distributed.fused_embeddingbag import FusedEmbeddingBagCollectionSharder
from torchrec.distributed.model_parallel import DistributedModelParallel
from torchrec.distributed.types import ModuleSharder
from torchrec.metrics.throughput import ThroughputMetric
from torchrec.models.dlrm import DLRM, DLRMTrain
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.fused_embedding_modules import fuse_embedding_optimizer
from torchrec.optim.keyed import KeyedOptimizerWrapper
from torchrec.optim.optimizers import in_backward_optimizer_filter


def parse_args(argv: List[str]) -> argparse.Namespace:
    """Parse command-line options for the torchrec DLRM example trainer.

    Args:
        argv: argument strings, typically ``sys.argv[1:]``.

    Returns:
        argparse.Namespace holding the parsed options.
    """
    parser = argparse.ArgumentParser(description="torchrec dlrm example trainer")
    parser.add_argument(
        "--epochs", type=int, default=1, help="number of epochs to train"
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=32,
        help="local batch size to use for training",
    )
    parser.add_argument(
        "--num_embeddings",
        type=int,
        default=100_000,
        help="max_ind_size. The number of embeddings in each embedding table. Defaults"
        " to 100_000 if num_embeddings_per_feature is not supplied.",
    )
    parser.add_argument(
        "--num_embeddings_per_feature",
        type=str,
        default=None,
        help="Comma separated max_ind_size per sparse feature. The number of embeddings"
        " in each embedding table. 26 values are expected for the Criteo dataset.",
    )
    parser.add_argument(
        "--dense_arch_layer_sizes",
        type=str,
        default="512,256,128",
        help="Comma separated layer sizes for dense arch.",
    )
    parser.add_argument(
        "--over_arch_layer_sizes",
        type=str,
        default="1024,1024,512,256,1",
        help="Comma separated layer sizes for over arch.",
    )
    parser.add_argument(
        "--embedding_dim",
        type=int,
        default=128,
        help="Size of each embedding.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=15.0,
        help="Learning rate.",
    )
    parser.add_argument(
        "--binary_path",
        type=str,
        default="/data/criteo_1tb/criteo_binary/split/",
        help="Location for binary datafiles",
    )
    parser.add_argument(
        "--change_lr",
        dest="change_lr",
        action="store_true",
        help="Flag to determine whether learning rate should be changed part way through training.",
    )
    parser.add_argument(
        "--lr_change_point",
        type=float,
        default=0.80,
        # BUG FIX: argparse %-formats help strings when rendering --help, so the
        # bare "%" in "80%" raised "ValueError: unsupported format character";
        # it must be escaped as "%%".
        help="The point through training at which learning rate should change to the value set by"
        " lr_after_change_point. The default value is 0.80 which means that 80%% through the total iterations (totaled"
        " across all epochs), the learning rate will change.",
    )
    parser.add_argument(
        "--lr_after_change_point",
        type=float,
        default=3.0,
        help="Learning rate after change point in first epoch.",
    )
    parser.add_argument(
        "--throughput_check_freq_within_epoch",
        type=int,
        default=1000,
        # BUG FIX: help text previously read "Frequency at QPS will be output".
        help="Frequency at which QPS will be output within an epoch.",
    )
    parser.add_argument(
        "--validation_freq_within_epoch",
        type=int,
        default=10000,
        help="Frequency at which validation will be run within an epoch.",
    )
    parser.add_argument(
        "--adagrad",
        dest="adagrad",
        action="store_true",
        help="Flag to determine if adagrad optimizer should be used.",
    )
    return parser.parse_args(argv)
null
8,849
import argparse
import os
import sys
import time
from typing import cast, Iterator, List, Tuple

import torch
import torch.distributed as dist
import torch.nn as nn
import torchmetrics as metrics
import torchrec
import torchrec.distributed as trec_dist
import torchrec.optim as trec_optim
from nvt_binary_dataloader import NvtBinaryDataloader
from pyre_extensions import none_throws
from torchrec import EmbeddingBagCollection
from torchrec.datasets.criteo import (
    DEFAULT_CAT_NAMES,
    DEFAULT_INT_NAMES,
    TOTAL_TRAINING_SAMPLES,
)
from torchrec.datasets.utils import Batch
from torchrec.distributed import TrainPipelineSparseDist
from torchrec.distributed.fused_embeddingbag import FusedEmbeddingBagCollectionSharder
from torchrec.distributed.model_parallel import DistributedModelParallel
from torchrec.distributed.types import ModuleSharder
from torchrec.metrics.throughput import ThroughputMetric
from torchrec.models.dlrm import DLRM, DLRMTrain
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.fused_embedding_modules import fuse_embedding_optimizer
from torchrec.optim.keyed import KeyedOptimizerWrapper
from torchrec.optim.optimizers import in_backward_optimizer_filter


# NOTE(review): this class re-declares `Batch`, shadowing the `Batch` imported
# from torchrec.datasets.utils above. `Pipelineable` and `KeyedJaggedTensor`
# are not imported in this snippet, and the keyword-argument construction below
# implies a `@dataclass` decorator was lost in extraction — confirm against the
# original file.
class Batch(Pipelineable):
    # dense_features: dense float inputs for the batch.
    dense_features: torch.Tensor
    # sparse_features: jagged categorical ids keyed by feature name.
    sparse_features: KeyedJaggedTensor
    # labels: per-example training targets.
    labels: torch.Tensor

    def to(self, device: torch.device, non_blocking: bool = False) -> "Batch":
        # Return a new Batch with every field moved to `device`; the original
        # Batch is left untouched.
        return Batch(
            dense_features=self.dense_features.to(
                device=device, non_blocking=non_blocking
            ),
            sparse_features=self.sparse_features.to(
                device=device, non_blocking=non_blocking
            ),
            labels=self.labels.to(device=device, non_blocking=non_blocking),
        )

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # Mark all field tensors as used on `stream` so the allocator does not
        # reuse their memory while the stream still references them.
        self.dense_features.record_stream(stream)
        self.sparse_features.record_stream(stream)
        self.labels.record_stream(stream)

    def pin_memory(self) -> "Batch":
        # Return a pinned-memory copy of every field (enables async H2D copies).
        return Batch(
            dense_features=self.dense_features.pin_memory(),
            sparse_features=self.sparse_features.pin_memory(),
            labels=self.labels.pin_memory(),
        )


def _eval(
    train_pipeline: TrainPipelineSparseDist, it: Iterator[Batch]
) -> Tuple[float, float, float]:
    """Run the pipeline's model over `it` and compute validation metrics.

    Args:
        train_pipeline: pipeline whose wrapped model is evaluated; it is put in
            eval mode here (and NOT restored to train mode — callers that keep
            training must call .train() again).
        it: iterator of Batch objects; consumed until exhaustion.

    Returns:
        Tuple of (AUROC, accuracy, mean BCE loss) over all batches.
    """
    train_pipeline._model.eval()
    device = train_pipeline._device
    auroc = metrics.AUROC(compute_on_step=False).to(device)
    accuracy = metrics.Accuracy(compute_on_step=False).to(device)
    val_losses = []
    step = 0
    with torch.no_grad():
        # progress() raises StopIteration when `it` is exhausted; that is the
        # loop's only exit condition.
        while True:
            try:
                loss, logits, labels = train_pipeline.progress(it)
                val_losses.append(loss)
                # Model emits logits; sigmoid converts to probabilities for the
                # threshold-based metrics.
                preds = torch.sigmoid(logits)
                labels = labels.to(torch.int32)
                auroc(preds, labels)
                accuracy(preds, labels)
                step += 1
            except StopIteration:
                break
    auroc_result = auroc.compute().item()
    accuracy_result = accuracy.compute().item()
    # NOTE(review): torch.stack raises on an empty list — an empty validation
    # iterator would crash here; confirm callers never pass one.
    bce_loss = torch.mean(torch.stack(val_losses))
    return (auroc_result, accuracy_result, bce_loss)
null
8,850
import argparse
import os
import sys
from typing import Any, cast, Dict, List, Union

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data_utils
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torch import distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torchrec.distributed.embedding import EmbeddingCollectionSharder
from torchrec.distributed.model_parallel import DistributedModelParallel as DMP
from torchrec.distributed.types import ModuleSharder
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper
from torchrec.optim.optimizers import in_backward_optimizer_filter
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from tqdm import tqdm


def parse_args(argv: List[str]) -> argparse.Namespace:
    """Parse command-line options for the torchrec + lightning Bert4Rec app.

    Args:
        argv: argument strings, typically ``sys.argv[1:]``.

    Returns:
        argparse.Namespace holding the parsed options.
    """
    parser = argparse.ArgumentParser(description="torchrec + lightning app")
    parser.add_argument(
        "--min_user_count", type=int, default=5, help="minimum user ratings count"
    )
    parser.add_argument(
        "--min_item_count",
        type=int,
        default=0,
        help="minimum item count for each valid user",
    )
    parser.add_argument(
        "--max_len",
        type=int,
        default=100,
        help="max length of the Bert embedding dimension",
    )
    parser.add_argument(
        "--mask_prob",
        type=float,
        default=0.15,
        help="probability of the mask",
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="ml-1m",
        help="dataset for experiment, current support ml-1m, ml-20m",
    )
    parser.add_argument(
        "--min_rating",
        type=int,
        default=0,
        help="minimum valid rating",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=100,
        help="the number of epoch to train",
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.001,
        help="learning rate",
    )
    parser.add_argument(
        "--decay_step",
        type=int,
        # BUG FIX: the default was the *string* "25"; it only worked because
        # argparse re-parses string defaults through `type`. Use the int.
        default=25,
        help="the step of weight decay",
    )
    parser.add_argument(
        "--weight_decay",
        type=float,
        default=0.0,
        help="weight decay",
    )
    parser.add_argument(
        "--gamma",
        type=float,
        default=1.0,
        help="gamma of the lr scheduler",
    )
    parser.add_argument(
        "--train_batch_size",
        type=int,
        default=128,
        help="train batch size",
    )
    parser.add_argument(
        "--val_batch_size",
        type=int,
        default=128,
        help="val batch size",
    )
    parser.add_argument(
        "--test_batch_size",
        type=int,
        default=128,
        help="test batch size",
    )
    parser.add_argument(
        "--emb_dim",
        type=int,
        default=256,
        help="dimension of the hidden layer embedding",
    )
    parser.add_argument(
        "--nhead",
        type=int,
        default=2,
        help="number of header of attention",
    )
    parser.add_argument(
        "--num_layers",
        type=int,
        default=2,
        help="number of layers of attention",
    )
    parser.add_argument(
        "--dataset_path",
        type=str,
        default=None,
        help="Path to a folder containing the dataset.",
    )
    parser.add_argument(
        "--export_root",
        type=str,
        default="",
        help="Path to save the trained model",
    )
    parser.add_argument(
        "--random_user_count",
        type=int,
        default=10,
        help="number of random users",
    )
    parser.add_argument(
        "--random_item_count",
        type=int,
        default=30,
        help="number of random items",
    )
    parser.add_argument(
        "--random_size",
        type=int,
        default=300,
        help="number of random sample size",
    )
    parser.add_argument(
        "--dupe_factor",
        type=int,
        default=3,
        help="number of duplication while generating the random masked seqs",
    )
    parser.add_argument(
        "--mode",
        type=str,
        default="dmp",
        # BUG FIX: typo "distirbuted" -> "distributed" in the help text.
        help="dmp (distributed model parallel) or ddp (distributed data parallel)",
    )
    return parser.parse_args(argv)
null
8,851
import argparse
import os
import sys
from typing import Any, cast, Dict, List, Union

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data_utils
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torch import distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torchrec.distributed.embedding import EmbeddingCollectionSharder
from torchrec.distributed.model_parallel import DistributedModelParallel as DMP
from torchrec.distributed.types import ModuleSharder
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper
from torchrec.optim.optimizers import in_backward_optimizer_filter
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from tqdm import tqdm


def _train_one_epoch(
    model: Union[DDP, DMP],
    train_loader: data_utils.DataLoader,
    device: torch.device,
    optimizer: optim.Adam,
    lr_scheduler: optim.lr_scheduler.StepLR,
    epoch: int,
) -> None:
    """
    Train model for 1 epoch. Helper function for train_val_test.

    Args:
        model (Union[DDP, DMP]): DMP or DDP model contains the Bert4Rec.
        train_loader (data_utils.DataLoader): DataLoader used for training.
        device (torch.device): the device to train/val/test
        optimizer (optim.Adam): Adam optimizer to train the model
        lr_scheduler (optim.lr_scheduler.StepLR): scheduler to control the learning rate
        epoch (int): the current epoch number

    Returns:
        None.
    """
    model.train()
    if torch.cuda.is_available():
        torch.cuda.set_device(dist.get_rank())
    loss_logs = []
    train_iterator = iter(train_loader)
    # ignore_index=0 skips padding positions when computing the loss.
    ce = nn.CrossEntropyLoss(ignore_index=0)
    # One slot per rank; filled by all_gather_object below.
    outputs = [None for _ in range(dist.get_world_size())]
    # iter(int, 1) is an infinite iterator (int() == 0 != 1); the loop exits via
    # StopIteration from the inner next() — tqdm just renders progress.
    for _ in tqdm(iter(int, 1), desc=f"Epoch {epoch+1}"):
        try:
            batch = next(train_iterator)
            batch = [x.to(device) for x in batch]
            optimizer.zero_grad()
            seqs, labels = batch
            # NOTE(review): `_to_kjt` is defined elsewhere in the file —
            # presumably converts the dense id sequences into a KeyedJaggedTensor.
            kjt = _to_kjt(seqs, device)
            logits = model(kjt)  # B x T x V
            logits = logits.view(-1, logits.size(-1))  # (B*T) x V
            labels = labels.view(-1)  # B*T
            loss = ce(logits, labels)
            loss.backward()
            optimizer.step()
            loss_logs.append(loss.item())
        except StopIteration:
            break
    # Collective: every rank must reach this call or training deadlocks.
    # NOTE(review): an empty train_loader makes len(loss_logs) == 0 and this
    # divides by zero — confirm loaders are never empty.
    dist.all_gather_object(outputs, sum(loss_logs) / len(loss_logs))
    if dist.get_rank() == 0:
        # pyre-fixme[6]: For 1st param expected `Iterable[Variable[_SumT (bound to
        #  _SupportsSum)]]` but got `List[None]`.
        print(f"Epoch {epoch + 1}, average loss { (sum(outputs) or 0) /len(outputs)}")
    lr_scheduler.step()


def _validate(
    model: Union[DDP, DMP],
    val_loader: data_utils.DataLoader,
    device: torch.device,
    epoch: int,
    metric_ks: List[int],
    is_testing: bool = False,
) -> None:
    """
    Evaluate model. Computes and prints metrics including Recalls and NDCGs. Helper function for train_val_test.

    Args:
        model (Union[DDP, DMP]): DMP or DDP model contains the Bert4Rec.
        val_loader (data_utils.DataLoader): DataLoader used for validation.
        device (torch.device): the device to train/val/test
        epoch (int): the current epoch number
        metric_ks (List[int]): the metrics we want to validate
        is_testing (bool): if validation or testing

    Returns:
        None.
    """
    model.eval()
    if torch.cuda.is_available():
        torch.cuda.set_device(dist.get_rank())
    outputs = [None for _ in range(dist.get_world_size())]
    keys = ["Recall@1", "Recall@5", "Recall@10", "NDCG@5", "NDCG@10"]
    metrics_log: Dict[str, List[float]] = {key: [] for key in keys}
    with torch.no_grad():
        for _, batch in enumerate(val_loader):
            batch = [x.to(device) for x in batch]
            # NOTE(review): `_calculate_metrics` is defined elsewhere in the file.
            metrics = _calculate_metrics(model, batch, metric_ks, device)
            for key in keys:
                metrics_log[key].append(metrics[key])
    # NOTE(review): divides by zero if val_loader is empty.
    metrics_avg = {
        key: sum(values) / len(values) for key, values in metrics_log.items()
    }
    # Collective: gathers every rank's averaged metrics.
    dist.all_gather_object(outputs, metrics_avg)
    if dist.get_rank() == 0:
        print(
            # pyre-fixme[6] for 1st positional only parameter expected `List[Dict[str, float]]` but got `List[None]`
            f"{'Epoch ' + str(epoch + 1) if not is_testing else 'Test'}, metrics {_dict_mean(outputs)}"
        )

The provided code snippet includes necessary dependencies for implementing the `train_val_test` function. Write a Python function `def train_val_test( model: Union[DDP, DMP], train_loader: data_utils.DataLoader, val_loader: data_utils.DataLoader, test_loader: data_utils.DataLoader, device: torch.device, optimizer: optim.Adam, lr_scheduler: optim.lr_scheduler.StepLR, num_epochs: int, metric_ks: List[int], export_root: str, ) -> None` to solve the following problem:

Train/validation/test loop. Ensure the dataloader will do the shuffling on each rank and will output the performance metrics like recalls and ndcgs Args: model (Union[DDP, DMP]): DMP or DDP model contains the Bert4Rec. train_loader (data_utils.DataLoader): DataLoader used for training. val_loader (data_utils.DataLoader): DataLoader used for validation. test_loader (data_utils.DataLoader): DataLoader used for testing. device (torch.device): the device to train/val/test optimizer (optim.Adam): Adam optimizer to train the model lr_scheduler (optim.lr_scheduler.StepLR): scheduler to control the learning rate num_epochs (int): the number of epochs to train metric_ks (List[int]): the metrics we want to validate export_root (str): the export root of the saved models Returns: None.

Here is the function:
def train_val_test(
    model: Union[DDP, DMP],
    train_loader: data_utils.DataLoader,
    val_loader: data_utils.DataLoader,
    test_loader: data_utils.DataLoader,
    device: torch.device,
    optimizer: optim.Adam,
    lr_scheduler: optim.lr_scheduler.StepLR,
    num_epochs: int,
    metric_ks: List[int],
    export_root: str,
) -> None:
    """
    Train/validation/test loop.
    Ensure the dataloader will do the shuffling on each rank and
    will output the performance metrics like recalls and ndcgs

    Args:
        model (Union[DDP, DMP]): DMP or DDP model contains the Bert4Rec.
        train_loader (data_utils.DataLoader): DataLoader used for training.
        val_loader (data_utils.DataLoader): DataLoader used for validation.
        test_loader (data_utils.DataLoader): DataLoader used for testing.
        device (torch.device): the device to train/val/test
        optimizer (optim.Adam): Adam optimizer to train the model
        lr_scheduler (optim.lr_scheduler.StepLR): scheduler to control the learning rate
        num_epochs (int): the number of epochs to train
        metric_ks (List[int]): the metrics we want to validate
        export_root (str): the export root of the saved models

    Returns:
        None.
    """
    # Baseline validation before any training; epoch=-1 makes _validate print
    # "Epoch 0".
    _validate(model, val_loader, device, -1, metric_ks)

    for epoch in range(num_epochs):
        # Reseeds the sampler's shuffle per epoch; assumes a DistributedSampler.
        # pyre-fixme[16] Undefined attribute [16]: has no attribute `set_epoch`
        train_loader.sampler.set_epoch(epoch)
        _train_one_epoch(
            model,
            train_loader,
            device,
            optimizer,
            lr_scheduler,
            epoch,
        )
        _validate(model, val_loader, device, epoch, metric_ks)
        # Checkpoint every 10 epochs.
        if (epoch + 1) % 10 == 0:
            # NOTE(review): export_root is string-concatenated without a path
            # separator — callers must pass a trailing "/" (the default "" saves
            # into the CWD); consider confirming this is intentional.
            torch.save(
                model.state_dict(),
                export_root + f"epoch_{epoch + 1}_model.pth",
            )
            print(f"epoch {epoch + 1} model has been saved to {export_root}")
    # Final evaluation on the held-out test set (is_testing=True).
    _validate(model, test_loader, device, num_epochs, metric_ks, True)
Train/validation/test loop. Ensure the dataloader will do the shuffling on each rank and will output the performance metrics like recalls and ndcgs Args: model (Union[DDP, DMP]): DMP or DDP model contains the Bert4Rec. train_loader (data_utils.DataLoader): DataLoader used for training. val_loader (data_utils.DataLoader): DataLoader used for validation. test_loader (data_utils.DataLoader): DataLoader used for testing. device (torch.device): the device to train/val/test optimizer (optim.Adam): Adam optimizer to train the model lr_scheduler (optim.lr_scheduler.StepLR): scheduler to control the learning rate num_epochs (int): the number of epochs to train metric_ks (List[int]): the metrics we want to validate export_root (str): the export root of the saved models Returns: None.
8,852
import random
from collections import Counter
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
import pandas as pd


def _get_dataframe_random(
    user_count: int = 50, item_count: int = 5000, size: int = 20000, min_rating: int = 2
) -> pd.DataFrame:
    """Build a synthetic ratings dataframe with uniformly random users/items.

    Args:
        user_count: user ids are drawn uniformly from [0, user_count).
        item_count: item ids are drawn uniformly from [0, item_count).
        size: number of rows to generate.
        min_rating: constant rating assigned to every row.

    Returns:
        DataFrame with columns ["uid", "sid", "rating", "timestamp"]; the
        timestamp column is simply 0..size-1.
    """
    # randrange(n) is the idiomatic (and equivalent) form of choice(range(n)).
    uids = [random.randrange(user_count) for _ in range(size)]
    sids = [random.randrange(item_count) for _ in range(size)]
    ratings = [min_rating] * size
    timestamps = list(range(0, size))
    df = {"uid": uids, "sid": sids, "rating": ratings, "timestamp": timestamps}
    return pd.DataFrame(df)


def _get_dataframe_movielens(name: str, folder_path: Path) -> pd.DataFrame:
    """Load the raw MovieLens ratings file for dataset `name` from `folder_path`.

    Args:
        name: "ml-1m" (ratings.dat, "::"-separated) or "ml-20m" (ratings.csv).
        folder_path: directory containing the ratings file.

    Returns:
        The ratings DataFrame.

    Raises:
        ValueError: if `name` is not a supported dataset.
    """
    if name == "ml-1m":
        file_path = folder_path.joinpath("ratings.dat")
        # FIX: a multi-character sep forces pandas to fall back to the python
        # engine anyway (with a ParserWarning); request it explicitly.
        df = pd.read_csv(file_path, sep="::", header=None, engine="python")
    elif name == "ml-20m":
        file_path = folder_path.joinpath("ratings.csv")
        df = pd.read_csv(file_path)
    else:
        raise ValueError("Invalid name")
    return df
def get_raw_dataframe(
    name: str,
    user_count: int,
    item_count: int,
    size: int,
    min_rating: int,
    folder_path: Optional[str],
) -> pd.DataFrame:
    """Return the raw ratings dataframe for the requested dataset.

    Dispatches between the synthetic generator ("random") and the MovieLens
    loaders (any other name, e.g. "ml-1m" / "ml-20m").

    Args:
        name (str): "random" for the synthetic set, otherwise the MovieLens
            dataset name.
        user_count (int): random-set user count (ignored for MovieLens).
        item_count (int): random-set item count (ignored for MovieLens).
        size (int): random-set sample count (ignored for MovieLens).
        min_rating (int): constant rating used by the random set.
        folder_path (Optional[str]): MovieLens data directory; when None the
            dataset name itself is used as the directory.

    Returns:
        pd.DataFrame: the raw ratings dataframe.
    """
    if name != "random":
        movielens_dir = Path(name) if folder_path is None else Path(folder_path)
        return _get_dataframe_movielens(name, movielens_dir)
    return _get_dataframe_random(user_count, item_count, size, min_rating)
Gets raw dataframe of both random and movielens Args: name (str): the random or movielens dataset name user_count (int): the random user count of the random set item_count (int): the random item count of the random set size (int): the random sample count of the random set min_rating (int): the minimum rating of the random set folder_path (Optional[str]): the path of the movielens dataset, None for random Returns: dataframe (pd.DataFrame): the raw dataframe
8,853
import copy
import math
from typing import Callable, Optional, Tuple

import torch
import torch.nn as nn
from torchrec.modules.embedding_configs import EmbeddingConfig
from torchrec.modules.embedding_modules import EmbeddingCollection
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor


def clones(module: nn.Module, N: int) -> nn.ModuleList:
    """Produce ``N`` independent deep copies of ``module``.

    Args:
        module (nn.Module): module to clone
        N (int): number of copies

    Returns:
        nn.ModuleList of module copies
    """
    # Each copy is a full deep copy, so the clones share no parameters.
    duplicates = (copy.deepcopy(module) for _ in range(N))
    return nn.ModuleList(duplicates)
Clone the module to N copies Args: module (nn.Module): module to clone N (int): number of copies Returns: nn.ModuleList of module copies
8,854
from typing import List, Optional import click import faiss import faiss.contrib.torch_utils import torch from torchrec import inference as trec_infer from torchrec.datasets.movielens import DEFAULT_RATINGS_COLUMN_NAMES from torchrec.distributed.embedding_types import EmbeddingComputeKernel from torchrec.distributed.model_parallel import DistributedModelParallel from torchrec.distributed.planner.types import ParameterConstraints from torchrec.distributed.types import ShardingEnv, ShardingType from torchrec.modules.embedding_configs import DataType, EmbeddingBagConfig from torchrec.modules.embedding_modules import EmbeddingBagCollection from torchrec.sparse.jagged_tensor import KeyedJaggedTensor DEFAULT_RATINGS_COLUMN_NAMES: List[str] = ["userId", "movieId", "rating", "timestamp"] class EmbeddingComputeKernel(Enum): DENSE = "dense" FUSED = "fused" FUSED_UVM = "fused_uvm" FUSED_UVM_CACHING = "fused_uvm_caching" QUANT = "quant" QUANT_UVM = "quant_uvm" QUANT_UVM_CACHING = "quant_uvm_caching" class DistributedModelParallel(nn.Module, FusedOptimizerModule): """ Entry point to model parallelism. Args: module (nn.Module): module to wrap. env (Optional[ShardingEnv]): sharding environment that has the process group. device (Optional[torch.device]): compute device, defaults to cpu. plan (Optional[ShardingPlan]): plan to use when sharding, defaults to `EmbeddingShardingPlanner.collective_plan()`. sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available to shard with, defaults to `EmbeddingBagCollectionSharder()`. init_data_parallel (bool): data-parallel modules can be lazy, i.e. they delay parameter initialization until the first forward pass. Pass `True` to delay initialization of data parallel modules. Do first forward pass and then call DistributedModelParallel.init_data_parallel(). init_parameters (bool): initialize parameters for modules still on meta device. 
data_parallel_wrapper (Optional[DataParallelWrapper]): custom wrapper for data parallel modules. Example:: def init_weights(m): if isinstance(m, nn.Linear): m.weight.fill_(1.0) elif isinstance(m, EmbeddingBagCollection): for param in m.parameters(): init.kaiming_normal_(param) m = MyModel(device='meta') m = DistributedModelParallel(m) m.apply(init_weights) """ def __init__( self, module: nn.Module, env: Optional[ShardingEnv] = None, device: Optional[torch.device] = None, plan: Optional[ShardingPlan] = None, sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None, init_data_parallel: bool = True, init_parameters: bool = True, data_parallel_wrapper: Optional[DataParallelWrapper] = None, ) -> None: super().__init__() torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}") self.init_parameters = init_parameters self._ddp_wrapped: bool = False if env is None: pg = dist.GroupMember.WORLD assert pg is not None, "Process group is not initialized" env = ShardingEnv.from_process_group(pg) self._env: ShardingEnv = env if device is None: device = torch.device("cpu") self.device: torch.device = device if sharders is None: sharders = get_default_sharders() self._sharder_map: Dict[Type[nn.Module], ModuleSharder[nn.Module]] = { sharder.module_type: sharder for sharder in sharders } if data_parallel_wrapper is None: data_parallel_wrapper = DefaultDataParallelWrapper() self._data_parallel_wrapper: DataParallelWrapper = data_parallel_wrapper if plan is None: planner = EmbeddingShardingPlanner( topology=Topology( local_world_size=get_local_size(self._env.world_size), world_size=self._env.world_size, compute_device=self.device.type, ) ) pg = self._env.process_group if pg is not None: plan = planner.collective_plan(module, sharders, pg) else: plan = planner.plan(module, sharders) self._plan: ShardingPlan = plan self._dmp_wrapped_module: nn.Module = self._init_dmp(module) self._optim: CombinedOptimizer = self._init_optim(self._dmp_wrapped_module) if 
init_parameters: self._init_parameters(self.module) if init_data_parallel: self.init_data_parallel() def module(self) -> nn.Module: """ Property to directly access sharded module, which will not be wrapped in DDP, FSDP, DMP, or any other parallelism wrappers. """ return get_unwrapped_module(self) def module(self, value: nn.Module) -> None: if isinstance(self.module, DistributedDataParallel) or isinstance( self.module, FullyShardedDataParallel ): raise RuntimeError( "module can't be set after calling init_data_parallel(...)" ) else: self._dmp_wrapped_module = value # pyre-ignore [2, 3] def forward(self, *args, **kwargs) -> Any: return self._dmp_wrapped_module(*args, **kwargs) def init_data_parallel(self) -> None: """ See init_data_parallel c-tor argument for usage. It's safe to call this method multiple times. """ if not self._ddp_wrapped: # Allocate any 'meta' tensors if self.init_parameters: self._init_parameters(self._dmp_wrapped_module) self._data_parallel_wrapper.wrap(self, self._env, self.device) self._ddp_wrapped = True def copy( self, device: torch.device, ) -> "DistributedModelParallel": """ Recursively copy submodules to new device by calling per-module customized copy process, since some modules needs to use the original references (like `ShardedModule` for inference). 
""" assert isinstance(device, torch.device) # dmp code deep copy with sharded_model_copy(device=None): copy_dmp = copy.deepcopy(self) # tensor resident module deep copy copy_dmp_wrapped_module = copy_to_device( self._dmp_wrapped_module, self.device, device ) copy_dmp._dmp_wrapped_module = copy_dmp_wrapped_module return copy_dmp def _init_dmp(self, module: nn.Module) -> nn.Module: return self._shard_modules_impl(module) def _init_optim(self, module: nn.Module) -> CombinedOptimizer: # pyre-ignore [6] return CombinedOptimizer(self._fused_optim_impl(module, [])) def _fused_optim_impl( self, module: nn.Module, fused_optims: List[Tuple[str, KeyedOptimizer]], path: str = "", ) -> List[Tuple[str, KeyedOptimizer]]: if isinstance(module, FusedOptimizerModule): fused_optims.append((path, module.fused_optimizer)) return fused_optims for name, child in module.named_children(): self._fused_optim_impl( child, fused_optims, path + "." + name if path else name, ) return fused_optims def _shard_modules_impl( self, module: nn.Module, path: str = "", ) -> nn.Module: # pre-sharded module if isinstance(module, ShardedModule): return module # shardable module module_sharding_plan = self._plan.get_plan_for_module(path) if module_sharding_plan: sharder_key = type(module) module = self._sharder_map[sharder_key].shard( module, module_sharding_plan, self._env, self.device, ) return module for name, child in module.named_children(): child = self._shard_modules_impl( child, path + "." + name if path else name, ) setattr(module, name, child) return module def _init_parameters(self, module: nn.Module) -> None: def init_parameters(module: nn.Module) -> None: # Allocate parameters and buffers if over 'meta' device. 
has_meta_param = False for name, param in module._parameters.items(): if isinstance(param, torch.Tensor) and param.device.type == "meta": module._parameters[name] = nn.Parameter( torch.empty_like(param, device=self.device), requires_grad=param.requires_grad, ) has_meta_param = True for name, buffer in module._buffers.items(): if isinstance(buffer, torch.Tensor) and buffer.device.type == "meta": module._buffers[name] = torch.zeros_like(buffer, device=self.device) # Init parameters if at least one parameter is over 'meta' device. if has_meta_param and hasattr(module, "reset_parameters"): module.reset_parameters() module.apply(init_parameters) def sparse_grad_parameter_names( self, destination: Optional[List[str]] = None, prefix: str = "" ) -> List[str]: destination = [] if destination is None else destination return self._sparse_grad_parameter_names(self.module, destination, prefix) def _sparse_grad_parameter_names( self, module: nn.Module, destination: List[str], prefix: str = "" ) -> List[str]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): pass elif isinstance(module, nn.Embedding): if module.sparse: destination.append(append_prefix(prefix, "weight")) elif isinstance(module, nn.EmbeddingBag): if module.sparse: destination.append(append_prefix(prefix, "weight")) else: for name, child in module.named_children(): self._sparse_grad_parameter_names( child, destination, append_prefix(prefix, name) ) return destination # pyre-ignore [14] def state_dict( self, destination: Optional[Dict[str, Any]] = None, prefix: str = "", keep_vars: bool = False, ) -> Dict[str, Any]: state_dict = get_module(self).state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( state_dict, prefix + _DDP_STATE_DICT_PREFIX ) add_prefix_to_state_dict(state_dict, prefix) return state_dict # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module` # inconsistently. 
    def load_state_dict(
        self,
        state_dict: "OrderedDict[str, torch.Tensor]",
        prefix: str = "",
        strict: bool = True,
    ) -> _IncompatibleKeys:
        """Load `state_dict`, delegating to the recursive `_load_state_dict`."""
        return self._load_state_dict(self, state_dict, prefix, strict)

    def _load_state_dict(
        self,
        module: nn.Module,
        state_dict: "OrderedDict[str, torch.Tensor]",
        prefix: str = "",
        strict: bool = True,
    ) -> _IncompatibleKeys:
        """Recursively load, unwrapping DDP and delegating to ShardedModules.

        Returns accumulated missing/unexpected keys from all visited children.
        """
        missing_keys = []
        unexpected_keys = []
        module = get_module(module)
        if isinstance(module, DistributedDataParallel):
            # Re-insert the DDP "module." segment so keys match the wrapped module.
            torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
                state_dict, prefix
            )
            add_prefix_to_state_dict(state_dict, prefix + _DDP_STATE_DICT_PREFIX)
        if isinstance(module, ShardedModule):
            # ShardedModule handles its own (sharded) loading wholesale.
            return module.load_state_dict(state_dict, strict=strict)
        else:
            # Load only this module's own tensors, then recurse per child with a
            # filtered view of the state dict (child keys re-rooted to "").
            module._load_from_state_dict(
                state_dict, prefix, {}, strict, missing_keys, unexpected_keys, []
            )
            for name, child in module.named_children():
                m_keys, u_keys = self._load_state_dict(
                    child,
                    filter_state_dict(state_dict, prefix + name),
                    "",
                    strict,
                )
                missing_keys.extend(m_keys)
                unexpected_keys.extend(u_keys)
        return _IncompatibleKeys(
            missing_keys=missing_keys, unexpected_keys=unexpected_keys
        )

    def _named_parameters(
        self,
        module: nn.Module,
        prefix: str = "",
        recurse: bool = True,
        strip_ddp: bool = True,
    ) -> Iterator[Tuple[str, torch.nn.Parameter]]:
        """Yield (name, param), letting ShardedModules report their own parameters."""
        if strip_ddp:
            module = get_unwrapped_module(module)
        if isinstance(module, ShardedModule):
            yield from module.named_parameters(prefix, recurse)
        else:
            # Non-recursive at this level; recursion is done manually below so
            # ShardedModule children can be special-cased.
            yield from module.named_parameters(prefix, recurse=False)
            for name, child in module.named_children():
                yield from self._named_parameters(
                    child,
                    append_prefix(prefix, name),
                    recurse,
                    strip_ddp,
                )

    def named_parameters(
        self,
        prefix: str = "",
        recurse: bool = True,
        remove_duplicate: bool = True,
    ) -> Iterator[Tuple[str, torch.nn.Parameter]]:
        """Yield (name, param) pairs; deduplicates shared tensors when requested."""
        gen = self._named_parameters(
            self.module,
            prefix,
            recurse,
        )
        memo = set()
        for key, param in gen:
            if param in memo:
                continue
            # Only populate memo when deduplication is requested; otherwise
            # duplicates (shared parameters) are yielded under every name.
            if remove_duplicate:
                memo.add(param)
            yield key, param

    def bare_named_parameters(
        self,
        prefix: str = "",
        recurse: bool = True,
    ) -> Iterator[Tuple[str, torch.nn.Parameter]]:
        # Unlike named_parameters(), this variant always deduplicates.
        gen = self._named_parameters(
            self.module,
            prefix,
            recurse,
        )
        memo = set()
        for key, param in gen:
            if param in memo:
                continue
            memo.add(param)
            yield key, param

    # NOTE(review): defined without `self` — presumably decorated @staticmethod
    # upstream (it calls itself via the class name); confirm.
    def _sharded_parameter_names(module: nn.Module, prefix: str = "") -> Iterator[str]:
        """Yield names of parameters owned by ShardedModules in the tree."""
        module = get_unwrapped_module(module)
        if isinstance(module, ShardedModule):
            yield from module.sharded_parameter_names(prefix)
        else:
            for name, child in module.named_children():
                yield from DistributedModelParallel._sharded_parameter_names(
                    child, append_prefix(prefix, name)
                )

    def _named_buffers(
        self, module: nn.Module, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        """Yield (name, buffer), letting ShardedModules report their own buffers."""
        module = get_unwrapped_module(module)
        if isinstance(module, ShardedModule):
            yield from module.named_buffers(prefix, recurse)
        else:
            # Manual recursion mirrors _named_parameters (ShardedModule special-case).
            yield from module.named_buffers(prefix, recurse=False)
            for name, child in module.named_children():
                yield from self._named_buffers(
                    child, append_prefix(prefix, name), recurse
                )

    def named_buffers(
        self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        """Yield (name, buffer) pairs; deduplicates shared tensors when requested."""
        gen = self._named_buffers(self.module, prefix, recurse)
        memo = set()
        for key, param in gen:
            if param in memo:
                continue
            if remove_duplicate:
                memo.add(param)
            yield key, param

    # NOTE(review): accessor defined as a plain method here — presumably a
    # @property upstream; confirm before calling without parentheses.
    def fused_optimizer(self) -> KeyedOptimizer:
        return self._optim

    def plan(self) -> ShardingPlan:
        return self._plan


def _reset_parameters(module: nn.Module) -> None:
    """Call reset_parameters() on every submodule that defines it."""
    for _, m in module.named_modules():
        if hasattr(m, "reset_parameters"):
            m.reset_parameters()


class ParameterConstraints:
    """
    Stores user provided constraints around the sharding plan.

    If provided, `pooling_factors`, `num_poolings`, and `batch_sizes` must match in
    length, as per sample.
""" sharding_types: Optional[List[str]] = None compute_kernels: Optional[List[str]] = None min_partition: Optional[int] = None # CW sharding, min CW dim to shard pooling_factors: List[float] = field( default_factory=lambda: [POOLING_FACTOR] ) # average number of embedding lookups required per sample num_poolings: Optional[List[float]] = None # number of poolings per sample in batch batch_sizes: Optional[List[int]] = None # batch size per input feature is_weighted: bool = False cache_params: Optional[CacheParams] = None enforce_hbm: Optional[bool] = None stochastic_rounding: Optional[bool] = None bounds_check_mode: Optional[BoundsCheckMode] = None feature_names: Optional[List[str]] = None class ShardingType(Enum): """ Well-known sharding types, used by inter-module optimizations. """ # Replicated on all ranks DATA_PARALLEL = "data_parallel" # Placed on a single rank TABLE_WISE = "table_wise" # Placed on multiple ranks as different sharded tables COLUMN_WISE = "column_wise" # Range-split on the first dimension across all ranks ROW_WISE = "row_wise" # Row-wise on the same node and table-wise across nodes # Useful when having multiple ranks per node # and comms within a single node are more efficient than across nodes. TABLE_ROW_WISE = "table_row_wise" # Column-wise on the same node and table-wise across nodes TABLE_COLUMN_WISE = "table_column_wise" class ShardingEnv: """ Provides an abstraction over `torch.distributed.ProcessGroup`, which practically enables `DistributedModelParallel` to be used during inference. """ def __init__( self, world_size: int, rank: int, pg: Optional[dist.ProcessGroup] = None, ) -> None: self.world_size = world_size self.rank = rank self.process_group: Optional[dist.ProcessGroup] = pg def from_process_group(cls, pg: dist.ProcessGroup) -> "ShardingEnv": """ Creates ProcessGroup-based sharding environment. NOTE: Typically used during training. 
""" return cls(dist.get_world_size(pg), dist.get_rank(pg), pg) def from_local(cls, world_size: int, rank: int) -> "ShardingEnv": """ Creates a local host-based sharding environment. NOTE: Typically used during single host inference. """ return cls(world_size, rank, None) class EmbeddingBagConfig(BaseEmbeddingConfig): pooling: PoolingType = PoolingType.SUM class EmbeddingBagCollection(EmbeddingBagCollectionInterface): """ EmbeddingBagCollection represents a collection of pooled embeddings (`EmbeddingBags`). It processes sparse data in the form of `KeyedJaggedTensor` with values of the form [F X B X L] where: * F: features (keys) * B: batch size * L: length of sparse features (jagged) and outputs a `KeyedTensor` with values of the form [B * (F * D)] where: * F: features (keys) * D: each feature's (key's) embedding dimension * B: batch size Args: tables (List[EmbeddingBagConfig]): list of embedding tables. is_weighted (bool): whether input `KeyedJaggedTensor` is weighted. device (Optional[torch.device]): default compute device. 
Example:: table_0 = EmbeddingBagConfig( name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"] ) table_1 = EmbeddingBagConfig( name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"] ) ebc = EmbeddingBagCollection(tables=[table_0, table_1]) # 0 1 2 <-- batch # "f1" [0,1] None [2] # "f2" [3] [4] [5,6,7] # ^ # feature features = KeyedJaggedTensor( keys=["f1", "f2"], values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]), offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]), ) pooled_embeddings = ebc(features) print(pooled_embeddings.values()) tensor([[-0.8899, -0.1342, -1.9060, -0.0905, -0.2814, -0.9369, -0.7783], [ 0.0000, 0.0000, 0.0000, 0.1598, 0.0695, 1.3265, -0.1011], [-0.4256, -1.1846, -2.1648, -1.0893, 0.3590, -1.9784, -0.7681]], grad_fn=<CatBackward0>) print(pooled_embeddings.keys()) ['f1', 'f2'] print(pooled_embeddings.offset_per_key()) tensor([0, 3, 7]) """ def __init__( self, tables: List[EmbeddingBagConfig], is_weighted: bool = False, device: Optional[torch.device] = None, ) -> None: super().__init__() torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}") self._is_weighted = is_weighted self.embedding_bags: nn.ModuleDict = nn.ModuleDict() self._embedding_bag_configs = tables self._lengths_per_embedding: List[int] = [] self._dtypes: List[int] = [] table_names = set() for embedding_config in tables: if embedding_config.name in table_names: raise ValueError(f"Duplicate table name {embedding_config.name}") table_names.add(embedding_config.name) dtype = ( torch.float32 if embedding_config.data_type == DataType.FP32 else torch.float16 ) self.embedding_bags[embedding_config.name] = nn.EmbeddingBag( num_embeddings=embedding_config.num_embeddings, embedding_dim=embedding_config.embedding_dim, mode=pooling_type_to_str(embedding_config.pooling), device=device, include_last_offset=True, dtype=dtype, ) if device is None: device = self.embedding_bags[embedding_config.name].weight.device self._dtypes.append(embedding_config.data_type.value) 
if not embedding_config.feature_names: embedding_config.feature_names = [embedding_config.name] self._lengths_per_embedding.extend( len(embedding_config.feature_names) * [embedding_config.embedding_dim] ) self._device: torch.device = device or torch.device("cpu") self._embedding_names: List[str] = [ embedding for embeddings in get_embedding_names_by_table(tables) for embedding in embeddings ] self._feature_names: List[List[str]] = [table.feature_names for table in tables] self.reset_parameters() def forward(self, features: KeyedJaggedTensor) -> KeyedTensor: """ Args: features (KeyedJaggedTensor): KJT of form [F X B X L]. Returns: KeyedTensor """ flat_feature_names: List[str] = [] for names in self._feature_names: flat_feature_names.extend(names) inverse_indices = reorder_inverse_indices( inverse_indices=features.inverse_indices_or_none(), feature_names=flat_feature_names, ) pooled_embeddings: List[torch.Tensor] = [] feature_dict = features.to_dict() for i, embedding_bag in enumerate(self.embedding_bags.values()): for feature_name in self._feature_names[i]: f = feature_dict[feature_name] per_sample_weights: Optional[torch.Tensor] = None if self._is_weighted: per_sample_weights = ( f.weights().half() if self._dtypes[i] == DataType.FP16.value else f.weights() ) res = embedding_bag( input=f.values(), offsets=f.offsets(), per_sample_weights=( per_sample_weights if self._is_weighted else None ), ).float() pooled_embeddings.append(res) return KeyedTensor( keys=self._embedding_names, values=process_pooled_embeddings( pooled_embeddings=pooled_embeddings, inverse_indices=inverse_indices, ), length_per_key=self._lengths_per_embedding, ) def is_weighted(self) -> bool: return self._is_weighted def embedding_bag_configs(self) -> List[EmbeddingBagConfig]: return self._embedding_bag_configs def device(self) -> torch.device: return self._device def reset_parameters(self) -> None: if (isinstance(self.device, torch.device) and self.device.type == "meta") or ( isinstance(self.device, 
str) and self.device == "meta" ): return # Initialize embedding bags weights with init_fn for table_config in self._embedding_bag_configs: assert table_config.init_fn is not None param = self.embedding_bags[f"{table_config.name}"].weight # pyre-ignore table_config.init_fn(param) # pyre-ignore class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta): """Represents an (optionally weighted) keyed jagged tensor. A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose slices may be of different lengths. Keyed on first dimension and jagged on the last dimension. Implementation is torch.jit.script-able. Args: keys (List[str]): keys to the jagged Tensor. values (torch.Tensor): values tensor in dense representation. weights (Optional[torch.Tensor]): if the values have weights. Tensor with the same shape as values. lengths (Optional[torch.Tensor]): jagged slices, represented as lengths. offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative offsets. stride (Optional[int]): number of examples per batch. stride_per_key_per_rank (Optional[List[List[int]]]): batch size (number of examples) per key per rank, with the outer list representing the keys and the inner list representing the values. Each value in the inner list represents the number of examples in the batch from the rank of its index in a distributed context. length_per_key (Optional[List[int]]): start length for each key. offset_per_key (Optional[List[int]]): start offset for each key and final offset. index_per_key (Optional[Dict[str, int]]): index for each key. jt_dict (Optional[Dict[str, JaggedTensor]]): inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to expand deduplicated embedding output for variable stride per key. Example:: # 0 1 2 <-- dim_1 # "Feature0" [V0,V1] None [V2] # "Feature1" [V3] [V4] [V5,V6,V7] # ^ # dim_0 dim_0: keyed dimension (ie. `Feature0`, `Feature1`) dim_1: optional second dimension (ie. 
batch size) dim_2: The jagged dimension which has slice lengths between 0-3 in the above example # We represent this data with following inputs: values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0 index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset """ # This is the subset of fields on KJT which are required (all other fields # can be derived from these fields, and are only cached) _fields = [ "_values", "_weights", "_lengths", "_offsets", ] def __init__( self, keys: List[str], values: torch.Tensor, weights: Optional[torch.Tensor] = None, lengths: Optional[torch.Tensor] = None, offsets: Optional[torch.Tensor] = None, stride: Optional[int] = None, stride_per_key_per_rank: Optional[List[List[int]]] = None, # Below exposed to ensure torch.script-able length_per_key: Optional[List[int]] = None, offset_per_key: Optional[List[int]] = None, index_per_key: Optional[Dict[str, int]] = None, jt_dict: Optional[Dict[str, JaggedTensor]] = None, inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None, ) -> None: self._keys: List[str] = keys self._values: torch.Tensor = values self._weights: Optional[torch.Tensor] = weights if offsets is not None: _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets") if lengths is not None: _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths") self._lengths: Optional[torch.Tensor] = lengths self._offsets: Optional[torch.Tensor] = offsets self._stride_per_key_per_rank: List[List[int]] = [] self._stride_per_key: List[int] = [] 
self._variable_stride_per_key: bool = False self._stride: int = -1 if stride_per_key_per_rank is not None: if stride is not None: raise ValueError( "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`" ) self._stride_per_key_per_rank = stride_per_key_per_rank self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank] self._variable_stride_per_key = True if not stride_per_key_per_rank: self._stride = 0 elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()): self._stride = self.stride_per_key()[0] else: if torch.jit.is_tracing(): stride = _maybe_compute_stride_kjt_scripted( keys, stride, lengths, offsets )[0] else: stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets) self._stride = stride self._stride_per_key_per_rank = [[stride]] * len(self._keys) self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank] # lazy fields self._length_per_key: Optional[List[int]] = length_per_key self._offset_per_key: Optional[List[int]] = offset_per_key self._index_per_key: Optional[Dict[str, int]] = index_per_key self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = ( inverse_indices ) self._lengths_offset_per_key: List[int] = [] def from_offsets_sync( keys: List[str], values: torch.Tensor, offsets: torch.Tensor, weights: Optional[torch.Tensor] = None, stride: Optional[int] = None, stride_per_key_per_rank: Optional[List[List[int]]] = None, inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None, ) -> "KeyedJaggedTensor": kjt = KeyedJaggedTensor( keys=keys, values=values, weights=weights, offsets=offsets, stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, inverse_indices=inverse_indices, ) return kjt.sync() def from_lengths_sync( keys: List[str], values: torch.Tensor, lengths: torch.Tensor, weights: Optional[torch.Tensor] = None, stride: Optional[int] = None, stride_per_key_per_rank: Optional[List[List[int]]] = None, 
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None, ) -> "KeyedJaggedTensor": kjt = KeyedJaggedTensor( keys=keys, values=values, weights=weights, lengths=lengths, stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, inverse_indices=inverse_indices, ) return kjt.sync() def concat( kjt_list: List["KeyedJaggedTensor"], ) -> "KeyedJaggedTensor": if len(kjt_list) == 0: raise ValueError("Can't concat empty KJT list") is_weighted: bool = kjt_list[0].weights_or_none() is not None has_length_per_key: bool = True length_per_key: List[int] = [] keys: List[str] = [] value_list: List[torch.Tensor] = [] weight_list: List[torch.Tensor] = [] length_list: List[torch.Tensor] = [] stride_per_key_per_rank: List[List[int]] = [] stride: Optional[int] = None variable_stride_per_key_list = [ kjt.variable_stride_per_key() for kjt in kjt_list ] assert all(variable_stride_per_key_list) or not any( variable_stride_per_key_list ), "variable stride per key must be consistent for all KJTs" variable_stride_per_key = all(variable_stride_per_key_list) for kjt in kjt_list: curr_is_weighted: bool = kjt.weights_or_none() is not None if is_weighted != curr_is_weighted: raise ValueError("Can't merge weighted KJT with unweighted KJT") _length_per_key: Optional[List[int]] = None if kjt._length_per_key is None: has_length_per_key = False else: _length_per_key = kjt._length_per_key if has_length_per_key and _length_per_key is not None: length_per_key += _length_per_key keys += kjt.keys() value_list.append(kjt.values()) if is_weighted: weight_list.append(kjt.weights()) length_list.append(kjt.lengths()) if variable_stride_per_key: stride_per_key_per_rank += kjt.stride_per_key_per_rank() elif stride is None: stride = kjt.stride() else: assert stride == kjt.stride(), "strides must be consistent for all KJTs" return KeyedJaggedTensor( keys=keys, values=torch.cat(value_list, dim=0), weights=torch.cat(weight_list, dim=0) if is_weighted else None, lengths=torch.cat(length_list, dim=0), 
            stride=stride,
            stride_per_key_per_rank=(
                stride_per_key_per_rank if variable_stride_per_key else None
            ),
            # Pass through the concatenated length_per_key only if every input
            # KJT had one cached; otherwise let it be recomputed lazily.
            length_per_key=length_per_key if has_length_per_key else None,
        )

    # NOTE(review): no `self` — presumably @staticmethod upstream; confirm.
    def empty(
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        values_dtype: Optional[torch.dtype] = None,
        weights_dtype: Optional[torch.dtype] = None,
        lengths_dtype: torch.dtype = torch.int32,
    ) -> "KeyedJaggedTensor":
        """Return a KJT with no keys, no values, and stride 0."""
        weights = (
            torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
        )
        return KeyedJaggedTensor(
            keys=torch.jit.annotate(List[str], []),
            values=torch.empty(0, dtype=values_dtype, device=device),
            weights=weights,
            lengths=torch.empty(0, dtype=lengths_dtype, device=device),
            stride=0,
        )

    # NOTE(review): no `self` — presumably @staticmethod upstream; confirm.
    def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
        """Return an empty KJT matching `kjt`'s device, dtypes, and stride config."""
        stride, stride_per_key_per_rank = (
            (None, kjt.stride_per_key_per_rank())
            if kjt.variable_stride_per_key()
            else (kjt.stride(), None)
        )
        return KeyedJaggedTensor(
            keys=[],
            values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
            weights=(
                None
                if kjt.weights_or_none() is None
                else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
            ),
            lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
        )

    # NOTE(review): no `self` — presumably @staticmethod upstream; confirm.
    def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
        """
        Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
        but this function will ONLY work if the JaggedTensors all
        have the same "implicit" batch_size dimension.

        Basically, we can visualize JaggedTensors as 2-D tensors
        of the format of [batch_size x variable_feature_dim].
        In case, we have some batch without a feature value,
        the input JaggedTensor could just not include any values.

        But KeyedJaggedTensor (by default) typically pad "None"
        so that all the JaggedTensors stored in the KeyedJaggedTensor
        have the same batch_size dimension.
That is, in the case, the JaggedTensor input didn't automatically pad for the empty batches, this function would error / not work. Consider the visualization of the following KeyedJaggedTensor: # 0 1 2 <-- dim_1 # "Feature0" [V0,V1] None [V2] # "Feature1" [V3] [V4] [V5,V6,V7] # ^ # dim_0 Notice that the inputs for this KeyedJaggedTensor would have looked like: values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0 index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset Now if the input jt_dict = { # "Feature0" [V0,V1] [V2] # "Feature1" [V3] [V4] [V5,V6,V7] } and the "None" is left out from each JaggedTensor, then this function would fail as we would not correctly be able to pad "None" as it does not technically know the correct batch / place to pad within the JaggedTensor. Essentially, the lengths Tensor inferred by this function would be [2, 1, 1, 1, 3] indicating variable batch_size dim_1 violates the existing assumption / precondition that KeyedJaggedTensor's should have fixed batch_size dimension. 
""" kjt_keys = list(jt_dict.keys()) kjt_vals_list: List[torch.Tensor] = [] kjt_lens_list: List[torch.Tensor] = [] kjt_weights_list: List[torch.Tensor] = [] stride_per_key: List[int] = [] for jt in jt_dict.values(): stride_per_key.append(len(jt.lengths())) kjt_vals_list.append(jt.values()) kjt_lens_list.append(jt.lengths()) weight = jt.weights_or_none() if weight is not None: kjt_weights_list.append(weight) kjt_vals = torch.concat(kjt_vals_list) kjt_lens = torch.concat(kjt_lens_list) kjt_weights = ( torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None ) kjt_stride, kjt_stride_per_key_per_rank = ( (stride_per_key[0], None) if all(s == stride_per_key[0] for s in stride_per_key) else (None, [[stride] for stride in stride_per_key]) ) kjt = KeyedJaggedTensor( keys=kjt_keys, values=kjt_vals, weights=kjt_weights, lengths=kjt_lens, stride=kjt_stride, stride_per_key_per_rank=kjt_stride_per_key_per_rank, ).sync() return kjt def sync(self) -> "KeyedJaggedTensor": self.length_per_key() self.offset_per_key() return self def unsync(self) -> "KeyedJaggedTensor": self._length_per_key = None self._offset_per_key = None return self def device(self) -> torch.device: return self._values.device def lengths(self) -> torch.Tensor: _lengths = _maybe_compute_lengths(self._lengths, self._offsets) self._lengths = _lengths return _lengths def lengths_or_none(self) -> Optional[torch.Tensor]: return self._lengths def offsets(self) -> torch.Tensor: _offsets = _maybe_compute_offsets(self._lengths, self._offsets) self._offsets = _offsets return _offsets def offsets_or_none(self) -> Optional[torch.Tensor]: return self._offsets def keys(self) -> List[str]: return self._keys def values(self) -> torch.Tensor: return self._values def weights(self) -> torch.Tensor: return _get_weights_or_throw(self._weights) def weights_or_none(self) -> Optional[torch.Tensor]: return self._weights def stride(self) -> int: return self._stride def stride_per_key(self) -> List[int]: return 
self._stride_per_key def stride_per_key_per_rank(self) -> List[List[int]]: return self._stride_per_key_per_rank def variable_stride_per_key(self) -> bool: return self._variable_stride_per_key def inverse_indices(self) -> Tuple[List[str], torch.Tensor]: return _get_inverse_indices_or_throw(self._inverse_indices) def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]: return self._inverse_indices def _key_indices(self) -> Dict[str, int]: _index_per_key: Dict[str, int] = _maybe_compute_index_per_key( self._keys, self._index_per_key, ) self._index_per_key = _index_per_key return _index_per_key def length_per_key(self) -> List[int]: _length_per_key = _maybe_compute_length_per_key( keys=self._keys, stride=self.stride(), stride_per_key=self.stride_per_key(), variable_stride_per_key=self.variable_stride_per_key(), length_per_key=self._length_per_key, lengths=self._lengths, offsets=self._offsets, values=self._values, ) self._length_per_key = _length_per_key return _length_per_key def length_per_key_or_none(self) -> Optional[List[int]]: return self._length_per_key def offset_per_key(self) -> List[int]: _length_per_key, _offset_per_key = _maybe_compute_offset_per_key( keys=self._keys, stride=self.stride(), stride_per_key=self.stride_per_key(), variable_stride_per_key=self.variable_stride_per_key(), length_per_key=self._length_per_key, offset_per_key=self._offset_per_key, lengths=self._lengths, offsets=self._offsets, values=self._values, ) self._length_per_key = _length_per_key self._offset_per_key = _offset_per_key return _offset_per_key def offset_per_key_or_none(self) -> Optional[List[int]]: return self._offset_per_key def lengths_offset_per_key(self) -> List[int]: if not self._lengths_offset_per_key: self._lengths_offset_per_key = _cumsum(self.stride_per_key()) return self._lengths_offset_per_key def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]: split_list: List[KeyedJaggedTensor] = [] start = 0 start_offset = 0 _length_per_key = 
self.length_per_key() _offset_per_key = self.offset_per_key() for segment in segments: end = start + segment end_offset = _offset_per_key[end] keys: List[str] = self._keys[start:end] stride, stride_per_key_per_rank = ( (None, self.stride_per_key_per_rank()[start:end]) if self.variable_stride_per_key() else (self._stride, None) ) if segment == len(self._keys): # no torch slicing required split_list.append( KeyedJaggedTensor( keys=self._keys, values=self._values, weights=self.weights_or_none(), lengths=self._lengths, offsets=self._offsets, stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, length_per_key=self._length_per_key, offset_per_key=self._offset_per_key, index_per_key=self._index_per_key, jt_dict=self._jt_dict, inverse_indices=None, ) ) elif segment == 0: empty_int_list: List[int] = torch.jit.annotate(List[int], []) split_list.append( KeyedJaggedTensor( keys=keys, values=torch.tensor( empty_int_list, device=self.device(), dtype=self._values.dtype, ), weights=( None if self.weights_or_none() is None else torch.tensor( empty_int_list, device=self.device(), dtype=self.weights().dtype, ) ), lengths=torch.tensor( empty_int_list, device=self.device(), dtype=torch.int ), offsets=torch.tensor( empty_int_list, device=self.device(), dtype=torch.int ), stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, length_per_key=None, offset_per_key=None, index_per_key=None, jt_dict=None, inverse_indices=None, ) ) else: split_length_per_key = _length_per_key[start:end] if not torch.jit.is_scripting() and is_torchdynamo_compiling(): # Checks for dynamo dynamic shapes tracing torch._check_is_size(start_offset) torch._check_is_size(end_offset) torch._check_is_size(end_offset - start_offset) torch._check(start_offset <= self._values.size(0)) torch._check(end_offset <= self._values.size(0)) torch._check(end_offset >= start_offset) split_list.append( KeyedJaggedTensor( keys=keys, values=self._values[start_offset:end_offset], weights=( None if 
self.weights_or_none() is None else self.weights()[start_offset:end_offset] ), lengths=self.lengths()[ self.lengths_offset_per_key()[ start ] : self.lengths_offset_per_key()[end] ], offsets=None, stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, length_per_key=split_length_per_key, offset_per_key=None, index_per_key=None, jt_dict=None, inverse_indices=None, ) ) start = end start_offset = end_offset return split_list def permute( self, indices: List[int], indices_tensor: Optional[torch.Tensor] = None, include_inverse_indices: bool = False, ) -> "KeyedJaggedTensor": if indices_tensor is None: indices_tensor = torch.tensor( indices, dtype=torch.int, device=self.device() ) length_per_key = self.length_per_key() permuted_keys: List[str] = [] permuted_stride_per_key_per_rank: List[List[int]] = [] permuted_length_per_key: List[int] = [] permuted_lengths_sum = 0 for index in indices: key = self.keys()[index] permuted_keys.append(key) permuted_stride_per_key_per_rank.append( self.stride_per_key_per_rank()[index] ) permuted_length_per_key.append(length_per_key[index]) permuted_lengths_sum += length_per_key[index] if self.variable_stride_per_key(): length_per_key_tensor = _pin_and_move( torch.tensor(self.length_per_key()), self.device() ) stride_per_key_tensor = _pin_and_move( torch.tensor(self.stride_per_key()), self.device() ) permuted_lengths, _ = _permute_tensor_by_segments( self.lengths(), stride_per_key_tensor, indices_tensor, None, ) permuted_values, permuted_weights = _permute_tensor_by_segments( self.values(), length_per_key_tensor, indices_tensor, self.weights_or_none(), ) else: ( permuted_lengths, permuted_values, permuted_weights, ) = torch.ops.fbgemm.permute_2D_sparse_data( indices_tensor, self.lengths().view(len(self._keys), -1), self.values(), self.weights_or_none(), permuted_lengths_sum, ) stride, optional_permuted_stride_per_key_per_rank = ( (None, permuted_stride_per_key_per_rank) if self.variable_stride_per_key() else (self._stride, None) ) 
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            # Inverse indices only survive the permute when explicitly requested.
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt

    def flatten_lengths(self) -> "KeyedJaggedTensor":
        """Return a copy whose lengths tensor is flattened to 1-D.

        Values/weights are shared (not copied); cached per-key offsets and the
        jt_dict are dropped and will be recomputed lazily.
        """
        stride, stride_per_key_per_rank = (
            (None, self.stride_per_key_per_rank())
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values,
            weights=self._weights,
            lengths=self.lengths().view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=self.length_per_key(),
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=None,
        )

    def __getitem__(self, key: str) -> JaggedTensor:
        """Return the JaggedTensor slice for `key` (views into values/weights)."""
        offset_per_key = self.offset_per_key()
        index = self._key_indices()[key]
        start_offset = offset_per_key[index]
        # Guard against a missing final offset: fall back to an empty slice.
        end_offset = (
            offset_per_key[index + 1]
            if index + 1 < len(offset_per_key)
            else start_offset
        )
        return JaggedTensor(
            values=self._values[start_offset:end_offset],
            weights=(
                None
                if self.weights_or_none() is None
                else self.weights()[start_offset:end_offset]
            ),
            lengths=self.lengths()[
                self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
                    index + 1
                ]
            ],
            offsets=None,
        )

    def to_dict(self) -> Dict[str, JaggedTensor]:
        """Return {key: JaggedTensor}; result is cached on self._jt_dict."""
        _jt_dict = _maybe_compute_kjt_to_jt_dict(
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            keys=self.keys(),
            length_per_key=self.length_per_key(),
            lengths=self.lengths(),
            values=self.values(),
            variable_stride_per_key=self.variable_stride_per_key(),
            weights=self.weights_or_none(),
            jt_dict=self._jt_dict,
        )
        self._jt_dict = _jt_dict
        return _jt_dict

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream) weights = self._weights lengths = self._lengths offsets = self._offsets if weights is not None: weights.record_stream(stream) if lengths is not None: lengths.record_stream(stream) if offsets is not None: offsets.record_stream(stream) def to( self, device: torch.device, non_blocking: bool = False, dtype: Optional[torch.dtype] = None, ) -> "KeyedJaggedTensor": weights = self._weights lengths = self._lengths offsets = self._offsets stride, stride_per_key_per_rank = ( (None, self._stride_per_key_per_rank) if self.variable_stride_per_key() else (self._stride, None) ) length_per_key = self._length_per_key offset_per_key = self._offset_per_key index_per_key = self._index_per_key jt_dict = self._jt_dict inverse_indices = self._inverse_indices if inverse_indices is not None: inverse_indices = ( inverse_indices[0], inverse_indices[1].to(device, non_blocking=non_blocking), ) if weights is not None: if dtype is not None: weights = weights.to( dtype=dtype, device=device, non_blocking=non_blocking ) else: weights = weights.to(device=device, non_blocking=non_blocking) return KeyedJaggedTensor( keys=self._keys, values=self._values.to(device, non_blocking=non_blocking), weights=weights, lengths=( lengths.to(device, non_blocking=non_blocking) if lengths is not None else None ), offsets=( offsets.to(device, non_blocking=non_blocking) if offsets is not None else None ), stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, length_per_key=length_per_key, offset_per_key=offset_per_key, index_per_key=index_per_key, jt_dict=jt_dict, inverse_indices=inverse_indices, ) def __str__(self) -> str: if len(self._keys) == 0 or self._offsets is None and self._lengths is None: return "KeyedJaggedTensor()\n" offsets = self.offsets() return ( "KeyedJaggedTensor({\n" + ",\n".join( [ " " + _jagged_tensor_string( self._keys[index], self._values, self._weights, offsets, sum(self.stride_per_key()[:index]), sum(self.stride_per_key()[: index + 1]), ) for index in 
range(len(self._keys)) ] ) + "\n})\n" ) def pin_memory(self) -> "KeyedJaggedTensor": weights = self._weights lengths = self._lengths offsets = self._offsets stride, stride_per_key_per_rank = ( (None, self._stride_per_key_per_rank) if self.variable_stride_per_key() else (self._stride, None) ) inverse_indices = self._inverse_indices if inverse_indices is not None: inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory()) return KeyedJaggedTensor( keys=self._keys, values=self._values.pin_memory(), weights=weights.pin_memory() if weights is not None else None, lengths=lengths.pin_memory() if lengths is not None else None, offsets=offsets.pin_memory() if offsets is not None else None, stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, length_per_key=self._length_per_key, offset_per_key=self._offset_per_key, index_per_key=self._index_per_key, jt_dict=None, inverse_indices=inverse_indices, ) def dist_labels(self) -> List[str]: labels = ["lengths", "values"] if self.variable_stride_per_key(): labels.append("strides") if self.weights_or_none() is not None: labels.append("weights") return labels def dist_splits(self, key_splits: List[int]) -> List[List[int]]: batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits) length_per_split = _sum_by_splits(self.length_per_key(), key_splits) splits = [batch_size_per_split, length_per_split] if self.variable_stride_per_key(): splits.append(key_splits) if self.weights_or_none() is not None: splits.append(length_per_split) return splits def dist_tensors(self) -> List[torch.Tensor]: tensors = [self.lengths(), self.values()] if self.variable_stride_per_key(): strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device()) tensors.append(strides) if self.weights_or_none() is not None: tensors.append(self.weights()) return tensors def dist_init( keys: List[str], tensors: List[torch.Tensor], variable_stride_per_key: bool, num_workers: int, recat: Optional[torch.Tensor], stride_per_rank: 
Optional[List[int]], stagger: int = 1, ) -> "KeyedJaggedTensor": assert len(tensors) in [2, 3, 4] lengths = tensors[0] values = tensors[1] stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None weights = ( tensors[-1] if (variable_stride_per_key and len(tensors) == 4) or (not variable_stride_per_key and len(tensors) == 3) else None ) if variable_stride_per_key: assert stride_per_rank_per_key is not None stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view( num_workers, len(keys) ).T.tolist() strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum( stride_per_rank_per_key ).tolist() cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths) length_per_key = ( cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]] ) with record_function("## all2all_data:recat_values ##"): if recat is not None and recat.numel() > 0: lengths, _ = _permute_tensor_by_segments( lengths, stride_per_rank_per_key, recat, None, ) values, weights = _permute_tensor_by_segments( values, length_per_key, recat, weights, ) if not stride_per_key_per_rank: stride_per_key_per_rank = [[0]] * len(keys) if stagger > 1: stride_per_key_per_rank_stagger: List[List[int]] = [] local_world_size = num_workers // stagger for i in range(len(keys)): stride_per_rank_stagger: List[int] = [] for j in range(local_world_size): stride_per_rank_stagger.extend( stride_per_key_per_rank[i][j::local_world_size] ) stride_per_key_per_rank_stagger.append(stride_per_rank_stagger) stride_per_key_per_rank = stride_per_key_per_rank_stagger kjt = KeyedJaggedTensor( keys=keys, values=values, weights=weights, lengths=lengths, stride_per_key_per_rank=stride_per_key_per_rank, ) return kjt.sync() else: assert stride_per_rank is not None with record_function("## all2all_data:recat_values ##"): if recat is not None and recat.numel() > 0: stride = stride_per_rank[0] if all(s == stride for s in stride_per_rank): ( lengths, values, weights, ) = 
torch.ops.fbgemm.permute_2D_sparse_data( recat, lengths.view(-1, stride), values, weights, values.numel(), ) lengths = lengths.view(-1) else: # variable batch size per rank ( lengths, values, weights, ) = torch.ops.fbgemm.permute_1D_sparse_data( recat, lengths.view(-1), values, weights, values.numel(), ) kjt = KeyedJaggedTensor( keys=keys, values=values, weights=weights, lengths=lengths, stride=sum(stride_per_rank), ) return kjt.sync() def get_index( embedding_dim: int, num_centroids: int, num_probe: int, num_subquantizers: int, bits_per_code: int, device: Optional[torch.device] = None, # pyre-ignore[11] ) -> Union[faiss.GpuIndexIVFPQ, faiss.IndexIVFPQ]: """ returns a FAISS IVFPQ index, placed on the device passed in Args: embedding_dim (int): indexed embedding dimension, num_centroids (int): the number of centroids (Voronoi cells), num_probe (int): The number of centroids (Voronoi cells) to probe. Must be <= num_centroids. Sweeping powers of 2 for nprobe and picking one of those based on recall statistics (e.g., 1, 2, 4, 8, ..,) is typically done., num_subquantizers (int): the number of subquanitizers in Product Quantization (PQ) compression of subvectors, bits_per_code (int): The number of bits for each subvector in Product Quantization (PQ), Example:: get_index() """ if device is not None and device.type == "cuda": # pyre-fixme[16] res = faiss.StandardGpuResources() # pyre-fixme[16] config = faiss.GpuIndexIVFPQConfig() # pyre-ignore[16] index = faiss.GpuIndexIVFPQ( res, embedding_dim, num_centroids, num_subquantizers, bits_per_code, # pyre-fixme[16] faiss.METRIC_L2, config, ) else: # pyre-fixme[16] quantizer = faiss.IndexFlatL2(embedding_dim) # pyre-fixme[16] index = faiss.IndexIVFPQ( quantizer, embedding_dim, num_centroids, num_subquantizers, bits_per_code, ) index.nprobe = num_probe return index class TwoTowerRetrieval(nn.Module): """ Simple TwoTower (UV) Model. Embeds two different entities (query and candidate) into the same space. 
Similar to the TwoTower model above, but is meant for retrieval. Specifically, this module also contiains a FAISS index, used to KNN search the K closest entities of tower 2. It separates query and candidate into separate EmbeddingBagCollections Args: faiss_index (Union[faiss.GpuIndexIVFPQ, faiss.IndexIVFPQ]): faiss index to search candidate query_ebc (EmbeddingBagCollection): embedding_bag_collection with one EmbeddingBag candidate_ebc (EmbeddingBagCollection): embedding_bag_collection with one EmbeddingBag layer_sizes (List[int]): list of the layer_sizes for the MLP k (int): number of tower 2 nearest neighbors to score device (Optional[torch.device]) Example:: m = TwoTowerRetrieval(index, query_ebc, candidate_ebc, [16, 8], 100, device) """ def __init__( self, # pyre-ignore[11] faiss_index: Union[faiss.GpuIndexIVFPQ, faiss.IndexIVFPQ], query_ebc: EmbeddingBagCollection, candidate_ebc: EmbeddingBagCollection, layer_sizes: List[int], k: int, device: Optional[torch.device] = None, dtype: torch.dtype = torch.float32, ) -> None: super().__init__() self.embedding_dim: int = query_ebc.embedding_bag_configs()[0].embedding_dim assert ( candidate_ebc.embedding_bag_configs()[0].embedding_dim == self.embedding_dim ), "Both EmbeddingBagCollections must have the same dimension" self.candidate_feature_names: List[str] = candidate_ebc.embedding_bag_configs()[ 0 ].feature_names self.query_ebc = query_ebc self.candidate_ebc = candidate_ebc self.query_proj = MLP( in_size=self.embedding_dim, layer_sizes=layer_sizes, device=device, dtype=dtype, ) self.candidate_proj = MLP( in_size=self.embedding_dim, layer_sizes=layer_sizes, device=device, dtype=dtype, ) self.faiss_index: Union[faiss.GpuIndexIVFPQ, faiss.IndexIVFPQ] = faiss_index self.k = k self.device = device def forward(self, query_kjt: KeyedJaggedTensor) -> torch.Tensor: """ Args: query_kjt (KeyedJaggedTensor): KJT containing query_ids to query Returns: torch.Tensor: logits """ batch_size = query_kjt.stride() # tower 1 lookup 
query_embedding = self.query_proj(self.query_ebc(query_kjt).values()) # KNN lookup distances = torch.empty((batch_size, self.k), device=self.device) candidates = torch.empty( (batch_size, self.k), device=self.device, dtype=torch.int64 ) query_embedding = query_embedding.to(torch.float32) # required by faiss self.faiss_index.search(query_embedding, self.k, distances, candidates) # candidate lookup candidate_kjt = KeyedJaggedTensor( keys=self.candidate_feature_names, values=candidates.reshape(-1), lengths=torch.tensor([self.k] * batch_size), ) candidate_embedding = self.candidate_proj( self.candidate_ebc(candidate_kjt).values() ) # return logit (dot product) return (query_embedding * candidate_embedding).sum(dim=1).squeeze() # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module` # inconsistently. def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool) -> None: super().load_state_dict(state_dict, strict) The provided code snippet includes necessary dependencies for implementing the `infer` function. Write a Python function `def infer( num_embeddings: int = 1024 * 1024, embedding_dim: int = 64, layer_sizes: Optional[List[int]] = None, num_centroids: int = 100, k: int = 100, num_subquantizers: int = 8, bits_per_code: int = 8, num_probe: int = 8, model_device_idx: int = 0, faiss_device_idx: int = 0, batch_size: int = 32, load_dir: Optional[str] = None, world_size: int = 2, ) -> None` to solve the following problem: Loads the serialized model and FAISS index from `two_tower_train.py`. A `TwoTowerRetrieval` model is instantiated, which wraps the `KNNIndex`, the query (user) tower and the candidate item (movie) tower inside an `nn.Module`. The retreival model is quantized using [`torchrec.quant`](https://pytorch.org/torchrec/torchrec.quant.html). The serialized `TwoTower` model weights trained before are converted into `TwoTowerRetrieval` which are loaded into the retrieval model. The seralized trained FAISS index is also loaded. 
The entire retreival model can be queried with a batch of candidate (user) ids and returns logits which can be used in ranking. Args: num_embeddings (int): The number of embeddings the embedding table embedding_dim (int): embedding dimension of both embedding tables layer_sizes (str): Comma separated list representing layer sizes of the MLP. Last size is the final embedding size num_centroids (int): The number of centroids (Voronoi cells) k (int): The number of nearest neighbors to retrieve num_subquantizers (int): The number of subquanitizers in Product Quantization (PQ) compression of subvectors bits_per_code (int): The number of bits for each subvector in Product Quantization (PQ) num_probe (int): The number of centroids (Voronoi cells) to probe. Must be <= num_centroids. Sweeping powers of 2 for nprobe and picking one of those based on recall statistics (e.g., 1, 2, 4, 8, ..,) is typically done. model_device_idx (int): device index to place model on faiss_device_idx (int): device index to place FAISS index on batch_size (int): batch_size of the random batch used to query Retrieval model at the end of the script load_dir (Optional[str]): Directory to load model and faiss index from. If None, uses random data Here is the function: def infer( num_embeddings: int = 1024 * 1024, embedding_dim: int = 64, layer_sizes: Optional[List[int]] = None, num_centroids: int = 100, k: int = 100, num_subquantizers: int = 8, bits_per_code: int = 8, num_probe: int = 8, model_device_idx: int = 0, faiss_device_idx: int = 0, batch_size: int = 32, load_dir: Optional[str] = None, world_size: int = 2, ) -> None: """ Loads the serialized model and FAISS index from `two_tower_train.py`. A `TwoTowerRetrieval` model is instantiated, which wraps the `KNNIndex`, the query (user) tower and the candidate item (movie) tower inside an `nn.Module`. The retreival model is quantized using [`torchrec.quant`](https://pytorch.org/torchrec/torchrec.quant.html). 
The serialized `TwoTower` model weights trained before are converted into `TwoTowerRetrieval` which are loaded into the retrieval model. The seralized trained FAISS index is also loaded. The entire retreival model can be queried with a batch of candidate (user) ids and returns logits which can be used in ranking. Args: num_embeddings (int): The number of embeddings the embedding table embedding_dim (int): embedding dimension of both embedding tables layer_sizes (str): Comma separated list representing layer sizes of the MLP. Last size is the final embedding size num_centroids (int): The number of centroids (Voronoi cells) k (int): The number of nearest neighbors to retrieve num_subquantizers (int): The number of subquanitizers in Product Quantization (PQ) compression of subvectors bits_per_code (int): The number of bits for each subvector in Product Quantization (PQ) num_probe (int): The number of centroids (Voronoi cells) to probe. Must be <= num_centroids. Sweeping powers of 2 for nprobe and picking one of those based on recall statistics (e.g., 1, 2, 4, 8, ..,) is typically done. model_device_idx (int): device index to place model on faiss_device_idx (int): device index to place FAISS index on batch_size (int): batch_size of the random batch used to query Retrieval model at the end of the script load_dir (Optional[str]): Directory to load model and faiss index from. 
If None, uses random data """ if layer_sizes is None: layer_sizes = [128, 64] assert torch.cuda.is_available(), "This example requires a GPU" device: torch.device = torch.device(f"cuda:{model_device_idx}") torch.cuda.set_device(device) two_tower_column_names = DEFAULT_RATINGS_COLUMN_NAMES[:2] ebcs = [] for feature_name in two_tower_column_names: config = EmbeddingBagConfig( name=f"t_{feature_name}", embedding_dim=embedding_dim, num_embeddings=num_embeddings, feature_names=[feature_name], data_type=DataType.FP16, ) ebcs.append( EmbeddingBagCollection( tables=[config], device=torch.device("meta"), ) ) retrieval_sd = None if load_dir is not None: load_dir = load_dir.rstrip("/") # pyre-ignore[16] index = faiss.index_cpu_to_gpu( # pyre-ignore[16] faiss.StandardGpuResources(), faiss_device_idx, # pyre-ignore[16] faiss.read_index(f"{load_dir}/faiss.index"), ) two_tower_sd = torch.load(f"{load_dir}/model.pt") retrieval_sd = convert_TwoTower_to_TwoTowerRetrieval( two_tower_sd, [f"t_{two_tower_column_names[0]}"], [f"t_{two_tower_column_names[1]}"], ) else: embeddings = torch.rand((num_embeddings, embedding_dim)).to( torch.device(f"cuda:{faiss_device_idx}") ) index = get_index( embedding_dim=embedding_dim, num_centroids=num_centroids, num_probe=num_probe, num_subquantizers=num_subquantizers, bits_per_code=bits_per_code, device=torch.device(f"cuda:{faiss_device_idx}"), ) index.train(embeddings) index.add(embeddings) retrieval_model = TwoTowerRetrieval( index, ebcs[0], ebcs[1], layer_sizes, k, device, dtype=torch.float16 ) constraints = {} for feature_name in two_tower_column_names: constraints[f"t_{feature_name}"] = ParameterConstraints( sharding_types=[ShardingType.TABLE_WISE.value], compute_kernels=[EmbeddingComputeKernel.QUANT.value], ) quant_model = trec_infer.modules.quantize_embeddings( retrieval_model, dtype=torch.qint8, inplace=True, output_dtype=torch.float16, ) dmp = DistributedModelParallel( module=quant_model, device=device, 
env=ShardingEnv.from_local(world_size=world_size, rank=model_device_idx), init_data_parallel=False, ) if retrieval_sd is not None: dmp.load_state_dict(retrieval_sd) # query with random batch values = torch.randint(0, num_embeddings, (batch_size,), device=device) batch = KeyedJaggedTensor( keys=[two_tower_column_names[0]], values=values, lengths=torch.tensor([1] * batch_size, device=device), ) dmp(batch)
Loads the serialized model and FAISS index from `two_tower_train.py`. A `TwoTowerRetrieval` model is instantiated, which wraps the `KNNIndex`, the query (user) tower and the candidate item (movie) tower inside an `nn.Module`. The retreival model is quantized using [`torchrec.quant`](https://pytorch.org/torchrec/torchrec.quant.html). The serialized `TwoTower` model weights trained before are converted into `TwoTowerRetrieval` which are loaded into the retrieval model. The seralized trained FAISS index is also loaded. The entire retreival model can be queried with a batch of candidate (user) ids and returns logits which can be used in ranking. Args: num_embeddings (int): The number of embeddings the embedding table embedding_dim (int): embedding dimension of both embedding tables layer_sizes (str): Comma separated list representing layer sizes of the MLP. Last size is the final embedding size num_centroids (int): The number of centroids (Voronoi cells) k (int): The number of nearest neighbors to retrieve num_subquantizers (int): The number of subquanitizers in Product Quantization (PQ) compression of subvectors bits_per_code (int): The number of bits for each subvector in Product Quantization (PQ) num_probe (int): The number of centroids (Voronoi cells) to probe. Must be <= num_centroids. Sweeping powers of 2 for nprobe and picking one of those based on recall statistics (e.g., 1, 2, 4, 8, ..,) is typically done. model_device_idx (int): device index to place model on faiss_device_idx (int): device index to place FAISS index on batch_size (int): batch_size of the random batch used to query Retrieval model at the end of the script load_dir (Optional[str]): Directory to load model and faiss index from. If None, uses random data
8,855
import os from typing import List, Optional import click import faiss import faiss.contrib.torch_utils import torch from torch import distributed as dist from torch.distributed.optim import ( _apply_optimizer_in_backward as apply_optimizer_in_backward, ) from torchrec import inference as trec_infer from torchrec.datasets.movielens import DEFAULT_RATINGS_COLUMN_NAMES from torchrec.distributed import TrainPipelineSparseDist from torchrec.distributed.model_parallel import DistributedModelParallel from torchrec.inference.state_dict_transform import ( state_dict_gather, state_dict_to_device, ) from torchrec.modules.embedding_configs import EmbeddingBagConfig from torchrec.modules.embedding_modules import EmbeddingBagCollection from torchrec.optim.keyed import KeyedOptimizerWrapper from torchrec.optim.rowwise_adagrad import RowWiseAdagrad from torchrec.sparse.jagged_tensor import KeyedJaggedTensor DEFAULT_RATINGS_COLUMN_NAMES: List[str] = ["userId", "movieId", "rating", "timestamp"] class DistributedModelParallel(nn.Module, FusedOptimizerModule): """ Entry point to model parallelism. Args: module (nn.Module): module to wrap. env (Optional[ShardingEnv]): sharding environment that has the process group. device (Optional[torch.device]): compute device, defaults to cpu. plan (Optional[ShardingPlan]): plan to use when sharding, defaults to `EmbeddingShardingPlanner.collective_plan()`. sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available to shard with, defaults to `EmbeddingBagCollectionSharder()`. init_data_parallel (bool): data-parallel modules can be lazy, i.e. they delay parameter initialization until the first forward pass. Pass `True` to delay initialization of data parallel modules. Do first forward pass and then call DistributedModelParallel.init_data_parallel(). init_parameters (bool): initialize parameters for modules still on meta device. data_parallel_wrapper (Optional[DataParallelWrapper]): custom wrapper for data parallel modules. 
Example:: def init_weights(m): if isinstance(m, nn.Linear): m.weight.fill_(1.0) elif isinstance(m, EmbeddingBagCollection): for param in m.parameters(): init.kaiming_normal_(param) m = MyModel(device='meta') m = DistributedModelParallel(m) m.apply(init_weights) """ def __init__( self, module: nn.Module, env: Optional[ShardingEnv] = None, device: Optional[torch.device] = None, plan: Optional[ShardingPlan] = None, sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None, init_data_parallel: bool = True, init_parameters: bool = True, data_parallel_wrapper: Optional[DataParallelWrapper] = None, ) -> None: super().__init__() torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}") self.init_parameters = init_parameters self._ddp_wrapped: bool = False if env is None: pg = dist.GroupMember.WORLD assert pg is not None, "Process group is not initialized" env = ShardingEnv.from_process_group(pg) self._env: ShardingEnv = env if device is None: device = torch.device("cpu") self.device: torch.device = device if sharders is None: sharders = get_default_sharders() self._sharder_map: Dict[Type[nn.Module], ModuleSharder[nn.Module]] = { sharder.module_type: sharder for sharder in sharders } if data_parallel_wrapper is None: data_parallel_wrapper = DefaultDataParallelWrapper() self._data_parallel_wrapper: DataParallelWrapper = data_parallel_wrapper if plan is None: planner = EmbeddingShardingPlanner( topology=Topology( local_world_size=get_local_size(self._env.world_size), world_size=self._env.world_size, compute_device=self.device.type, ) ) pg = self._env.process_group if pg is not None: plan = planner.collective_plan(module, sharders, pg) else: plan = planner.plan(module, sharders) self._plan: ShardingPlan = plan self._dmp_wrapped_module: nn.Module = self._init_dmp(module) self._optim: CombinedOptimizer = self._init_optim(self._dmp_wrapped_module) if init_parameters: self._init_parameters(self.module) if init_data_parallel: self.init_data_parallel() 
def module(self) -> nn.Module: """ Property to directly access sharded module, which will not be wrapped in DDP, FSDP, DMP, or any other parallelism wrappers. """ return get_unwrapped_module(self) def module(self, value: nn.Module) -> None: if isinstance(self.module, DistributedDataParallel) or isinstance( self.module, FullyShardedDataParallel ): raise RuntimeError( "module can't be set after calling init_data_parallel(...)" ) else: self._dmp_wrapped_module = value # pyre-ignore [2, 3] def forward(self, *args, **kwargs) -> Any: return self._dmp_wrapped_module(*args, **kwargs) def init_data_parallel(self) -> None: """ See init_data_parallel c-tor argument for usage. It's safe to call this method multiple times. """ if not self._ddp_wrapped: # Allocate any 'meta' tensors if self.init_parameters: self._init_parameters(self._dmp_wrapped_module) self._data_parallel_wrapper.wrap(self, self._env, self.device) self._ddp_wrapped = True def copy( self, device: torch.device, ) -> "DistributedModelParallel": """ Recursively copy submodules to new device by calling per-module customized copy process, since some modules needs to use the original references (like `ShardedModule` for inference). 
""" assert isinstance(device, torch.device) # dmp code deep copy with sharded_model_copy(device=None): copy_dmp = copy.deepcopy(self) # tensor resident module deep copy copy_dmp_wrapped_module = copy_to_device( self._dmp_wrapped_module, self.device, device ) copy_dmp._dmp_wrapped_module = copy_dmp_wrapped_module return copy_dmp def _init_dmp(self, module: nn.Module) -> nn.Module: return self._shard_modules_impl(module) def _init_optim(self, module: nn.Module) -> CombinedOptimizer: # pyre-ignore [6] return CombinedOptimizer(self._fused_optim_impl(module, [])) def _fused_optim_impl( self, module: nn.Module, fused_optims: List[Tuple[str, KeyedOptimizer]], path: str = "", ) -> List[Tuple[str, KeyedOptimizer]]: if isinstance(module, FusedOptimizerModule): fused_optims.append((path, module.fused_optimizer)) return fused_optims for name, child in module.named_children(): self._fused_optim_impl( child, fused_optims, path + "." + name if path else name, ) return fused_optims def _shard_modules_impl( self, module: nn.Module, path: str = "", ) -> nn.Module: # pre-sharded module if isinstance(module, ShardedModule): return module # shardable module module_sharding_plan = self._plan.get_plan_for_module(path) if module_sharding_plan: sharder_key = type(module) module = self._sharder_map[sharder_key].shard( module, module_sharding_plan, self._env, self.device, ) return module for name, child in module.named_children(): child = self._shard_modules_impl( child, path + "." + name if path else name, ) setattr(module, name, child) return module def _init_parameters(self, module: nn.Module) -> None: def init_parameters(module: nn.Module) -> None: # Allocate parameters and buffers if over 'meta' device. 
has_meta_param = False for name, param in module._parameters.items(): if isinstance(param, torch.Tensor) and param.device.type == "meta": module._parameters[name] = nn.Parameter( torch.empty_like(param, device=self.device), requires_grad=param.requires_grad, ) has_meta_param = True for name, buffer in module._buffers.items(): if isinstance(buffer, torch.Tensor) and buffer.device.type == "meta": module._buffers[name] = torch.zeros_like(buffer, device=self.device) # Init parameters if at least one parameter is over 'meta' device. if has_meta_param and hasattr(module, "reset_parameters"): module.reset_parameters() module.apply(init_parameters) def sparse_grad_parameter_names( self, destination: Optional[List[str]] = None, prefix: str = "" ) -> List[str]: destination = [] if destination is None else destination return self._sparse_grad_parameter_names(self.module, destination, prefix) def _sparse_grad_parameter_names( self, module: nn.Module, destination: List[str], prefix: str = "" ) -> List[str]: module = get_unwrapped_module(module) if isinstance(module, ShardedModule): pass elif isinstance(module, nn.Embedding): if module.sparse: destination.append(append_prefix(prefix, "weight")) elif isinstance(module, nn.EmbeddingBag): if module.sparse: destination.append(append_prefix(prefix, "weight")) else: for name, child in module.named_children(): self._sparse_grad_parameter_names( child, destination, append_prefix(prefix, name) ) return destination # pyre-ignore [14] def state_dict( self, destination: Optional[Dict[str, Any]] = None, prefix: str = "", keep_vars: bool = False, ) -> Dict[str, Any]: state_dict = get_module(self).state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( state_dict, prefix + _DDP_STATE_DICT_PREFIX ) add_prefix_to_state_dict(state_dict, prefix) return state_dict # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module` # inconsistently. 
def load_state_dict(
    self,
    state_dict: "OrderedDict[str, torch.Tensor]",
    prefix: str = "",
    strict: bool = True,
) -> _IncompatibleKeys:
    """Loads `state_dict`, dispatching to ShardedModule-aware recursive loading."""
    return self._load_state_dict(self, state_dict, prefix, strict)

def _load_state_dict(
    self,
    module: nn.Module,
    state_dict: "OrderedDict[str, torch.Tensor]",
    prefix: str = "",
    strict: bool = True,
) -> _IncompatibleKeys:
    """Recursive worker for load_state_dict.

    ShardedModules consume their (filtered) sub-state_dict directly; plain
    modules load their own tensors then recurse into children with the prefix
    stripped via filter_state_dict.
    """
    missing_keys = []
    unexpected_keys = []
    module = get_module(module)
    if isinstance(module, DistributedDataParallel):
        # Re-add the DDP 'module.' prefix that state_dict() stripped, so the
        # wrapped module's keys line up again.
        torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
            state_dict, prefix
        )
        add_prefix_to_state_dict(state_dict, prefix + _DDP_STATE_DICT_PREFIX)
    if isinstance(module, ShardedModule):
        return module.load_state_dict(state_dict, strict=strict)
    else:
        module._load_from_state_dict(
            state_dict, prefix, {}, strict, missing_keys, unexpected_keys, []
        )
        for name, child in module.named_children():
            m_keys, u_keys = self._load_state_dict(
                child,
                # Child sees only its own keys, with the prefix removed.
                filter_state_dict(state_dict, prefix + name),
                "",
                strict,
            )
            missing_keys.extend(m_keys)
            unexpected_keys.extend(u_keys)
    return _IncompatibleKeys(
        missing_keys=missing_keys, unexpected_keys=unexpected_keys
    )

def _named_parameters(
    self,
    module: nn.Module,
    prefix: str = "",
    recurse: bool = True,
    strip_ddp: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
    # ShardedModules yield their own (possibly sharded) parameters; plain
    # modules yield only their direct parameters (recurse=False) and then
    # recursion handles children so ShardedModules are never double-counted.
    if strip_ddp:
        module = get_unwrapped_module(module)
    if isinstance(module, ShardedModule):
        yield from module.named_parameters(prefix, recurse)
    else:
        yield from module.named_parameters(prefix, recurse=False)
        for name, child in module.named_children():
            yield from self._named_parameters(
                child,
                append_prefix(prefix, name),
                recurse,
                strip_ddp,
            )

def named_parameters(
    self,
    prefix: str = "",
    recurse: bool = True,
    remove_duplicate: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
    """Yields (fqn, parameter); duplicates suppressed when remove_duplicate=True."""
    gen = self._named_parameters(
        self.module,
        prefix,
        recurse,
    )
    memo = set()
    for key, param in gen:
        if param in memo:
            continue
        if remove_duplicate:
            memo.add(param)
        yield key, param

def bare_named_parameters(
    self,
    prefix: str = "",
    recurse: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
    """Like named_parameters, but always deduplicates shared parameters."""
    gen = self._named_parameters(
        self.module,
        prefix,
        recurse,
    )
    memo = set()
    for key, param in gen:
        if param in memo:
            continue
        memo.add(param)
        yield key, param

def _sharded_parameter_names(module: nn.Module, prefix: str = "") -> Iterator[str]:
    # Yields FQNs of parameters owned by ShardedModules only.
    # NOTE(review): takes no `self` and is invoked via the class name below —
    # presumably a @staticmethod whose decorator was lost in extraction; confirm.
    module = get_unwrapped_module(module)
    if isinstance(module, ShardedModule):
        yield from module.sharded_parameter_names(prefix)
    else:
        for name, child in module.named_children():
            yield from DistributedModelParallel._sharded_parameter_names(
                child, append_prefix(prefix, name)
            )

def _named_buffers(
    self, module: nn.Module, prefix: str = "", recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]:
    # Mirrors _named_parameters for buffers.
    module = get_unwrapped_module(module)
    if isinstance(module, ShardedModule):
        yield from module.named_buffers(prefix, recurse)
    else:
        yield from module.named_buffers(prefix, recurse=False)
        for name, child in module.named_children():
            yield from self._named_buffers(
                child, append_prefix(prefix, name), recurse
            )

def named_buffers(
    self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]:
    """Yields (fqn, buffer); duplicates suppressed when remove_duplicate=True."""
    gen = self._named_buffers(self.module, prefix, recurse)
    memo = set()
    for key, param in gen:
        if param in memo:
            continue
        if remove_duplicate:
            memo.add(param)
        yield key, param

def fused_optimizer(self) -> KeyedOptimizer:
    return self._optim

def plan(self) -> ShardingPlan:
    return self._plan


def _reset_parameters(module: nn.Module) -> None:
    """Calls reset_parameters() on every submodule that defines it."""
    for _, m in module.named_modules():
        if hasattr(m, "reset_parameters"):
            m.reset_parameters()


def state_dict_gather(
    src: Dict[str, Union[torch.Tensor, ShardedTensor]],
    dst: Dict[str, torch.Tensor],
) -> None:
    """
    Gathers the values of the src state_dict of the keys present in the dst state_dict.
    Can handle ShardedTensors in the src state_dict.
    Args:
        src (Dict[str, Union[torch.Tensor, ShardedTensor]]): source's state_dict for this rank
        dst (Dict[str, torch.Tensor]): destination's state_dict
    """
    for key, dst_tensor in dst.items():
        src_tensor = src[key]
        if isinstance(src_tensor, ShardedTensor):
            # Collective: every rank participates; only rank 0 receives the
            # gathered result into dst_tensor.
            src_tensor.gather(out=dst_tensor if (dist.get_rank() == 0) else None)
        elif isinstance(src_tensor, torch.Tensor):
            dst_tensor.copy_(src_tensor)
        else:
            raise ValueError(f"Unsupported tensor {key} type {type(src_tensor)}")


def state_dict_to_device(
    state_dict: Dict[str, Union[torch.Tensor, ShardedTensor]],
    pg: ProcessGroup,
    device: torch.device,
) -> Dict[str, Union[torch.Tensor, ShardedTensor]]:
    """
    Moves a state_dict to a device with a process group. Can handle ShardedTensors
    in the state_dict.

    Args:
        state_dict (Dict[str, Union[torch.Tensor, ShardedTensor]]): state_dict to move
        pg (ProcessGroup): Process Group used for comms
        device (torch.device): device to put state_dict on
    """
    ret = {}
    # All ranks must iterate the same key set so the ShardedTensor collectives
    # below stay in sync, even for keys this rank does not hold locally.
    all_keys = state_dict_all_gather_keys(state_dict, pg)
    for key in all_keys:
        if key in state_dict:
            tensor = state_dict[key]
            if isinstance(tensor, ShardedTensor):
                copied_shards = [
                    Shard.from_tensor_and_offsets(
                        tensor=shard.tensor.to(device),
                        shard_offsets=shard.metadata.shard_offsets,
                        rank=dist.get_rank(pg),
                    )
                    for shard in tensor.local_shards()
                ]
                ret[key] = ShardedTensor._init_from_local_shards(
                    copied_shards,
                    tensor.metadata().size,
                    process_group=pg,
                )
            elif isinstance(tensor, torch.Tensor):
                ret[key] = tensor.to(device)
            else:
                raise ValueError(f"Unsupported tensor {key} type {type(tensor)}")
        else:
            # No state_dict entries for table-wise sharding,
            # but need to follow full-sync.
            ret[key] = ShardedTensor._init_from_local_shards(
                [],
                [],
                process_group=pg,
            )
    return ret


class EmbeddingBagConfig(BaseEmbeddingConfig):
    # Pooling applied across each bag; defaults to SUM.
    # NOTE(review): likely a @dataclass in the original (decorator possibly
    # lost in extraction) — confirm before relying on generated __init__.
    pooling: PoolingType = PoolingType.SUM


class EmbeddingBagCollection(EmbeddingBagCollectionInterface):
    """
    EmbeddingBagCollection represents a collection of pooled embeddings (`EmbeddingBags`).
    It processes sparse data in the form of `KeyedJaggedTensor` with values of the form
    [F X B X L] where:

    * F: features (keys)
    * B: batch size
    * L: length of sparse features (jagged)

    and outputs a `KeyedTensor` with values of the form [B * (F * D)] where:

    * F: features (keys)
    * D: each feature's (key's) embedding dimension
    * B: batch size

    Args:
        tables (List[EmbeddingBagConfig]): list of embedding tables.
        is_weighted (bool): whether input `KeyedJaggedTensor` is weighted.
        device (Optional[torch.device]): default compute device.

    Example::

        table_0 = EmbeddingBagConfig(
            name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
        )
        table_1 = EmbeddingBagConfig(
            name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"]
        )

        ebc = EmbeddingBagCollection(tables=[table_0, table_1])

        #        0       1        2  <-- batch
        # "f1"   [0,1]   None    [2]
        # "f2"   [3]     [4]     [5,6,7]
        #  ^
        # feature

        features = KeyedJaggedTensor(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )

        pooled_embeddings = ebc(features)
        print(pooled_embeddings.values())
        tensor([[-0.8899, -0.1342, -1.9060, -0.0905, -0.2814, -0.9369, -0.7783],
                [ 0.0000,  0.0000,  0.0000,  0.1598,  0.0695,  1.3265, -0.1011],
                [-0.4256, -1.1846, -2.1648, -1.0893,  0.3590, -1.9784, -0.7681]],
               grad_fn=<CatBackward0>)
        print(pooled_embeddings.keys())
        ['f1', 'f2']
        print(pooled_embeddings.offset_per_key())
        tensor([0, 3, 7])
    """

    def __init__(
        self,
        tables: List[EmbeddingBagConfig],
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}")
        self._is_weighted = is_weighted
        self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
        self._embedding_bag_configs = tables
        # embedding_dim repeated once per feature of each table, in table order;
        # used as length_per_key of the output KeyedTensor.
        self._lengths_per_embedding: List[int] = []

        self._dtypes: List[int] = []

        table_names = set()
        for embedding_config in tables:
            if embedding_config.name in table_names:
                raise ValueError(f"Duplicate table name {embedding_config.name}")
            table_names.add(embedding_config.name)
            # Only FP32/FP16 are distinguished here; any non-FP32 config maps
            # to float16.
            dtype = (
                torch.float32
                if embedding_config.data_type == DataType.FP32
                else torch.float16
            )
            self.embedding_bags[embedding_config.name] = nn.EmbeddingBag(
                num_embeddings=embedding_config.num_embeddings,
                embedding_dim=embedding_config.embedding_dim,
                mode=pooling_type_to_str(embedding_config.pooling),
                device=device,
                include_last_offset=True,
                dtype=dtype,
            )
            if device is None:
                # Adopt the device the first table was materialized on.
                device = self.embedding_bags[embedding_config.name].weight.device

            self._dtypes.append(embedding_config.data_type.value)

            if not embedding_config.feature_names:
                # Default: table name doubles as its single feature name.
                # NOTE: mutates the caller-provided config object.
                embedding_config.feature_names = [embedding_config.name]
            self._lengths_per_embedding.extend(
                len(embedding_config.feature_names) * [embedding_config.embedding_dim]
            )

        self._device: torch.device = device or torch.device("cpu")
        self._embedding_names: List[str] = [
            embedding
            for embeddings in get_embedding_names_by_table(tables)
            for embedding in embeddings
        ]
        self._feature_names: List[List[str]] = [table.feature_names for table in tables]
        self.reset_parameters()

    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """
        Run forward pass of EmbeddingBagCollection.

        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].

        Returns:
            KeyedTensor: pooled embeddings per feature, concatenated along dim 1.
        """
        flat_feature_names: List[str] = []
        for names in self._feature_names:
            flat_feature_names.extend(names)
        inverse_indices = reorder_inverse_indices(
            inverse_indices=features.inverse_indices_or_none(),
            feature_names=flat_feature_names,
        )
        pooled_embeddings: List[torch.Tensor] = []
        feature_dict = features.to_dict()
        # Iterate tables in config order; each table serves one or more features.
        for i, embedding_bag in enumerate(self.embedding_bags.values()):
            for feature_name in self._feature_names[i]:
                f = feature_dict[feature_name]
                per_sample_weights: Optional[torch.Tensor] = None
                if self._is_weighted:
                    # Cast weights to half only for FP16 tables.
                    per_sample_weights = (
                        f.weights().half()
                        if self._dtypes[i] == DataType.FP16.value
                        else f.weights()
                    )
                # Output is cast back to float32 regardless of table dtype.
                res = embedding_bag(
                    input=f.values(),
                    offsets=f.offsets(),
                    per_sample_weights=(
                        per_sample_weights if self._is_weighted else None
                    ),
                ).float()
                pooled_embeddings.append(res)
        return KeyedTensor(
            keys=self._embedding_names,
            values=process_pooled_embeddings(
                pooled_embeddings=pooled_embeddings,
                inverse_indices=inverse_indices,
            ),
            length_per_key=self._lengths_per_embedding,
        )

    def is_weighted(self) -> bool:
        return self._is_weighted

    def embedding_bag_configs(self) -> List[EmbeddingBagConfig]:
        return self._embedding_bag_configs

    def device(self) -> torch.device:
        return self._device

    def reset_parameters(self) -> None:
        """Re-initializes each table's weights via its config's init_fn (no-op on meta)."""
        # NOTE(review): `device` is defined above as a plain method, so
        # `isinstance(self.device, torch.device)` would always be False here —
        # presumably `device` is a @property whose decorator was lost in
        # extraction; confirm against the original source.
        if (isinstance(self.device, torch.device) and self.device.type == "meta") or (
            isinstance(self.device, str) and self.device == "meta"
        ):
            return
        # Initialize embedding bags weights with init_fn
        for table_config in self._embedding_bag_configs:
            assert table_config.init_fn is not None
            param = self.embedding_bags[f"{table_config.name}"].weight
            # pyre-ignore
            table_config.init_fn(param)


class KeyedOptimizerWrapper(KeyedOptimizer):
    """
    Takes a dict of parameters and exposes state_dict by parameter key.
Convenience wrapper to take in optim_factory callable to create KeyedOptimizer """ def __init__( self, params: Mapping[str, Union[torch.Tensor, ShardedTensor]], optim_factory: OptimizerFactory, ) -> None: self._optimizer: optim.Optimizer = optim_factory(list(params.values())) super().__init__(params, self._optimizer.state, self._optimizer.param_groups) def zero_grad(self, set_to_none: bool = False) -> None: self._optimizer.zero_grad() # pyre-ignore [2] def step(self, closure: Any = None) -> None: self._optimizer.step(closure=closure) class RowWiseAdagrad(Optimizer): r"""Implements Row wise Adagrad algorithm. This is an extension of the Adagrad algorithm https://github.com/pytorch/pytorch/blob/master/torch/optim/adagrad.py, for use with EmbeddingBag parameters, where we want the adaptive learning rate to be the same within an embedding row. Since we only need to store state for an embedding row, rather than every single parameter, we can have drastic memory savings (factor of embedding_dim). Note that this implementation does not currently support sparse gradients. 
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-10)
        maximize (bool, optional): maximize the params based on the objective, instead of
            minimizing (default: False)
    """

    def __init__(
        self,
        params: Iterable[torch.nn.Parameter],
        lr: float = 1e-2,
        lr_decay: float = 0.0,
        weight_decay: float = 0.0,
        initial_accumulator_value: float = 0.0,
        eps: float = 1e-10,
        *,
        maximize: bool = False,
        # pyre-ignore
        **unused,
    ) -> None:
        # Validate all hyperparameters are non-negative before touching state.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= lr_decay:
            raise ValueError("Invalid lr_decay value: {}".format(lr_decay))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= initial_accumulator_value:
            raise ValueError(
                "Invalid initial_accumulator_value value: {}".format(
                    initial_accumulator_value
                )
            )
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        defaults = dict(
            lr=lr,
            lr_decay=lr_decay,
            eps=eps,
            weight_decay=weight_decay,
            initial_accumulator_value=initial_accumulator_value,
            maximize=maximize,
        )
        super().__init__(params, defaults)

        for group in self.param_groups:
            for p in group["params"]:
                state = self.state[p]
                state["step"] = torch.tensor(0.0)
                init_value = (
                    complex(initial_accumulator_value, initial_accumulator_value)
                    if torch.is_complex(p)
                    else initial_accumulator_value
                )
                # Row-wise: the accumulator is one value per embedding row
                # (shape [rows, 1]) rather than per element — the source of the
                # memory savings described above. Requires p to be 2-D
                # (mean over axis=1).
                state["sum"] = (
                    # pyre-fixme[28]: Unexpected keyword argument `axis`.
                    torch.full_like(p, init_value, memory_format=torch.preserve_format)
                    .mean(axis=1)
                    .view(-1, 1)
                )

    def __setstate__(self, state: Dict[str, Any]) -> None:
        """Restores optimizer state, upgrading older checkpoints in place."""
        super().__setstate__(state)
        for group in self.param_groups:
            # Older checkpoints may predate the `maximize` option.
            group.setdefault("maximize", False)
        state_values = list(self.state.values())
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(
            state_values[0]["step"]
        )
        if not step_is_tensor:
            # Older checkpoints stored `step` as a plain number.
            for s in state_values:
                s["step"] = torch.tensor(float(s["step"]))

    def share_memory(self) -> None:
        """Moves accumulator state into shared memory (for multiprocess training)."""
        for group in self.param_groups:
            for p in group["params"]:
                state = self.state[p]
                state["sum"].share_memory_()

    # pyre-ignore
    def step(self, closure=None) -> torch.Tensor:
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            state_sums = []
            state_steps = []

            for p in group["params"]:
                if p.grad is not None:
                    params_with_grad.append(p)
                    grads.append(p.grad)
                    state = self.state[p]
                    state_sums.append(state["sum"])
                    state_steps.append(state["step"])

            # Delegates the actual update; the row-wise [rows, 1] accumulator
            # broadcasts across each row's elements.
            adagrad(
                params_with_grad,
                grads,
                state_sums,
                state_steps,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                lr_decay=group["lr_decay"],
                eps=group["eps"],
                maximize=group["maximize"],
            )

        return loss  # pyre-ignore


class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
    """Represents an (optionally weighted) keyed jagged tensor.

    A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
    slices may be of different lengths. Keyed on first dimension and jagged on the last
    dimension.

    Implementation is torch.jit.script-able.

    Args:
        keys (List[str]): keys to the jagged Tensor.
        values (torch.Tensor): values tensor in dense representation.
        weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
            same shape as values.
        lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
        offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
            offsets.
        stride (Optional[int]): number of examples per batch.
        stride_per_key_per_rank (Optional[List[List[int]]]): batch size (number of
            examples) per key per rank, with the outer list representing the keys and
            the inner list representing the values. Each value in the inner list
            represents the number of examples in the batch from the rank of its index
            in a distributed context.
        length_per_key (Optional[List[int]]): start length for each key.
        offset_per_key (Optional[List[int]]): start offset for each key and final
            offset.
        index_per_key (Optional[Dict[str, int]]): index for each key.
        jt_dict (Optional[Dict[str, JaggedTensor]]): cached per-key `JaggedTensor`
            views, as produced by `to_dict()`.
        inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
            expand deduplicated embedding output for variable stride per key.

    Example::

        #              0       1        2  <-- dim_1
        # "Feature0"   [V0,V1] None    [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        #   ^
        #  dim_0

        dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
        dim_1: optional second dimension (ie. batch size)
        dim_2: The jagged dimension which has slice lengths between 0-3 in the above example

        # We represent this data with following inputs:

        values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
        weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
        lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
        offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
        keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
        index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
        offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset
    """

    # This is the subset of fields on KJT which are required (all other fields
    # can be derived from these fields, and are only cached)
    _fields = [
        "_values",
        "_weights",
        "_lengths",
        "_offsets",
    ]

    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
        self._keys: List[str] = keys
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # lengths/offsets must be integer tensors (or empty).
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets

        self._stride_per_key_per_rank: List[List[int]] = []
        self._stride_per_key: List[int] = []
        self._variable_stride_per_key: bool = False
        self._stride: int = -1

        if stride_per_key_per_rank is not None:
            # Variable-stride mode: per-key strides supplied; `stride` is
            # derived (or left meaningless when keys disagree).
            if stride is not None:
                raise ValueError(
                    "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
                )
            self._stride_per_key_per_rank = stride_per_key_per_rank
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
            self._variable_stride_per_key = True
            if not stride_per_key_per_rank:
                self._stride = 0
            elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
                self._stride = self.stride_per_key()[0]
        else:
            # Fixed-stride mode: infer stride from lengths/offsets if needed.
            if torch.jit.is_tracing():
                stride = _maybe_compute_stride_kjt_scripted(
                    keys, stride, lengths, offsets
                )[0]
            else:
                stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
            self._stride = stride
            self._stride_per_key_per_rank = [[stride]] * len(self._keys)
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]

        # lazy fields
        self._length_per_key: Optional[List[int]] = length_per_key
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key
        self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
        self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
            inverse_indices
        )
        self._lengths_offset_per_key: List[int] = []

    def from_offsets_sync(
        keys: List[str],
        values: torch.Tensor,
        offsets: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
        """Constructs a KJT from offsets and eagerly computes the cached per-key metadata."""
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            offsets=offsets,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            inverse_indices=inverse_indices,
        )
        return kjt.sync()

    def from_lengths_sync(
        keys: List[str],
        values: torch.Tensor,
        lengths: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
        """Constructs a KJT from lengths and eagerly computes the cached per-key metadata."""
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            inverse_indices=inverse_indices,
        )
        return kjt.sync()

    def concat(
        kjt_list: List["KeyedJaggedTensor"],
    ) -> "KeyedJaggedTensor":
        """Concatenates multiple KJTs along the key dimension.

        All inputs must agree on weightedness and on variable- vs fixed-stride;
        in fixed-stride mode all strides must match.
        """
        if len(kjt_list) == 0:
            raise ValueError("Can't concat empty KJT list")

        is_weighted: bool = kjt_list[0].weights_or_none() is not None
        # length_per_key can only be propagated if every input has it cached.
        has_length_per_key: bool = True

        length_per_key: List[int] = []
        keys: List[str] = []
        value_list: List[torch.Tensor] = []
        weight_list: List[torch.Tensor] = []
        length_list: List[torch.Tensor] = []
        stride_per_key_per_rank: List[List[int]] = []
        stride: Optional[int] = None
        variable_stride_per_key_list = [
            kjt.variable_stride_per_key() for kjt in kjt_list
        ]
        assert all(variable_stride_per_key_list) or not any(
            variable_stride_per_key_list
        ), "variable stride per key must be consistent for all KJTs"
        variable_stride_per_key = all(variable_stride_per_key_list)

        for kjt in kjt_list:
            curr_is_weighted: bool = kjt.weights_or_none() is not None
            if is_weighted != curr_is_weighted:
                raise ValueError("Can't merge weighted KJT with unweighted KJT")
            _length_per_key: Optional[List[int]] = None
            if kjt._length_per_key is None:
                has_length_per_key = False
            else:
                _length_per_key = kjt._length_per_key
            if has_length_per_key and _length_per_key is not None:
                length_per_key += _length_per_key
            keys += kjt.keys()
            value_list.append(kjt.values())
            if is_weighted:
                weight_list.append(kjt.weights())
            length_list.append(kjt.lengths())
            if variable_stride_per_key:
                stride_per_key_per_rank += kjt.stride_per_key_per_rank()
            elif stride is None:
                stride = kjt.stride()
            else:
                assert stride == kjt.stride(), "strides must be consistent for all KJTs"

        return KeyedJaggedTensor(
            keys=keys,
            values=torch.cat(value_list, dim=0),
            weights=torch.cat(weight_list, dim=0) if is_weighted else None,
            lengths=torch.cat(length_list, dim=0),
            stride=stride,
            stride_per_key_per_rank=(
                stride_per_key_per_rank if variable_stride_per_key else None
            ),
            length_per_key=length_per_key if has_length_per_key else None,
        )

    def empty(
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        values_dtype: Optional[torch.dtype] = None,
        weights_dtype: Optional[torch.dtype] = None,
        lengths_dtype: torch.dtype = torch.int32,
    ) -> "KeyedJaggedTensor":
        """Constructs an empty (zero-key, zero-value) KJT with the given dtypes/device."""
        weights = (
            torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
        )
        return KeyedJaggedTensor(
            keys=torch.jit.annotate(List[str], []),
            values=torch.empty(0, dtype=values_dtype, device=device),
            weights=weights,
            lengths=torch.empty(0, dtype=lengths_dtype, device=device),
            stride=0,
        )

    def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
        """Constructs an empty KJT matching `kjt`'s device, dtypes, and stride mode."""
        stride, stride_per_key_per_rank = (
            (None, kjt.stride_per_key_per_rank())
            if kjt.variable_stride_per_key()
            else (kjt.stride(), None)
        )
        return KeyedJaggedTensor(
            keys=[],
            values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
            weights=(
                None
                if kjt.weights_or_none() is None
                else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
            ),
            lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
        )

    def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
        """
        Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
        but this function will ONLY work if the JaggedTensors all
        have the same "implicit" batch_size dimension.

        Basically, we can visualize JaggedTensors as 2-D tensors
        of the format of [batch_size x variable_feature_dim].
        In case, we have some batch without a feature value,
        the input JaggedTensor could just not include any values.

        But KeyedJaggedTensor (by default) typically pad "None"
        so that all the JaggedTensors stored in the KeyedJaggedTensor
        have the same batch_size dimension. That is, in the case,
        the JaggedTensor input didn't automatically pad
        for the empty batches, this function would error / not work.

        Consider the visualization of the following KeyedJaggedTensor:
        #              0       1        2  <-- dim_1
        # "Feature0"   [V0,V1] None    [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        #   ^
        #  dim_0

        Notice that the inputs for this KeyedJaggedTensor would have looked like:

            values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
            weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
            lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
            offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
            keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
            index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
            offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset

        Now if the input jt_dict = {
            # "Feature0"   [V0,V1] [V2]
            # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        } and the "None" is left out from each JaggedTensor,
        then this function would fail as we would not correctly
        be able to pad "None" as it does not technically know
        the correct batch / place to pad within the JaggedTensor.

        Essentially, the lengths Tensor inferred by this function
        would be [2, 1, 1, 1, 3] indicating variable batch_size
        dim_1 violates the existing assumption / precondition
        that KeyedJaggedTensor's should have fixed batch_size dimension.
""" kjt_keys = list(jt_dict.keys()) kjt_vals_list: List[torch.Tensor] = [] kjt_lens_list: List[torch.Tensor] = [] kjt_weights_list: List[torch.Tensor] = [] stride_per_key: List[int] = [] for jt in jt_dict.values(): stride_per_key.append(len(jt.lengths())) kjt_vals_list.append(jt.values()) kjt_lens_list.append(jt.lengths()) weight = jt.weights_or_none() if weight is not None: kjt_weights_list.append(weight) kjt_vals = torch.concat(kjt_vals_list) kjt_lens = torch.concat(kjt_lens_list) kjt_weights = ( torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None ) kjt_stride, kjt_stride_per_key_per_rank = ( (stride_per_key[0], None) if all(s == stride_per_key[0] for s in stride_per_key) else (None, [[stride] for stride in stride_per_key]) ) kjt = KeyedJaggedTensor( keys=kjt_keys, values=kjt_vals, weights=kjt_weights, lengths=kjt_lens, stride=kjt_stride, stride_per_key_per_rank=kjt_stride_per_key_per_rank, ).sync() return kjt def sync(self) -> "KeyedJaggedTensor": self.length_per_key() self.offset_per_key() return self def unsync(self) -> "KeyedJaggedTensor": self._length_per_key = None self._offset_per_key = None return self def device(self) -> torch.device: return self._values.device def lengths(self) -> torch.Tensor: _lengths = _maybe_compute_lengths(self._lengths, self._offsets) self._lengths = _lengths return _lengths def lengths_or_none(self) -> Optional[torch.Tensor]: return self._lengths def offsets(self) -> torch.Tensor: _offsets = _maybe_compute_offsets(self._lengths, self._offsets) self._offsets = _offsets return _offsets def offsets_or_none(self) -> Optional[torch.Tensor]: return self._offsets def keys(self) -> List[str]: return self._keys def values(self) -> torch.Tensor: return self._values def weights(self) -> torch.Tensor: return _get_weights_or_throw(self._weights) def weights_or_none(self) -> Optional[torch.Tensor]: return self._weights def stride(self) -> int: return self._stride def stride_per_key(self) -> List[int]: return 
self._stride_per_key def stride_per_key_per_rank(self) -> List[List[int]]: return self._stride_per_key_per_rank def variable_stride_per_key(self) -> bool: return self._variable_stride_per_key def inverse_indices(self) -> Tuple[List[str], torch.Tensor]: return _get_inverse_indices_or_throw(self._inverse_indices) def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]: return self._inverse_indices def _key_indices(self) -> Dict[str, int]: _index_per_key: Dict[str, int] = _maybe_compute_index_per_key( self._keys, self._index_per_key, ) self._index_per_key = _index_per_key return _index_per_key def length_per_key(self) -> List[int]: _length_per_key = _maybe_compute_length_per_key( keys=self._keys, stride=self.stride(), stride_per_key=self.stride_per_key(), variable_stride_per_key=self.variable_stride_per_key(), length_per_key=self._length_per_key, lengths=self._lengths, offsets=self._offsets, values=self._values, ) self._length_per_key = _length_per_key return _length_per_key def length_per_key_or_none(self) -> Optional[List[int]]: return self._length_per_key def offset_per_key(self) -> List[int]: _length_per_key, _offset_per_key = _maybe_compute_offset_per_key( keys=self._keys, stride=self.stride(), stride_per_key=self.stride_per_key(), variable_stride_per_key=self.variable_stride_per_key(), length_per_key=self._length_per_key, offset_per_key=self._offset_per_key, lengths=self._lengths, offsets=self._offsets, values=self._values, ) self._length_per_key = _length_per_key self._offset_per_key = _offset_per_key return _offset_per_key def offset_per_key_or_none(self) -> Optional[List[int]]: return self._offset_per_key def lengths_offset_per_key(self) -> List[int]: if not self._lengths_offset_per_key: self._lengths_offset_per_key = _cumsum(self.stride_per_key()) return self._lengths_offset_per_key def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]: split_list: List[KeyedJaggedTensor] = [] start = 0 start_offset = 0 _length_per_key = 
self.length_per_key() _offset_per_key = self.offset_per_key() for segment in segments: end = start + segment end_offset = _offset_per_key[end] keys: List[str] = self._keys[start:end] stride, stride_per_key_per_rank = ( (None, self.stride_per_key_per_rank()[start:end]) if self.variable_stride_per_key() else (self._stride, None) ) if segment == len(self._keys): # no torch slicing required split_list.append( KeyedJaggedTensor( keys=self._keys, values=self._values, weights=self.weights_or_none(), lengths=self._lengths, offsets=self._offsets, stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, length_per_key=self._length_per_key, offset_per_key=self._offset_per_key, index_per_key=self._index_per_key, jt_dict=self._jt_dict, inverse_indices=None, ) ) elif segment == 0: empty_int_list: List[int] = torch.jit.annotate(List[int], []) split_list.append( KeyedJaggedTensor( keys=keys, values=torch.tensor( empty_int_list, device=self.device(), dtype=self._values.dtype, ), weights=( None if self.weights_or_none() is None else torch.tensor( empty_int_list, device=self.device(), dtype=self.weights().dtype, ) ), lengths=torch.tensor( empty_int_list, device=self.device(), dtype=torch.int ), offsets=torch.tensor( empty_int_list, device=self.device(), dtype=torch.int ), stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, length_per_key=None, offset_per_key=None, index_per_key=None, jt_dict=None, inverse_indices=None, ) ) else: split_length_per_key = _length_per_key[start:end] if not torch.jit.is_scripting() and is_torchdynamo_compiling(): # Checks for dynamo dynamic shapes tracing torch._check_is_size(start_offset) torch._check_is_size(end_offset) torch._check_is_size(end_offset - start_offset) torch._check(start_offset <= self._values.size(0)) torch._check(end_offset <= self._values.size(0)) torch._check(end_offset >= start_offset) split_list.append( KeyedJaggedTensor( keys=keys, values=self._values[start_offset:end_offset], weights=( None if 
self.weights_or_none() is None else self.weights()[start_offset:end_offset] ), lengths=self.lengths()[ self.lengths_offset_per_key()[ start ] : self.lengths_offset_per_key()[end] ], offsets=None, stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, length_per_key=split_length_per_key, offset_per_key=None, index_per_key=None, jt_dict=None, inverse_indices=None, ) ) start = end start_offset = end_offset return split_list def permute( self, indices: List[int], indices_tensor: Optional[torch.Tensor] = None, include_inverse_indices: bool = False, ) -> "KeyedJaggedTensor": if indices_tensor is None: indices_tensor = torch.tensor( indices, dtype=torch.int, device=self.device() ) length_per_key = self.length_per_key() permuted_keys: List[str] = [] permuted_stride_per_key_per_rank: List[List[int]] = [] permuted_length_per_key: List[int] = [] permuted_lengths_sum = 0 for index in indices: key = self.keys()[index] permuted_keys.append(key) permuted_stride_per_key_per_rank.append( self.stride_per_key_per_rank()[index] ) permuted_length_per_key.append(length_per_key[index]) permuted_lengths_sum += length_per_key[index] if self.variable_stride_per_key(): length_per_key_tensor = _pin_and_move( torch.tensor(self.length_per_key()), self.device() ) stride_per_key_tensor = _pin_and_move( torch.tensor(self.stride_per_key()), self.device() ) permuted_lengths, _ = _permute_tensor_by_segments( self.lengths(), stride_per_key_tensor, indices_tensor, None, ) permuted_values, permuted_weights = _permute_tensor_by_segments( self.values(), length_per_key_tensor, indices_tensor, self.weights_or_none(), ) else: ( permuted_lengths, permuted_values, permuted_weights, ) = torch.ops.fbgemm.permute_2D_sparse_data( indices_tensor, self.lengths().view(len(self._keys), -1), self.values(), self.weights_or_none(), permuted_lengths_sum, ) stride, optional_permuted_stride_per_key_per_rank = ( (None, permuted_stride_per_key_per_rank) if self.variable_stride_per_key() else (self._stride, None) ) 
kjt = KeyedJaggedTensor( keys=permuted_keys, values=permuted_values, weights=permuted_weights, lengths=permuted_lengths.view(-1), offsets=None, stride=stride, stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank, length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None, offset_per_key=None, index_per_key=None, jt_dict=None, inverse_indices=( self.inverse_indices_or_none() if include_inverse_indices else None ), ) return kjt def flatten_lengths(self) -> "KeyedJaggedTensor": stride, stride_per_key_per_rank = ( (None, self.stride_per_key_per_rank()) if self.variable_stride_per_key() else (self._stride, None) ) return KeyedJaggedTensor( keys=self._keys, values=self._values, weights=self._weights, lengths=self.lengths().view(-1), offsets=None, stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, length_per_key=self.length_per_key(), offset_per_key=None, index_per_key=None, jt_dict=None, inverse_indices=None, ) def __getitem__(self, key: str) -> JaggedTensor: offset_per_key = self.offset_per_key() index = self._key_indices()[key] start_offset = offset_per_key[index] end_offset = ( offset_per_key[index + 1] if index + 1 < len(offset_per_key) else start_offset ) return JaggedTensor( values=self._values[start_offset:end_offset], weights=( None if self.weights_or_none() is None else self.weights()[start_offset:end_offset] ), lengths=self.lengths()[ self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[ index + 1 ] ], offsets=None, ) def to_dict(self) -> Dict[str, JaggedTensor]: _jt_dict = _maybe_compute_kjt_to_jt_dict( stride=self.stride(), stride_per_key=self.stride_per_key(), keys=self.keys(), length_per_key=self.length_per_key(), lengths=self.lengths(), values=self.values(), variable_stride_per_key=self.variable_stride_per_key(), weights=self.weights_or_none(), jt_dict=self._jt_dict, ) self._jt_dict = _jt_dict return _jt_dict def record_stream(self, stream: torch.cuda.streams.Stream) -> None: 
self._values.record_stream(stream) weights = self._weights lengths = self._lengths offsets = self._offsets if weights is not None: weights.record_stream(stream) if lengths is not None: lengths.record_stream(stream) if offsets is not None: offsets.record_stream(stream) def to( self, device: torch.device, non_blocking: bool = False, dtype: Optional[torch.dtype] = None, ) -> "KeyedJaggedTensor": weights = self._weights lengths = self._lengths offsets = self._offsets stride, stride_per_key_per_rank = ( (None, self._stride_per_key_per_rank) if self.variable_stride_per_key() else (self._stride, None) ) length_per_key = self._length_per_key offset_per_key = self._offset_per_key index_per_key = self._index_per_key jt_dict = self._jt_dict inverse_indices = self._inverse_indices if inverse_indices is not None: inverse_indices = ( inverse_indices[0], inverse_indices[1].to(device, non_blocking=non_blocking), ) if weights is not None: if dtype is not None: weights = weights.to( dtype=dtype, device=device, non_blocking=non_blocking ) else: weights = weights.to(device=device, non_blocking=non_blocking) return KeyedJaggedTensor( keys=self._keys, values=self._values.to(device, non_blocking=non_blocking), weights=weights, lengths=( lengths.to(device, non_blocking=non_blocking) if lengths is not None else None ), offsets=( offsets.to(device, non_blocking=non_blocking) if offsets is not None else None ), stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, length_per_key=length_per_key, offset_per_key=offset_per_key, index_per_key=index_per_key, jt_dict=jt_dict, inverse_indices=inverse_indices, ) def __str__(self) -> str: if len(self._keys) == 0 or self._offsets is None and self._lengths is None: return "KeyedJaggedTensor()\n" offsets = self.offsets() return ( "KeyedJaggedTensor({\n" + ",\n".join( [ " " + _jagged_tensor_string( self._keys[index], self._values, self._weights, offsets, sum(self.stride_per_key()[:index]), sum(self.stride_per_key()[: index + 1]), ) for index in 
range(len(self._keys)) ] ) + "\n})\n" ) def pin_memory(self) -> "KeyedJaggedTensor": weights = self._weights lengths = self._lengths offsets = self._offsets stride, stride_per_key_per_rank = ( (None, self._stride_per_key_per_rank) if self.variable_stride_per_key() else (self._stride, None) ) inverse_indices = self._inverse_indices if inverse_indices is not None: inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory()) return KeyedJaggedTensor( keys=self._keys, values=self._values.pin_memory(), weights=weights.pin_memory() if weights is not None else None, lengths=lengths.pin_memory() if lengths is not None else None, offsets=offsets.pin_memory() if offsets is not None else None, stride=stride, stride_per_key_per_rank=stride_per_key_per_rank, length_per_key=self._length_per_key, offset_per_key=self._offset_per_key, index_per_key=self._index_per_key, jt_dict=None, inverse_indices=inverse_indices, ) def dist_labels(self) -> List[str]: labels = ["lengths", "values"] if self.variable_stride_per_key(): labels.append("strides") if self.weights_or_none() is not None: labels.append("weights") return labels def dist_splits(self, key_splits: List[int]) -> List[List[int]]: batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits) length_per_split = _sum_by_splits(self.length_per_key(), key_splits) splits = [batch_size_per_split, length_per_split] if self.variable_stride_per_key(): splits.append(key_splits) if self.weights_or_none() is not None: splits.append(length_per_split) return splits def dist_tensors(self) -> List[torch.Tensor]: tensors = [self.lengths(), self.values()] if self.variable_stride_per_key(): strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device()) tensors.append(strides) if self.weights_or_none() is not None: tensors.append(self.weights()) return tensors def dist_init( keys: List[str], tensors: List[torch.Tensor], variable_stride_per_key: bool, num_workers: int, recat: Optional[torch.Tensor], stride_per_rank: 
Optional[List[int]], stagger: int = 1, ) -> "KeyedJaggedTensor": assert len(tensors) in [2, 3, 4] lengths = tensors[0] values = tensors[1] stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None weights = ( tensors[-1] if (variable_stride_per_key and len(tensors) == 4) or (not variable_stride_per_key and len(tensors) == 3) else None ) if variable_stride_per_key: assert stride_per_rank_per_key is not None stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view( num_workers, len(keys) ).T.tolist() strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum( stride_per_rank_per_key ).tolist() cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths) length_per_key = ( cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]] ) with record_function("## all2all_data:recat_values ##"): if recat is not None and recat.numel() > 0: lengths, _ = _permute_tensor_by_segments( lengths, stride_per_rank_per_key, recat, None, ) values, weights = _permute_tensor_by_segments( values, length_per_key, recat, weights, ) if not stride_per_key_per_rank: stride_per_key_per_rank = [[0]] * len(keys) if stagger > 1: stride_per_key_per_rank_stagger: List[List[int]] = [] local_world_size = num_workers // stagger for i in range(len(keys)): stride_per_rank_stagger: List[int] = [] for j in range(local_world_size): stride_per_rank_stagger.extend( stride_per_key_per_rank[i][j::local_world_size] ) stride_per_key_per_rank_stagger.append(stride_per_rank_stagger) stride_per_key_per_rank = stride_per_key_per_rank_stagger kjt = KeyedJaggedTensor( keys=keys, values=values, weights=weights, lengths=lengths, stride_per_key_per_rank=stride_per_key_per_rank, ) return kjt.sync() else: assert stride_per_rank is not None with record_function("## all2all_data:recat_values ##"): if recat is not None and recat.numel() > 0: stride = stride_per_rank[0] if all(s == stride for s in stride_per_rank): ( lengths, values, weights, ) = 
torch.ops.fbgemm.permute_2D_sparse_data( recat, lengths.view(-1, stride), values, weights, values.numel(), ) lengths = lengths.view(-1) else: # variable batch size per rank ( lengths, values, weights, ) = torch.ops.fbgemm.permute_1D_sparse_data( recat, lengths.view(-1), values, weights, values.numel(), ) kjt = KeyedJaggedTensor( keys=keys, values=values, weights=weights, lengths=lengths, stride=sum(stride_per_rank), ) return kjt.sync() def get_dataloader( batch_size: int, num_embeddings: int, pin_memory: bool = False, num_workers: int = 0 ) -> DataLoader: """ Gets a Random dataloader for the two tower model, containing a two_feature KJT as sparse_features, empty dense_features and binary labels Args: batch_size (int): batch_size num_embeddings (int): hash_size of the two embedding tables pin_memory (bool): Whether to pin_memory on the GPU num_workers (int) Number of dataloader workers Returns: dataloader (DataLoader): PyTorch dataloader for the specified options. """ two_tower_column_names = DEFAULT_RATINGS_COLUMN_NAMES[:2] return DataLoader( RandomRecDataset( keys=two_tower_column_names, batch_size=batch_size, hash_size=num_embeddings, ids_per_feature=1, num_dense=0, ), batch_size=None, batch_sampler=None, pin_memory=pin_memory, num_workers=num_workers, ) def get_index( embedding_dim: int, num_centroids: int, num_probe: int, num_subquantizers: int, bits_per_code: int, device: Optional[torch.device] = None, # pyre-ignore[11] ) -> Union[faiss.GpuIndexIVFPQ, faiss.IndexIVFPQ]: """ returns a FAISS IVFPQ index, placed on the device passed in Args: embedding_dim (int): indexed embedding dimension, num_centroids (int): the number of centroids (Voronoi cells), num_probe (int): The number of centroids (Voronoi cells) to probe. Must be <= num_centroids. 
Sweeping powers of 2 for nprobe and picking one of those based on recall statistics (e.g., 1, 2, 4, 8, ..,) is typically done., num_subquantizers (int): the number of subquanitizers in Product Quantization (PQ) compression of subvectors, bits_per_code (int): The number of bits for each subvector in Product Quantization (PQ), Example:: get_index() """ if device is not None and device.type == "cuda": # pyre-fixme[16] res = faiss.StandardGpuResources() # pyre-fixme[16] config = faiss.GpuIndexIVFPQConfig() # pyre-ignore[16] index = faiss.GpuIndexIVFPQ( res, embedding_dim, num_centroids, num_subquantizers, bits_per_code, # pyre-fixme[16] faiss.METRIC_L2, config, ) else: # pyre-fixme[16] quantizer = faiss.IndexFlatL2(embedding_dim) # pyre-fixme[16] index = faiss.IndexIVFPQ( quantizer, embedding_dim, num_centroids, num_subquantizers, bits_per_code, ) index.nprobe = num_probe return index class TwoTower(nn.Module): """ Simple TwoTower (UV) Model. Embeds two different entities into the same space. A simplified version of the `A Dual Augmented Two-tower Model for Online Large-scale Recommendation <https://dlp-kdd.github.io/assets/pdf/DLP-KDD_2021_paper_4.pdf>`_ model. Used to train the retrieval model Embeddings trained with this model will be indexed and queried in the retrieval example. 
Args: embedding_bag_collection (EmbeddingBagCollection): embedding_bag_collection with two EmbeddingBags layer_sizes (List[int]): list of the layer_sizes for the MLP device (Optional[torch.device]) Example:: m = TwoTower(ebc, [16, 8], device) """ def __init__( self, embedding_bag_collection: EmbeddingBagCollection, layer_sizes: List[int], device: Optional[torch.device] = None, ) -> None: super().__init__() # If running this example on Torcherc < v0.2.0, # please use embedding_bag_configs as a property, not a function assert ( len(embedding_bag_collection.embedding_bag_configs()) == 2 ), "Expected two EmbeddingBags in the two tower model" assert ( embedding_bag_collection.embedding_bag_configs()[0].embedding_dim == embedding_bag_collection.embedding_bag_configs()[1].embedding_dim ), "Both EmbeddingBagConfigs must have the same dimension" embedding_dim: int = embedding_bag_collection.embedding_bag_configs()[ 0 ].embedding_dim self._feature_names_query: List[str] = ( embedding_bag_collection.embedding_bag_configs()[0].feature_names ) self._candidate_feature_names: List[str] = ( embedding_bag_collection.embedding_bag_configs()[1].feature_names ) self.ebc = embedding_bag_collection self.query_proj = MLP( in_size=embedding_dim, layer_sizes=layer_sizes, device=device ) self.candidate_proj = MLP( in_size=embedding_dim, layer_sizes=layer_sizes, device=device ) def forward(self, kjt: KeyedJaggedTensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: kjt (KeyedJaggedTensor): KJT containing query_ids and candidate_ids to query Returns: Tuple[torch.Tensor, torch.Tensor]: Tuple containing embeddings for each tower """ pooled_embeddings = self.ebc(kjt) query_embedding: torch.Tensor = self.query_proj( torch.cat( [pooled_embeddings[feature] for feature in self._feature_names_query], dim=1, ) ) candidate_embedding: torch.Tensor = self.candidate_proj( torch.cat( [ pooled_embeddings[feature] for feature in self._candidate_feature_names ], dim=1, ) ) return query_embedding, 
candidate_embedding class TwoTowerTrainTask(nn.Module): """ Train Task for the TwoTower model. Adds BinaryCrossEntropy Loss. to use with train_pipeline Args: two_tower (TwoTower): two tower model Example:: m = TwoTowerTrainTask(two_tower_model) """ def __init__(self, two_tower: TwoTower) -> None: super().__init__() self.two_tower = two_tower self.loss_fn: nn.Module = nn.BCEWithLogitsLoss() def forward( self, batch: Batch ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: """ Args: batch (Batch): batch from torchrec.datasets Returns: Tuple[loss, Tuple[loss, logits, labels]]: each of shape B x 1 """ query_embedding, candidate_embedding = self.two_tower(batch.sparse_features) logits = (query_embedding * candidate_embedding).sum(dim=1).squeeze() loss = self.loss_fn(logits, batch.labels.float()) return loss, (loss.detach(), logits.detach(), batch.labels.detach()) The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train( num_embeddings: int = 1024**2, embedding_dim: int = 64, layer_sizes: Optional[List[int]] = None, learning_rate: float = 0.01, batch_size: int = 32, num_iterations: int = 100, num_centroids: int = 100, num_subquantizers: int = 8, bits_per_code: int = 8, num_probe: int = 8, save_dir: Optional[str] = None, ) -> None` to solve the following problem: Trains a simple Two Tower (UV) model, which is a simplified version of [A Dual Augmented Two-tower Model for Online Large-scale Recommendation](https://dlp-kdd.github.io/assets/pdf/DLP-KDD_2021_paper_4.pdf). Torchrec is used to shard the model, and is pipelined so that dataloading, data-parallel to model-parallel comms, and forward/backward are overlapped. It is trained on random data in the format of [MovieLens 20M](https://grouplens.org/datasets/movielens/20m/) dataset in SPMD fashion. The distributed model is gathered to CPU. 
The item (movie) towers embeddings are used to train a FAISS [IVFPQ](https://github.com/facebookresearch/faiss/wiki/Lower-memory-footprint) index, which is serialized. The resulting `KNNIndex` can be queried with batched `torch.Tensor`, and will return the distances and indices for the approximate K nearest neighbors of the query embeddings. The model itself is also serialized. Args: num_embeddings (int): The number of embeddings the embedding table embedding_dim (int): embedding dimension of both embedding tables layer_sizes (List[int]): list representing layer sizes of the MLP. Last size is the final embedding size learning_rate (float): learning_rate batch_size (int): batch size to use for training num_iterations (int): number of train batches num_centroids (int): The number of centroids (Voronoi cells) num_subquantizers (int): The number of subquanitizers in Product Quantization (PQ) compression of subvectors bits_per_code (int): The number of bits for each subvector in Product Quantization (PQ) num_probe (int): The number of centroids (Voronoi cells) to probe. Must be <= num_centroids. Sweeping powers of 2 for nprobe and picking one of those based on recall statistics (e.g., 1, 2, 4, 8, ..,) is typically done. save_dir (Optional[str]): Directory to save model and faiss index. If None, nothing is saved Here is the function: def train( num_embeddings: int = 1024**2, embedding_dim: int = 64, layer_sizes: Optional[List[int]] = None, learning_rate: float = 0.01, batch_size: int = 32, num_iterations: int = 100, num_centroids: int = 100, num_subquantizers: int = 8, bits_per_code: int = 8, num_probe: int = 8, save_dir: Optional[str] = None, ) -> None: """ Trains a simple Two Tower (UV) model, which is a simplified version of [A Dual Augmented Two-tower Model for Online Large-scale Recommendation](https://dlp-kdd.github.io/assets/pdf/DLP-KDD_2021_paper_4.pdf). 
Torchrec is used to shard the model, and is pipelined so that dataloading, data-parallel to model-parallel comms, and forward/backward are overlapped. It is trained on random data in the format of [MovieLens 20M](https://grouplens.org/datasets/movielens/20m/) dataset in SPMD fashion. The distributed model is gathered to CPU. The item (movie) towers embeddings are used to train a FAISS [IVFPQ](https://github.com/facebookresearch/faiss/wiki/Lower-memory-footprint) index, which is serialized. The resulting `KNNIndex` can be queried with batched `torch.Tensor`, and will return the distances and indices for the approximate K nearest neighbors of the query embeddings. The model itself is also serialized. Args: num_embeddings (int): The number of embeddings the embedding table embedding_dim (int): embedding dimension of both embedding tables layer_sizes (List[int]): list representing layer sizes of the MLP. Last size is the final embedding size learning_rate (float): learning_rate batch_size (int): batch size to use for training num_iterations (int): number of train batches num_centroids (int): The number of centroids (Voronoi cells) num_subquantizers (int): The number of subquanitizers in Product Quantization (PQ) compression of subvectors bits_per_code (int): The number of bits for each subvector in Product Quantization (PQ) num_probe (int): The number of centroids (Voronoi cells) to probe. Must be <= num_centroids. Sweeping powers of 2 for nprobe and picking one of those based on recall statistics (e.g., 1, 2, 4, 8, ..,) is typically done. save_dir (Optional[str]): Directory to save model and faiss index. 
If None, nothing is saved """ if layer_sizes is None: layer_sizes = [128, 64] rank = int(os.environ["LOCAL_RANK"]) if torch.cuda.is_available(): device: torch.device = torch.device(f"cuda:{rank}") backend = "nccl" torch.cuda.set_device(device) else: device: torch.device = torch.device("cpu") backend = "gloo" dist.init_process_group(backend=backend) two_tower_column_names = DEFAULT_RATINGS_COLUMN_NAMES[:2] eb_configs = [ EmbeddingBagConfig( name=f"t_{feature_name}", embedding_dim=embedding_dim, num_embeddings=num_embeddings, feature_names=[feature_name], ) for feature_name in two_tower_column_names ] embedding_bag_collection = EmbeddingBagCollection( tables=eb_configs, device=torch.device("meta"), ) two_tower_model = TwoTower( embedding_bag_collection=embedding_bag_collection, layer_sizes=layer_sizes, device=device, ) two_tower_train_task = TwoTowerTrainTask(two_tower_model) apply_optimizer_in_backward( RowWiseAdagrad, two_tower_train_task.two_tower.ebc.parameters(), {"lr": learning_rate}, ) model = DistributedModelParallel( module=two_tower_train_task, device=device, ) optimizer = KeyedOptimizerWrapper( dict(model.named_parameters()), lambda params: torch.optim.Adam(params, lr=learning_rate), ) dataloader = get_dataloader( batch_size=batch_size, num_embeddings=num_embeddings, pin_memory=(backend == "nccl"), ) dl_iterator = iter(dataloader) train_pipeline = TrainPipelineSparseDist( model, optimizer, device, ) # Train model for _ in range(num_iterations): try: train_pipeline.progress(dl_iterator) except StopIteration: break checkpoint_pg = dist.new_group(backend="gloo") # Copy sharded state_dict to CPU. 
cpu_state_dict = state_dict_to_device( model.state_dict(), pg=checkpoint_pg, device=torch.device("cpu") ) ebc_cpu = EmbeddingBagCollection( tables=eb_configs, device=torch.device("meta"), ) two_tower_cpu = TwoTower( embedding_bag_collection=ebc_cpu, layer_sizes=layer_sizes, ) two_tower_train_cpu = TwoTowerTrainTask(two_tower_cpu) if rank == 0: two_tower_train_cpu = two_tower_train_cpu.to_empty(device="cpu") state_dict_gather(cpu_state_dict, two_tower_train_cpu.state_dict()) dist.barrier() # Create and train FAISS index for the item (movie) tower on CPU if rank == 0: index = get_index( embedding_dim=embedding_dim, num_centroids=num_centroids, num_probe=num_probe, num_subquantizers=num_subquantizers, bits_per_code=bits_per_code, device=torch.device("cpu"), ) values = torch.tensor(list(range(num_embeddings)), device=torch.device("cpu")) kjt = KeyedJaggedTensor( keys=two_tower_column_names, values=values, lengths=torch.tensor( [0] * num_embeddings + [1] * num_embeddings, device=torch.device("cpu"), ), ) # Get the embeddings of the item(movie) tower by querying model with torch.no_grad(): lookups = two_tower_cpu.ebc(kjt)[two_tower_column_names[1]] item_embeddings = two_tower_cpu.candidate_proj(lookups) index.train(item_embeddings) index.add(item_embeddings) if save_dir is not None: save_dir = save_dir.rstrip("/") quant_model = trec_infer.modules.quantize_embeddings( model, dtype=torch.qint8, inplace=True ) torch.save(quant_model.state_dict(), f"{save_dir}/model.pt") # pyre-ignore[16] faiss.write_index(index, f"{save_dir}/faiss.index")
Trains a simple Two Tower (UV) model, which is a simplified version of [A Dual Augmented Two-tower Model for Online Large-scale Recommendation](https://dlp-kdd.github.io/assets/pdf/DLP-KDD_2021_paper_4.pdf). Torchrec is used to shard the model, and is pipelined so that dataloading, data-parallel to model-parallel comms, and forward/backward are overlapped. It is trained on random data in the format of [MovieLens 20M](https://grouplens.org/datasets/movielens/20m/) dataset in SPMD fashion. The distributed model is gathered to CPU. The item (movie) towers embeddings are used to train a FAISS [IVFPQ](https://github.com/facebookresearch/faiss/wiki/Lower-memory-footprint) index, which is serialized. The resulting `KNNIndex` can be queried with batched `torch.Tensor`, and will return the distances and indices for the approximate K nearest neighbors of the query embeddings. The model itself is also serialized. Args: num_embeddings (int): The number of embeddings the embedding table embedding_dim (int): embedding dimension of both embedding tables layer_sizes (List[int]): list representing layer sizes of the MLP. Last size is the final embedding size learning_rate (float): learning_rate batch_size (int): batch size to use for training num_iterations (int): number of train batches num_centroids (int): The number of centroids (Voronoi cells) num_subquantizers (int): The number of subquanitizers in Product Quantization (PQ) compression of subvectors bits_per_code (int): The number of bits for each subvector in Product Quantization (PQ) num_probe (int): The number of centroids (Voronoi cells) to probe. Must be <= num_centroids. Sweeping powers of 2 for nprobe and picking one of those based on recall statistics (e.g., 1, 2, 4, 8, ..,) is typically done. save_dir (Optional[str]): Directory to save model and faiss index. If None, nothing is saved
8,856
import time from typing import Dict, List, Optional, Tuple import numpy as np import torch from torch.utils.data.dataset import IterableDataset from torchrec.datasets.random import RandomRecDataset from torchrec.datasets.utils import Batch from torchrec.modules.embedding_configs import EmbeddingBagConfig class RandomRecDataset(IterableDataset[Batch]): """ Random iterable dataset used to generate batches for recommender systems (RecSys). Currently produces unweighted sparse features only. TODO: Add weighted sparse features. Args: keys (List[str]): List of feature names for sparse features. batch_size (int): batch size. hash_size (Optional[int]): Max sparse id value. All sparse IDs will be taken modulo this value. hash_sizes (Optional[List[int]]): Max sparse id value per feature in keys. Each sparse ID will be taken modulo the corresponding value from this argument. Note, if this is used, hash_size will be ignored. ids_per_feature (int): Number of IDs per sparse feature. ids_per_features (int): Number of IDs per sparse feature in each key. Note, if this is used, ids_per_feature will be ignored. num_dense (int): Number of dense features. manual_seed (int): Seed for deterministic behavior. num_batches: (Optional[int]): Num batches to generate before raising StopIteration num_generated_batches int: Num batches to cache. If num_batches > num_generated batches, then we will cycle to the first generated batch. If this value is negative, batches will be generated on the fly. min_ids_per_feature (int): Minimum number of IDs per features. 
Example:: dataset = RandomRecDataset( keys=["feat1", "feat2"], batch_size=16, hash_size=100_000, ids_per_feature=1, num_dense=13, ), example = next(iter(dataset)) """ def __init__( self, keys: List[str], batch_size: int, hash_size: Optional[int] = 100, hash_sizes: Optional[List[int]] = None, ids_per_feature: Optional[int] = 2, ids_per_features: Optional[List[int]] = None, num_dense: int = 50, manual_seed: Optional[int] = None, num_batches: Optional[int] = None, num_generated_batches: int = 10, min_ids_per_feature: Optional[int] = None, min_ids_per_features: Optional[List[int]] = None, ) -> None: super().__init__() if hash_sizes is None: hash_size = hash_size or 100 hash_sizes = [hash_size] * len(keys) assert hash_sizes is not None assert len(hash_sizes) == len( keys ), "length of hash_sizes must be equal to the number of keys" if ids_per_features is None: ids_per_feature = ids_per_feature or 2 ids_per_features = [ids_per_feature] * len(keys) assert ids_per_features is not None if min_ids_per_features is None: min_ids_per_feature = ( min_ids_per_feature if min_ids_per_feature is not None else ids_per_feature ) assert min_ids_per_feature is not None min_ids_per_features = [min_ids_per_feature] * len(keys) assert len(ids_per_features) == len( keys ), "length of ids_per_features must be equal to the number of keys" self.batch_generator = _RandomRecBatch( keys=keys, batch_size=batch_size, hash_sizes=hash_sizes, ids_per_features=ids_per_features, num_dense=num_dense, manual_seed=manual_seed, num_batches=None, num_generated_batches=num_generated_batches, min_ids_per_features=min_ids_per_features, ) self.num_batches: int = cast(int, num_batches if not None else sys.maxsize) def __iter__(self) -> Iterator[Batch]: return itertools.islice(iter(self.batch_generator), self.num_batches) def __len__(self) -> int: return self.num_batches class Batch(Pipelineable): dense_features: torch.Tensor sparse_features: KeyedJaggedTensor labels: torch.Tensor def to(self, device: 
torch.device, non_blocking: bool = False) -> "Batch": return Batch( dense_features=self.dense_features.to( device=device, non_blocking=non_blocking ), sparse_features=self.sparse_features.to( device=device, non_blocking=non_blocking ), labels=self.labels.to(device=device, non_blocking=non_blocking), ) def record_stream(self, stream: torch.cuda.streams.Stream) -> None: self.dense_features.record_stream(stream) self.sparse_features.record_stream(stream) self.labels.record_stream(stream) def pin_memory(self) -> "Batch": return Batch( dense_features=self.dense_features.pin_memory(), sparse_features=self.sparse_features.pin_memory(), labels=self.labels.pin_memory(), ) class EmbeddingBagConfig(BaseEmbeddingConfig): pooling: PoolingType = PoolingType.SUM def get_random_dataset( batch_size: int, num_batches: int, num_dense_features: int, embedding_bag_configs: List[EmbeddingBagConfig], pooling_factors: Optional[Dict[str, int]] = None, ) -> IterableDataset[Batch]: if pooling_factors is None: pooling_factors = {} keys = [] ids_per_features = [] hash_sizes = [] for table in embedding_bag_configs: for feature_name in table.feature_names: keys.append(feature_name) # guess a pooling factor here ids_per_features.append(pooling_factors.get(feature_name, 64)) hash_sizes.append(table.num_embeddings) return RandomRecDataset( keys=keys, batch_size=batch_size, hash_sizes=hash_sizes, ids_per_features=ids_per_features, num_dense=num_dense_features, num_batches=num_batches, )
null
8,857
import time from typing import Dict, List, Optional, Tuple import numpy as np import torch from torch.utils.data.dataset import IterableDataset from torchrec.datasets.random import RandomRecDataset from torchrec.datasets.utils import Batch from torchrec.modules.embedding_configs import EmbeddingBagConfig def train_one_epoch( model: torch.nn.Module, optimizer: torch.optim.Optimizer, dataset: IterableDataset[Batch], device: torch.device, ) -> float: start_time = time.perf_counter() for data in dataset: sparse_features = data.sparse_features.to(device) pooled_embeddings = model(sparse_features) optimizer.zero_grad() vals = [] for _name, param in pooled_embeddings.to_dict().items(): vals.append(param) torch.cat(vals, dim=1).sum().backward() optimizer.step() end_time = time.perf_counter() return end_time - start_time def train_one_epoch_fused_optimizer( model: torch.nn.Module, dataset: IterableDataset[Batch], device: torch.device, ) -> float: start_time = time.perf_counter() for data in dataset: sparse_features = data.sparse_features.to(device) fused_pooled_embeddings = model(sparse_features) fused_vals = [] for _name, param in fused_pooled_embeddings.to_dict().items(): fused_vals.append(param) torch.cat(fused_vals, dim=1).sum().backward() end_time = time.perf_counter() return end_time - start_time class Batch(Pipelineable): dense_features: torch.Tensor sparse_features: KeyedJaggedTensor labels: torch.Tensor def to(self, device: torch.device, non_blocking: bool = False) -> "Batch": return Batch( dense_features=self.dense_features.to( device=device, non_blocking=non_blocking ), sparse_features=self.sparse_features.to( device=device, non_blocking=non_blocking ), labels=self.labels.to(device=device, non_blocking=non_blocking), ) def record_stream(self, stream: torch.cuda.streams.Stream) -> None: self.dense_features.record_stream(stream) self.sparse_features.record_stream(stream) self.labels.record_stream(stream) def pin_memory(self) -> "Batch": return Batch( 
dense_features=self.dense_features.pin_memory(), sparse_features=self.sparse_features.pin_memory(), labels=self.labels.pin_memory(), ) def train( model: torch.nn.Module, optimizer: Optional[torch.optim.Optimizer], dataset: IterableDataset[Batch], device: torch.device, epochs: int = 100, ) -> Tuple[float, float]: training_time = [] for _ in range(epochs): if optimizer: training_time.append(train_one_epoch(model, optimizer, dataset, device)) else: training_time.append( train_one_epoch_fused_optimizer(model, dataset, device) ) return np.mean(training_time), np.std(training_time)
null
8,858
import argparse import sys from typing import List, Tuple import torch from fbgemm_gpu.split_table_batched_embeddings_ops_training import EmbeddingLocation from torchrec.github.benchmarks import ebc_benchmarks_utils from torchrec.modules.embedding_configs import EmbeddingBagConfig from torchrec.modules.embedding_modules import EmbeddingBagCollection from torchrec.modules.fused_embedding_modules import FusedEmbeddingBagCollection DLRM_NUM_EMBEDDINGS_PER_FEATURE = [ 45833188, 36746, 17245, 7413, 20243, 3, 7114, 1441, 62, 29275261, 1572176, 345138, 10, 2209, 11267, 128, 4, 974, 14, 48937457, 11316796, 40094537, 452104, 12606, 104, 35, ] def get_shrunk_dlrm_num_embeddings(reduction_degree: int) -> List[int]: return [ num_emb if num_emb < 10000000 else int(num_emb / reduction_degree) for num_emb in DLRM_NUM_EMBEDDINGS_PER_FEATURE ]
null
8,859
import argparse
import sys
from typing import List, Tuple

import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_training import EmbeddingLocation
from torchrec.github.benchmarks import ebc_benchmarks_utils
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.modules.fused_embedding_modules import FusedEmbeddingBagCollection


# NOTE(review): shadows the EmbeddingBagConfig imported above — confirm which
# definition the rest of this file is meant to use.
class EmbeddingBagConfig(BaseEmbeddingConfig):
    # Reduction applied over each bag of looked-up embedding rows.
    pooling: PoolingType = PoolingType.SUM


class FusedEmbeddingBagCollection(
    EmbeddingBagCollectionInterface, FusedOptimizerModule
):
    """
    FusedEmbeddingBagCollection represents a collection of pooled embeddings
    (`EmbeddingBags`). It utilizes a technique called Optimizer fusion
    (register the optimizer with the model): during the backwards pass, the
    registered optimizer will be called.

    It processes sparse data in the form of `KeyedJaggedTensor` with values of
    the form [F X B X L] where:

    * F: features (keys)
    * B: batch size
    * L: length of sparse features (jagged)

    and outputs a `KeyedTensor` with values of the form [B x F x D] where:

    * F: features (keys)
    * D: each feature's (key's) embedding dimension
    * B: batch size

    Args:
        tables (List[EmbeddingBagConfig]): list of embedding tables.
        is_weighted (bool): whether input `KeyedJaggedTensor` is weighted.
        optimizer_type (Type[torch.optim.Optimizer]): fusion optimizer type.
        optimizer_kwargs (Dict[str, Any]): fusion optimizer kwargs.
        device (Optional[torch.device]): compute device.
        location (Optional[EmbeddingLocation]): where FBGEMM places the
            embedding weights (host / device / managed).

    Example::

        table_0 = EmbeddingBagConfig(
            name="t1", embedding_dim=4, num_embeddings=10, feature_names=["f1"]
        )
        table_1 = EmbeddingBagConfig(
            name="t2", embedding_dim=8, num_embeddings=10, feature_names=["f2"]
        )

        ebc = FusedEmbeddingBagCollection(
            tables=[table_0, table_1],
            optimizer_type=torch.optim.SGD,
            optimizer_kwargs={"lr": .01},
        )

        #        0       1        2  <-- batch
        # "f1"   [0,1]   None     [2]
        # "f2"   [3]     [4]      [5,6,7]
        #  ^
        # feature

        features = KeyedJaggedTensor(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )

        pooled_embeddings = ebc(features)
        print(pooled_embeddings.values())
        tensor([[ 0.2093,  0.1395,  0.1571,  0.3583,  0.0421,  0.0037, -0.0692,  0.0663,
                  0.2166, -0.3150, -0.2771, -0.0301],
                [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0165, -0.1225,  0.2483,  0.0624,
                 -0.1168, -0.0509, -0.1309,  0.3059],
                [ 0.0811, -0.1779, -0.1443,  0.1097, -0.4410, -0.4036,  0.4458, -0.2735,
                 -0.3080, -0.2102, -0.0564,  0.5583]], grad_fn=<CatBackward0>)
        print(pooled_embeddings.keys())
        ['f1', 'f2']
        print(pooled_embeddings.offset_per_key())
        [0, 4, 12]
    """

    def __init__(
        self,
        tables: List[EmbeddingBagConfig],
        optimizer_type: Type[torch.optim.Optimizer],
        optimizer_kwargs: Dict[str, Any],
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        location: Optional[EmbeddingLocation] = None,
    ) -> None:
        super().__init__()
        self._optimizer_type = optimizer_type
        self._optimizer_kwargs = optimizer_kwargs
        self._device: torch.device = (
            device if device is not None else torch.device("cpu")
        )

        # Translate the torch optimizer into the FBGEMM fused-optimizer
        # equivalent; None means the combination cannot be fused.
        emb_optim_and_kwargs = convert_optimizer_type_and_kwargs(
            optimizer_type, optimizer_kwargs
        )
        if emb_optim_and_kwargs is None:
            raise ValueError(
                f"Cannot fuse optimizer_type={optimizer_type} with kwargs {optimizer_kwargs}"
            )
        (emb_optim_type, emb_opt_kwargs) = emb_optim_and_kwargs

        # Device-resident / UVM-managed placements only make sense on CUDA
        # (or meta for shape inference).
        if location in [
            EmbeddingLocation.DEVICE,
            EmbeddingLocation.MANAGED,
            EmbeddingLocation.MANAGED_CACHING,
        ]:
            assert device is not None and device.type in [
                "cuda",
                "meta",
            ], f"Using location={location} requires device=cuda or meta"

        if device is None:
            device = torch.device("cpu")

        # Default the weight location from the compute device when unset.
        if location is None:
            if device.type in ["cpu", "meta"]:
                location = EmbeddingLocation.HOST
            elif device.type == "cuda":
                location = EmbeddingLocation.DEVICE
            else:
                raise ValueError("EmbeddingLocation could not be set")

        self._is_weighted = is_weighted
        self._embedding_bag_configs = tables

        # Registering in a List instead of ModuleList because we don't want
        # them to be auto-registered. Their states will be modified via
        # self.embedding_bags.
        self._emb_modules: List[nn.Module] = []

        # Tables sharing (pooling, data_type) are batched into one FBGEMM
        # lookup kernel.
        self._key_to_tables: Dict[
            Tuple[PoolingType, DataType], List[EmbeddingBagConfig]
        ] = defaultdict(list)
        self._length_per_key: List[int] = []
        for table in tables:
            self._length_per_key.extend(
                [table.embedding_dim] * len(table.feature_names)
            )
            key = (table.pooling, table.data_type)
            self._key_to_tables[key].append(table)

        optims = []
        # NOTE(review): the loop variable `tables` shadows the `tables`
        # parameter from here on.
        for key, tables in self._key_to_tables.items():
            (pooling, data_type) = key
            emb_module = _BatchedFusedEmbeddingLookups(
                cast(List[BaseEmbeddingConfig], tables),
                data_type=data_type,
                pooling=pooling,
                optimizer_type=emb_optim_type,
                optimizer_kwargs=emb_opt_kwargs,
                device=device,
                embedding_location=location,
            )
            self._emb_modules.append(emb_module)
            # NOTE(review): `params` is populated but never read afterwards —
            # appears vestigial.
            params: Dict[str, torch.Tensor] = {}
            for param_key, weight in emb_module.fused_optimizer().params.items():
                params[f"embedding_bags.{param_key}"] = weight
            optims.append(("", emb_module.fused_optimizer()))

        self._optim: CombinedOptimizer = CombinedOptimizer(optims)
        self._embedding_names: List[str] = list(
            itertools.chain(*get_embedding_names_by_table(self._embedding_bag_configs))
        )

        # We map over the parameters from FBGEMM backed kernels to the
        # canonical nn.EmbeddingBag representation. This provides consistency
        # between this class and the EmbeddingBagCollection's nn.Module API
        # calls (state_dict, named_modules, etc).
        self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
        for (_key, tables), emb_module in zip(
            self._key_to_tables.items(), self._emb_modules
        ):
            for embedding_config, weight in zip(
                tables,
                emb_module.split_embedding_weights(),
            ):
                self.embedding_bags[embedding_config.name] = torch.nn.Module()
                self.embedding_bags[embedding_config.name].register_parameter(
                    "weight", torch.nn.Parameter(weight)
                )

    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """
        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].

        Returns:
            KeyedTensor
        """
        assert features is not None
        feature_dict = features.to_dict()
        embeddings = []

        # _emb_modules and _key_to_tables were built in lockstep in __init__,
        # so zipping them pairs each kernel with its table group.
        for emb_op, (_key, tables) in zip(
            self._emb_modules, self._key_to_tables.items()
        ):
            indicies = []
            lengths = []
            offsets = []
            weights = []
            for table in tables:
                for feature in table.feature_names:
                    f = feature_dict[feature]
                    indicies.append(f.values())
                    lengths.append(f.lengths())
                    if self._is_weighted:
                        weights.append(f.weights())
            indicies = torch.cat(indicies)
            lengths = torch.cat(lengths)
            # FBGEMM expects complete (inclusive of trailing total) offsets.
            offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            if self._is_weighted:
                weights = torch.cat(weights)
            embeddings.append(
                emb_op(
                    indicies.int(),
                    offsets.int(),
                    weights if self._is_weighted else None,
                )
            )

        embeddings = torch.cat(embeddings, dim=1)
        return KeyedTensor(
            keys=self._embedding_names,
            values=embeddings,
            length_per_key=self._length_per_key,
        )

    def _get_name(self) -> str:
        return "FusedEmbeddingBagCollection"

    def device(self) -> torch.device:
        # Compute device recorded at construction time.
        return self._device

    def embedding_bag_configs(self) -> List[EmbeddingBagConfig]:
        return self._embedding_bag_configs

    def is_weighted(self) -> bool:
        return self._is_weighted

    def optimizer_type(self) -> Type[torch.optim.Optimizer]:
        return self._optimizer_type

    def optimizer_kwargs(self) -> Dict[str, Any]:
        return self._optimizer_kwargs

    def fused_optimizer(self) -> KeyedOptimizer:
        # Combined view over every per-kernel fused optimizer.
        return self._optim


def get_fused_ebc_uvm_time(
    embedding_bag_configs: List[EmbeddingBagConfig],
    device: torch.device,
    location: EmbeddingLocation,
    epochs: int = 100,
) -> Tuple[float, float]:
    """Benchmark a FusedEmbeddingBagCollection with an explicit weight location.

    Builds the fused EBC (SGD, lr=0.02), trains it on a small random dataset,
    and returns the (mean, std) epoch time reported by
    ``ebc_benchmarks_utils.train``.
    """
    fused_ebc = FusedEmbeddingBagCollection(
        tables=embedding_bag_configs,
        optimizer_type=torch.optim.SGD,
        optimizer_kwargs={"lr": 0.02},
        device=device,
        location=location,
    )
    dataset = ebc_benchmarks_utils.get_random_dataset(
        batch_size=64,
        num_batches=10,
        num_dense_features=1024,
        embedding_bag_configs=embedding_bag_configs,
    )
    fused_ebc_time_avg, fused_ebc_time_std = ebc_benchmarks_utils.train(
        model=fused_ebc,
        optimizer=None,  # optimizer is fused into the backward pass
        dataset=dataset,
        device=device,
        epochs=epochs,
    )
    return fused_ebc_time_avg, fused_ebc_time_std
null
8,860
import argparse
import sys
from typing import List, Tuple

import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_training import EmbeddingLocation
from torchrec.github.benchmarks import ebc_benchmarks_utils
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.modules.fused_embedding_modules import FusedEmbeddingBagCollection


# NOTE(review): shadows the EmbeddingBagConfig imported above — confirm which
# definition the rest of this file is meant to use.
class EmbeddingBagConfig(BaseEmbeddingConfig):
    # Reduction applied over each bag of looked-up embedding rows.
    pooling: PoolingType = PoolingType.SUM


class EmbeddingBagCollection(EmbeddingBagCollectionInterface):
    """
    EmbeddingBagCollection represents a collection of pooled embeddings
    (`EmbeddingBags`).

    It processes sparse data in the form of `KeyedJaggedTensor` with values of
    the form [F X B X L] where:

    * F: features (keys)
    * B: batch size
    * L: length of sparse features (jagged)

    and outputs a `KeyedTensor` with values of the form [B * (F * D)] where:

    * F: features (keys)
    * D: each feature's (key's) embedding dimension
    * B: batch size

    Args:
        tables (List[EmbeddingBagConfig]): list of embedding tables.
        is_weighted (bool): whether input `KeyedJaggedTensor` is weighted.
        device (Optional[torch.device]): default compute device.

    Example::

        table_0 = EmbeddingBagConfig(
            name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
        )
        table_1 = EmbeddingBagConfig(
            name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"]
        )

        ebc = EmbeddingBagCollection(tables=[table_0, table_1])

        #        0       1        2  <-- batch
        # "f1"   [0,1]   None     [2]
        # "f2"   [3]     [4]      [5,6,7]
        #  ^
        # feature

        features = KeyedJaggedTensor(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )

        pooled_embeddings = ebc(features)
        print(pooled_embeddings.values())
        tensor([[-0.8899, -0.1342, -1.9060, -0.0905, -0.2814, -0.9369, -0.7783],
                [ 0.0000,  0.0000,  0.0000,  0.1598,  0.0695,  1.3265, -0.1011],
                [-0.4256, -1.1846, -2.1648, -1.0893,  0.3590, -1.9784, -0.7681]],
               grad_fn=<CatBackward0>)
        print(pooled_embeddings.keys())
        ['f1', 'f2']
        print(pooled_embeddings.offset_per_key())
        tensor([0, 3, 7])
    """

    def __init__(
        self,
        tables: List[EmbeddingBagConfig],
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}")
        self._is_weighted = is_weighted
        self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
        self._embedding_bag_configs = tables
        self._lengths_per_embedding: List[int] = []
        self._dtypes: List[int] = []

        table_names = set()
        for embedding_config in tables:
            # Table names are the ModuleDict keys, so they must be unique.
            if embedding_config.name in table_names:
                raise ValueError(f"Duplicate table name {embedding_config.name}")
            table_names.add(embedding_config.name)
            # Only FP32 / FP16 are distinguished here; any non-FP32 data type
            # falls through to float16.
            dtype = (
                torch.float32
                if embedding_config.data_type == DataType.FP32
                else torch.float16
            )
            self.embedding_bags[embedding_config.name] = nn.EmbeddingBag(
                num_embeddings=embedding_config.num_embeddings,
                embedding_dim=embedding_config.embedding_dim,
                mode=pooling_type_to_str(embedding_config.pooling),
                device=device,
                include_last_offset=True,
                dtype=dtype,
            )
            # Infer the device from the first constructed bag when not given.
            if device is None:
                device = self.embedding_bags[embedding_config.name].weight.device
            self._dtypes.append(embedding_config.data_type.value)
            # A table with no explicit feature names serves a single feature
            # named after the table itself.
            if not embedding_config.feature_names:
                embedding_config.feature_names = [embedding_config.name]
            self._lengths_per_embedding.extend(
                len(embedding_config.feature_names) * [embedding_config.embedding_dim]
            )

        self._device: torch.device = device or torch.device("cpu")
        self._embedding_names: List[str] = [
            embedding
            for embeddings in get_embedding_names_by_table(tables)
            for embedding in embeddings
        ]
        self._feature_names: List[List[str]] = [table.feature_names for table in tables]
        self.reset_parameters()

    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """
        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].

        Returns:
            KeyedTensor
        """
        flat_feature_names: List[str] = []
        for names in self._feature_names:
            flat_feature_names.extend(names)
        inverse_indices = reorder_inverse_indices(
            inverse_indices=features.inverse_indices_or_none(),
            feature_names=flat_feature_names,
        )
        pooled_embeddings: List[torch.Tensor] = []
        feature_dict = features.to_dict()
        # embedding_bags and _feature_names were built in the same table
        # order, so index i pairs each bag with its feature names.
        for i, embedding_bag in enumerate(self.embedding_bags.values()):
            for feature_name in self._feature_names[i]:
                f = feature_dict[feature_name]
                per_sample_weights: Optional[torch.Tensor] = None
                if self._is_weighted:
                    # FP16 tables need half-precision sample weights.
                    per_sample_weights = (
                        f.weights().half()
                        if self._dtypes[i] == DataType.FP16.value
                        else f.weights()
                    )
                res = embedding_bag(
                    input=f.values(),
                    offsets=f.offsets(),
                    per_sample_weights=(
                        per_sample_weights if self._is_weighted else None
                    ),
                ).float()
                pooled_embeddings.append(res)
        return KeyedTensor(
            keys=self._embedding_names,
            values=process_pooled_embeddings(
                pooled_embeddings=pooled_embeddings,
                inverse_indices=inverse_indices,
            ),
            length_per_key=self._lengths_per_embedding,
        )

    def is_weighted(self) -> bool:
        return self._is_weighted

    def embedding_bag_configs(self) -> List[EmbeddingBagConfig]:
        return self._embedding_bag_configs

    def device(self) -> torch.device:
        return self._device

    def reset_parameters(self) -> None:
        # Meta tensors have no storage, so there is nothing to initialize.
        if (isinstance(self.device, torch.device) and self.device.type == "meta") or (
            isinstance(self.device, str) and self.device == "meta"
        ):
            return
        # Initialize embedding bag weights with each table's init_fn.
        for table_config in self._embedding_bag_configs:
            assert table_config.init_fn is not None
            param = self.embedding_bags[f"{table_config.name}"].weight
            # pyre-ignore
            table_config.init_fn(param)


# NOTE(review): this class also appears earlier in the file (record 8,859);
# the two copies should be kept in sync or deduplicated upstream.
class FusedEmbeddingBagCollection(
    EmbeddingBagCollectionInterface, FusedOptimizerModule
):
    """
    FusedEmbeddingBagCollection represents a collection of pooled embeddings
    (`EmbeddingBags`). It utilizes a technique called Optimizer fusion
    (register the optimizer with the model): during the backwards pass, the
    registered optimizer will be called.

    It processes sparse data in the form of `KeyedJaggedTensor` with values of
    the form [F X B X L] where:

    * F: features (keys)
    * B: batch size
    * L: length of sparse features (jagged)

    and outputs a `KeyedTensor` with values of the form [B x F x D] where:

    * F: features (keys)
    * D: each feature's (key's) embedding dimension
    * B: batch size

    Args:
        tables (List[EmbeddingBagConfig]): list of embedding tables.
        is_weighted (bool): whether input `KeyedJaggedTensor` is weighted.
        optimizer_type (Type[torch.optim.Optimizer]): fusion optimizer type.
        optimizer_kwargs (Dict[str, Any]): fusion optimizer kwargs.
        device (Optional[torch.device]): compute device.
        location (Optional[EmbeddingLocation]): where FBGEMM places the
            embedding weights (host / device / managed).

    Example::

        table_0 = EmbeddingBagConfig(
            name="t1", embedding_dim=4, num_embeddings=10, feature_names=["f1"]
        )
        table_1 = EmbeddingBagConfig(
            name="t2", embedding_dim=8, num_embeddings=10, feature_names=["f2"]
        )

        ebc = FusedEmbeddingBagCollection(
            tables=[table_0, table_1],
            optimizer_type=torch.optim.SGD,
            optimizer_kwargs={"lr": .01},
        )

        #        0       1        2  <-- batch
        # "f1"   [0,1]   None     [2]
        # "f2"   [3]     [4]      [5,6,7]
        #  ^
        # feature

        features = KeyedJaggedTensor(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )

        pooled_embeddings = ebc(features)
        print(pooled_embeddings.values())
        tensor([[ 0.2093,  0.1395,  0.1571,  0.3583,  0.0421,  0.0037, -0.0692,  0.0663,
                  0.2166, -0.3150, -0.2771, -0.0301],
                [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0165, -0.1225,  0.2483,  0.0624,
                 -0.1168, -0.0509, -0.1309,  0.3059],
                [ 0.0811, -0.1779, -0.1443,  0.1097, -0.4410, -0.4036,  0.4458, -0.2735,
                 -0.3080, -0.2102, -0.0564,  0.5583]], grad_fn=<CatBackward0>)
        print(pooled_embeddings.keys())
        ['f1', 'f2']
        print(pooled_embeddings.offset_per_key())
        [0, 4, 12]
    """

    def __init__(
        self,
        tables: List[EmbeddingBagConfig],
        optimizer_type: Type[torch.optim.Optimizer],
        optimizer_kwargs: Dict[str, Any],
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        location: Optional[EmbeddingLocation] = None,
    ) -> None:
        super().__init__()
        self._optimizer_type = optimizer_type
        self._optimizer_kwargs = optimizer_kwargs
        self._device: torch.device = (
            device if device is not None else torch.device("cpu")
        )

        # Translate the torch optimizer into the FBGEMM fused-optimizer
        # equivalent; None means the combination cannot be fused.
        emb_optim_and_kwargs = convert_optimizer_type_and_kwargs(
            optimizer_type, optimizer_kwargs
        )
        if emb_optim_and_kwargs is None:
            raise ValueError(
                f"Cannot fuse optimizer_type={optimizer_type} with kwargs {optimizer_kwargs}"
            )
        (emb_optim_type, emb_opt_kwargs) = emb_optim_and_kwargs

        # Device-resident / UVM-managed placements only make sense on CUDA
        # (or meta for shape inference).
        if location in [
            EmbeddingLocation.DEVICE,
            EmbeddingLocation.MANAGED,
            EmbeddingLocation.MANAGED_CACHING,
        ]:
            assert device is not None and device.type in [
                "cuda",
                "meta",
            ], f"Using location={location} requires device=cuda or meta"

        if device is None:
            device = torch.device("cpu")

        # Default the weight location from the compute device when unset.
        if location is None:
            if device.type in ["cpu", "meta"]:
                location = EmbeddingLocation.HOST
            elif device.type == "cuda":
                location = EmbeddingLocation.DEVICE
            else:
                raise ValueError("EmbeddingLocation could not be set")

        self._is_weighted = is_weighted
        self._embedding_bag_configs = tables

        # Registering in a List instead of ModuleList because we don't want
        # them to be auto-registered. Their states will be modified via
        # self.embedding_bags.
        self._emb_modules: List[nn.Module] = []

        # Tables sharing (pooling, data_type) are batched into one FBGEMM
        # lookup kernel.
        self._key_to_tables: Dict[
            Tuple[PoolingType, DataType], List[EmbeddingBagConfig]
        ] = defaultdict(list)
        self._length_per_key: List[int] = []
        for table in tables:
            self._length_per_key.extend(
                [table.embedding_dim] * len(table.feature_names)
            )
            key = (table.pooling, table.data_type)
            self._key_to_tables[key].append(table)

        optims = []
        # NOTE(review): the loop variable `tables` shadows the `tables`
        # parameter from here on.
        for key, tables in self._key_to_tables.items():
            (pooling, data_type) = key
            emb_module = _BatchedFusedEmbeddingLookups(
                cast(List[BaseEmbeddingConfig], tables),
                data_type=data_type,
                pooling=pooling,
                optimizer_type=emb_optim_type,
                optimizer_kwargs=emb_opt_kwargs,
                device=device,
                embedding_location=location,
            )
            self._emb_modules.append(emb_module)
            # NOTE(review): `params` is populated but never read afterwards —
            # appears vestigial.
            params: Dict[str, torch.Tensor] = {}
            for param_key, weight in emb_module.fused_optimizer().params.items():
                params[f"embedding_bags.{param_key}"] = weight
            optims.append(("", emb_module.fused_optimizer()))

        self._optim: CombinedOptimizer = CombinedOptimizer(optims)
        self._embedding_names: List[str] = list(
            itertools.chain(*get_embedding_names_by_table(self._embedding_bag_configs))
        )

        # We map over the parameters from FBGEMM backed kernels to the
        # canonical nn.EmbeddingBag representation. This provides consistency
        # between this class and the EmbeddingBagCollection's nn.Module API
        # calls (state_dict, named_modules, etc).
        self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
        for (_key, tables), emb_module in zip(
            self._key_to_tables.items(), self._emb_modules
        ):
            for embedding_config, weight in zip(
                tables,
                emb_module.split_embedding_weights(),
            ):
                self.embedding_bags[embedding_config.name] = torch.nn.Module()
                self.embedding_bags[embedding_config.name].register_parameter(
                    "weight", torch.nn.Parameter(weight)
                )

    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """
        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].

        Returns:
            KeyedTensor
        """
        assert features is not None
        feature_dict = features.to_dict()
        embeddings = []

        # _emb_modules and _key_to_tables were built in lockstep in __init__,
        # so zipping them pairs each kernel with its table group.
        for emb_op, (_key, tables) in zip(
            self._emb_modules, self._key_to_tables.items()
        ):
            indicies = []
            lengths = []
            offsets = []
            weights = []
            for table in tables:
                for feature in table.feature_names:
                    f = feature_dict[feature]
                    indicies.append(f.values())
                    lengths.append(f.lengths())
                    if self._is_weighted:
                        weights.append(f.weights())
            indicies = torch.cat(indicies)
            lengths = torch.cat(lengths)
            # FBGEMM expects complete (inclusive of trailing total) offsets.
            offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            if self._is_weighted:
                weights = torch.cat(weights)
            embeddings.append(
                emb_op(
                    indicies.int(),
                    offsets.int(),
                    weights if self._is_weighted else None,
                )
            )

        embeddings = torch.cat(embeddings, dim=1)
        return KeyedTensor(
            keys=self._embedding_names,
            values=embeddings,
            length_per_key=self._length_per_key,
        )

    def _get_name(self) -> str:
        return "FusedEmbeddingBagCollection"

    def device(self) -> torch.device:
        # Compute device recorded at construction time.
        return self._device

    def embedding_bag_configs(self) -> List[EmbeddingBagConfig]:
        return self._embedding_bag_configs

    def is_weighted(self) -> bool:
        return self._is_weighted

    def optimizer_type(self) -> Type[torch.optim.Optimizer]:
        return self._optimizer_type

    def optimizer_kwargs(self) -> Dict[str, Any]:
        return self._optimizer_kwargs

    def fused_optimizer(self) -> KeyedOptimizer:
        # Combined view over every per-kernel fused optimizer.
        return self._optim


def get_ebc_comparison(
    embedding_bag_configs: List[EmbeddingBagConfig],
    device: torch.device,
    epochs: int = 100,
) -> Tuple[float, float, float, float, float]:
    """Benchmark plain EBC vs fused EBC on the same random dataset.

    Returns:
        (ebc_mean, ebc_std, fused_mean, fused_std, speedup) where speedup is
        ebc_mean / fused_mean.
    """
    # Simple EBC module wrapping a list of nn.EmbeddingBag
    ebc = EmbeddingBagCollection(
        tables=embedding_bag_configs,
        device=device,
    )
    optimizer = torch.optim.SGD(ebc.parameters(), lr=0.02)

    # EBC with fused optimizer backed by fbgemm SplitTableBatchedEmbeddingBagsCodegen
    fused_ebc = FusedEmbeddingBagCollection(
        tables=embedding_bag_configs,
        optimizer_type=torch.optim.SGD,
        optimizer_kwargs={"lr": 0.02},
        device=device,
    )

    dataset = ebc_benchmarks_utils.get_random_dataset(
        batch_size=64,
        num_batches=10,
        num_dense_features=1024,
        embedding_bag_configs=embedding_bag_configs,
    )

    ebc_time_avg, ebc_time_std = ebc_benchmarks_utils.train(
        model=ebc,
        optimizer=optimizer,
        dataset=dataset,
        device=device,
        epochs=epochs,
    )
    fused_ebc_time_avg, fused_ebc_time_std = ebc_benchmarks_utils.train(
        model=fused_ebc,
        optimizer=None,  # optimizer is fused into the backward pass
        dataset=dataset,
        device=device,
        epochs=epochs,
    )
    speedup = ebc_time_avg / fused_ebc_time_avg
    return ebc_time_avg, ebc_time_std, fused_ebc_time_avg, fused_ebc_time_std, speedup
null