Dataset schema (one row per column; string and list ranges are lengths, the idx range is values):

  repo              string   7 .. 55
  path              string   4 .. 127
  func_name         string   1 .. 88
  original_string   string   75 .. 19.8k
  language          string   1 distinct value
  code              string   75 .. 19.8k
  code_tokens       list     20 .. 707
  docstring         string   3 .. 17.3k
  docstring_tokens  list     3 .. 222
  sha               string   40 .. 40
  url               string   87 .. 242
  partition         string   1 distinct value
  idx               int64    0 .. 252k
tensorflow/mesh
mesh_tensorflow/transformer/dataset.py
pretokenized_t2t_dataset
def pretokenized_t2t_dataset(dataset_name=gin.REQUIRED,
                             text2self=False,
                             data_dir=gin.REQUIRED,
                             dataset_split="train",
                             batch_size=gin.REQUIRED,
                             sequence_length=gin.REQUIRED,
                             vocabulary=None):
  """Loads the Tensor2tensor dataset specified by dataset_name.

  Args:
    dataset_name: TensorFlow Datasets dataset name.
    text2self: a boolean
    data_dir: string, data_dir for TensorFlow Datasets
    dataset_split: a string - "train" or "dev"
    batch_size: an integer
    sequence_length: an integer
    vocabulary: ignored

  Returns:
    A tf.data.Dataset of batches
  """
  del vocabulary
  filepattern = os.path.join(
      data_dir, dataset_name + "-" + dataset_split + "-*")
  filenames = tf.gfile.Glob(filepattern)
  tf.logging.info("Found %s files matching %s" % (len(filenames), filepattern))
  if not filenames:
    raise ValueError("No matching files found")
  dataset = pretokenized_tfrecord_dataset(
      filenames=filenames,
      text2self=text2self,
      eos_included=True,
      repeat=dataset_split == "train",
      batch_size=batch_size,
      sequence_length=sequence_length)
  if dataset_split == "train":
    dataset = dataset.shuffle(1000)
  return dataset
python
[ "def", "pretokenized_t2t_dataset", "(", "dataset_name", "=", "gin", ".", "REQUIRED", ",", "text2self", "=", "False", ",", "data_dir", "=", "gin", ".", "REQUIRED", ",", "dataset_split", "=", "\"train\"", ",", "batch_size", "=", "gin", ".", "REQUIRED", ",", "s...
Loads the Tensor2tensor dataset specified by dataset_name. Args: dataset_name: TensorFlow Datasets dataset name. text2self: a boolean data_dir: string, data_dir for TensorFlow Datasets dataset_split: a string - "train" or "dev" batch_size: an integer sequence_length: an integer vocabulary: ignored Returns: A tf.data.Dataset of batches
[ "Loads", "the", "Tensor2tensor", "dataset", "specified", "by", "dataset_name", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/dataset.py#L380-L417
train
222,700
tensorflow/mesh
mesh_tensorflow/transformer/dataset.py
pack_dataset
def pack_dataset(dataset, length, keys=None, use_custom_ops=False):
  """Creates a 'packed' version of a dataset on-the-fly.

  Borrowed from the tensor2tensor library.
  TODO(noam): make this faster

  This is meant to replace the irritation of having to create a separate
  "packed" version of a dataset to train efficiently on TPU.

  Each example in the output dataset represents several examples in the
  input dataset.

  For each key in the input dataset, two additional keys are created:
    <key>_segmentation: an int32 tensor identifying the parts
       representing the original example.
    <key>_position: an int32 tensor identifying the position within the
       original example.

  Example:
    Two input examples get combined to form an output example.
    The input examples are:
    {"inputs": [8, 7, 1, 0], "targets":[4, 1, 0]}
    {"inputs": [2, 3, 4, 1], "targets":[5, 6, 1]}
    The output example is:
    {
     "inputs": [8, 7, 1, 2, 3, 4, 1, 0, 0, 0]
     "inputs_segmentation": [1, 1, 1, 2, 2, 2, 2, 0, 0, 0]
     "inputs_position": [0, 1, 2, 0, 1, 2, 3, 0, 0, 0]
     "targets": [4, 1, 5, 6, 1, 0, 0, 0, 0, 0]
     "targets_segmentation": [1, 1, 2, 2, 2, 0, 0, 0, 0, 0]
     "targets_position": [0, 1, 0, 1, 2, 0, 0, 0, 0, 0]
    }

    0 represents padding in both the inputs and the outputs.

    Sequences in the incoming examples are truncated to length "length", and
    the sequences in the output examples all have fixed (padded) length
    "length".

  Args:
    dataset: a tf.data.Dataset
    length: an integer
    keys: a list of strings (e.g. ["inputs", "targets"])
    use_custom_ops: a boolean - custom ops are faster but require a
      custom-built binary, which is not currently possible on cloud-tpu.

  Returns:
    a tf.data.Dataset
  """
  shapes = dataset.output_shapes
  if keys is None:
    keys = shapes.keys()
  for k in keys:
    if k not in shapes:
      raise ValueError("Key %s not found in dataset. Available keys are %s"
                       % (k, shapes.keys()))
    if not shapes[k].is_compatible_with(tf.TensorShape([None])):
      raise ValueError("Tensors to be packed must be one-dimensional.")
  # trim to length
  dataset = dataset.map(lambda x: {k: x[k][:length] for k in keys},
                        num_parallel_calls=tf.data.experimental.AUTOTUNE)
  # Setting batch_size=length ensures that the concatenated sequences (if they
  # have length >=1) are sufficient to fill at least one packed example.
  batch_size = length
  dataset = dataset.padded_batch(
      batch_size, padded_shapes={k: [-1] for k in keys})
  if use_custom_ops and len(keys) <= 2:
    return _pack_with_custom_ops(dataset, keys, length)
  else:
    return _pack_with_tf_ops(dataset, keys, length)
python
[ "def", "pack_dataset", "(", "dataset", ",", "length", ",", "keys", "=", "None", ",", "use_custom_ops", "=", "False", ")", ":", "shapes", "=", "dataset", ".", "output_shapes", "if", "keys", "is", "None", ":", "keys", "=", "shapes", ".", "keys", "(", ")"...
Creates a 'packed' version of a dataset on-the-fly. Borrowed from the tensor2tensor library. TODO(noam): make this faster This is meant to replace the irritation of having to create a separate "packed" version of a dataset to train efficiently on TPU. Each example in the output dataset represents several examples in the input dataset. For each key in the input dataset, two additional keys are created: <key>_segmentation: an int32 tensor identifying the parts representing the original example. <key>_position: an int32 tensor identifying the position within the original example. Example: Two input examples get combined to form an output example. The input examples are: {"inputs": [8, 7, 1, 0], "targets":[4, 1, 0]} {"inputs": [2, 3, 4, 1], "targets":[5, 6, 1]} The output example is: { "inputs": [8, 7, 1, 2, 3, 4, 1, 0, 0, 0] "inputs_segmentation": [1, 1, 1, 2, 2, 2, 2, 0, 0, 0] "inputs_position": [0, 1, 2, 0, 1, 2, 3, 0, 0, 0] "targets": [4, 1, 5, 6, 1, 0, 0, 0, 0, 0] "targets_segmentation": [1, 1, 2, 2, 2, 0, 0, 0, 0, 0] "targets_position": [0, 1, 0, 1, 2, 0, 0, 0, 0, 0] } 0 represents padding in both the inputs and the outputs. Sequences in the incoming examples are truncated to length "length", and the sequences in the output examples all have fixed (padded) length "length". Args: dataset: a tf.data.Dataset length: an integer keys: a list of strings (e.g. ["inputs", "targets"]) use_custom_ops: a boolean - custom ops are faster but require a custom-built binary, which is not currently possible on cloud-tpu. Returns: a tf.data.Dataset
[ "Creates", "a", "packed", "version", "of", "a", "dataset", "on", "-", "the", "-", "fly", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/dataset.py#L421-L490
train
222,701
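The packing transform described above is easy to check outside TensorFlow. Below is a minimal plain-Python sketch (the helper name pack_examples is hypothetical, not part of the library) that reproduces the "inputs" half of the docstring's example, including the _segmentation and _position companions:

import itertools

def pack_examples(seqs, length):
  # Concatenate already-trimmed token lists, recording which original
  # example each slot came from (segmentation, 1-based) and the offset
  # within that example (position).
  packed, seg, pos = [], [], []
  for seg_id, seq in enumerate(seqs, start=1):
    for offset, token in enumerate(seq):
      packed.append(token)
      seg.append(seg_id)
      pos.append(offset)
  pad = length - len(packed)
  # 0 represents padding in all three outputs.
  return {
      "tokens": packed + [0] * pad,
      "segmentation": seg + [0] * pad,
      "position": pos + [0] * pad,
  }

print(pack_examples([[8, 7, 1], [2, 3, 4, 1]], 10))
# {'tokens': [8, 7, 1, 2, 3, 4, 1, 0, 0, 0],
#  'segmentation': [1, 1, 1, 2, 2, 2, 2, 0, 0, 0],
#  'position': [0, 1, 2, 0, 1, 2, 3, 0, 0, 0]}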
tensorflow/mesh
mesh_tensorflow/transformer/dataset.py
trim_and_pad_all_features
def trim_and_pad_all_features(features, length):
  """Trim and pad first dimension of all features to size length."""
  return {k: _trim_and_pad(v, length) for k, v in features.items()}
python
[ "def", "trim_and_pad_all_features", "(", "features", ",", "length", ")", ":", "return", "{", "k", ":", "_trim_and_pad", "(", "v", ",", "length", ")", "for", "k", ",", "v", "in", "features", ".", "items", "(", ")", "}" ]
Trim and pad first dimension of all features to size length.
[ "Trim", "and", "pad", "first", "dimension", "of", "all", "features", "to", "size", "length", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/dataset.py#L667-L669
train
222,702
tensorflow/mesh
mesh_tensorflow/ops.py
convert_to_dimension
def convert_to_dimension(d):
  """Converts input to a Dimension.

  Args:
    d: Dimension, tuple (string, int), or None.

  Returns:
    Dimension or None.

  Raises:
    ValueError: If d cannot be converted to a Dimension.
  """
  if d is None:
    return None
  if isinstance(d, Dimension):
    if not isinstance(d.name, str) or not isinstance(d.size, int):
      raise ValueError("Bad dimension %s" % (d,))
    return d
  name, size = d
  if isinstance(name, str) and isinstance(size, int):
    return Dimension(name, size)
  else:
    raise ValueError("could not convert %s to Dimension" % (d,))
python
[ "def", "convert_to_dimension", "(", "d", ")", ":", "if", "d", "is", "None", ":", "return", "None", "if", "isinstance", "(", "d", ",", "Dimension", ")", ":", "if", "not", "isinstance", "(", "d", ".", "name", ",", "str", ")", "or", "not", "isinstance",...
Converts input to a Dimension. Args: d: Dimension, tuple (string, int), or None. Returns: Dimension or None. Raises: ValueError: If d cannot be converted to a Dimension.
[ "Converts", "input", "to", "a", "Dimension", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L38-L60
train
222,703
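A short usage sketch for convert_to_dimension, assuming the conventional import mesh_tensorflow as mtf alias (the function lives in mesh_tensorflow/ops.py; its availability at the package top level is an assumption here):

import mesh_tensorflow as mtf

d1 = mtf.convert_to_dimension(("batch", 64))              # tuple -> Dimension("batch", 64)
d2 = mtf.convert_to_dimension(mtf.Dimension("heads", 8))  # Dimension passes through unchanged
d3 = mtf.convert_to_dimension(None)                       # None -> None
# mtf.convert_to_dimension(("batch", "big")) would raise ValueError,
# since the size element must be an int.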
tensorflow/mesh
mesh_tensorflow/ops.py
convert_to_shape
def convert_to_shape(x):
  """Converts input to a Shape.

  Args:
    x: Shape, str, or None.

  Returns:
    Shape or None.

  Raises:
    ValueError: If x cannot be converted to a Shape.
  """
  if x is None:
    return None
  if isinstance(x, Shape):
    return x
  if isinstance(x, str):
    x = _parse_string_to_list_of_pairs(x, seconds_to_int=True)
  return Shape(x)
python
[ "def", "convert_to_shape", "(", "x", ")", ":", "if", "x", "is", "None", ":", "return", "None", "if", "isinstance", "(", "x", ",", "Shape", ")", ":", "return", "x", "if", "isinstance", "(", "x", ",", "str", ")", ":", "x", "=", "_parse_string_to_list_o...
Converts input to a Shape. Args: x: Shape, str, or None. Returns: Shape or None. Raises: ValueError: If x cannot be converted to a Shape.
[ "Converts", "input", "to", "a", "Shape", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L182-L200
train
222,704
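Correspondingly, convert_to_shape accepts a Shape, None, or a string that _parse_string_to_list_of_pairs turns into (name, size) pairs. A hedged sketch; the "name:size;name:size" string syntax is an assumption inferred from that helper's name, with the mtf alias as above:

import mesh_tensorflow as mtf

s1 = mtf.convert_to_shape("batch:4;hidden:8")  # assumed string syntax -> Shape of two Dimensions
s2 = mtf.convert_to_shape(s1)                  # an existing Shape passes through
s3 = mtf.convert_to_shape(None)                # None -> None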
tensorflow/mesh
mesh_tensorflow/ops.py
convert_to_layout_rules
def convert_to_layout_rules(x):
  """Converts input to a LayoutRules.

  Args:
    x: LayoutRules, str, or set-like of string pairs.

  Returns:
    LayoutRules.
  """
  if isinstance(x, LayoutRules):
    return x
  if isinstance(x, str):
    x = _parse_string_to_list_of_pairs(x)
  return LayoutRules(x)
python
[ "def", "convert_to_layout_rules", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "LayoutRules", ")", ":", "return", "x", "if", "isinstance", "(", "x", ",", "str", ")", ":", "x", "=", "_parse_string_to_list_of_pairs", "(", "x", ")", "return", "Lay...
Converts input to a LayoutRules. Args: x: LayoutRules, str, or set-like of string pairs. Returns: LayoutRules.
[ "Converts", "input", "to", "a", "LayoutRules", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L271-L284
train
222,705
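Because the docstring explicitly allows a set-like of string pairs, a usage sketch can avoid guessing the string syntax entirely (the pair names below are illustrative, not canonical):

import mesh_tensorflow as mtf

# Map tensor-dimension names to mesh-dimension names.
rules = mtf.convert_to_layout_rules([("batch", "rows"), ("hidden", "columns")])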
tensorflow/mesh
mesh_tensorflow/ops.py
convert_args_to_laid_out_tensors
def convert_args_to_laid_out_tensors(xs):
  """Convert list elements to laid-out-tensors when possible.

  Args:
    xs: a list

  Returns:
    a list
  """
  ret = []
  for x in xs:
    if hasattr(x, "to_laid_out_tensor"):
      ret.append(x.to_laid_out_tensor())
    else:
      ret.append(x)
  return ret
python
[ "def", "convert_args_to_laid_out_tensors", "(", "xs", ")", ":", "ret", "=", "[", "]", "for", "x", "in", "xs", ":", "if", "hasattr", "(", "x", ",", "\"to_laid_out_tensor\"", ")", ":", "ret", ".", "append", "(", "x", ".", "to_laid_out_tensor", "(", ")", ...
Convert list elements to laid-out-tensors when possible. Args: xs: a list Returns: a list
[ "Convert", "list", "elements", "to", "laid", "-", "out", "-", "tensors", "when", "possible", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1254-L1268
train
222,706
tensorflow/mesh
mesh_tensorflow/ops.py
slicewise
def slicewise(tf_fn,
              xs,
              output_shape=None,
              output_dtype=None,
              splittable_dims=None,
              grad_function=None,
              name=None):
  """Slice-wise call to any tensorflow function.

  The output shape and dtype default to those of the first input.
  splittable_dims is a list of Dimensions which can be split while keeping
  the computation valid.

  Args:
    tf_fn: a function taking n tf.Tensors and returning a tf.Tensor
    xs: a list of n Tensors
    output_shape: a Shape (or list of shapes)
    output_dtype: a dtype (or list of dtypes)
    splittable_dims: a list of Dimensions which are ok to split
    grad_function: an optional gradients function. If None, use tf gradient.
    name: an optional string

  Returns:
    a Tensor (or a tuple of Tensors)
  """
  multiple_outputs = isinstance(output_dtype, list)
  output_shapes = output_shape if multiple_outputs else [output_shape]
  output_dtypes = output_dtype if multiple_outputs else [output_dtype]
  op = SlicewiseOperation(
      tf_fn,
      xs,
      [convert_to_shape(shape) or xs[0].shape for shape in output_shapes],
      [dtype or xs[0].dtype for dtype in output_dtypes],
      splittable_dims,
      grad_function,
      name=name)
  return tuple(op.outputs) if multiple_outputs else op.outputs[0]
python
[ "def", "slicewise", "(", "tf_fn", ",", "xs", ",", "output_shape", "=", "None", ",", "output_dtype", "=", "None", ",", "splittable_dims", "=", "None", ",", "grad_function", "=", "None", ",", "name", "=", "None", ")", ":", "multiple_outputs", "=", "isinstanc...
Slice-wise call to any tensorflow function. The output shape and dtype default to those of the first input. splittable_dims is a list of Dimensions which can be split while keeping the computation valid. Args: tf_fn: a function taking n tf.Tensors and returning a tf.Tensor xs: a list of n Tensors output_shape: a Shape (or list of shapes) output_dtype: a dtype (or list of dtypes) splittable_dims: a list of Dimensions which are ok to split grad_function: an optional gradients function. If None, use tf gradient. name: an optional string Returns: a Tensor (or a tuple of Tensors)
[ "Slice", "-", "wise", "call", "to", "any", "tensorflow", "function", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1568-L1605
train
222,707
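A usage sketch for slicewise: since tf.exp acts elementwise on each slice, every dimension can safely be declared splittable. This builds on mtf.Graph/mtf.Mesh and the get_variable shown later in this listing; no device layout is attached to the mesh, so the snippet only constructs the operation graph:

import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf

graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch = mtf.Dimension("batch", 4)
hidden = mtf.Dimension("hidden", 8)
x = mtf.get_variable(mesh, "x", mtf.Shape([batch, hidden]))
# Output shape and dtype default to x's; all dims declared splittable.
y = mtf.slicewise(tf.exp, [x], splittable_dims=x.shape.dims)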
tensorflow/mesh
mesh_tensorflow/ops.py
cwise
def cwise(tf_fn, xs, output_dtype=None, grad_function=None, name=None):
  """Component-wise operation with no broadcasting.

  Args:
    tf_fn: a component-wise function taking n tf.Tensor inputs and producing
      a tf.Tensor output
    xs: n Tensors
    output_dtype: an optional dtype
    grad_function: an optional python function
    name: an optional string

  Returns:
    a Tensor
  """
  return slicewise(
      tf_fn,
      xs,
      output_dtype=output_dtype,
      splittable_dims=xs[0].shape.dims,
      grad_function=grad_function,
      name=name or "cwise")
python
[ "def", "cwise", "(", "tf_fn", ",", "xs", ",", "output_dtype", "=", "None", ",", "grad_function", "=", "None", ",", "name", "=", "None", ")", ":", "return", "slicewise", "(", "tf_fn", ",", "xs", ",", "output_dtype", "=", "output_dtype", ",", "splittable_d...
Component-wise operation with no broadcasting. Args: tf_fn: a component-wise function taking n tf.Tensor inputs and producing a tf.Tensor output xs: n Tensors output_dtype: an optional dtype grad_function: an optional python function name: an optional string Returns: a Tensor
[ "Component", "-", "wise", "operation", "with", "no", "broadcasting", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1608-L1624
train
222,708
tensorflow/mesh
mesh_tensorflow/ops.py
binary_arguments_to_tensors
def binary_arguments_to_tensors(x1, x2):
  """Convert argument of a binary operation to Tensors.

  Args:
    x1: a Tensor or something convertible to a tf Scalar
    x2: a Tensor or something convertible to a tf Scalar

  Returns:
    new_x1: a Tensor
    new_x2: a Tensor

  Raises:
    ValueError: on failure
  """
  if not isinstance(x1, Tensor) and not isinstance(x2, Tensor):
    raise ValueError("at least one of x1 and x2 must be an mtf Tensor")
  elif isinstance(x1, Tensor) and isinstance(x2, Tensor):
    return x1, x2
  elif isinstance(x1, Tensor):
    return x1, import_tf_tensor(
        x1.mesh, tf.convert_to_tensor(x2, dtype=x1.dtype), Shape([]))
  else:
    return import_tf_tensor(x2.mesh, tf.convert_to_tensor(x1, dtype=x2.dtype),
                            Shape([])), x2
python
[ "def", "binary_arguments_to_tensors", "(", "x1", ",", "x2", ")", ":", "if", "not", "isinstance", "(", "x1", ",", "Tensor", ")", "and", "not", "isinstance", "(", "x2", ",", "Tensor", ")", ":", "raise", "ValueError", "(", "\"at least one of x1 and x2 must be an ...
Convert argument of a binary operation to Tensors. Args: x1: a Tensor or something convertible to a tf Scalar x2: a Tensor or something convertible to a tf Scalar Returns: new_x1: a Tensor new_x2: a Tensor Raises: ValueError: on failure
[ "Convert", "argument", "of", "a", "binary", "operation", "to", "Tensors", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1845-L1868
train
222,709
tensorflow/mesh
mesh_tensorflow/ops.py
minimum
def minimum(x1, x2, output_shape=None, name=None):
  """Binary minimum with broadcasting.

  Args:
    x1: a Tensor
    x2: a Tensor
    output_shape: an optional Shape
    name: an optional string

  Returns:
    a Tensor
  """
  output_shape = convert_to_shape(output_shape)
  with tf.name_scope(name, default_name="minimum"):
    x1, x2 = binary_arguments_to_tensors(x1, x2)
    return MinMaxOperation(
        tf.minimum, x1, x2, output_shape=_infer_binary_broadcast_shape(
            x1.shape, x2.shape, output_shape)).outputs[0]
python
[ "def", "minimum", "(", "x1", ",", "x2", ",", "output_shape", "=", "None", ",", "name", "=", "None", ")", ":", "output_shape", "=", "convert_to_shape", "(", "output_shape", ")", "with", "tf", ".", "name_scope", "(", "name", ",", "default_name", "=", "\"mi...
Binary minimum with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
[ "Binary", "minimum", "with", "broadcsting", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1960-L1976
train
222,710
tensorflow/mesh
mesh_tensorflow/ops.py
split
def split(x, split_dim, num_or_size_splits, name=None):
  """Like tf.split.

  Args:
    x: a Tensor
    split_dim: a Dimension in x.shape.dims
    num_or_size_splits: either an integer dividing split_dim.size
      or a list of integers adding up to split_dim.size
    name: an optional string

  Returns:
    a list of Tensors.
  """
  return SplitOperation(x, split_dim, num_or_size_splits, name=name).outputs
python
[ "def", "split", "(", "x", ",", "split_dim", ",", "num_or_size_splits", ",", "name", "=", "None", ")", ":", "return", "SplitOperation", "(", "x", ",", "split_dim", ",", "num_or_size_splits", ",", "name", "=", "name", ")", ".", "outputs" ]
Like tf.split. Args: x: a Tensor split_dim: a Dimension in x.shape.dims num_or_size_splits: either an integer dividing split_dim.size or a list of integers adding up to split_dim.size name: an optional string Returns: a list of Tensors.
[ "Like", "tf", ".", "split", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L2215-L2227
train
222,711
tensorflow/mesh
mesh_tensorflow/ops.py
stack
def stack(xs, dim_name, axis=0, name=None):
  """Stack multiple Tensors to make a new dimension.

  Args:
    xs: a list of Tensors with identical shapes.
    dim_name: a string (name of the new dimension)
    axis: an integer (index of the new dimension in the output shape)
    name: an optional string

  Returns:
    a Tensor
  """
  ret = StackOperation(xs, dim_name, axis, name).outputs[0]
  return ret
python
[ "def", "stack", "(", "xs", ",", "dim_name", ",", "axis", "=", "0", ",", "name", "=", "None", ")", ":", "ret", "=", "StackOperation", "(", "xs", ",", "dim_name", ",", "axis", ",", "name", ")", ".", "outputs", "[", "0", "]", "return", "ret" ]
Stack multiple Tensors to make a new dimension. Args: xs: a list of Tensors with identical shapes. dim_name: a string (name of the new dimension) axis: an integer (index of the new dimension in the output shape) name: an optional string Returns: a Tensor
[ "Stack", "multiple", "Tensors", "to", "make", "a", "new", "dimension", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L2264-L2277
train
222,712
tensorflow/mesh
mesh_tensorflow/ops.py
cumsum
def cumsum(x, dim, exclusive=False):
  """Cumulative sum.

  Args:
    x: a Tensor
    dim: a Dimension
    exclusive: a boolean

  Returns:
    a Tensor with the same shape as x.
  """
  with tf.variable_scope("cumsum"):
    new_name = "tmp_dim_cumsum"
    new_dim = Dimension(new_name, dim.size)
    new_shape = x.shape.rename_dimension(dim.name, new_name)
    comparator = less if exclusive else less_equal
    m = cast(
        comparator(mtf_range(x.mesh, dim, dtype=tf.float32),
                   mtf_range(x.mesh, new_dim, dtype=tf.float32)), x.dtype)
    ret = einsum([x, m], output_shape=new_shape)
    return reshape(ret, x.shape)
python
[ "def", "cumsum", "(", "x", ",", "dim", ",", "exclusive", "=", "False", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"cumsum\"", ")", ":", "new_name", "=", "\"tmp_dim_cumsum\"", "new_dim", "=", "Dimension", "(", "new_name", ",", "dim", ".", "size...
Cumulative sum. Args: x: a Tensor dim: a Dimension exclusive: a boolean Returns: a Tensor with the same shape as x.
[ "Cumulative", "sum", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L2324-L2344
train
222,713
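The implementation above computes a cumulative sum as a matrix product: positions along dim are compared against positions along a renamed copy of the dimension, and the resulting 0/1 mask is contracted with x via einsum. The same idea can be checked in plain NumPy:

import numpy as np

x = np.array([1., 2., 3., 4.])
pos = np.arange(x.shape[0], dtype=np.float32)
# m[i, j] = 1 where i <= j: position i contributes to the cumulative sum at j.
m = (pos[:, None] <= pos[None, :]).astype(x.dtype)
print(x @ m)        # inclusive: [ 1.  3.  6. 10.]
m_excl = (pos[:, None] < pos[None, :]).astype(x.dtype)
print(x @ m_excl)   # exclusive: [0. 1. 3. 6.]

less_equal yields the inclusive sum and less the exclusive one, exactly mirroring the comparator choice in the code.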
tensorflow/mesh
mesh_tensorflow/ops.py
shift
def shift(x, offset, dim, wrap, name=None):
  """Shift operation.

  Shift x right by +offset in dimension dim.

  Args:
    x: a Tensor
    offset: an integer. If negative, shift left instead of right.
    dim: a Dimension of x
    wrap: a boolean - whether to wrap (True) or pad with zeros (False).
    name: an optional string

  Returns:
    a Tensor with the same shape and dtype as x
  """
  return ShiftOperation(x, offset, dim, wrap, name=name).outputs[0]
python
[ "def", "shift", "(", "x", ",", "offset", ",", "dim", ",", "wrap", ",", "name", "=", "None", ")", ":", "return", "ShiftOperation", "(", "x", ",", "offset", ",", "dim", ",", "wrap", ",", "name", "=", "name", ")", ".", "outputs", "[", "0", "]" ]
Shift operation. Shift x right by +offset in dimension dim. Args: x: a Tensor offset: an integer. If negative, shift left instead of right. dim: a Dimension of x wrap: a boolean - whether to wrap (True) or pad with zeros (False). name: an optional string Returns: a Tensor with the same shape and dtype as x
[ "Shift", "operation", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L2755-L2770
train
222,714
tensorflow/mesh
mesh_tensorflow/ops.py
import_laid_out_tensor
def import_laid_out_tensor(mesh, laid_out_tensor, shape, name=None):
  """Import a laid_out_tensor.

  For expert users.
  The input must be laid out appropriately given the eventual MeshImpl,
  and layout.

  Args:
    mesh: a Mesh
    laid_out_tensor: a LaidOutTensor
    shape: a mtf.Shape
    name: an optional string

  Returns:
    a mtf.Tensor
  """
  return ImportLaidOutTensorOperation(
      mesh, laid_out_tensor, convert_to_shape(shape), name=name).outputs[0]
python
[ "def", "import_laid_out_tensor", "(", "mesh", ",", "laid_out_tensor", ",", "shape", ",", "name", "=", "None", ")", ":", "return", "ImportLaidOutTensorOperation", "(", "mesh", ",", "laid_out_tensor", ",", "convert_to_shape", "(", "shape", ")", ",", "name", "=", ...
Import a laid_out_tensor. For expert users. The input must be laid out appropriately given the eventual MeshImpl, and layout. Args: mesh: a Mesh laid_out_tensor: a LaidOutTensor shape: a mtf.Shape name: an optional string Returns: a mtf.Tensor
[ "Import", "a", "laid_out_tensor", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L2972-L2989
train
222,715
tensorflow/mesh
mesh_tensorflow/ops.py
get_variable
def get_variable(mesh, name, shape, dtype=tf.float32,
                 master_dtype=None, slice_dtype=None, activation_dtype=None,
                 initializer=None, trainable=True, **kwargs):
  """Create a new variable or retrieve an already-created one.

  Args:
    mesh: a Mesh
    name: a string (uses the existing tf.variable_scope())
    shape: a Shape
    dtype: a VariableDType or a tf.DType
    master_dtype: an optional tf.DType (deprecated - use dtype arg)
    slice_dtype: an optional tf.DType (deprecated - use dtype arg)
    activation_dtype: an optional tf.DType (deprecated - use dtype arg)
    initializer: an optional tf initializer function
    trainable: a boolean
    **kwargs: additional keyword arguments to tf.get_variable

  Returns:
    a Tensor with the given shape and dtype equal to dtype.activation_dtype
  """
  if dtype is None:
    dtype = VariableDType(master_dtype, slice_dtype, activation_dtype)
  elif isinstance(dtype, tf.DType):
    dtype = VariableDType(
        master_dtype or dtype, slice_dtype or dtype, activation_dtype or dtype)
  elif not isinstance(dtype, VariableDType):
    raise ValueError("dtype should be a tf.dtype or a mtf.VariableDType")
  scope_name = tf.get_variable_scope().name
  if scope_name:
    full_name = scope_name + "/" + name
  else:
    full_name = name
  if full_name in mesh.graph.name_to_variable:
    var = mesh.graph.name_to_variable[full_name]
  else:
    var = Variable(
        mesh, name, convert_to_shape(shape), dtype, initializer, trainable,
        **kwargs)
    if var.name != full_name:
      raise ValueError(
          "Expected var.name == full_name. %s vs %s" % (var.name, full_name))
    mesh.graph.name_to_variable[full_name] = var
  return var.outputs[0]
python
[ "def", "get_variable", "(", "mesh", ",", "name", ",", "shape", ",", "dtype", "=", "tf", ".", "float32", ",", "master_dtype", "=", "None", ",", "slice_dtype", "=", "None", ",", "activation_dtype", "=", "None", ",", "initializer", "=", "None", ",", "traina...
Create a new variable or retrieve an already-created one. Args: mesh: a Mesh name: a string (uses the existing tf.variable_scope()) shape: a Shape dtype: a VariableDType or a tf.DType master_dtype: an optional tf.DType (deprecated - use dtype arg) slice_dtype: an optional tf.DType (deprecated - use dtype arg) activation_dtype: an optional tf.DType (deprecated - use dtype arg) initializer: an optional tf initializer function trainable: a boolean **kwargs: additional keyword arguments to tf.get_variable Returns: a Tensor with the given shape and dtype equal to dtype.activation_dtype
[ "Create", "a", "new", "variable", "or", "retrieve", "an", "already", "-", "created", "one", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3210-L3253
train
222,716
tensorflow/mesh
mesh_tensorflow/ops.py
assign
def assign(var, new_val, assign_fn=assign_slice):
  """Assign a new value to a variable.

  Args:
    var: either a Variable operation or its output Tensor.
    new_val: a Tensor
    assign_fn: a function from
        (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation

  Returns:
    an Operation

  Raises:
    ValueError: if var is not a Variable and var.operation is not a Variable
  """
  if isinstance(var, Tensor):
    var = var.operation
  if not isinstance(var, Variable):
    raise ValueError("var must be a mtf.Variable or its output Tensor.")
  return Assign([var], [new_val], assign_fn=assign_fn)
python
[ "def", "assign", "(", "var", ",", "new_val", ",", "assign_fn", "=", "assign_slice", ")", ":", "if", "isinstance", "(", "var", ",", "Tensor", ")", ":", "var", "=", "var", ".", "operation", "if", "not", "isinstance", "(", "var", ",", "Variable", ")", "...
Assign a new value to a variable. Args: var: either a Variable operation or its output Tensor. new_val: a Tensor assign_fn: a function from (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation Returns: an Operation Raises: ValueError: if var is not a Variable and var.operation is not a Variable
[ "Assign", "a", "new", "value", "to", "a", "variable", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3304-L3321
train
222,717
tensorflow/mesh
mesh_tensorflow/ops.py
Print
def Print(x, data, message, **kwargs):  # pylint: disable=invalid-name
  """Call tf.Print.

  Args:
    x: a Tensor.
    data: a list of Tensor
    message: a string
    **kwargs: keyword arguments to tf.Print

  Returns:
    a Tensor which is identical in value to x
  """
  return PrintOperation(x, data, message, **kwargs).outputs[0]
python
[ "def", "Print", "(", "x", ",", "data", ",", "message", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=invalid-name", "return", "PrintOperation", "(", "x", ",", "data", ",", "message", ",", "*", "*", "kwargs", ")", ".", "outputs", "[", "0", "]" ]
Call tf.Print. Args: x: a Tensor. data: a list of Tensor message: a string **kwargs: keyword arguments to tf.Print Returns: a Tensor which is identical in value to x
[ "Call", "tf", ".", "Print", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3450-L3461
train
222,718
tensorflow/mesh
mesh_tensorflow/ops.py
rename_dimension
def rename_dimension(x, old_name, new_name):
  """Reshape a Tensor, renaming one dimension.

  Args:
    x: a Tensor
    old_name: a string
    new_name: a string

  Returns:
    a Tensor
  """
  return reshape(x, x.shape.rename_dimension(old_name, new_name))
python
[ "def", "rename_dimension", "(", "x", ",", "old_name", ",", "new_name", ")", ":", "return", "reshape", "(", "x", ",", "x", ".", "shape", ".", "rename_dimension", "(", "old_name", ",", "new_name", ")", ")" ]
Reshape a Tensor, renaming one dimension. Args: x: a Tensor old_name: a string new_name: a string Returns: a Tensor
[ "Reshape", "a", "Tensor", "renaming", "one", "dimension", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3576-L3587
train
222,719
tensorflow/mesh
mesh_tensorflow/ops.py
replace_dimensions
def replace_dimensions(tensor_or_shape, old_dim_or_dims, new_dim_or_dims):
  """Replace dimensions in a Tensor or Shape.

  old_dim_or_dims consists of a single dimension or a list of dimensions
  that must occur consecutively in the input shape. They are replaced
  by the dimensions in new_dim_or_dims.

  Args:
    tensor_or_shape: a Tensor or a Shape
    old_dim_or_dims: a Dimension or a list of Dimensions
    new_dim_or_dims: a Dimension or a list of Dimensions

  Returns:
    a new Tensor or a Shape
  """
  if isinstance(tensor_or_shape, Tensor):
    return reshape(tensor_or_shape, replace_dimensions(
        tensor_or_shape.shape, old_dim_or_dims, new_dim_or_dims))
  if not isinstance(tensor_or_shape, Shape):
    raise ValueError(
        "tensor_or_shape must be a Tensor or Shape got %s"
        % (tensor_or_shape,))
  in_dims = tensor_or_shape.dims
  if isinstance(old_dim_or_dims, Dimension):
    old_dim_or_dims = [old_dim_or_dims]
  if isinstance(new_dim_or_dims, Dimension):
    new_dim_or_dims = [new_dim_or_dims]
  if not isinstance(old_dim_or_dims, list) or not old_dim_or_dims:
    raise ValueError(
        "old_dim_or_dims must be a Dimension or a list of Dimension got %s"
        % (old_dim_or_dims,))
  if not isinstance(new_dim_or_dims, list) or not new_dim_or_dims:
    raise ValueError(
        "new_dim_or_dims must be a Dimension or a list of Dimension got %s"
        % (new_dim_or_dims,))
  try:
    positions = [in_dims.index(d) for d in old_dim_or_dims]
    pos = positions[0]
    if positions != list(range(pos, pos + len(positions))):
      raise ValueError()
  except ValueError:
    raise ValueError(
        "old_dim_or_dims must be a subsequence of the input's dimensions"
        " old_dim_or_dims=%s input's dimensions=%s"
        % (old_dim_or_dims, in_dims))
  return Shape(in_dims[:pos] + new_dim_or_dims +
               in_dims[pos + len(old_dim_or_dims):])
python
[ "def", "replace_dimensions", "(", "tensor_or_shape", ",", "old_dim_or_dims", ",", "new_dim_or_dims", ")", ":", "if", "isinstance", "(", "tensor_or_shape", ",", "Tensor", ")", ":", "return", "reshape", "(", "tensor_or_shape", ",", "replace_dimensions", "(", "tensor_o...
Replace dimensions in a Tensor or Shape. old_dim_or_dims consists of a single dimension or a list of dimensions that must occur consecutively in the input shape. They are replaced by the dimensions in new_dim_or_dims. Args: tensor_or_shape: a Tensor or a Shape old_dim_or_dims: a Dimension or a list of Dimensions new_dim_or_dims: a Dimension or a list of Dimensions Returns: a new Tensor or a Shape
[ "Replace", "dimensions", "in", "a", "Tensor", "or", "Shape", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3590-L3634
train
222,720
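A sketch of the Shape case, merging an attention-style (heads, d_kv) pair into one hidden dimension (dimension names are illustrative; assumes the mtf alias as in earlier sketches):

import mesh_tensorflow as mtf

batch = mtf.Dimension("batch", 2)
heads = mtf.Dimension("heads", 4)
d_kv = mtf.Dimension("d_kv", 16)
hidden = mtf.Dimension("hidden", 64)

old = mtf.Shape([batch, heads, d_kv])
# (heads, d_kv) occur consecutively in old, so they can be replaced together.
new = mtf.replace_dimensions(old, [heads, d_kv], hidden)
# new holds the dims [batch=2, hidden=64]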
tensorflow/mesh
mesh_tensorflow/ops.py
einsum
def einsum(xs, output_shape=None, reduced_dims=None, name=None):
  """Einstein summation.

  einsum(xs, output_shape) is equivalent to broadcasting all inputs
  to the union of all of their shapes, multiplying them componentwise,
  and finally reduce_summing down to output_shape.

  One common case of this is matrix multiplication:
      x has shape [a, b]
      y has shape [b, c]
      matmul(x, y) == einsum([x, y], output_shape=[a, c])

  We provide a few options for specifying the output shape:

  If neither output_shape nor reduced_dims is specified, then the output
  shape is set to contain all dimensions that appear exactly once in the
  inputs, in order of appearance.

  If output_shape is not specified, then the output shape is set to contain
  all dimensions that appear in xs but not in reduced_dims, in the order
  that they appear in xs. If reduced_dims is also not specified, then
  reduced_dims is set to the set of all dimensions that appear at least
  twice in xs.

  If both output_shape and reduced_dims are specified, then we check that
  reduced_dims matches the set of dimensions present in xs but not in
  output_shape, and throw an exception if it does not. This helps to reduce
  bugs.

  Args:
    xs: a list of Tensors
    output_shape: an optional Shape.
    reduced_dims: an optional list of Dimensions.
    name: an optional string

  Returns:
    a Tensor

  Raises:
    ValueError: if reduced_dims contradicts output_shape
  """
  output_shape = convert_to_shape(output_shape)
  input_dim_count = collections.defaultdict(int)
  input_dims = []
  for x in xs:
    for d in x.shape.dims:
      if d not in input_dim_count:
        input_dims.append(d)
      input_dim_count[d] += 1
  if output_shape is None:
    if reduced_dims is None:
      reduced_dims = [d for d, c in six.iteritems(input_dim_count) if c > 1]
    output_shape = Shape([d for d in input_dims if d not in reduced_dims])
  elif reduced_dims is not None:
    for d in reduced_dims:
      if not isinstance(d, Dimension):
        raise ValueError("reduced_dims must be a list of Dimensions. Got %s."
                         % (reduced_dims,))
    computed_reduced_dims = [
        d for d in input_dims if d not in output_shape.dims]
    if set(computed_reduced_dims) != set(reduced_dims):
      raise ValueError(
          "Specified reduced_dims and output_shape do not match."
          " xs=%s output_shape=%s reduced_dims=%s " % (
              xs, output_shape, reduced_dims))
  return EinsumOperation(xs, output_shape, name=name).outputs[0]
python
[ "def", "einsum", "(", "xs", ",", "output_shape", "=", "None", ",", "reduced_dims", "=", "None", ",", "name", "=", "None", ")", ":", "output_shape", "=", "convert_to_shape", "(", "output_shape", ")", "input_dim_count", "=", "collections", ".", "defaultdict", ...
Einstein summation. einsum(xs, output_shape) is equivalent to broadcasting all inputs to the union of all of their shapes, multiplying them componentwise, and finally reduce_summing down to output_shape. One common case of this is matrix multiplication: x has shape [a, b] y has shape [b, c] matmul(x, y) == einsum([x, y], output_shape=[a, c]) We provide a few options for specifying the output shape: If neither output_shape nor reduced_dims is specified, then the output shape is set to contain all dimensions that appear exactly once in the inputs, in order of appearance. If output_shape is not specified, then the output shape is set to contain all dimensions that appear in xs but not in reduced_dims, in the order that they appear in xs. If reduced_dims is also not specified, then reduced_dims is set to the set of all dimensions that appear at least twice in xs. If both output_shape and reduced_dims are specified, then we check that reduced_dims matches the set of dimensions present in xs but not in output_shape, and throw an exception if it does not. This helps to reduce bugs. Args: xs: a list of Tensors output_shape: an optional Shape. reduced_dims: an optional list of Dimensions. name: an optional string Returns: a Tensor Raises: ValueError: if reduced_dims contradicts output_shape
[ "Einstein", "summation", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3637-L3700
train
222,721
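The docstring defines einsum as broadcast, multiply, then reduce-sum; the matmul identity it gives can be verified in NumPy:

import numpy as np

a, b, c = 2, 3, 4
x = np.random.rand(a, b)
y = np.random.rand(b, c)
# mtf.einsum([x, y], output_shape=[a, c]) corresponds to reducing the shared
# "b" dimension: broadcast to [a, b, c], multiply componentwise, sum over b.
ref = (x[:, :, None] * y[None, :, :]).sum(axis=1)
assert np.allclose(ref, x @ y)
assert np.allclose(ref, np.einsum("ab,bc->ac", x, y))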
tensorflow/mesh
mesh_tensorflow/ops.py
_reduction_output_shape
def _reduction_output_shape(x, output_shape, reduced_dim):
  """Helper function to reduce_sum, etc."""
  if output_shape is None:
    if reduced_dim is None:
      return Shape([])
    else:
      if reduced_dim not in x.shape.dims:
        raise ValueError(
            "reduced_dim=%s not in x.shape.dims=%s" % (reduced_dim, x.shape))
      return x.shape - reduced_dim
  if reduced_dim is not None:
    if [reduced_dim] != [d for d in x.shape.dims
                         if d not in output_shape.dims]:
      raise ValueError(
          "reduced_dim contradicts output_shape:"
          "x=%s output_shape=%s reduced_dim=%s"
          % (x, output_shape, reduced_dim))
  return output_shape
python
[ "def", "_reduction_output_shape", "(", "x", ",", "output_shape", ",", "reduced_dim", ")", ":", "if", "output_shape", "is", "None", ":", "if", "reduced_dim", "is", "None", ":", "return", "Shape", "(", "[", "]", ")", "else", ":", "if", "reduced_dim", "not", ...
Helper function to reduce_sum, etc.
[ "Helper", "function", "to", "reduce_sum", "etc", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3709-L3725
train
222,722
tensorflow/mesh
mesh_tensorflow/ops.py
top_1
def top_1(x, reduced_dim, dtype=tf.int32, name=None):
  """Argmax and Max.

  Args:
    x: a Tensor
    reduced_dim: a Dimension in x.shape.dims
    dtype: a tf.dtype (for the output)
    name: an optional string

  Returns:
    indices: a Tensor with given dtype
    values: optional Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim)
  """
  reduced_dim = convert_to_dimension(reduced_dim)
  with tf.name_scope(name, default_name="top_1"):
    max_val = reduce_max(x, reduced_dim=reduced_dim)
    is_max = to_float(equal(x, max_val))
    pos = mtf_range(x.mesh, reduced_dim, tf.float32)
    ret = reduce_max(is_max * pos, reduced_dim=reduced_dim)
    ret = cast(ret, dtype)
    return ret, max_val
python
[ "def", "top_1", "(", "x", ",", "reduced_dim", ",", "dtype", "=", "tf", ".", "int32", ",", "name", "=", "None", ")", ":", "reduced_dim", "=", "convert_to_dimension", "(", "reduced_dim", ")", "with", "tf", ".", "name_scope", "(", "name", ",", "default_name...
Argmax and Max. Args: x: a Tensor reduced_dim: a Dimension in x.shape.dims dtype: a tf.dtype (for the output) name: an optional string Returns: indices: a Tensor with given dtype values: optional Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim)
[ "Argmax", "and", "Max", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3875-L3894
train
222,723
tensorflow/mesh
mesh_tensorflow/ops.py
top_k
def top_k(x, reduced_dim, new_dim, dtype=tf.int32, name=None): """Like tf.top_k. This operation returns two tensors with the same shape. The output shape is identical to the shape of x, except that reduced_dim is replaced by new_dim. Args: x: a Tensor reduced_dim: a Dimension in x.shape.dims. new_dim: a Dimension. The size determines k. dtype: optional dtype for indices. name: optional string. Returns: indices: a Tensor with given dtype. values: a Tensor with same type as x. """ reduced_dim = convert_to_dimension(reduced_dim) new_dim = convert_to_dimension(new_dim) indices = [] values = [] k = new_dim.size with tf.name_scope(name, default_name="top_k"): for i in xrange(k): max_index, max_val = top_1(x, reduced_dim, dtype) indices.append(max_index) values.append(max_val) if i + 1 < k: x += one_hot(max_index, reduced_dim, on_value=-1e9, dtype=x.dtype) axis = x.shape.dims.index(reduced_dim) return stack(indices, new_dim.name, axis), stack(values, new_dim.name, axis)
python
def top_k(x, reduced_dim, new_dim, dtype=tf.int32, name=None): """Like tf.top_k. This operation returns two tensors with the same shape. The output shape is identical to the shape of x, except that reduced_dim is replaced by new_dim. Args: x: a Tensor reduced_dim: a Dimension in x.shape.dims. new_dim: a Dimension. The size determines k. dtype: optional dtype for indices. name: optional string. Returns: indices: a Tensor with given dtype. values: a Tensor with same type as x. """ reduced_dim = convert_to_dimension(reduced_dim) new_dim = convert_to_dimension(new_dim) indices = [] values = [] k = new_dim.size with tf.name_scope(name, default_name="top_k"): for i in xrange(k): max_index, max_val = top_1(x, reduced_dim, dtype) indices.append(max_index) values.append(max_val) if i + 1 < k: x += one_hot(max_index, reduced_dim, on_value=-1e9, dtype=x.dtype) axis = x.shape.dims.index(reduced_dim) return stack(indices, new_dim.name, axis), stack(values, new_dim.name, axis)
[ "def", "top_k", "(", "x", ",", "reduced_dim", ",", "new_dim", ",", "dtype", "=", "tf", ".", "int32", ",", "name", "=", "None", ")", ":", "reduced_dim", "=", "convert_to_dimension", "(", "reduced_dim", ")", "new_dim", "=", "convert_to_dimension", "(", "new_...
Like tf.top_k. This operation returns two tensors with the same shape. The output shape is identical to the shape of x, except that reduced_dim is replaced by new_dim. Args: x: a Tensor reduced_dim: a Dimension in x.shape.dims. new_dim: a Dimension. The size determines k. dtype: optional dtype for indices. name: optional string. Returns: indices: a Tensor with given dtype. values: a Tensor with same type as x.
[ "Like", "tf", ".", "top_k", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3902-L3932
train
222,724
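A hedged sketch of top_k (graph construction only; it uses the mtf.random_uniform shown later in this file):

import mesh_tensorflow as mtf

graph = mtf.Graph()
mesh = mtf.Mesh(graph, "example_mesh")
vocab = mtf.Dimension("vocab", 8)
beam = mtf.Dimension("beam", 2)  # k = new_dim.size = 2
logits = mtf.random_uniform(mesh, mtf.Shape([vocab]))
# The reduced "vocab" dimension is replaced by the new "beam" dimension;
# internally this runs k rounds of top_1, masking each found maximum.
indices, values = mtf.top_k(logits, reduced_dim=vocab, new_dim=beam)
assert indices.shape == values.shape == mtf.Shape([beam])

Since the loop costs one top_1 pass per element of new_dim, this suits small k (e.g. beam search).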
tensorflow/mesh
mesh_tensorflow/ops.py
add
def add(x1, x2, output_shape=None, name=None): """Binary addition with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor """ output_shape = convert_to_shape(output_shape) if not isinstance(x2, Tensor): return ScalarAddOperation(x1, x2).outputs[0] with tf.name_scope(name, default_name="add"): x1, x2 = binary_arguments_to_tensors(x1, x2) return AddOperation( x1, x2, output_shape=_infer_binary_broadcast_shape( x1.shape, x2.shape, output_shape)).outputs[0]
python
def add(x1, x2, output_shape=None, name=None): """Binary addition with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor """ output_shape = convert_to_shape(output_shape) if not isinstance(x2, Tensor): return ScalarAddOperation(x1, x2).outputs[0] with tf.name_scope(name, default_name="add"): x1, x2 = binary_arguments_to_tensors(x1, x2) return AddOperation( x1, x2, output_shape=_infer_binary_broadcast_shape( x1.shape, x2.shape, output_shape)).outputs[0]
[ "def", "add", "(", "x1", ",", "x2", ",", "output_shape", "=", "None", ",", "name", "=", "None", ")", ":", "output_shape", "=", "convert_to_shape", "(", "output_shape", ")", "if", "not", "isinstance", "(", "x2", ",", "Tensor", ")", ":", "return", "Scala...
Binary addition with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
[ "Binary", "addition", "with", "broadcsting", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3968-L3986
train
222,725
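A sketch of the broadcasting rule that add (and the sub/multiply/divide wrappers below) rely on; graph construction only, illustrative names:

import mesh_tensorflow as mtf

graph = mtf.Graph()
mesh = mtf.Mesh(graph, "example_mesh")
batch = mtf.Dimension("batch", 2)
d = mtf.Dimension("d", 3)
x = mtf.zeros(mesh, mtf.Shape([batch, d]))
bias = mtf.zeros(mesh, mtf.Shape([d]))
# [d] is a subsequence of [batch, d], so the inferred output shape is the
# supersequence [batch, d]; python scalars take the ScalarAddOperation path.
y = x + bias
z = x + 1.0
assert y.shape == z.shape == mtf.Shape([batch, d])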
tensorflow/mesh
mesh_tensorflow/ops.py
sub
def sub(x1, x2, output_shape=None, name=None): """Binary subtraction with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor """ output_shape = convert_to_shape(output_shape) if not isinstance(x2, Tensor): return ScalarAddOperation(x1, -x2).outputs[0] with tf.name_scope(name, default_name="sub"): x1, x2 = binary_arguments_to_tensors(x1, x2) return add(x1, negative(x2), output_shape=output_shape)
python
def sub(x1, x2, output_shape=None, name=None): """Binary subtraction with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor """ output_shape = convert_to_shape(output_shape) if not isinstance(x2, Tensor): return ScalarAddOperation(x1, -x2).outputs[0] with tf.name_scope(name, default_name="sub"): x1, x2 = binary_arguments_to_tensors(x1, x2) return add(x1, negative(x2), output_shape=output_shape)
[ "def", "sub", "(", "x1", ",", "x2", ",", "output_shape", "=", "None", ",", "name", "=", "None", ")", ":", "output_shape", "=", "convert_to_shape", "(", "output_shape", ")", "if", "not", "isinstance", "(", "x2", ",", "Tensor", ")", ":", "return", "Scala...
Binary subtraction with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
[ "Binary", "subtraction", "with", "broadcsting", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3995-L4011
train
222,726
tensorflow/mesh
mesh_tensorflow/ops.py
multiply
def multiply(x1, x2, output_shape=None, name=None): """Binary multiplication with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor """ if not isinstance(x2, Tensor): return ScalarMultiplyOperation(x1, x2).outputs[0] with tf.name_scope(name, default_name="mul"): x1, x2 = binary_arguments_to_tensors(x1, x2) return einsum( [x1, x2], output_shape=_infer_binary_broadcast_shape( x1.shape, x2.shape, output_shape))
python
def multiply(x1, x2, output_shape=None, name=None): """Binary multiplication with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor """ if not isinstance(x2, Tensor): return ScalarMultiplyOperation(x1, x2).outputs[0] with tf.name_scope(name, default_name="mul"): x1, x2 = binary_arguments_to_tensors(x1, x2) return einsum( [x1, x2], output_shape=_infer_binary_broadcast_shape( x1.shape, x2.shape, output_shape))
[ "def", "multiply", "(", "x1", ",", "x2", ",", "output_shape", "=", "None", ",", "name", "=", "None", ")", ":", "if", "not", "isinstance", "(", "x2", ",", "Tensor", ")", ":", "return", "ScalarMultiplyOperation", "(", "x1", ",", "x2", ")", ".", "output...
Binary multiplication with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
[ "Binary", "multiplication", "with", "broadcasting", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4014-L4032
train
222,727
tensorflow/mesh
mesh_tensorflow/ops.py
divide
def divide(x1, x2, output_shape=None, name=None): """Binary division with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor """ output_shape = convert_to_shape(output_shape) if not isinstance(x2, Tensor): return ScalarMultiplyOperation(x1, 1.0 / x2).outputs[0] with tf.name_scope(name, default_name="divide"): x1, x2 = binary_arguments_to_tensors(x1, x2) return multiply(x1, reciprocal(x2), output_shape=output_shape)
python
def divide(x1, x2, output_shape=None, name=None): """Binary division with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor """ output_shape = convert_to_shape(output_shape) if not isinstance(x2, Tensor): return ScalarMultiplyOperation(x1, 1.0 / x2).outputs[0] with tf.name_scope(name, default_name="divide"): x1, x2 = binary_arguments_to_tensors(x1, x2) return multiply(x1, reciprocal(x2), output_shape=output_shape)
[ "def", "divide", "(", "x1", ",", "x2", ",", "output_shape", "=", "None", ",", "name", "=", "None", ")", ":", "output_shape", "=", "convert_to_shape", "(", "output_shape", ")", "if", "not", "isinstance", "(", "x2", ",", "Tensor", ")", ":", "return", "Sc...
Binary division with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
[ "Binary", "division", "with", "broadcasting", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4035-L4051
train
222,728
tensorflow/mesh
mesh_tensorflow/ops.py
one_hot
def one_hot(indices, output_dim, on_value=1.0, off_value=0.0, dtype=tf.float32, name=None): """One hot operation. TODO(noam): Is there a good reason we need a special mtf.Operation here? We could just use some code like this: cast(equal(indices, mtf_range(indices.mesh, output_dim, dtype=indices.dtype)), dtype) Args: indices: a Tensor output_dim: a Dimension on_value: Value taken when indices are on at a location, default 1 off_value: Value taken when indices are off at a location, default 0 dtype: a tf.DType name: an optional string Returns: a Tensor with shape extended by output_dim for the last axis. """ return OneHotOperation( indices, output_dim, on_value, off_value, dtype, name=name).outputs[0]
python
def one_hot(indices, output_dim, on_value=1.0, off_value=0.0, dtype=tf.float32, name=None): """One hot operation. TODO(noam): Is there a good reason we need a special mtf.Operation here? We could just use some code like this: cast(equal(indices, mtf_range(indices.mesh, output_dim, dtype=indices.dtype)), dtype) Args: indices: a Tensor output_dim: a Dimension on_value: Value taken when indices are on at a location, default 1 off_value: Value taken when indices are off at a location, default 0 dtype: a tf.DType name: an optional string Returns: a Tensor with shape extended by output_dim for the last axis. """ return OneHotOperation( indices, output_dim, on_value, off_value, dtype, name=name).outputs[0]
[ "def", "one_hot", "(", "indices", ",", "output_dim", ",", "on_value", "=", "1.0", ",", "off_value", "=", "0.0", ",", "dtype", "=", "tf", ".", "float32", ",", "name", "=", "None", ")", ":", "return", "OneHotOperation", "(", "indices", ",", "output_dim", ...
One hot operation. TODO(noam): Is there a good reason we need a special mtf.Operation here? We could just use some code like this: cast(equal(indices, mtf_range(indices.mesh, output_dim, dtype=indices.dtype)), dtype) Args: indices: a Tensor output_dim: a Dimension on_value: Value taken when indices are on at a location, default 1 off_value: Value taken when indices are off at a location, default 0 dtype: a tf.DType name: an optional string Returns: a Tensor with shape extended by output_dim for the last axis.
[ "One", "hot", "operation", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4087-L4107
train
222,729
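A sketch (graph construction only; import_tf_tensor is used to obtain an integer Tensor, as in mtf_range later in this file):

import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf

graph = mtf.Graph()
mesh = mtf.Mesh(graph, "example_mesh")
batch = mtf.Dimension("batch", 2)
vocab = mtf.Dimension("vocab", 5)
ids = mtf.import_tf_tensor(mesh, tf.constant([1, 3]), shape=mtf.Shape([batch]))
# output_dim is appended as the last axis: [batch] -> [batch, vocab].
onehot = mtf.one_hot(ids, vocab)
assert onehot.shape == mtf.Shape([batch, vocab])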
tensorflow/mesh
mesh_tensorflow/ops.py
gradients
def gradients(ys, xs, grad_ys=None): """Compute gradients in dtf. Args: ys: a list of Tensors xs: a list of Tensors grad_ys: an optional list of Tensors Returns: grad_xs: a list of Tensors """ graph = ys[0].graph if not grad_ys: grad_ys = [Constant(y.mesh, 1.0, y.shape, y.dtype).outputs[0] for y in ys] # figure out what Tensors are downstream of xs downstream = set(xs) for op in graph.operations: if op.has_gradient: if set(op.inputs) & downstream: downstream |= set(op.outputs) tensor_to_gradient = dict(zip(ys, grad_ys)) for op in graph.operations[::-1]: grad_outputs = [tensor_to_gradient.get(out) for out in op.outputs] if op.has_gradient and any(grad_outputs) and (set(op.inputs) & downstream): with tf.variable_scope(op.name + "/gradients"): input_grads = op.gradient(grad_outputs) for inp, grad in zip(op.inputs, input_grads): if inp in downstream and grad is not None: if inp in tensor_to_gradient: tensor_to_gradient[inp] += grad else: tensor_to_gradient[inp] = grad return [tensor_to_gradient.get(x, None) for x in xs]
python
def gradients(ys, xs, grad_ys=None): """Compute gradients in dtf. Args: ys: a list of Tensors xs: a list of Tensors grad_ys: an optional list of Tensors Returns: grad_xs: a list of Tensors """ graph = ys[0].graph if not grad_ys: grad_ys = [Constant(y.mesh, 1.0, y.shape, y.dtype).outputs[0] for y in ys] # figure out what Tensors are downstream of xs downstream = set(xs) for op in graph.operations: if op.has_gradient: if set(op.inputs) & downstream: downstream |= set(op.outputs) tensor_to_gradient = dict(zip(ys, grad_ys)) for op in graph.operations[::-1]: grad_outputs = [tensor_to_gradient.get(out) for out in op.outputs] if op.has_gradient and any(grad_outputs) and (set(op.inputs) & downstream): with tf.variable_scope(op.name + "/gradients"): input_grads = op.gradient(grad_outputs) for inp, grad in zip(op.inputs, input_grads): if inp in downstream and grad is not None: if inp in tensor_to_gradient: tensor_to_gradient[inp] += grad else: tensor_to_gradient[inp] = grad return [tensor_to_gradient.get(x, None) for x in xs]
[ "def", "gradients", "(", "ys", ",", "xs", ",", "grad_ys", "=", "None", ")", ":", "graph", "=", "ys", "[", "0", "]", ".", "graph", "if", "not", "grad_ys", ":", "grad_ys", "=", "[", "Constant", "(", "y", ".", "mesh", ",", "1.0", ",", "y", ".", ...
Compute gradients in dtf. Args: ys: a list of Tensors xs: a list of Tensors grad_ys: an optional list of Tensors Returns: grad_xs: a list of Tensors
[ "Compute", "gradients", "in", "dtf", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4129-L4161
train
222,730
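A sketch of the reverse-mode pass (graph construction only; variable initialization and lowering are omitted):

import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf

graph = mtf.Graph()
mesh = mtf.Mesh(graph, "example_mesh")
d = mtf.Dimension("d", 4)
x = mtf.get_variable(mesh, "x", mtf.Shape([d]), dtype=tf.float32)
loss = mtf.reduce_sum(x * x)
# graph.operations is walked in reverse, accumulating d(loss)/d(x);
# grad_ys defaults to a constant 1.0 for each y.
[grad_x] = mtf.gradients([loss], [x])
assert grad_x.shape == x.shape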
tensorflow/mesh
mesh_tensorflow/ops.py
_infer_binary_broadcast_shape
def _infer_binary_broadcast_shape(shape1, shape2, given_output_shape=None): """Infer shape of the output of a binary op with broadcasting. If the output shape is not given with given_output_shape, then we check to see if one of the shapes is a subsequence of the other one, and we return the one that is the supersequence. Otherwise, we list the dimensions of shape1, followed by all new dimensions in shape2. Args: shape1: a Shape shape2: a Shape given_output_shape: an optional Shape Returns: a Shape """ shape1 = convert_to_shape(shape1) shape2 = convert_to_shape(shape2) given_output_shape = convert_to_shape(given_output_shape) if given_output_shape is not None: return given_output_shape if is_subsequence(shape1.dims, shape2.dims): return shape2 if is_subsequence(shape2.dims, shape1.dims): return shape1 return Shape( shape1.dims + [d for d in shape2.dims if d not in shape1.dims])
python
def _infer_binary_broadcast_shape(shape1, shape2, given_output_shape=None): """Infer shape of the output of a binary op with broadcasting. If the output shape is not given with given_output_shape, then we check to see if one of the shapes is a subsequence of the other one, and we return the one that is the supersequence. Otherwise, we list the dimensions of shape1, followed by all new dimensions in shape2. Args: shape1: a Shape shape2: a Shape given_output_shape: an optional Shape Returns: a Shape """ shape1 = convert_to_shape(shape1) shape2 = convert_to_shape(shape2) given_output_shape = convert_to_shape(given_output_shape) if given_output_shape is not None: return given_output_shape if is_subsequence(shape1.dims, shape2.dims): return shape2 if is_subsequence(shape2.dims, shape1.dims): return shape1 return Shape( shape1.dims + [d for d in shape2.dims if d not in shape1.dims])
[ "def", "_infer_binary_broadcast_shape", "(", "shape1", ",", "shape2", ",", "given_output_shape", "=", "None", ")", ":", "shape1", "=", "convert_to_shape", "(", "shape1", ")", "shape2", "=", "convert_to_shape", "(", "shape2", ")", "given_output_shape", "=", "conver...
Infer shape of the output of a binary op with broadcasting. If the output shape is not given with given_output_shape, then we check to see if one of the shapes is a subsequence of the other one, and we return the one that is the supersequence. Otherwise, we list the dimensions of shape1, followed by all new dimensions in shape2. Args: shape1: a Shape shape2: a Shape given_output_shape: an optional Shape Returns: a Shape
[ "Infer", "shape", "of", "the", "output", "of", "a", "binary", "op", "with", "broadcasting", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4164-L4189
train
222,731
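The rule is easy to restate over plain lists of dimension names (a hypothetical helper for illustration, not the mtf API):

def infer_broadcast_dims(dims1, dims2):
    def is_subseq(short, long_seq):
        it = iter(long_seq)
        return all(d in it for d in short)  # consumes it left to right
    if is_subseq(dims1, dims2):
        return dims2
    if is_subseq(dims2, dims1):
        return dims1
    return dims1 + [d for d in dims2 if d not in dims1]

assert infer_broadcast_dims(["d"], ["batch", "d"]) == ["batch", "d"]
assert infer_broadcast_dims(["a", "b"], ["b", "c"]) == ["a", "b", "c"]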
tensorflow/mesh
mesh_tensorflow/ops.py
_expand_dims
def _expand_dims(x, input_shape, output_shape): """Expand dimensions and transpose if necessary. Args: x: a tf.Tensor input_shape: a Shape output_shape: a Shape whose dimensions are a superset of those in input_shape Returns: a tf.Tensor """ verify_no_new_dims([output_shape], input_shape) if input_shape == output_shape or input_shape.ndims == 0: return x perm = [input_shape.dims.index(d) for d in output_shape.dims if d in input_shape.dims] x = tf.transpose(x, perm) for i, d in enumerate(output_shape.dims): if d not in input_shape.dims: x = tf.expand_dims(x, i) return x
python
def _expand_dims(x, input_shape, output_shape): """Expand dimensions and transpose if necessary. Args: x: a tf.Tensor input_shape: a Shape output_shape: a Shape whose dimensions are a superset of those in input_shape Returns: a tf.Tensor """ verify_no_new_dims([output_shape], input_shape) if input_shape == output_shape or input_shape.ndims == 0: return x perm = [input_shape.dims.index(d) for d in output_shape.dims if d in input_shape.dims] x = tf.transpose(x, perm) for i, d in enumerate(output_shape.dims): if d not in input_shape.dims: x = tf.expand_dims(x, i) return x
[ "def", "_expand_dims", "(", "x", ",", "input_shape", ",", "output_shape", ")", ":", "verify_no_new_dims", "(", "[", "output_shape", "]", ",", "input_shape", ")", "if", "input_shape", "==", "output_shape", "or", "input_shape", ".", "ndims", "==", "0", ":", "r...
Expand dimensions and transpose if necessary. Args: x: a tf.Tensor input_shape: a Shape output_shape: a Shape whose dimensions are a superset of those in input_shape Returns: a tf.Tensor
[ "Expand", "dimensions", "and", "transpose", "if", "necessary", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4192-L4213
train
222,732
tensorflow/mesh
mesh_tensorflow/ops.py
_einsum_equation
def _einsum_equation(input_shapes, output_shape): """Turn shapes into an einsum equation. e.g. "ij,jk->ik" Args: input_shapes: a list of Shapes output_shape: a Shape Returns: a string """ ret = [] next_letter = ord("a") dim_to_letter = {} for shape_num, shape in enumerate(input_shapes + [output_shape]): if shape_num == len(input_shapes): ret.append("->") elif shape_num > 0: ret.append(",") for d in shape.dims: if d not in dim_to_letter: dim_to_letter[d] = chr(next_letter) next_letter += 1 ret.append(dim_to_letter[d]) return "".join(ret)
python
def _einsum_equation(input_shapes, output_shape): """Turn shapes into an einsum equation. e.g. "ij,jk->ik" Args: input_shapes: a list of Shapes output_shape: a Shape Returns: a string """ ret = [] next_letter = ord("a") dim_to_letter = {} for shape_num, shape in enumerate(input_shapes + [output_shape]): if shape_num == len(input_shapes): ret.append("->") elif shape_num > 0: ret.append(",") for d in shape.dims: if d not in dim_to_letter: dim_to_letter[d] = chr(next_letter) next_letter += 1 ret.append(dim_to_letter[d]) return "".join(ret)
[ "def", "_einsum_equation", "(", "input_shapes", ",", "output_shape", ")", ":", "ret", "=", "[", "]", "next_letter", "=", "ord", "(", "\"a\"", ")", "dim_to_letter", "=", "{", "}", "for", "shape_num", ",", "shape", "in", "enumerate", "(", "input_shapes", "+"...
Turn shapes into an einsum equation. e.g. "ij,jk->ik" Args: input_shapes: a list of Shapes output_shape: a Shape Returns: a string
[ "Turn", "shapes", "into", "an", "einsum", "equation", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4216-L4241
train
222,733
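The same lettering scheme restated over plain name lists (hypothetical helper, runnable standalone):

def einsum_equation(input_dim_lists, output_dims):
    letters = {}  # one letter per distinct dimension, by first appearance
    def sig(dims):
        return "".join(
            letters.setdefault(d, chr(ord("a") + len(letters))) for d in dims)
    lhs = ",".join(sig(dims) for dims in input_dim_lists)
    return lhs + "->" + sig(output_dims)

assert einsum_equation([["rows", "inner"], ["inner", "cols"]],
                       ["rows", "cols"]) == "ab,bc->ac"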
tensorflow/mesh
mesh_tensorflow/ops.py
is_subsequence
def is_subsequence(short_seq, long_seq): """Is short_seq a subsequence of long_seq.""" if not short_seq: return True pos = 0 for x in long_seq: if pos == len(short_seq): return True if short_seq[pos] == x: pos += 1 if pos == len(short_seq): return True return False
python
def is_subsequence(short_seq, long_seq): """Is short_seq a subsequence of long_seq.""" if not short_seq: return True pos = 0 for x in long_seq: if pos == len(short_seq): return True if short_seq[pos] == x: pos += 1 if pos == len(short_seq): return True return False
[ "def", "is_subsequence", "(", "short_seq", ",", "long_seq", ")", ":", "if", "not", "short_seq", ":", "return", "True", "pos", "=", "0", "for", "x", "in", "long_seq", ":", "if", "pos", "==", "len", "(", "short_seq", ")", ":", "return", "True", "if", "...
Is short_seq a subsequence of long_seq.
[ "Is", "short_seq", "a", "subsequence", "of", "long_seq", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4244-L4256
train
222,734
tensorflow/mesh
mesh_tensorflow/ops.py
verify_no_new_dims
def verify_no_new_dims(input_shapes, output_shape): """Verifies that all dimensions in the output are in at least one input. Args: input_shapes: a list of Shapes output_shape: a Shape Raises: ValueError: if there are new dimensions in the output. """ all_input_dims = set(sum([s.dims for s in input_shapes], [])) all_output_dims = set(output_shape.dims) if not all_output_dims.issubset(all_input_dims): raise ValueError( "No new dimensions allowed in output" " input_shapes = %s output_shape= %s" % ([s.dims for s in input_shapes], output_shape.dims))
python
def verify_no_new_dims(input_shapes, output_shape): """Verifies that all dimensions in the output are in at least one input. Args: input_shapes: a list of Shapes output_shape: a Shape Raises: ValueError: if there are new dimensions in the output. """ all_input_dims = set(sum([s.dims for s in input_shapes], [])) all_output_dims = set(output_shape.dims) if not all_output_dims.issubset(all_input_dims): raise ValueError( "No new dimensions allowed in output" " input_shapes = %s output_shape= %s" % ([s.dims for s in input_shapes], output_shape.dims))
[ "def", "verify_no_new_dims", "(", "input_shapes", ",", "output_shape", ")", ":", "all_input_dims", "=", "set", "(", "sum", "(", "[", "s", ".", "dims", "for", "s", "in", "input_shapes", "]", ",", "[", "]", ")", ")", "all_output_dims", "=", "set", "(", "...
Verifies that all dimensions in the output are in at least one input. Args: input_shapes: a list of Shapes output_shape: a Shape Raises: ValueError: if there are new dimensions in the output.
[ "Verifies", "that", "all", "dimensions", "in", "the", "output", "are", "in", "at", "least", "one", "input", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4259-L4274
train
222,735
tensorflow/mesh
mesh_tensorflow/ops.py
pnum_to_processor_coordinates
def pnum_to_processor_coordinates(mesh_shape, pnum): """Coordinates of a processor in the mesh. Args: mesh_shape: a Shape pnum: an integer less than len(mesh_shape) Returns: a list of integers with length len(mesh_shape) """ ret = [] for dimsize in mesh_shape.to_integer_list[::-1]: ret.append(pnum % dimsize) pnum //= dimsize return ret[::-1]
python
def pnum_to_processor_coordinates(mesh_shape, pnum): """Coordinates of a processor in the mesh. Args: mesh_shape: a Shape pnum: an integer less than len(mesh_shape) Returns: a list of integers with length len(mesh_shape) """ ret = [] for dimsize in mesh_shape.to_integer_list[::-1]: ret.append(pnum % dimsize) pnum //= dimsize return ret[::-1]
[ "def", "pnum_to_processor_coordinates", "(", "mesh_shape", ",", "pnum", ")", ":", "ret", "=", "[", "]", "for", "dimsize", "in", "mesh_shape", ".", "to_integer_list", "[", ":", ":", "-", "1", "]", ":", "ret", ".", "append", "(", "pnum", "%", "dimsize", ...
Coordinates of a processor in the mesh. Args: mesh_shape: a Shape pnum: an integer less than len(mesh_shape) Returns: a list of integers with length len(mesh_shape)
[ "Coordinates", "of", "a", "processor", "in", "the", "mesh", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4277-L4291
train
222,736
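The decoding is plain mixed-radix arithmetic, shown here on bare size lists (hypothetical helper, runnable standalone):

def pnum_to_coords(dim_sizes, pnum):
    # The last mesh dimension varies fastest.
    coords = []
    for size in dim_sizes[::-1]:
        coords.append(pnum % size)
        pnum //= size
    return coords[::-1]

assert pnum_to_coords([2, 3, 4], 23) == [1, 2, 3]  # last processor, corner
assert pnum_to_coords([2, 3, 4], 5) == [0, 1, 1]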
tensorflow/mesh
mesh_tensorflow/ops.py
processor_coordinates_to_pnum
def processor_coordinates_to_pnum(mesh_shape, coord): """Inverse of pnum_to_processor_coordinates. Args: mesh_shape: a Shape coord: a list of integers with length len(mesh_shape) Returns: an integer less than len(mesh_shape) """ ret = 0 multiplier = 1 for c, d in zip(coord[::-1], mesh_shape.to_integer_list[::-1]): ret += multiplier * c multiplier *= d return ret
python
def processor_coordinates_to_pnum(mesh_shape, coord): """Inverse of pnum_to_processor_coordinates. Args: mesh_shape: a Shape coord: a list of integers with length len(mesh_shape) Returns: an integer less than len(mesh_shape) """ ret = 0 multiplier = 1 for c, d in zip(coord[::-1], mesh_shape.to_integer_list[::-1]): ret += multiplier * c multiplier *= d return ret
[ "def", "processor_coordinates_to_pnum", "(", "mesh_shape", ",", "coord", ")", ":", "ret", "=", "0", "multiplier", "=", "1", "for", "c", ",", "d", "in", "zip", "(", "coord", "[", ":", ":", "-", "1", "]", ",", "mesh_shape", ".", "to_integer_list", "[", ...
Inverse of pnum_to_processor_coordinates. Args: mesh_shape: a Shape coord: a list of integers with length len(mesh_shape) Returns: an integer less than len(mesh_shape)
[ "Inverse", "of", "pnum_to_processor_coordinates", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4294-L4309
train
222,737
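The matching mixed-radix encode; the round-trip check assumes pnum_to_coords from the previous sketch is in scope:

def coords_to_pnum(dim_sizes, coords):
    pnum, multiplier = 0, 1
    for c, size in zip(coords[::-1], dim_sizes[::-1]):
        pnum += multiplier * c
        multiplier *= size
    return pnum

assert coords_to_pnum([2, 3, 4], [1, 2, 3]) == 23
assert all(coords_to_pnum([2, 3, 4], pnum_to_coords([2, 3, 4], p)) == p
           for p in range(24))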
tensorflow/mesh
mesh_tensorflow/ops.py
pnum_to_group
def pnum_to_group(mesh_shape, group_dims, pnum): """Group number for grouped allreduce. Args: mesh_shape: a Shape group_dims: a list of integers (the dimensions reduced over) pnum: an integer Returns: an integer """ coord = pnum_to_processor_coordinates(mesh_shape, pnum) remaining_shape = Shape( [d for i, d in enumerate(mesh_shape) if i not in group_dims]) remaining_coord = [d for i, d in enumerate(coord) if i not in group_dims] return processor_coordinates_to_pnum(remaining_shape, remaining_coord)
python
def pnum_to_group(mesh_shape, group_dims, pnum): """Group number for grouped allreduce. Args: mesh_shape: a Shape group_dims: a list of integers (the dimensions reduced over) pnum: an integer Returns: an integer """ coord = pnum_to_processor_coordinates(mesh_shape, pnum) remaining_shape = Shape( [d for i, d in enumerate(mesh_shape) if i not in group_dims]) remaining_coord = [d for i, d in enumerate(coord) if i not in group_dims] return processor_coordinates_to_pnum(remaining_shape, remaining_coord)
[ "def", "pnum_to_group", "(", "mesh_shape", ",", "group_dims", ",", "pnum", ")", ":", "coord", "=", "pnum_to_processor_coordinates", "(", "mesh_shape", ",", "pnum", ")", "remaining_shape", "=", "Shape", "(", "[", "d", "for", "i", ",", "d", "in", "enumerate", ...
Group number for grouped allreduce. Args: mesh_shape: a Shape group_dims: a list of integers (the dimensions reduced over) pnum: an integer Returns: an integer
[ "Group", "number", "for", "grouped", "allreduce", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4312-L4327
train
222,738
tensorflow/mesh
mesh_tensorflow/ops.py
processor_groups
def processor_groups(mesh_shape, group_dims): """Groups of processors which differ only in the given dimensions. Args: mesh_shape: a Shape group_dims: a list of integers Returns: a list of lists of integers (processor numbers) """ group_numbers = [ pnum_to_group(mesh_shape, group_dims, pnum) for pnum in xrange(mesh_shape.size)] ret = [] for pnum, g in enumerate(group_numbers): while len(ret) <= g: ret.append([]) ret[g].append(pnum) return ret
python
def processor_groups(mesh_shape, group_dims): """Groups of processors which differ only in the given dimensions. Args: mesh_shape: a Shape group_dims: a list of integers Returns: a list of lists of integers (processor numbers) """ group_numbers = [ pnum_to_group(mesh_shape, group_dims, pnum) for pnum in xrange(mesh_shape.size)] ret = [] for pnum, g in enumerate(group_numbers): while len(ret) <= g: ret.append([]) ret[g].append(pnum) return ret
[ "def", "processor_groups", "(", "mesh_shape", ",", "group_dims", ")", ":", "group_numbers", "=", "[", "pnum_to_group", "(", "mesh_shape", ",", "group_dims", ",", "pnum", ")", "for", "pnum", "in", "xrange", "(", "mesh_shape", ".", "size", ")", "]", "ret", "...
Groups of processors which differ only in the given dimensions. Args: mesh_shape: a Shape group_dims: a list of integers Returns: a list of lists of integers (processor numbers)
[ "Groups", "of", "processors", "which", "differ", "only", "in", "the", "given", "dimensions", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4330-L4348
train
222,739
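The grouping behavior of pnum_to_group / processor_groups can be demonstrated standalone (hypothetical helper; itertools.product enumerates pnums in the same last-axis-fastest order as the encode above):

import itertools

def processor_groups_demo(dim_sizes, group_dims):
    # Processors whose coordinates differ only in group_dims share a group.
    groups = {}
    for pnum, coord in enumerate(
        itertools.product(*[range(s) for s in dim_sizes])):
        key = tuple(c for i, c in enumerate(coord) if i not in group_dims)
        groups.setdefault(key, []).append(pnum)
    return list(groups.values())

# [2, 3] mesh, allreduce over axis 1: two groups of three peers each.
assert processor_groups_demo([2, 3], [1]) == [[0, 1, 2], [3, 4, 5]]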
tensorflow/mesh
mesh_tensorflow/ops.py
mtf_range
def mtf_range(mesh, dim, dtype, name=None): """Create a 1d mesh tensor with a range from [0, dim.size). Call externally as mtf.range() Args: mesh: a Mesh dim: a Dimension dtype: a tf.DType name: an optional string Returns: a Tensor """ dim = convert_to_dimension(dim) with tf.variable_scope(name, default_name="range"): if dtype == tf.bfloat16: # tf.range(dtype=bfloat16) gives the wrong shape. # TODO(noam): report the bug. tf_range = tf.cast(tf.range(dim.size), tf.bfloat16) else: tf_range = tf.range(dim.size, dtype=dtype) return import_tf_tensor(mesh, tf_range, shape=Shape([dim]))
python
def mtf_range(mesh, dim, dtype, name=None): """Create a 1d mesh tensor with a range from [0, dim.size). Call externally as mtf.range() Args: mesh: a Mesh dim: a Dimension dtype: a tf.DType name: an optional string Returns: a Tensor """ dim = convert_to_dimension(dim) with tf.variable_scope(name, default_name="range"): if dtype == tf.bfloat16: # tf.range(dtype=bfloat16) gives the wrong shape. # TODO(noam): report the bug. tf_range = tf.cast(tf.range(dim.size), tf.bfloat16) else: tf_range = tf.range(dim.size, dtype=dtype) return import_tf_tensor(mesh, tf_range, shape=Shape([dim]))
[ "def", "mtf_range", "(", "mesh", ",", "dim", ",", "dtype", ",", "name", "=", "None", ")", ":", "dim", "=", "convert_to_dimension", "(", "dim", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"range\"", ")", ":", "if",...
Create a 1d mesh tensor with a range from [0, dim.size). Call externally as mtf.range() Args: mesh: a Mesh dim: a Dimension dtype: a tf.DType name: an optional string Returns: a Tensor
[ "Create", "a", "1d", "mesh", "tensor", "with", "a", "range", "from", "[", "0", "dim", ".", "size", ")", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4406-L4428
train
222,740
tensorflow/mesh
mesh_tensorflow/ops.py
pretty_print_counters
def pretty_print_counters(counters): """print counters hierarchically. Each counter is a pair of a string and a number. The string can have slashes, meaning that the number also counts towards each prefix. e.g. "parameters/trainable" counts towards both "parameters" and "parameters/trainable". Args: counters: a list of (string, number) pairs Returns: a string """ totals = collections.defaultdict(int) for (name, val) in counters: prefixes = [name[:i] for i in xrange(len(name)) if name[i] == "/"] + [name] for p in prefixes: totals[p] += val parts = [] for name, val in sorted(six.iteritems(totals)): parts.append(" " * name.count("/") + "%s: %.3g" % (name, val)) return "\n".join(parts)
python
def pretty_print_counters(counters): """print counters hierarchically. Each counter is a pair of a string and a number. The string can have slashes, meaning that the number also counts towards each prefix. e.g. "parameters/trainable" counts towards both "parameters" and "parameters/trainable". Args: counters: a list of (string, number) pairs Returns: a string """ totals = collections.defaultdict(int) for (name, val) in counters: prefixes = [name[:i] for i in xrange(len(name)) if name[i] == "/"] + [name] for p in prefixes: totals[p] += val parts = [] for name, val in sorted(six.iteritems(totals)): parts.append(" " * name.count("/") + "%s: %.3g" % (name, val)) return "\n".join(parts)
[ "def", "pretty_print_counters", "(", "counters", ")", ":", "totals", "=", "collections", ".", "defaultdict", "(", "int", ")", "for", "(", "name", ",", "val", ")", "in", "counters", ":", "prefixes", "=", "[", "name", "[", ":", "i", "]", "for", "i", "i...
print counters hierarchically. Each counter is a pair of a string and a number. The string can have slashes, meaning that the number also counts towards each prefix. e.g. "parameters/trainable" counts towards both "parameters" and "parameters/trainable". Args: counters: a list of (string, number) pairs Returns: a string
[ "print", "counters", "hierarchically", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4431-L4453
train
222,741
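A Python 3 restatement of the roll-up (the original uses six/xrange; the indentation width here is illustrative):

import collections

def pretty_print_counters_py3(counters):
    totals = collections.defaultdict(float)
    for name, val in counters:
        # Every name also counts toward each of its "/" prefixes.
        prefixes = [name[:i] for i, ch in enumerate(name) if ch == "/"] + [name]
        for p in prefixes:
            totals[p] += val
    return "\n".join("  " * name.count("/") + "%s: %.3g" % (name, val)
                     for name, val in sorted(totals.items()))

print(pretty_print_counters_py3(
    [("parameters/trainable", 10), ("parameters/frozen", 2)]))
# parameters: 12
#   parameters/frozen: 2
#   parameters/trainable: 10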
tensorflow/mesh
mesh_tensorflow/ops.py
_parse_string_to_list_of_pairs
def _parse_string_to_list_of_pairs(s, seconds_to_int=False): r"""Parses a string into a list of pairs. In the input string, each pair is separated by a colon, and the delimiters between pairs are any of " ,.;". e.g. "rows:32,cols:32" Args: s: str to parse. seconds_to_int: Boolean. If True, then the second elements are returned as integers; otherwise they are strings. Returns: List of tuple pairs. Raises: ValueError: Badly formatted string. """ ret = [] for p in [s.split(":") for s in re.sub("[,.;]", " ", s).split()]: if len(p) != 2: raise ValueError("bad input to _parse_string_to_list_of_pairs %s" % s) if seconds_to_int: ret.append((p[0], int(p[1]))) else: ret.append(tuple(p)) return ret
python
def _parse_string_to_list_of_pairs(s, seconds_to_int=False): r"""Parses a string into a list of pairs. In the input string, each pair is separated by a colon, and the delimiters between pairs are any of " ,.;". e.g. "rows:32,cols:32" Args: s: str to parse. seconds_to_int: Boolean. If True, then the second elements are returned as integers; otherwise they are strings. Returns: List of tuple pairs. Raises: ValueError: Badly formatted string. """ ret = [] for p in [s.split(":") for s in re.sub("[,.;]", " ", s).split()]: if len(p) != 2: raise ValueError("bad input to _parse_string_to_list_of_pairs %s" % s) if seconds_to_int: ret.append((p[0], int(p[1]))) else: ret.append(tuple(p)) return ret
[ "def", "_parse_string_to_list_of_pairs", "(", "s", ",", "seconds_to_int", "=", "False", ")", ":", "ret", "=", "[", "]", "for", "p", "in", "[", "s", ".", "split", "(", "\":\"", ")", "for", "s", "in", "re", ".", "sub", "(", "\"[,.;]\"", ",", "\" \"", ...
r"""Parses a string into a list of pairs. In the input string, each pair is separated by a colon, and the delimiters between pairs are any of " ,.;". e.g. "rows:32,cols:32" Args: s: str to parse. seconds_to_int: Boolean. If True, then the second elements are returned as integers; otherwise they are strings. Returns: List of tuple pairs. Raises: ValueError: Badly formatted string.
[ "r", "Parses", "a", "string", "into", "a", "list", "of", "pairs", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4456-L4483
train
222,742
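A runnable restatement of the parsing rule (hypothetical helper name):

import re

def parse_pairs(s, seconds_to_int=False):
    ret = []
    # Pairs are "key:value"; pair delimiters are any of " ,.;".
    for p in [item.split(":") for item in re.sub("[,.;]", " ", s).split()]:
        if len(p) != 2:
            raise ValueError("bad input: %s" % s)
        ret.append((p[0], int(p[1])) if seconds_to_int else tuple(p))
    return ret

assert parse_pairs("rows:32,cols:32", seconds_to_int=True) == [
    ("rows", 32), ("cols", 32)]
assert parse_pairs("model:x;batch:y") == [("model", "x"), ("batch", "y")]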
tensorflow/mesh
mesh_tensorflow/ops.py
parallel
def parallel(devices, fn, *args, **kwargs): """Call a function once on each device. Args: devices: a list of n devices fn: a function *args: arguments, each of which is a list of length n **kwargs: keyword-args, each of which is a list of length n Returns: a list of length n Raises: ValueError: if the arguments are not all lists of length n """ if not isinstance(devices, list): raise ValueError("devices must be a list") for x in list(args) + list(six.itervalues(kwargs)): if not isinstance(x, list) or len(x) != len(devices): raise ValueError( "Argument not a list with same length as devices " "arg=%s devices=%s" % (x, devices)) ret = [] for i, device in enumerate(devices): with tf.device(device): with tf.variable_scope("parallel_%d" % i): my_args = [x[i] for x in args] my_kwargs = {k: v[i] for k, v in six.iteritems(kwargs)} ret.append(fn(*my_args, **my_kwargs)) return ret
python
def parallel(devices, fn, *args, **kwargs): """Call a function once on each device. Args: devices: a list of n devices fn: a function *args: arguments, each of which is a list of length n **kwargs: keyword-args, each of which is a list of length n Returns: a list of length n Raises: ValueError: if the arguments are not all lists of length n """ if not isinstance(devices, list): raise ValueError("devices must be a list") for x in list(args) + list(six.itervalues(kwargs)): if not isinstance(x, list) or len(x) != len(devices): raise ValueError( "Argument not a list with same length as devices " "arg=%s devices=%s" % (x, devices)) ret = [] for i, device in enumerate(devices): with tf.device(device): with tf.variable_scope("parallel_%d" % i): my_args = [x[i] for x in args] my_kwargs = {k: v[i] for k, v in six.iteritems(kwargs)} ret.append(fn(*my_args, **my_kwargs)) return ret
[ "def", "parallel", "(", "devices", ",", "fn", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "devices", ",", "list", ")", ":", "raise", "ValueError", "(", "\"devices must be a list\"", ")", "for", "x", "in", "list"...
Call a function once on each device. Args: devices: a list of n devices fn: a function *args: arguments, each of which is a list of length n **kwargs: keyword-args, each of which is a list of length n Returns: a list of length n Raises: ValueError: if the arguments are not all lists of length n
[ "Call", "a", "function", "once", "on", "each", "device", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4486-L4513
train
222,743
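A minimal sketch of the fan-out contract (every positional arg is a per-device list; the CPU device strings here are illustrative):

import tensorflow.compat.v1 as tf
import mesh_tensorflow as mtf

devices = ["/cpu:0", "/cpu:0"]
xs = [tf.constant(1.0), tf.constant(2.0)]
# fn is called once per device, under that device and a per-index scope.
ys = mtf.parallel(devices, lambda t: t * 2.0, xs)
assert len(ys) == len(devices)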
tensorflow/mesh
mesh_tensorflow/ops.py
random_uniform
def random_uniform(mesh, shape, **kwargs): """Random uniform. Args: mesh: a Mesh shape: a Shape **kwargs: keyword args for tf.random.uniform, except seed Returns: a Tensor """ shape = convert_to_shape(shape) return RandomOperation(mesh, shape, tf.random.uniform, **kwargs).outputs[0]
python
def random_uniform(mesh, shape, **kwargs): """Random uniform. Args: mesh: a Mesh shape: a Shape **kwargs: keyword args for tf.random.uniform, except seed Returns: a Tensor """ shape = convert_to_shape(shape) return RandomOperation(mesh, shape, tf.random.uniform, **kwargs).outputs[0]
[ "def", "random_uniform", "(", "mesh", ",", "shape", ",", "*", "*", "kwargs", ")", ":", "shape", "=", "convert_to_shape", "(", "shape", ")", "return", "RandomOperation", "(", "mesh", ",", "shape", ",", "tf", ".", "random", ".", "uniform", ",", "*", "*",...
Random uniform. Args: mesh: a Mesh shape: a Shape **kwargs: keyword args for tf.random.uniform, except seed Returns: a Tensor
[ "Random", "uniform", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4611-L4623
train
222,744
tensorflow/mesh
mesh_tensorflow/ops.py
dropout
def dropout(x, keep_prob, noise_shape=None, name=None): """Dropout layer. Args: x: a Tensor keep_prob: a float between 0.0 and 1.0 noise_shape: an optional Shape (a subset of x.shape) name: an optional string Returns: a Tensor """ noise_shape = convert_to_shape(noise_shape) if noise_shape is None: noise_shape = x.shape with tf.variable_scope(name, default_name="dropout"): if keep_prob == 1.0: return x noise = cast(less(random_uniform( x.mesh, noise_shape, dtype=x.dtype), keep_prob), x.dtype) noise /= keep_prob return x * noise
python
def dropout(x, keep_prob, noise_shape=None, name=None): """Dropout layer. Args: x: a Tensor keep_prob: a float between 0.0 and 1.0 noise_shape: an optional Shape (a subset of x.shape) name: an optional string Returns: a Tensor """ noise_shape = convert_to_shape(noise_shape) if noise_shape is None: noise_shape = x.shape with tf.variable_scope(name, default_name="dropout"): if keep_prob == 1.0: return x noise = cast(less(random_uniform( x.mesh, noise_shape, dtype=x.dtype), keep_prob), x.dtype) noise /= keep_prob return x * noise
[ "def", "dropout", "(", "x", ",", "keep_prob", ",", "noise_shape", "=", "None", ",", "name", "=", "None", ")", ":", "noise_shape", "=", "convert_to_shape", "(", "noise_shape", ")", "if", "noise_shape", "is", "None", ":", "noise_shape", "=", "x", ".", "sha...
Dropout layer. Args: x: a Tensor keep_prob: a float between 0.0 and 1.0 noise_shape: an optional Shape (a subset of x.shape) name: an optional string Returns: a Tensor
[ "Dropout", "layer", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4626-L4647
train
222,745
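The same inverted-dropout arithmetic in plain numpy (a sketch of the math, not the mtf op):

import numpy as np

rng = np.random.default_rng(0)
keep_prob = 0.8
x = rng.standard_normal((1000, 64))
# Keep each unit with probability keep_prob and rescale survivors by
# 1 / keep_prob, so that E[y] == x elementwise.
mask = (rng.random(x.shape) < keep_prob) / keep_prob
y = x * mask
assert abs(y.mean() - x.mean()) < 0.05  # agree up to sampling noise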
tensorflow/mesh
mesh_tensorflow/ops.py
_cumprod
def _cumprod(l): """Cumulative product of a list. Args: l: a list of integers Returns: a list with one more element (starting with 1) """ ret = [1] for item in l: ret.append(ret[-1] * item) return ret
python
def _cumprod(l): """Cumulative product of a list. Args: l: a list of integers Returns: a list with one more element (starting with 1) """ ret = [1] for item in l: ret.append(ret[-1] * item) return ret
[ "def", "_cumprod", "(", "l", ")", ":", "ret", "=", "[", "1", "]", "for", "item", "in", "l", ":", "ret", ".", "append", "(", "ret", "[", "-", "1", "]", "*", "item", ")", "return", "ret" ]
Cumulative product of a list. Args: l: a list of integers Returns: a list with one more element (starting with 1)
[ "Cumulative", "product", "of", "a", "list", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4650-L4661
train
222,746
tensorflow/mesh
mesh_tensorflow/ops.py
while_loop
def while_loop(cond_fn, body_fn, inputs, num_loop_vars=None, has_accumulators=False, **kwargs): """While Loop. See comments above for WhileLoopOperation num_loop_vars is a hack for the multi-gpu setup. In this case, loops are generally slow, as all loop variables are placed on device. By setting num_loop_vars=k, then all of the loop variables except for the first k are handled as mtf Variables instead of loop variables, using explicit updates and control dependencies. In this case, we only return the first num_loop_vars outputs. Do not use this option on TPU, since it is unnecessary and also produces incorrect results, since xla does not respect control dependencies. Args: cond_fn: a function from n Tensors to scalar boolean Tensor body_fn: a function from n Tensors to list of n Tensors inputs: a list of n Tensors num_loop_vars: an optional integer. has_accumulators: a boolean **kwargs: additional kwargs passed to tf.while_loop Returns: a list of n Tensors. """ if num_loop_vars is None: return WhileLoopOperation(cond_fn, body_fn, inputs, tf_kwargs=kwargs, has_accumulators=has_accumulators).outputs # Turn all loop vars except for the first ones into non-loop vars. # see comments in docstring. assert num_loop_vars > 0 extra_inputs = inputs[num_loop_vars:] my_vars = [] for i, x in enumerate(extra_inputs): my_vars.append(get_variable( x.mesh, "loop_var_%d" % i, x.shape, initializer=tf.zeros_initializer(), dtype=x.dtype, collections=[tf.GraphKeys.LOCAL_VARIABLES])) my_vars = tuple(my_vars) first_input = depend( inputs[0], [assign(var, x) for var, x in zip(my_vars, extra_inputs)]) inputs = [first_input] + inputs[1:num_loop_vars] def my_cond_fn(*inputs): return cond_fn(*(inputs + my_vars)) def my_body_fn(*inputs): outputs = tuple(body_fn(*(inputs + my_vars))) extra_outputs = outputs[num_loop_vars:] first_output = depend( outputs[0], [assign(var, x) for var, x in zip(my_vars, extra_outputs)]) outputs = (first_output,) + outputs[1:num_loop_vars] return outputs return WhileLoopOperation( my_cond_fn, my_body_fn, inputs, tf_kwargs=kwargs, has_accumulators=has_accumulators).outputs
python
def while_loop(cond_fn, body_fn, inputs, num_loop_vars=None, has_accumulators=False, **kwargs): """While Loop. See comments above for WhileLoopOperation num_loop_vars is a hack for the multi-gpu setup. In this case, loops are generally slow, as all loop variables are placed on device. By setting num_loop_vars=k, then all of the loop variables except for the first k are handled as mtf Variables instead of loop variables, using explicit updates and control dependencies. In this case, we only return the first num_loop_vars outputs. Do not use this option on TPU, since it is unnecessary and also produces incorrect results, since xla does not respect control dependencies. Args: cond_fn: a function from n Tensors to scalar boolean Tensor body_fn: a function from n Tensors to list of n Tensors inputs: a list of n Tensors num_loop_vars: an optional integer. has_accumulators: a boolean **kwargs: additional kwargs passed to tf.while_loop Returns: a list of n Tensors. """ if num_loop_vars is None: return WhileLoopOperation(cond_fn, body_fn, inputs, tf_kwargs=kwargs, has_accumulators=has_accumulators).outputs # Turn all loop vars except for the first ones into non-loop vars. # see comments in docstring. assert num_loop_vars > 0 extra_inputs = inputs[num_loop_vars:] my_vars = [] for i, x in enumerate(extra_inputs): my_vars.append(get_variable( x.mesh, "loop_var_%d" % i, x.shape, initializer=tf.zeros_initializer(), dtype=x.dtype, collections=[tf.GraphKeys.LOCAL_VARIABLES])) my_vars = tuple(my_vars) first_input = depend( inputs[0], [assign(var, x) for var, x in zip(my_vars, extra_inputs)]) inputs = [first_input] + inputs[1:num_loop_vars] def my_cond_fn(*inputs): return cond_fn(*(inputs + my_vars)) def my_body_fn(*inputs): outputs = tuple(body_fn(*(inputs + my_vars))) extra_outputs = outputs[num_loop_vars:] first_output = depend( outputs[0], [assign(var, x) for var, x in zip(my_vars, extra_outputs)]) outputs = (first_output,) + outputs[1:num_loop_vars] return outputs return WhileLoopOperation( my_cond_fn, my_body_fn, inputs, tf_kwargs=kwargs, has_accumulators=has_accumulators).outputs
[ "def", "while_loop", "(", "cond_fn", ",", "body_fn", ",", "inputs", ",", "num_loop_vars", "=", "None", ",", "has_accumulators", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "num_loop_vars", "is", "None", ":", "return", "WhileLoopOperation", "(", ...
While Loop. See comments above for WhileLoopOperation num_loop_vars is a hack for the multi-gpu setup. In this case, loops are generally slow, as all loop variables are placed on device. By setting num_loop_vars=k, then all of the loop variables except for the first k are handled as mtf Variables instead of loop variables, using explicit updates and control dependencies. In this case, we only return the first num_loop_vars outputs. Do not use this option on TPU, since it is unnecessary and also produces incorrect results, since xla does not respect control dependencies. Args: cond_fn: a function from n Tensors to scalar boolean Tensor body_fn: a function from n Tensors to list of n Tensors inputs: a list of n Tensors num_loop_vars: an optional integer. has_accumulators: a boolean **kwargs: additional kwargs passed to tf.while_loop Returns: a list of n Tensors.
[ "While", "Loop", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4855-L4910
train
222,747
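A minimal counting loop on the plain path (num_loop_vars=None, a single WhileLoopOperation; graph construction only):

import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf

graph = mtf.Graph()
mesh = mtf.Mesh(graph, "example_mesh")
i0 = mtf.constant(mesh, 0, shape=mtf.Shape([]), dtype=tf.int32)

def cond_fn(i):
    return mtf.less(i, 10)  # scalar boolean Tensor

def body_fn(i):
    return [i + 1]

[i_final] = mtf.while_loop(cond_fn, body_fn, [i0])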
tensorflow/mesh
mesh_tensorflow/ops.py
_shape_union
def _shape_union(shapes): """A shape containing the union of all dimensions in the input shapes. Args: shapes: a list of Shapes Returns: a Shape """ return Shape(sorted(list(set(sum([s.dims for s in shapes], [])))))
python
def _shape_union(shapes): """A shape containing the union of all dimensions in the input shapes. Args: shapes: a list of Shapes Returns: a Shape """ return Shape(sorted(list(set(sum([s.dims for s in shapes], [])))))
[ "def", "_shape_union", "(", "shapes", ")", ":", "return", "Shape", "(", "sorted", "(", "list", "(", "set", "(", "sum", "(", "[", "s", ".", "dims", "for", "s", "in", "shapes", "]", ",", "[", "]", ")", ")", ")", ")", ")" ]
A shape containing the union of all dimensions in the input shapes. Args: shapes: a list of Shapes Returns: a Shape
[ "A", "shape", "containing", "the", "union", "of", "all", "dimensions", "in", "the", "input", "shapes", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4921-L4930
train
222,748
tensorflow/mesh
mesh_tensorflow/ops.py
_tf_flatten_batch_dims
def _tf_flatten_batch_dims(x, num_nonbatch_dims): """Flatten all but last num_nonbatch_dims into one dimension. Args: x: a tf.Tensor num_nonbatch_dims: an integer Returns: a tf.Tensor with 1 + num_nonbatch_dims dimensions. """ shape = x.shape.as_list() assert None not in shape new_shape = ([list_product(shape[:-num_nonbatch_dims])] + shape[-num_nonbatch_dims:]) if new_shape != shape: x = tf.reshape(x, new_shape) return x
python
def _tf_flatten_batch_dims(x, num_nonbatch_dims): """Flatten all but last num_nonbatch_dims into one dimension. Args: x: a tf.Tensor num_nonbatch_dims: an integer Returns: a tf.Tensor with 1 + num_nonbatch_dims dimensions. """ shape = x.shape.as_list() assert None not in shape new_shape = ([list_product(shape[:-num_nonbatch_dims])] + shape[-num_nonbatch_dims:]) if new_shape != shape: x = tf.reshape(x, new_shape) return x
[ "def", "_tf_flatten_batch_dims", "(", "x", ",", "num_nonbatch_dims", ")", ":", "shape", "=", "x", ".", "shape", ".", "as_list", "(", ")", "assert", "None", "not", "in", "shape", "new_shape", "=", "(", "[", "list_product", "(", "shape", "[", ":", "-", "...
Flatten all but last num_nonbatch_dims into one dimension. Args: x: a tf.Tensor: num_nonbatch_dims: an integer Returns: a tf.Tensor with 1 + num_nonbatch_dims dimensions.
[ "Flatten", "all", "but", "last", "num_nonbatch_dims", "into", "one", "dimension", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4933-L4949
train
222,749
tensorflow/mesh
mesh_tensorflow/ops.py
_tf_restore_batch_dims
def _tf_restore_batch_dims(x, num_nonbatch_dims, prototype): """Reverse op of _tf_flatten_batch_dims. Un-flatten the first dimension of x to match all but the last num_nonbatch_dims dimensions of prototype. Args: x: a tf.Tensor with 1 + num_nonbatch_dims dimensions num_nonbatch_dims: an integer prototype: a tf.Tensor Returns: a tf.Tensor """ assert x.shape.ndims == 1 + num_nonbatch_dims new_shape = ( prototype.shape.as_list()[:-num_nonbatch_dims] + x.shape.as_list()[1:]) assert None not in new_shape if new_shape != x.shape.as_list(): x = tf.reshape(x, new_shape) return x
python
def _tf_restore_batch_dims(x, num_nonbatch_dims, prototype): """Reverse op of _tf_flatten_batch_dims. Un-flatten the first dimension of x to match all but the last num_nonbatch_dims dimensions of prototype. Args: x: a tf.Tensor with 1 + num_nonbatch_dims dimensions num_nonbatch_dims: an integer prototype: a tf.Tensor Returns: a tf.Tensor """ assert x.shape.ndims == 1 + num_nonbatch_dims new_shape = ( prototype.shape.as_list()[:-num_nonbatch_dims] + x.shape.as_list()[1:]) assert None not in new_shape if new_shape != x.shape.as_list(): x = tf.reshape(x, new_shape) return x
[ "def", "_tf_restore_batch_dims", "(", "x", ",", "num_nonbatch_dims", ",", "prototype", ")", ":", "assert", "x", ".", "shape", ".", "ndims", "==", "1", "+", "num_nonbatch_dims", "new_shape", "=", "(", "prototype", ".", "shape", ".", "as_list", "(", ")", "["...
Reverse op of _tf_flatten_batch_dims. Un-flatten the first dimension of x to match all but the last num_nonbatch_dims dimensions of prototype. Args: x: a tf.Tensor with 1 + num_nonbatch_dims dimensions num_nonbatch_dims: an integer prototype: a tf.Tensor Returns: a tf.Tensor
[ "Reverse", "op", "of", "_tf_flatten_batch_dims", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4952-L4972
train
222,750
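The flatten/restore pair is ordinary reshape bookkeeping; a plain-TF round trip (shape values illustrative):

import tensorflow.compat.v1 as tf

x = tf.zeros([2, 3, 5, 7])
# Flatten all but the last two (non-batch) dims: [2, 3, 5, 7] -> [6, 5, 7].
flat = tf.reshape(x, [2 * 3, 5, 7])
# Restore by borrowing the leading dims from the prototype x.
restored = tf.reshape(flat, x.shape.as_list()[:-2] + flat.shape.as_list()[1:])
assert restored.shape.as_list() == [2, 3, 5, 7]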
tensorflow/mesh
mesh_tensorflow/ops.py
halo_exchange
def halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False): """Concat each block with the margins of adjacent blocks. Get left and right blocks_dim and concatenate along block_size_dim. Args: x: a Tensor. blocks_dim: a Dimension in x.shape block_size_dim: a Dimension in x.shape halo_size: an integer wrap: a boolean Returns: a Tensor with the same shape as x, other than in block_size_dim, whose size is increased by 2*halo_size. """ if halo_size == 0: return x block_size = block_size_dim.size partial_size = halo_size % block_size num_complete_blocks = halo_size // block_size parts = [x] for i in xrange(1, num_complete_blocks + 1): parts = ([shift(x, i, blocks_dim, wrap)] + parts + [shift(x, -i, blocks_dim, wrap)]) if partial_size > 0: left_margin = mtf_slice(x, 0, partial_size, block_size_dim.name) right_margin = mtf_slice( x, block_size_dim.size - partial_size, partial_size, block_size_dim.name) parts = ( [shift(right_margin, num_complete_blocks + 1, blocks_dim, wrap)] + parts + [shift(left_margin, -(num_complete_blocks + 1), blocks_dim, wrap)]) return concat(parts, block_size_dim.name)
python
def halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False): """Concat each block with the margins of adjacent blocks. Get left and right blocks_dim and concatenate along block_size_dim. Args: x: a Tensor. blocks_dim: a Dimension in x.shape block_size_dim: a Dimension in x.shape halo_size: an integer wrap: a boolean Returns: a Tensor with the same shape as x, other than in block_size_dim, whose size is increased by 2*halo_size. """ if halo_size == 0: return x block_size = block_size_dim.size partial_size = halo_size % block_size num_complete_blocks = halo_size // block_size parts = [x] for i in xrange(1, num_complete_blocks + 1): parts = ([shift(x, i, blocks_dim, wrap)] + parts + [shift(x, -i, blocks_dim, wrap)]) if partial_size > 0: left_margin = mtf_slice(x, 0, partial_size, block_size_dim.name) right_margin = mtf_slice( x, block_size_dim.size - partial_size, partial_size, block_size_dim.name) parts = ( [shift(right_margin, num_complete_blocks + 1, blocks_dim, wrap)] + parts + [shift(left_margin, -(num_complete_blocks + 1), blocks_dim, wrap)]) return concat(parts, block_size_dim.name)
[ "def", "halo_exchange", "(", "x", ",", "blocks_dim", ",", "block_size_dim", ",", "halo_size", ",", "wrap", "=", "False", ")", ":", "if", "halo_size", "==", "0", ":", "return", "x", "block_size", "=", "block_size_dim", ".", "size", "partial_size", "=", "hal...
Concat each block with the margins of adjacent blocks.

Fetch the margins of the left and right neighboring blocks along
blocks_dim and concatenate them onto each block along block_size_dim.

Args:
  x: a Tensor.
  blocks_dim: a Dimension in x.shape
  block_size_dim: a Dimension in x.shape
  halo_size: an integer
  wrap: a boolean

Returns:
  a Tensor with the same shape as x, other than in block_size_dim, whose
  size is increased by 2*halo_size.
[ "Concat", "each", "block", "with", "the", "margins", "of", "adjacent", "blocks", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4975-L5011
train
222,751
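A minimal numpy sketch of the halo-exchange idea, assuming halo_size does not exceed the block size (the partial-block case above handles larger halos); names are illustrative, not the library's API:

import numpy as np

def halo_exchange_1d(blocks, halo_size, wrap=False):
  # blocks is [num_blocks, block_size]; grow each block by halo_size
  # entries taken from its left and right neighbors.
  left = np.roll(blocks, 1, axis=0)[:, -halo_size:]   # right margin of left neighbor
  right = np.roll(blocks, -1, axis=0)[:, :halo_size]  # left margin of right neighbor
  if not wrap:
    left[0] = 0    # no neighbor past the boundary: pad with zeros
    right[-1] = 0
  return np.concatenate([left, blocks, right], axis=1)

x = np.arange(12).reshape(3, 4)
print(halo_exchange_1d(x, 1).shape)  # (3, 6): block size grew by 2 * halo_size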
tensorflow/mesh
mesh_tensorflow/ops.py
conv2d_with_blocks
def conv2d_with_blocks(
    conv_input, conv_filter, strides, padding, h_blocks_dim=None,
    w_blocks_dim=None, name=None):
  """conv2d operation with spatial partitioning.

  Spatial partitioning is implemented by decomposing the image into blocks.
  Block dimensions represented as h_blocks_dim and w_blocks_dim can be split
  along the mesh axis. If split, then we do a halo exchange where each block
  receives the part of the image from its left and right neighbors necessary
  to do the convolution. Exchange can involve complete or partial blocks
  depending on the filter height and width.

  Currently, only "SAME" padding with dilation rate of 1 is supported.

  Args:
    conv_input: a Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channels_dim]
    conv_filter: a Tensor of shape
      [filter_height, filter_width, in_channels_dim, out_channels_dim]
    strides: A list of ints. 1-D tensor of length 4.
    padding: string, "SAME". The type of padding algorithm to use.
      "VALID" is not currently supported.
    h_blocks_dim: Dimension representing number of height blocks.
    w_blocks_dim: Dimension representing number of width blocks.
    name: A name for the operation (optional).

  Returns:
    A Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim]
  """
  filter_h_dim, filter_w_dim = conv_filter.shape.dims[:2]
  assert filter_h_dim.size % 2 == 1
  assert filter_w_dim.size % 2 == 1
  h_dim, w_dim = conv_input.shape.dims[-3:-1]

  # If neither h_blocks_dim nor w_blocks_dim is split, directly call conv2d.
  if h_blocks_dim is None and w_blocks_dim is None:
    return conv2d(conv_input, conv_filter, strides, padding, name)

  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    raise NotImplementedError("conv2d_with_blocks requires padding=SAME")

  # Halo exchange for h_blocks and w_blocks.
  for blocks_dim, block_size_dim, halo_size in [
      (h_blocks_dim, h_dim, filter_h_dim.size // 2),
      (w_blocks_dim, w_dim, filter_w_dim.size // 2)]:
    if halo_size > 0:
      if blocks_dim is not None:
        conv_input = halo_exchange(
            conv_input, blocks_dim, block_size_dim, halo_size)
      else:
        conv_input = pad(
            conv_input, [halo_size, halo_size], block_size_dim.name)
  return conv2d(conv_input, conv_filter, strides, "VALID", name)
python
def conv2d_with_blocks(
    conv_input, conv_filter, strides, padding, h_blocks_dim=None,
    w_blocks_dim=None, name=None):
  """conv2d operation with spatial partitioning.

  Spatial partitioning is implemented by decomposing the image into blocks.
  Block dimensions represented as h_blocks_dim and w_blocks_dim can be split
  along the mesh axis. If split, then we do a halo exchange where each block
  receives the part of the image from its left and right neighbors necessary
  to do the convolution. Exchange can involve complete or partial blocks
  depending on the filter height and width.

  Currently, only "SAME" padding with dilation rate of 1 is supported.

  Args:
    conv_input: a Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channels_dim]
    conv_filter: a Tensor of shape
      [filter_height, filter_width, in_channels_dim, out_channels_dim]
    strides: A list of ints. 1-D tensor of length 4.
    padding: string, "SAME". The type of padding algorithm to use.
      "VALID" is not currently supported.
    h_blocks_dim: Dimension representing number of height blocks.
    w_blocks_dim: Dimension representing number of width blocks.
    name: A name for the operation (optional).

  Returns:
    A Tensor of shape
      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim]
  """
  filter_h_dim, filter_w_dim = conv_filter.shape.dims[:2]
  assert filter_h_dim.size % 2 == 1
  assert filter_w_dim.size % 2 == 1
  h_dim, w_dim = conv_input.shape.dims[-3:-1]

  # If neither h_blocks_dim nor w_blocks_dim is split, directly call conv2d.
  if h_blocks_dim is None and w_blocks_dim is None:
    return conv2d(conv_input, conv_filter, strides, padding, name)

  # Padding 'VALID' is not supported yet.
  if padding != "SAME":
    raise NotImplementedError("conv2d_with_blocks requires padding=SAME")

  # Halo exchange for h_blocks and w_blocks.
  for blocks_dim, block_size_dim, halo_size in [
      (h_blocks_dim, h_dim, filter_h_dim.size // 2),
      (w_blocks_dim, w_dim, filter_w_dim.size // 2)]:
    if halo_size > 0:
      if blocks_dim is not None:
        conv_input = halo_exchange(
            conv_input, blocks_dim, block_size_dim, halo_size)
      else:
        conv_input = pad(
            conv_input, [halo_size, halo_size], block_size_dim.name)
  return conv2d(conv_input, conv_filter, strides, "VALID", name)
[ "def", "conv2d_with_blocks", "(", "conv_input", ",", "conv_filter", ",", "strides", ",", "padding", ",", "h_blocks_dim", "=", "None", ",", "w_blocks_dim", "=", "None", ",", "name", "=", "None", ")", ":", "filter_h_dim", ",", "filter_w_dim", "=", "conv_filter",...
conv2d operation with spatial partitioning.

Spatial partitioning is implemented by decomposing the image into blocks.
Block dimensions represented as h_blocks_dim and w_blocks_dim can be split
along the mesh axis. If split, then we do a halo exchange where each block
receives the part of the image from its left and right neighbors necessary
to do the convolution. Exchange can involve complete or partial blocks
depending on the filter height and width.

Currently, only "SAME" padding with dilation rate of 1 is supported.

Args:
  conv_input: a Tensor of shape
    [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channels_dim]
  conv_filter: a Tensor of shape
    [filter_height, filter_width, in_channels_dim, out_channels_dim]
  strides: A list of ints. 1-D tensor of length 4.
  padding: string, "SAME". The type of padding algorithm to use.
    "VALID" is not currently supported.
  h_blocks_dim: Dimension representing number of height blocks.
  w_blocks_dim: Dimension representing number of width blocks.
  name: A name for the operation (optional).

Returns:
  A Tensor of shape
    [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim]
[ "conv2d", "operation", "with", "spatial", "partitioning", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L5049-L5108
train
222,752
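The halo arithmetic can be checked directly; a small sanity check in plain Python (values are illustrative):

# For an odd filter of size k, a halo of k // 2 on each side followed by a
# "VALID" convolution reproduces the "SAME" output size:
k = 5
h = 32                       # block height
halo = k // 2                # 2 rows from each neighbor
h_padded = h + 2 * halo      # 36
h_out = h_padded - (k - 1)   # VALID conv: 36 - 4 = 32 == h
assert h_out == h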
tensorflow/mesh
mesh_tensorflow/ops.py
tensor_dim_to_mesh_dim_size
def tensor_dim_to_mesh_dim_size(layout, mesh_shape, tensor_dim): """How many ways does a tensor dimension get split. This is used to "cheat" when building the mtf graph and peek at how a tensor dimension will be split. Returns 1 if the tensor dimension is not split. Args: layout: an input to convert_to_layout_rules mesh_shape: an input to convert_to_shape tensor_dim: a Dimension Returns: an integer """ layout_rules = convert_to_layout_rules(layout) mesh_shape = convert_to_shape(mesh_shape) mesh_axis = layout_rules.tensor_dimension_to_mesh_axis(tensor_dim, mesh_shape) if mesh_axis is None: return 1 else: return mesh_shape.dims[mesh_axis].size
python
def tensor_dim_to_mesh_dim_size(layout, mesh_shape, tensor_dim): """How many ways does a tensor dimension get split. This is used to "cheat" when building the mtf graph and peek at how a tensor dimension will be split. Returns 1 if the tensor dimension is not split. Args: layout: an input to convert_to_layout_rules mesh_shape: an input to convert_to_shape tensor_dim: a Dimension Returns: an integer """ layout_rules = convert_to_layout_rules(layout) mesh_shape = convert_to_shape(mesh_shape) mesh_axis = layout_rules.tensor_dimension_to_mesh_axis(tensor_dim, mesh_shape) if mesh_axis is None: return 1 else: return mesh_shape.dims[mesh_axis].size
[ "def", "tensor_dim_to_mesh_dim_size", "(", "layout", ",", "mesh_shape", ",", "tensor_dim", ")", ":", "layout_rules", "=", "convert_to_layout_rules", "(", "layout", ")", "mesh_shape", "=", "convert_to_shape", "(", "mesh_shape", ")", "mesh_axis", "=", "layout_rules", ...
How many ways does a tensor dimension get split. This is used to "cheat" when building the mtf graph and peek at how a tensor dimension will be split. Returns 1 if the tensor dimension is not split. Args: layout: an input to convert_to_layout_rules mesh_shape: an input to convert_to_shape tensor_dim: a Dimension Returns: an integer
[ "How", "many", "ways", "does", "a", "tensor", "dimension", "get", "split", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L5111-L5132
train
222,753
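A usage sketch, assuming the usual "name:size" and "tensor_dim:mesh_dim" string formats accepted by convert_to_shape / convert_to_layout_rules, and that the helper is exported at the package top level:

import mesh_tensorflow as mtf

mesh_shape = "x:4;y:2"            # 8 processors arranged as a 4 x 2 mesh
layout = "batch:x;d_model:y"      # tensor dim -> mesh axis assignments

batch = mtf.Dimension("batch", 1024)
d_model = mtf.Dimension("d_model", 512)
length = mtf.Dimension("length", 128)

print(mtf.tensor_dim_to_mesh_dim_size(layout, mesh_shape, batch))    # 4
print(mtf.tensor_dim_to_mesh_dim_size(layout, mesh_shape, d_model))  # 2
print(mtf.tensor_dim_to_mesh_dim_size(layout, mesh_shape, length))   # 1 (unsplit)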
tensorflow/mesh
mesh_tensorflow/ops.py
serialize_training_step
def serialize_training_step(features, model_fn, batch_dim, num_splits): """Break the training batch into multiple microbatches. Returns two structures: grads - a list of Tensors corresponding to the gradients on graph.trainable_variables. These are summed across all microbatches outputs - a dictionary of Tensors corresponding to the output dictionary of model_fn. Each value is either summed across all microbatches (if it has no batch-dimension), or concatenated across all microbatches to represent the original batch (if it does have a batch-dimension). Args: features: a dictionary of Tensors, each with a batch_dim dimension model_fn: a function from feature dictionary to output dictionary output_dictionary must contain "loss" batch_dim: a Dimension num_splits: an integer dividing batch_dim.size Returns: grads: a list of Tensors corresponding to the gradients on graph.trainable_variables outputs: dictionary of output Tensors summed across microbatches """ for v in features.values(): mesh = v.mesh graph = v.graph microbatch_dim = Dimension("microbatch", num_splits) smaller_batch_dim = Dimension(batch_dim.name, batch_dim.size // num_splits) cache = {} def select(t, microbatch_num): return gather( replace_dimensions(t, batch_dim, [smaller_batch_dim, microbatch_dim]), microbatch_num, microbatch_dim) def cond_fn(microbatch_num): return less(microbatch_num, num_splits) def body_fn(microbatch_num): """Body function for mtf.while_loop. Args: microbatch_num: a mtf Scalar Returns: a list of mtf Tensors """ my_features = {} for k, v in six.iteritems(features): my_features[k] = select(v, microbatch_num) outputs = model_fn(my_features) grads = gradients( [outputs["loss"]], [v.outputs[0] for v in graph.trainable_variables]) output_keys = outputs.keys() cache["output_keys"] = output_keys ret = [] ret.append(microbatch_num + 1) # The rest of the returned values are "accumulators" that get summed # across all microbatches. for t in outputs.values(): if smaller_batch_dim in t.shape: # The output contains a batch dimension, so we want to concatenate # across microbatches. # Here we pad the tensor for each microbatch - summing will complete # the concatenation. t = einsum( [t, one_hot(microbatch_num, microbatch_dim, dtype=t.dtype)], output_shape=replace_dimensions( t.shape, smaller_batch_dim, [smaller_batch_dim, microbatch_dim])) t = replace_dimensions( t, [smaller_batch_dim, microbatch_dim], batch_dim) ret.append(t) else: # There is no batch dimension. Sum across all microbatches. ret.append(t) # we also want to sum the gradients. ret.extend(grads) return ret while_out = while_loop( cond_fn, body_fn, [constant(mesh, 0, dtype=tf.int32)], has_accumulators=True) num_outputs = len(cache["output_keys"]) combined_outputs = {} for k, v in zip(cache["output_keys"], while_out[1:1 + num_outputs]): combined_outputs[k] = v combined_grads = while_out[1 + num_outputs:] return combined_grads, combined_outputs
python
def serialize_training_step(features, model_fn, batch_dim, num_splits): """Break the training batch into multiple microbatches. Returns two structures: grads - a list of Tensors corresponding to the gradients on graph.trainable_variables. These are summed across all microbatches outputs - a dictionary of Tensors corresponding to the output dictionary of model_fn. Each value is either summed across all microbatches (if it has no batch-dimension), or concatenated across all microbatches to represent the original batch (if it does have a batch-dimension). Args: features: a dictionary of Tensors, each with a batch_dim dimension model_fn: a function from feature dictionary to output dictionary output_dictionary must contain "loss" batch_dim: a Dimension num_splits: an integer dividing batch_dim.size Returns: grads: a list of Tensors corresponding to the gradients on graph.trainable_variables outputs: dictionary of output Tensors summed across microbatches """ for v in features.values(): mesh = v.mesh graph = v.graph microbatch_dim = Dimension("microbatch", num_splits) smaller_batch_dim = Dimension(batch_dim.name, batch_dim.size // num_splits) cache = {} def select(t, microbatch_num): return gather( replace_dimensions(t, batch_dim, [smaller_batch_dim, microbatch_dim]), microbatch_num, microbatch_dim) def cond_fn(microbatch_num): return less(microbatch_num, num_splits) def body_fn(microbatch_num): """Body function for mtf.while_loop. Args: microbatch_num: a mtf Scalar Returns: a list of mtf Tensors """ my_features = {} for k, v in six.iteritems(features): my_features[k] = select(v, microbatch_num) outputs = model_fn(my_features) grads = gradients( [outputs["loss"]], [v.outputs[0] for v in graph.trainable_variables]) output_keys = outputs.keys() cache["output_keys"] = output_keys ret = [] ret.append(microbatch_num + 1) # The rest of the returned values are "accumulators" that get summed # across all microbatches. for t in outputs.values(): if smaller_batch_dim in t.shape: # The output contains a batch dimension, so we want to concatenate # across microbatches. # Here we pad the tensor for each microbatch - summing will complete # the concatenation. t = einsum( [t, one_hot(microbatch_num, microbatch_dim, dtype=t.dtype)], output_shape=replace_dimensions( t.shape, smaller_batch_dim, [smaller_batch_dim, microbatch_dim])) t = replace_dimensions( t, [smaller_batch_dim, microbatch_dim], batch_dim) ret.append(t) else: # There is no batch dimension. Sum across all microbatches. ret.append(t) # we also want to sum the gradients. ret.extend(grads) return ret while_out = while_loop( cond_fn, body_fn, [constant(mesh, 0, dtype=tf.int32)], has_accumulators=True) num_outputs = len(cache["output_keys"]) combined_outputs = {} for k, v in zip(cache["output_keys"], while_out[1:1 + num_outputs]): combined_outputs[k] = v combined_grads = while_out[1 + num_outputs:] return combined_grads, combined_outputs
[ "def", "serialize_training_step", "(", "features", ",", "model_fn", ",", "batch_dim", ",", "num_splits", ")", ":", "for", "v", "in", "features", ".", "values", "(", ")", ":", "mesh", "=", "v", ".", "mesh", "graph", "=", "v", ".", "graph", "microbatch_dim...
Break the training batch into multiple microbatches. Returns two structures: grads - a list of Tensors corresponding to the gradients on graph.trainable_variables. These are summed across all microbatches outputs - a dictionary of Tensors corresponding to the output dictionary of model_fn. Each value is either summed across all microbatches (if it has no batch-dimension), or concatenated across all microbatches to represent the original batch (if it does have a batch-dimension). Args: features: a dictionary of Tensors, each with a batch_dim dimension model_fn: a function from feature dictionary to output dictionary output_dictionary must contain "loss" batch_dim: a Dimension num_splits: an integer dividing batch_dim.size Returns: grads: a list of Tensors corresponding to the gradients on graph.trainable_variables outputs: dictionary of output Tensors summed across microbatches
[ "Break", "the", "training", "batch", "into", "multiple", "microbatches", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L5146-L5231
train
222,754
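The while_loop above amounts to ordinary gradient accumulation; a framework-free sketch of the same idea, where loss_and_grad_fn is a hypothetical stand-in for running model_fn and taking gradients:

def accumulate_microbatches(features, loss_and_grad_fn, num_splits):
  # Split the batch, run each microbatch, and sum losses and gradients.
  total_loss, total_grads = 0.0, None
  for i in range(num_splits):
    micro = {k: v[i::num_splits] for k, v in features.items()}  # strided split
    loss, grads = loss_and_grad_fn(micro)
    total_loss += loss
    total_grads = grads if total_grads is None else [
        g + h for g, h in zip(total_grads, grads)]
  return total_loss, total_grads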
tensorflow/mesh
mesh_tensorflow/ops.py
Shape.rename_dimension
def rename_dimension(self, old_name, new_name): """Returns a copy where one dimension is renamed.""" if old_name not in self.dimension_names: raise ValueError("Shape %s does not have dimension named %s" % (self, old_name)) return Shape( [Dimension(new_name, d.size) if d.name == old_name else d for d in self.dims])
python
def rename_dimension(self, old_name, new_name): """Returns a copy where one dimension is renamed.""" if old_name not in self.dimension_names: raise ValueError("Shape %s does not have dimension named %s" % (self, old_name)) return Shape( [Dimension(new_name, d.size) if d.name == old_name else d for d in self.dims])
[ "def", "rename_dimension", "(", "self", ",", "old_name", ",", "new_name", ")", ":", "if", "old_name", "not", "in", "self", ".", "dimension_names", ":", "raise", "ValueError", "(", "\"Shape %s does not have dimension named %s\"", "%", "(", "self", ",", "old_name", ...
Returns a copy where one dimension is renamed.
[ "Returns", "a", "copy", "where", "one", "dimension", "is", "renamed", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L163-L170
train
222,755
tensorflow/mesh
mesh_tensorflow/ops.py
Shape.resize_dimension
def resize_dimension(self, name, new_size): """Returns a copy where one dimension has a different size.""" if name not in self.dimension_names: raise ValueError("Shape %s does not have dimension named %s" % (self, name)) return Shape( [Dimension(name, new_size) if d.name == name else d for d in self.dims])
python
def resize_dimension(self, name, new_size): """Returns a copy where one dimension has a different size.""" if name not in self.dimension_names: raise ValueError("Shape %s does not have dimension named %s" % (self, name)) return Shape( [Dimension(name, new_size) if d.name == name else d for d in self.dims])
[ "def", "resize_dimension", "(", "self", ",", "name", ",", "new_size", ")", ":", "if", "name", "not", "in", "self", ".", "dimension_names", ":", "raise", "ValueError", "(", "\"Shape %s does not have dimension named %s\"", "%", "(", "self", ",", "name", ")", ")"...
Returns a copy where one dimension has a different size.
[ "Returns", "a", "copy", "where", "one", "dimension", "has", "a", "different", "size", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L172-L179
train
222,756
tensorflow/mesh
mesh_tensorflow/ops.py
LayoutRules.tensor_layout
def tensor_layout(self, tensor_shape, mesh_shape):
  """Computes TensorLayout given a Tensor Shape and a Mesh Shape.

  Args:
    tensor_shape: Shape.
    mesh_shape: Shape.

  Returns:
    TensorLayout.

  Raises:
    ValueError: If two Tensor Dimensions map to the same Mesh Dimension.
  """
  ret = [self.tensor_dimension_to_mesh_axis(d, mesh_shape)
         for d in tensor_shape]
  not_nones = [a for a in ret if a is not None]
  if len(not_nones) != len(set(not_nones)):
    raise ValueError(
        "Two Tensor Dimensions may not map to the same Mesh Dimension:"
        " layout=%s tensor_shape=%s mesh_shape=%s " %
        (self, tensor_shape, mesh_shape))
  return TensorLayout(ret)
python
def tensor_layout(self, tensor_shape, mesh_shape):
  """Computes TensorLayout given a Tensor Shape and a Mesh Shape.

  Args:
    tensor_shape: Shape.
    mesh_shape: Shape.

  Returns:
    TensorLayout.

  Raises:
    ValueError: If two Tensor Dimensions map to the same Mesh Dimension.
  """
  ret = [self.tensor_dimension_to_mesh_axis(d, mesh_shape)
         for d in tensor_shape]
  not_nones = [a for a in ret if a is not None]
  if len(not_nones) != len(set(not_nones)):
    raise ValueError(
        "Two Tensor Dimensions may not map to the same Mesh Dimension:"
        " layout=%s tensor_shape=%s mesh_shape=%s " %
        (self, tensor_shape, mesh_shape))
  return TensorLayout(ret)
[ "def", "tensor_layout", "(", "self", ",", "tensor_shape", ",", "mesh_shape", ")", ":", "ret", "=", "[", "self", ".", "tensor_dimension_to_mesh_axis", "(", "d", ",", "mesh_shape", ")", "for", "d", "in", "tensor_shape", "]", "not_nones", "=", "[", "a", "for"...
Computes TensorLayout given a Tensor Shape and a Mesh Shape.

Args:
  tensor_shape: Shape.
  mesh_shape: Shape.

Returns:
  TensorLayout.

Raises:
  ValueError: If two Tensor Dimensions map to the same Mesh Dimension.
[ "Computes", "TensorLayout", "given", "a", "Tensor", "Shape", "and", "a", "Mesh", "Shape", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L247-L268
train
222,757
tensorflow/mesh
mesh_tensorflow/ops.py
TensorLayout.mesh_axis_to_tensor_axis
def mesh_axis_to_tensor_axis(self, mesh_ndims): """For each mesh axis, which Tensor axis maps to it. Args: mesh_ndims: int. Returns: Tuple of optional integers, with length mesh_ndims. """ ta2ma = self._tensor_axis_to_mesh_axis return tuple( [ta2ma.index(mesh_axis) if mesh_axis in ta2ma else None for mesh_axis in xrange(mesh_ndims)])
python
def mesh_axis_to_tensor_axis(self, mesh_ndims): """For each mesh axis, which Tensor axis maps to it. Args: mesh_ndims: int. Returns: Tuple of optional integers, with length mesh_ndims. """ ta2ma = self._tensor_axis_to_mesh_axis return tuple( [ta2ma.index(mesh_axis) if mesh_axis in ta2ma else None for mesh_axis in xrange(mesh_ndims)])
[ "def", "mesh_axis_to_tensor_axis", "(", "self", ",", "mesh_ndims", ")", ":", "ta2ma", "=", "self", ".", "_tensor_axis_to_mesh_axis", "return", "tuple", "(", "[", "ta2ma", ".", "index", "(", "mesh_axis", ")", "if", "mesh_axis", "in", "ta2ma", "else", "None", ...
For each mesh axis, which Tensor axis maps to it. Args: mesh_ndims: int. Returns: Tuple of optional integers, with length mesh_ndims.
[ "For", "each", "mesh", "axis", "which", "Tensor", "axis", "maps", "to", "it", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L339-L351
train
222,758
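The inversion is plain index lookup; a pure-Python illustration:

# Inverting a tensor-axis -> mesh-axis map, as in mesh_axis_to_tensor_axis.
ta2ma = (1, None, 0)   # tensor axis 0 -> mesh axis 1, axis 2 -> mesh axis 0
mesh_ndims = 3
ma2ta = tuple(ta2ma.index(m) if m in ta2ma else None
              for m in range(mesh_ndims))
print(ma2ta)  # (2, 0, None)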
tensorflow/mesh
mesh_tensorflow/ops.py
Graph.unique_name
def unique_name(self, name, mark_as_used=True): """Like tf.Graph.unique_name, returns a unique operation name for `name`. Args: name: The name for an operation. mark_as_used: whether to mark this name as being used. Returns: A string to use as the name for the operation. """ scope_name = tf.get_variable_scope().name if scope_name: name = scope_name + "/" + name # As in TensorFlow, treat names as case insensitive when deciding whether # they are in use. name_key = name.lower() i = self._names_in_use.get(name_key, 0) if mark_as_used: self._names_in_use[name_key] = i + 1 if i > 0: base_name_key = name_key while name_key in self._names_in_use: name_key = "%s_%d" % (base_name_key, i) i += 1 if mark_as_used: self._names_in_use[name_key] = 1 name = "%s_%d" % (name, i-1) return name
python
def unique_name(self, name, mark_as_used=True): """Like tf.Graph.unique_name, returns a unique operation name for `name`. Args: name: The name for an operation. mark_as_used: whether to mark this name as being used. Returns: A string to use as the name for the operation. """ scope_name = tf.get_variable_scope().name if scope_name: name = scope_name + "/" + name # As in TensorFlow, treat names as case insensitive when deciding whether # they are in use. name_key = name.lower() i = self._names_in_use.get(name_key, 0) if mark_as_used: self._names_in_use[name_key] = i + 1 if i > 0: base_name_key = name_key while name_key in self._names_in_use: name_key = "%s_%d" % (base_name_key, i) i += 1 if mark_as_used: self._names_in_use[name_key] = 1 name = "%s_%d" % (name, i-1) return name
[ "def", "unique_name", "(", "self", ",", "name", ",", "mark_as_used", "=", "True", ")", ":", "scope_name", "=", "tf", ".", "get_variable_scope", "(", ")", ".", "name", "if", "scope_name", ":", "name", "=", "scope_name", "+", "\"/\"", "+", "name", "# As in...
Like tf.Graph.unique_name, returns a unique operation name for `name`. Args: name: The name for an operation. mark_as_used: whether to mark this name as being used. Returns: A string to use as the name for the operation.
[ "Like", "tf", ".", "Graph", ".", "unique_name", "returns", "a", "unique", "operation", "name", "for", "name", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L384-L413
train
222,759
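The renaming scheme can be reproduced outside of TensorFlow; a standalone sketch of the same logic (no variable scopes), for illustration only:

names_in_use = {}

def unique_name(name):
  key = name.lower()           # case-insensitive, as in tf.Graph
  i = names_in_use.get(key, 0)
  names_in_use[key] = i + 1
  if i > 0:
    base = key
    while key in names_in_use:
      key = "%s_%d" % (base, i)
      i += 1
    names_in_use[key] = 1
    name = "%s_%d" % (name, i - 1)
  return name

print(unique_name("add"), unique_name("add"), unique_name("Add"))
# add add_1 Add_2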
tensorflow/mesh
mesh_tensorflow/ops.py
Graph.combine_assignments
def combine_assignments(self, assignments): """Rewrite the current graph to combine "Assign" operations. Combine similar Assign operations into grouped Assign operations. This is useful when using the rewrite_stack_variables() optimization, since variables can only be stacked if they are present in the same set of Assign operations. This function takes a list of Assign operations and returns a possibly shorter list of Assign operations. The input Assignment operations are removed from the graph and become invalid. Args: assignments: a list of Assign objects Returns: a list of Assign objects """ group_by_fn = collections.defaultdict(list) for a in assignments: if not isinstance(a, Assign): raise ValueError("ops should be instances of mtf.Assign") group_by_fn[a.assign_fn].append(a) assignments_set = set(assignments) self._operations = [ op for op in self._operations if op not in assignments_set] ret = [] for fn, ops in six.iteritems(group_by_fn): variables = [] values = [] for a in ops: variables.extend(a.variables) values.extend(a.inputs) ret.append(Assign(variables, values, fn)) return ret
python
def combine_assignments(self, assignments): """Rewrite the current graph to combine "Assign" operations. Combine similar Assign operations into grouped Assign operations. This is useful when using the rewrite_stack_variables() optimization, since variables can only be stacked if they are present in the same set of Assign operations. This function takes a list of Assign operations and returns a possibly shorter list of Assign operations. The input Assignment operations are removed from the graph and become invalid. Args: assignments: a list of Assign objects Returns: a list of Assign objects """ group_by_fn = collections.defaultdict(list) for a in assignments: if not isinstance(a, Assign): raise ValueError("ops should be instances of mtf.Assign") group_by_fn[a.assign_fn].append(a) assignments_set = set(assignments) self._operations = [ op for op in self._operations if op not in assignments_set] ret = [] for fn, ops in six.iteritems(group_by_fn): variables = [] values = [] for a in ops: variables.extend(a.variables) values.extend(a.inputs) ret.append(Assign(variables, values, fn)) return ret
[ "def", "combine_assignments", "(", "self", ",", "assignments", ")", ":", "group_by_fn", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "a", "in", "assignments", ":", "if", "not", "isinstance", "(", "a", ",", "Assign", ")", ":", "raise", ...
Rewrite the current graph to combine "Assign" operations. Combine similar Assign operations into grouped Assign operations. This is useful when using the rewrite_stack_variables() optimization, since variables can only be stacked if they are present in the same set of Assign operations. This function takes a list of Assign operations and returns a possibly shorter list of Assign operations. The input Assignment operations are removed from the graph and become invalid. Args: assignments: a list of Assign objects Returns: a list of Assign objects
[ "Rewrite", "the", "current", "graph", "to", "combine", "Assign", "operations", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L534-L567
train
222,760
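A minimal sketch of the grouping step, using a hypothetical Assign namedtuple in place of mtf.Assign:

import collections

Assign = collections.namedtuple("Assign", ["assign_fn", "variables", "inputs"])

def group_assignments(assignments):
  by_fn = collections.defaultdict(list)
  for a in assignments:
    by_fn[a.assign_fn].append(a)
  # One combined (fn, variables, values) triple per distinct assign_fn.
  return [(fn,
           [v for a in ops for v in a.variables],
           [x for a in ops for x in a.inputs])
          for fn, ops in by_fn.items()]

f = lambda var, val: None
grouped = group_assignments([Assign(f, ["v1"], [1]), Assign(f, ["v2"], [2])])
print(len(grouped))  # 1 -- both assignments share assign_fn, so they merge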
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.tensor_layout
def tensor_layout(self, arg): """Compute TensorLayout for a Tensor or a Shape. Args: arg: Tensor or Shape. Returns: TensorLayout. """ if isinstance(arg, Tensor): arg = arg.shape return self.layout_rules.tensor_layout(arg, self.shape)
python
def tensor_layout(self, arg): """Compute TensorLayout for a Tensor or a Shape. Args: arg: Tensor or Shape. Returns: TensorLayout. """ if isinstance(arg, Tensor): arg = arg.shape return self.layout_rules.tensor_layout(arg, self.shape)
[ "def", "tensor_layout", "(", "self", ",", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "Tensor", ")", ":", "arg", "=", "arg", ".", "shape", "return", "self", ".", "layout_rules", ".", "tensor_layout", "(", "arg", ",", "self", ".", "shape", "...
Compute TensorLayout for a Tensor or a Shape. Args: arg: Tensor or Shape. Returns: TensorLayout.
[ "Compute", "TensorLayout", "for", "a", "Tensor", "or", "a", "Shape", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L803-L814
train
222,761
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.mesh_axis_to_cumprod
def mesh_axis_to_cumprod(self, tensor_shape): """For each mesh axis, give the product of previous tensor axes. Args: tensor_shape: Shape. Returns: list with length self.ndims where each element is an integer or None. """ tensor_layout = self.tensor_layout(tensor_shape) ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims) ta2cumprod = tensor_shape.cumprod return [None if ta is None else ta2cumprod[ta] for ta in ma2ta]
python
def mesh_axis_to_cumprod(self, tensor_shape): """For each mesh axis, give the product of previous tensor axes. Args: tensor_shape: Shape. Returns: list with length self.ndims where each element is an integer or None. """ tensor_layout = self.tensor_layout(tensor_shape) ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims) ta2cumprod = tensor_shape.cumprod return [None if ta is None else ta2cumprod[ta] for ta in ma2ta]
[ "def", "mesh_axis_to_cumprod", "(", "self", ",", "tensor_shape", ")", ":", "tensor_layout", "=", "self", ".", "tensor_layout", "(", "tensor_shape", ")", "ma2ta", "=", "tensor_layout", ".", "mesh_axis_to_tensor_axis", "(", "self", ".", "ndims", ")", "ta2cumprod", ...
For each mesh axis, give the product of previous tensor axes. Args: tensor_shape: Shape. Returns: list with length self.ndims where each element is an integer or None.
[ "For", "each", "mesh", "axis", "give", "the", "product", "of", "previous", "tensor", "axes", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L816-L828
train
222,762
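Shape.cumprod gives, for each axis, the product of the sizes of all earlier axes; a small worked example:

# For shape [a=2, b=3, c=5]:
sizes = [2, 3, 5]
cumprod = []
p = 1
for s in sizes:
  cumprod.append(p)   # product of the sizes of all *previous* axes
  p *= s
print(cumprod)  # [1, 2, 6]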
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.slice_shape
def slice_shape(self, tensor_shape): """Shape of each slice of the Tensor. Args: tensor_shape: Shape. Returns: list of integers with length tensor_shape.ndims. Raises: ValueError: If a Tensor dimension is not divisible by the corresponding Mesh dimension. """ tensor_layout = self.tensor_layout(tensor_shape) ret = [] for tensor_dim, mesh_axis in zip( tensor_shape, tensor_layout.tensor_axis_to_mesh_axis): if mesh_axis is None: ret.append(tensor_dim.size) else: mesh_dim = self.shape[mesh_axis] if tensor_dim.size % mesh_dim.size != 0: raise ValueError( "Tensor dimension size not divisible by mesh dimension size:" " tensor_shape=%s tensor_layout=%s" % (tensor_shape, tensor_layout)) ret.append(tensor_dim.size // mesh_dim.size) return ret
python
def slice_shape(self, tensor_shape): """Shape of each slice of the Tensor. Args: tensor_shape: Shape. Returns: list of integers with length tensor_shape.ndims. Raises: ValueError: If a Tensor dimension is not divisible by the corresponding Mesh dimension. """ tensor_layout = self.tensor_layout(tensor_shape) ret = [] for tensor_dim, mesh_axis in zip( tensor_shape, tensor_layout.tensor_axis_to_mesh_axis): if mesh_axis is None: ret.append(tensor_dim.size) else: mesh_dim = self.shape[mesh_axis] if tensor_dim.size % mesh_dim.size != 0: raise ValueError( "Tensor dimension size not divisible by mesh dimension size:" " tensor_shape=%s tensor_layout=%s" % (tensor_shape, tensor_layout)) ret.append(tensor_dim.size // mesh_dim.size) return ret
[ "def", "slice_shape", "(", "self", ",", "tensor_shape", ")", ":", "tensor_layout", "=", "self", ".", "tensor_layout", "(", "tensor_shape", ")", "ret", "=", "[", "]", "for", "tensor_dim", ",", "mesh_axis", "in", "zip", "(", "tensor_shape", ",", "tensor_layout...
Shape of each slice of the Tensor. Args: tensor_shape: Shape. Returns: list of integers with length tensor_shape.ndims. Raises: ValueError: If a Tensor dimension is not divisible by the corresponding Mesh dimension.
[ "Shape", "of", "each", "slice", "of", "the", "Tensor", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L830-L857
train
222,763
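The per-slice shape is just integer division of split dimensions; a toy illustration (dimension names and layout are made up):

# Suppose the layout maps "batch" to a mesh axis of size 4:
tensor_dims = [("batch", 8), ("hidden", 16)]
mesh_axis_size = {"batch": 4}          # "hidden" is unsplit
slice_shape = [size // mesh_axis_size.get(name, 1)
               for name, size in tensor_dims]
print(slice_shape)  # [2, 16]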
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.slice_begin
def slice_begin(self, tensor_shape, pnum):
  """Begin position for the tensor slice for the given processor.

  Args:
    tensor_shape: Shape.
    pnum: int < self.size.

  Returns:
    list of integers with length tensor_shape.ndims.
  """
  tensor_layout = self.tensor_layout(tensor_shape)
  coordinates = pnum_to_processor_coordinates(self.shape, pnum)
  ret = []
  for dim_size, mesh_axis in zip(
      tensor_shape.to_integer_list, tensor_layout.tensor_axis_to_mesh_axis):
    if mesh_axis is None:
      ret.append(0)
    else:
      ret.append(
          dim_size // self.shape[mesh_axis].size * coordinates[mesh_axis])
  return ret
python
def slice_begin(self, tensor_shape, pnum):
  """Begin position for the tensor slice for the given processor.

  Args:
    tensor_shape: Shape.
    pnum: int < self.size.

  Returns:
    list of integers with length tensor_shape.ndims.
  """
  tensor_layout = self.tensor_layout(tensor_shape)
  coordinates = pnum_to_processor_coordinates(self.shape, pnum)
  ret = []
  for dim_size, mesh_axis in zip(
      tensor_shape.to_integer_list, tensor_layout.tensor_axis_to_mesh_axis):
    if mesh_axis is None:
      ret.append(0)
    else:
      ret.append(
          dim_size // self.shape[mesh_axis].size * coordinates[mesh_axis])
  return ret
[ "def", "slice_begin", "(", "self", ",", "tensor_shape", ",", "pnum", ")", ":", "tensor_layout", "=", "self", ".", "tensor_layout", "(", "tensor_shape", ")", "coordinates", "=", "pnum_to_processor_coordinates", "(", "self", ".", "shape", ",", "pnum", ")", "ret"...
Begin position for the tensor slice for the given processor.

Args:
  tensor_shape: Shape.
  pnum: int < self.size.

Returns:
  list of integers with length tensor_shape.ndims.
[ "Begin", "position", "for", "the", "tensor", "slice", "for", "the", "given", "processor", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L859-L879
train
222,764
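A worked example of the begin-offset formula for one split axis of size 4 on a tensor dimension of size 8:

dim_size, mesh_dim_size = 8, 4
for coord in range(mesh_dim_size):
  # Each processor's slice starts at its coordinate times the slice size.
  print(coord, dim_size // mesh_dim_size * coord)  # 0 0, 1 2, 2 4, 3 6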
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.Print
def Print(self, x, data, message, **kwargs):  # pylint: disable=invalid-name
  """Calls tf.Print.

  Args:
    x: LaidOutTensor.
    data: list of LaidOutTensor.
    message: str.
    **kwargs: keyword arguments to tf.Print.

  Returns:
    LaidOutTensor.
  """
  del data, message, kwargs
  tf.logging.warning("Warning - mtf.Print not implemented for this mesh type")
  return x
python
def Print(self, x, data, message, **kwargs):  # pylint: disable=invalid-name
  """Calls tf.Print.

  Args:
    x: LaidOutTensor.
    data: list of LaidOutTensor.
    message: str.
    **kwargs: keyword arguments to tf.Print.

  Returns:
    LaidOutTensor.
  """
  del data, message, kwargs
  tf.logging.warning("Warning - mtf.Print not implemented for this mesh type")
  return x
[ "def", "Print", "(", "self", ",", "x", ",", "data", ",", "message", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=invalid-name", "del", "data", ",", "message", ",", "kwargs", "tf", ".", "logging", ".", "warning", "(", "\"Warning - mtf.Print not imple...
Calls tf.Print.

Args:
  x: LaidOutTensor.
  data: list of LaidOutTensor.
  message: str.
  **kwargs: keyword arguments to tf.Print.

Returns:
  LaidOutTensor.
[ "Calls", "tf", ".", "Print", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L908-L922
train
222,765
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.allsplit
def allsplit(self, x, mesh_axis, split_axis, which=None):
  """Inverse of allconcat - split each slice and keep only one piece of it.

  The number of ways to split is the number of processors in the group.
  The part that is kept corresponds to the processor's index in the group.

  Args:
    x: LaidOutTensor.
    mesh_axis: int, the mesh axis along which to split.
    split_axis: int, the Tensor axis along which to split.
    which: an optional LaidOutTensor of integer scalars. Selects the slice
      to keep, instead of the coordinate.

  Returns:
    LaidOutTensor.
  """
  if which is None:
    which = self.laid_out_pcoord(mesh_axis)
  num_splits = self.shape[mesh_axis].size
  def my_fn(x, which):
    slice_begin = [
        dimsize // num_splits * which if i == split_axis else 0
        for i, dimsize in enumerate(x.shape.as_list())]
    slice_size = [
        dimsize // num_splits if i == split_axis else dimsize
        for i, dimsize in enumerate(x.shape.as_list())]
    return tf.slice(x, slice_begin, slice_size)
  return self.slicewise(my_fn, x, which)
python
def allsplit(self, x, mesh_axis, split_axis, which=None):
  """Inverse of allconcat - split each slice and keep only one piece of it.

  The number of ways to split is the number of processors in the group.
  The part that is kept corresponds to the processor's index in the group.

  Args:
    x: LaidOutTensor.
    mesh_axis: int, the mesh axis along which to split.
    split_axis: int, the Tensor axis along which to split.
    which: an optional LaidOutTensor of integer scalars. Selects the slice
      to keep, instead of the coordinate.

  Returns:
    LaidOutTensor.
  """
  if which is None:
    which = self.laid_out_pcoord(mesh_axis)
  num_splits = self.shape[mesh_axis].size
  def my_fn(x, which):
    slice_begin = [
        dimsize // num_splits * which if i == split_axis else 0
        for i, dimsize in enumerate(x.shape.as_list())]
    slice_size = [
        dimsize // num_splits if i == split_axis else dimsize
        for i, dimsize in enumerate(x.shape.as_list())]
    return tf.slice(x, slice_begin, slice_size)
  return self.slicewise(my_fn, x, which)
[ "def", "allsplit", "(", "self", ",", "x", ",", "mesh_axis", ",", "split_axis", ",", "which", "=", "None", ")", ":", "if", "which", "is", "None", ":", "which", "=", "self", ".", "laid_out_pcoord", "(", "mesh_axis", ")", "num_splits", "=", "self", ".", ...
Inverse of allconcat - split each slice and keep only one piece of it.

The number of ways to split is the number of processors in the group.
The part that is kept corresponds to the processor's index in the group.

Args:
  x: LaidOutTensor.
  mesh_axis: int, the mesh axis along which to split.
  split_axis: int, the Tensor axis along which to split.
  which: an optional LaidOutTensor of integer scalars. Selects the slice
    to keep, instead of the coordinate.

Returns:
  LaidOutTensor.
[ "Inverse", "of", "allconcat", "-", "split", "each", "slice", "and", "keep", "only", "one", "piece", "of", "it", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L937-L964
train
222,766
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.shift_by_n_processors
def shift_by_n_processors(self, x, mesh_axis, offset, wrap):
  """Receive the slice from processor pcoord - offset.

  Args:
    x: a LaidOutTensor
    mesh_axis: an integer
    offset: an integer
    wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.

  Returns:
    a LaidOutTensor
  """
  n = self.shape[mesh_axis].size
  source_pcoord = []
  for i in xrange(n):
    c = i - offset
    if c != c % n:
      if wrap:
        c = c % n
      else:
        c = None
    source_pcoord.append(c)
  return self.receive(x, mesh_axis, source_pcoord)
python
def shift_by_n_processors(self, x, mesh_axis, offset, wrap):
  """Receive the slice from processor pcoord - offset.

  Args:
    x: a LaidOutTensor
    mesh_axis: an integer
    offset: an integer
    wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.

  Returns:
    a LaidOutTensor
  """
  n = self.shape[mesh_axis].size
  source_pcoord = []
  for i in xrange(n):
    c = i - offset
    if c != c % n:
      if wrap:
        c = c % n
      else:
        c = None
    source_pcoord.append(c)
  return self.receive(x, mesh_axis, source_pcoord)
[ "def", "shift_by_n_processors", "(", "self", ",", "x", ",", "mesh_axis", ",", "offset", ",", "wrap", ")", ":", "n", "=", "self", ".", "shape", "[", "mesh_axis", "]", ".", "size", "source_pcoord", "=", "[", "]", "for", "i", "in", "xrange", "(", "n", ...
Receive the slice from processor pcoord - offset.

Args:
  x: a LaidOutTensor
  mesh_axis: an integer
  offset: an integer
  wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.

Returns:
  a LaidOutTensor
[ "Receive", "the", "slice", "from", "processor", "pcoord", "-", "offset", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1017-L1036
train
222,767
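The source_pcoord construction can be traced by hand; a pure-Python rendering for n=4 processors and offset=1 (processor i receives from i-1):

n, offset = 4, 1
for wrap in (True, False):
  src = []
  for i in range(n):
    c = i - offset
    if c != c % n:
      c = c % n if wrap else None   # None means "receive zeros"
    src.append(c)
  print(wrap, src)  # True [3, 0, 1, 2] / False [None, 0, 1, 2]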
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.laid_out_pcoord
def laid_out_pcoord(self, mesh_axis): """Returns a LaidOutTensor containing the processor coordinate. Args: mesh_axis: int. Returns: LaidOutTensor where each slice is an integer scalar. """ divisor = list_product(self.shape.to_integer_list[mesh_axis + 1:]) modulus = self.shape[mesh_axis].size def my_fn(pnum): return (pnum // divisor) % modulus return self.slicewise(my_fn, self.laid_out_pnum())
python
def laid_out_pcoord(self, mesh_axis): """Returns a LaidOutTensor containing the processor coordinate. Args: mesh_axis: int. Returns: LaidOutTensor where each slice is an integer scalar. """ divisor = list_product(self.shape.to_integer_list[mesh_axis + 1:]) modulus = self.shape[mesh_axis].size def my_fn(pnum): return (pnum // divisor) % modulus return self.slicewise(my_fn, self.laid_out_pnum())
[ "def", "laid_out_pcoord", "(", "self", ",", "mesh_axis", ")", ":", "divisor", "=", "list_product", "(", "self", ".", "shape", ".", "to_integer_list", "[", "mesh_axis", "+", "1", ":", "]", ")", "modulus", "=", "self", ".", "shape", "[", "mesh_axis", "]", ...
Returns a LaidOutTensor containing the processor coordinate. Args: mesh_axis: int. Returns: LaidOutTensor where each slice is an integer scalar.
[ "Returns", "a", "LaidOutTensor", "containing", "the", "processor", "coordinate", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1046-L1059
train
222,768
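The divisor/modulus arithmetic recovers a coordinate from the flat processor number; a worked example for mesh shape [x:2, y:3] and axis "y":

shape = [2, 3]
axis = 1
divisor = 1
for s in shape[axis + 1:]:   # product of the sizes of all later axes
  divisor *= s
modulus = shape[axis]
print([(p // divisor) % modulus for p in range(6)])  # [0, 1, 2, 0, 1, 2]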
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.laid_out_slice_num
def laid_out_slice_num(self, tensor_shape): """A LaidOutTensor with an int32 scalar, identical for identical slices. This is useful for synchronizing random operations. Args: tensor_shape: a TensorShape Returns: a LaidOutTensor where each slice is an integer scalar. """ ret = self.slicewise(lambda: tf.to_int32(0)) tensor_layout = self.tensor_layout(tensor_shape) for mesh_axis in tensor_layout.tensor_axis_to_mesh_axis: if mesh_axis is not None: def my_fn(x, pcoord, mesh_dim_size): return x * mesh_dim_size + pcoord ret = self.slicewise( my_fn, ret, self.laid_out_pcoord(mesh_axis), self.shape[mesh_axis].size) return ret
python
def laid_out_slice_num(self, tensor_shape): """A LaidOutTensor with an int32 scalar, identical for identical slices. This is useful for synchronizing random operations. Args: tensor_shape: a TensorShape Returns: a LaidOutTensor where each slice is an integer scalar. """ ret = self.slicewise(lambda: tf.to_int32(0)) tensor_layout = self.tensor_layout(tensor_shape) for mesh_axis in tensor_layout.tensor_axis_to_mesh_axis: if mesh_axis is not None: def my_fn(x, pcoord, mesh_dim_size): return x * mesh_dim_size + pcoord ret = self.slicewise( my_fn, ret, self.laid_out_pcoord(mesh_axis), self.shape[mesh_axis].size) return ret
[ "def", "laid_out_slice_num", "(", "self", ",", "tensor_shape", ")", ":", "ret", "=", "self", ".", "slicewise", "(", "lambda", ":", "tf", ".", "to_int32", "(", "0", ")", ")", "tensor_layout", "=", "self", ".", "tensor_layout", "(", "tensor_shape", ")", "f...
A LaidOutTensor with an int32 scalar, identical for identical slices. This is useful for synchronizing random operations. Args: tensor_shape: a TensorShape Returns: a LaidOutTensor where each slice is an integer scalar.
[ "A", "LaidOutTensor", "with", "an", "int32", "scalar", "identical", "for", "identical", "slices", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1061-L1080
train
222,769
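The accumulation is a mixed-radix encoding of the split coordinates; a two-axis illustration:

# Two split mesh axes with sizes 2 and 3; this processor's coordinates
# are (1, 2):
coords, sizes = (1, 2), (2, 3)
ret = 0
for pcoord, size in zip(coords, sizes):
  ret = ret * size + pcoord
print(ret)  # 5 -- identical slices end up with identical ids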
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.broadcast_impl
def broadcast_impl(self, old_slices, old_shape, new_shape): """Implementation of a broadcast operation. Args: old_slices: LaidOutTensor. old_shape: Shape. new_shape: Shape. Returns: LaidOutTensor. """ new_slice_shape = self.slice_shape(new_shape) def tf_fn(x): return (tf.zeros(new_slice_shape, dtype=x.dtype) + _expand_dims(x, old_shape, new_shape)) return self.slicewise(tf_fn, old_slices)
python
def broadcast_impl(self, old_slices, old_shape, new_shape): """Implementation of a broadcast operation. Args: old_slices: LaidOutTensor. old_shape: Shape. new_shape: Shape. Returns: LaidOutTensor. """ new_slice_shape = self.slice_shape(new_shape) def tf_fn(x): return (tf.zeros(new_slice_shape, dtype=x.dtype) + _expand_dims(x, old_shape, new_shape)) return self.slicewise(tf_fn, old_slices)
[ "def", "broadcast_impl", "(", "self", ",", "old_slices", ",", "old_shape", ",", "new_shape", ")", ":", "new_slice_shape", "=", "self", ".", "slice_shape", "(", "new_shape", ")", "def", "tf_fn", "(", "x", ")", ":", "return", "(", "tf", ".", "zeros", "(", ...
Implementation of a broadcast operation. Args: old_slices: LaidOutTensor. old_shape: Shape. new_shape: Shape. Returns: LaidOutTensor.
[ "Implementation", "of", "a", "broadcast", "operation", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1082-L1097
train
222,770
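A numpy rendering of the zeros-plus-add broadcast trick used above:

import numpy as np

# Adding x (with new axes inserted) to a zero tensor of the target slice
# shape materializes the broadcast.
x = np.arange(3.0)                       # old shape [3]
target = np.zeros((4, 3))                # new shape [4, 3]
out = target + x[np.newaxis, :]          # broadcast along the new axis
print(out.shape)                         # (4, 3)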
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.make_slices
def make_slices(self, tf_tensor, tensor_shape): """Turns a single tf.Tensor into a list of slices, one for each processor. Args: tf_tensor: tf.Tensor. tensor_shape: Shape. Returns: list of tf.tensor with length self.size. """ tensor_layout = self.tensor_layout(tensor_shape) slice_shape = self.slice_shape(tensor_shape) def my_fn(pnum): if tensor_layout.is_fully_replicated: return tf_tensor else: slice_begin = self.slice_begin(tensor_shape, pnum) return tf.slice(tf_tensor, slice_begin, slice_shape) return parallel([tf_tensor.device] * self.size, my_fn, list(xrange(self.size)))
python
def make_slices(self, tf_tensor, tensor_shape): """Turns a single tf.Tensor into a list of slices, one for each processor. Args: tf_tensor: tf.Tensor. tensor_shape: Shape. Returns: list of tf.tensor with length self.size. """ tensor_layout = self.tensor_layout(tensor_shape) slice_shape = self.slice_shape(tensor_shape) def my_fn(pnum): if tensor_layout.is_fully_replicated: return tf_tensor else: slice_begin = self.slice_begin(tensor_shape, pnum) return tf.slice(tf_tensor, slice_begin, slice_shape) return parallel([tf_tensor.device] * self.size, my_fn, list(xrange(self.size)))
[ "def", "make_slices", "(", "self", ",", "tf_tensor", ",", "tensor_shape", ")", ":", "tensor_layout", "=", "self", ".", "tensor_layout", "(", "tensor_shape", ")", "slice_shape", "=", "self", ".", "slice_shape", "(", "tensor_shape", ")", "def", "my_fn", "(", "...
Turns a single tf.Tensor into a list of slices, one for each processor. Args: tf_tensor: tf.Tensor. tensor_shape: Shape. Returns: list of tf.tensor with length self.size.
[ "Turns", "a", "single", "tf", ".", "Tensor", "into", "a", "list", "of", "slices", "one", "for", "each", "processor", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1099-L1119
train
222,771
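A numpy sketch of the slicing step for a 1-D mesh of size 4 splitting axis 0 (sizes are illustrative; the real op uses tf.slice with per-processor begin offsets):

import numpy as np

full = np.arange(8 * 3).reshape(8, 3)
slices = [full[i * 2:(i + 1) * 2] for i in range(4)]  # each slice is (2, 3)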
tensorflow/mesh
mesh_tensorflow/ops.py
MeshImpl.combine_slices
def combine_slices(self, slices, tensor_shape, device=None): """Turns a set of slices into a single tensor. Args: slices: list of tf.Tensor with length self.size. tensor_shape: Shape. device: optional str. If absent, we use the devices of the slices. Returns: tf.Tensor. """ if tensor_shape.ndims == 0: return slices[0] ret = slices[:] tensor_layout = self.tensor_layout(tensor_shape) for mesh_dim, tensor_axis in zip( self.shape, tensor_layout.mesh_axis_to_tensor_axis(self.ndims)): slice_size = len(ret) // mesh_dim.size if tensor_axis is None: ret = ret[:slice_size] else: if device: devices = [device] * slice_size else: devices = [ret[i].device for i in xrange(slice_size)] concat_inputs = [] for i in xrange(slice_size): concat_inputs.append( [ret[i + slice_size * j] for j in xrange(mesh_dim.size)]) ret = parallel( devices, tf.concat, concat_inputs, axis=[tensor_axis] * len(devices)) assert len(ret) == 1 return ret[0]
python
def combine_slices(self, slices, tensor_shape, device=None): """Turns a set of slices into a single tensor. Args: slices: list of tf.Tensor with length self.size. tensor_shape: Shape. device: optional str. If absent, we use the devices of the slices. Returns: tf.Tensor. """ if tensor_shape.ndims == 0: return slices[0] ret = slices[:] tensor_layout = self.tensor_layout(tensor_shape) for mesh_dim, tensor_axis in zip( self.shape, tensor_layout.mesh_axis_to_tensor_axis(self.ndims)): slice_size = len(ret) // mesh_dim.size if tensor_axis is None: ret = ret[:slice_size] else: if device: devices = [device] * slice_size else: devices = [ret[i].device for i in xrange(slice_size)] concat_inputs = [] for i in xrange(slice_size): concat_inputs.append( [ret[i + slice_size * j] for j in xrange(mesh_dim.size)]) ret = parallel( devices, tf.concat, concat_inputs, axis=[tensor_axis] * len(devices)) assert len(ret) == 1 return ret[0]
[ "def", "combine_slices", "(", "self", ",", "slices", ",", "tensor_shape", ",", "device", "=", "None", ")", ":", "if", "tensor_shape", ".", "ndims", "==", "0", ":", "return", "slices", "[", "0", "]", "ret", "=", "slices", "[", ":", "]", "tensor_layout",...
Turns a set of slices into a single tensor. Args: slices: list of tf.Tensor with length self.size. tensor_shape: Shape. device: optional str. If absent, we use the devices of the slices. Returns: tf.Tensor.
[ "Turns", "a", "set", "of", "slices", "into", "a", "single", "tensor", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1121-L1155
train
222,772
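And the inverse direction: concatenating per-processor slices restores the original tensor, mirroring combine_slices (numpy, illustrative):

import numpy as np

full = np.arange(8 * 3).reshape(8, 3)
slices = [full[i * 2:(i + 1) * 2] for i in range(4)]
assert (np.concatenate(slices, axis=0) == full).all()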
tensorflow/mesh
mesh_tensorflow/ops.py
Operation._initialize_splittable_and_unsplittable_dims
def _initialize_splittable_and_unsplittable_dims( self, default_splittability, exception_dims_iterable=None): """Initializer for splittable_dims and unsplittable_dims. Helper method to categorize all dimensions in the input/output tensors as either splittable or unsplittable. Args: default_splittability: a string which is either "splittable" or "unsplittable". exception_dims_iterable: an optional iterable of names of dimensions which are exceptions to the default splittability. Returns: splittable_dims and unsplittable_dims, two frozensets of names of dimensions (strings) Raises: ValueError: default_splittability is not one of "splittable" or "unsplittable". """ default_dims = set() exception_dims = set() if exception_dims_iterable: exception_dims.update(exception_dims_iterable) for t in itertools.chain(self.inputs, self.outputs): for dim_name in t.shape.dimension_names: if dim_name not in exception_dims: default_dims.add(dim_name) if default_splittability == "splittable": return frozenset(default_dims), frozenset(exception_dims) elif default_splittability == "unsplittable": return frozenset(exception_dims), frozenset(default_dims) else: raise ValueError("default_splittability should be either \"splittable\" " "or \"unsplittable\" but was {}" .format(default_splittability))
python
def _initialize_splittable_and_unsplittable_dims( self, default_splittability, exception_dims_iterable=None): """Initializer for splittable_dims and unsplittable_dims. Helper method to categorize all dimensions in the input/output tensors as either splittable or unsplittable. Args: default_splittability: a string which is either "splittable" or "unsplittable". exception_dims_iterable: an optional iterable of names of dimensions which are exceptions to the default splittability. Returns: splittable_dims and unsplittable_dims, two frozensets of names of dimensions (strings) Raises: ValueError: default_splittability is not one of "splittable" or "unsplittable". """ default_dims = set() exception_dims = set() if exception_dims_iterable: exception_dims.update(exception_dims_iterable) for t in itertools.chain(self.inputs, self.outputs): for dim_name in t.shape.dimension_names: if dim_name not in exception_dims: default_dims.add(dim_name) if default_splittability == "splittable": return frozenset(default_dims), frozenset(exception_dims) elif default_splittability == "unsplittable": return frozenset(exception_dims), frozenset(default_dims) else: raise ValueError("default_splittability should be either \"splittable\" " "or \"unsplittable\" but was {}" .format(default_splittability))
[ "def", "_initialize_splittable_and_unsplittable_dims", "(", "self", ",", "default_splittability", ",", "exception_dims_iterable", "=", "None", ")", ":", "default_dims", "=", "set", "(", ")", "exception_dims", "=", "set", "(", ")", "if", "exception_dims_iterable", ":",...
Initializer for splittable_dims and unsplittable_dims. Helper method to categorize all dimensions in the input/output tensors as either splittable or unsplittable. Args: default_splittability: a string which is either "splittable" or "unsplittable". exception_dims_iterable: an optional iterable of names of dimensions which are exceptions to the default splittability. Returns: splittable_dims and unsplittable_dims, two frozensets of names of dimensions (strings) Raises: ValueError: default_splittability is not one of "splittable" or "unsplittable".
[ "Initializer", "for", "splittable_dims", "and", "unsplittable_dims", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1448-L1486
train
222,773
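The default-plus-exceptions partition is simple set arithmetic; an illustrative rendering with made-up dimension names:

all_dims = {"batch", "length", "d_model", "stacked"}
exceptions = {"stacked"}
default = "splittable"
if default == "splittable":
  splittable, unsplittable = all_dims - exceptions, exceptions
else:
  splittable, unsplittable = exceptions, all_dims - exceptions
print(sorted(splittable), sorted(unsplittable))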
tensorflow/mesh
mesh_tensorflow/ops.py
ReshapeOperation.lower
def lower(self, lowering): """Lower the ReshapeOperation. Reshaping can require collective communication between processors. We haven't yet implemented all possible reshapes. We try to handle the common cases here - otherwise we raise a NotImplementedError. Args: lowering: a Lowering Raises: NotImplementedError: if we haven't covered this case """ old_shape = self.inputs[0].shape new_shape = self.outputs[0].shape mesh_impl = lowering.mesh_impl(self) slices = lowering.tensors[self.inputs[0]] mesh_axis_to_cumprod_old = mesh_impl.mesh_axis_to_cumprod(old_shape) mesh_axis_to_cumprod_new = mesh_impl.mesh_axis_to_cumprod(new_shape) # Figure out what needs to be done for different mesh-axes mesh_axes_allsplit = [] mesh_axes_allconcat = [] mesh_axes_alltoall = [] for mesh_axis, (old_cumprod, new_cumprod) in enumerate( zip(mesh_axis_to_cumprod_old, mesh_axis_to_cumprod_new)): if new_cumprod != old_cumprod: if old_cumprod is None: # split in new layout but not in old layout - we need an allsplit mesh_axes_allsplit.append(mesh_axis) elif new_cumprod is None: # split in old layout but not in new layout - we need an allconcat mesh_axes_allconcat.append(mesh_axis) else: # split differently in old and new layouts - we need an alltoall mesh_axes_alltoall.append(mesh_axis) laid_out_size = mesh_impl.laid_out_size(old_shape) for mesh_axis in mesh_axes_allsplit: tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_new[mesh_axis]) if tensor_axis is None: # TODO(noam): try to handle this case raise NotImplementedError( "Try first reshaping to insert a new tf dimension," " then changing layout. input_shape=%s output_shape=%s" % (self.inputs[0].shape, self.outputs[0].shape)) slices = mesh_impl.allsplit(slices, mesh_axis, tensor_axis) laid_out_size //= mesh_impl.shape[mesh_axis].size for mesh_axis in mesh_axes_alltoall: split_tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_new[mesh_axis]) if split_tensor_axis is None: # TODO(noam): try to handle this case raise NotImplementedError( "Try first reshaping to insert a new tf dimension," " then changing layout. input_shape=%s output_shape=%s" % (self.inputs[0].shape, self.outputs[0].shape)) concat_tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_old[mesh_axis]) assert concat_tensor_axis is not None slices = mesh_impl.alltoall( slices, mesh_axis, split_tensor_axis, concat_tensor_axis) lowering.add_counter( "alltoall/%s/reshape_op" % mesh_axis, laid_out_size) for mesh_axis in mesh_axes_allconcat: tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_old[mesh_axis]) assert tensor_axis is not None slices = mesh_impl.allconcat(slices, mesh_axis, tensor_axis) laid_out_size *= mesh_impl.shape[mesh_axis].size lowering.add_counter( "allconcat/%s/reshape_op" % mesh_axis, laid_out_size) # now reshape the slices old_slice_shape = mesh_impl.slice_shape(old_shape) new_slice_shape = mesh_impl.slice_shape(new_shape) if new_slice_shape != old_slice_shape: def reshape_fn(x): return tf.reshape(x, new_slice_shape) slices = mesh_impl.slicewise(reshape_fn, slices) lowering.set_tensor_lowering(self.outputs[0], slices)
python
def lower(self, lowering): """Lower the ReshapeOperation. Reshaping can require collective communication between processors. We haven't yet implemented all possible reshapes. We try to handle the common cases here - otherwise we raise a NotImplementedError. Args: lowering: a Lowering Raises: NotImplementedError: if we haven't covered this case """ old_shape = self.inputs[0].shape new_shape = self.outputs[0].shape mesh_impl = lowering.mesh_impl(self) slices = lowering.tensors[self.inputs[0]] mesh_axis_to_cumprod_old = mesh_impl.mesh_axis_to_cumprod(old_shape) mesh_axis_to_cumprod_new = mesh_impl.mesh_axis_to_cumprod(new_shape) # Figure out what needs to be done for different mesh-axes mesh_axes_allsplit = [] mesh_axes_allconcat = [] mesh_axes_alltoall = [] for mesh_axis, (old_cumprod, new_cumprod) in enumerate( zip(mesh_axis_to_cumprod_old, mesh_axis_to_cumprod_new)): if new_cumprod != old_cumprod: if old_cumprod is None: # split in new layout but not in old layout - we need an allsplit mesh_axes_allsplit.append(mesh_axis) elif new_cumprod is None: # split in old layout but not in new layout - we need an allconcat mesh_axes_allconcat.append(mesh_axis) else: # split differently in old and new layouts - we need an alltoall mesh_axes_alltoall.append(mesh_axis) laid_out_size = mesh_impl.laid_out_size(old_shape) for mesh_axis in mesh_axes_allsplit: tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_new[mesh_axis]) if tensor_axis is None: # TODO(noam): try to handle this case raise NotImplementedError( "Try first reshaping to insert a new tf dimension," " then changing layout. input_shape=%s output_shape=%s" % (self.inputs[0].shape, self.outputs[0].shape)) slices = mesh_impl.allsplit(slices, mesh_axis, tensor_axis) laid_out_size //= mesh_impl.shape[mesh_axis].size for mesh_axis in mesh_axes_alltoall: split_tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_new[mesh_axis]) if split_tensor_axis is None: # TODO(noam): try to handle this case raise NotImplementedError( "Try first reshaping to insert a new tf dimension," " then changing layout. input_shape=%s output_shape=%s" % (self.inputs[0].shape, self.outputs[0].shape)) concat_tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_old[mesh_axis]) assert concat_tensor_axis is not None slices = mesh_impl.alltoall( slices, mesh_axis, split_tensor_axis, concat_tensor_axis) lowering.add_counter( "alltoall/%s/reshape_op" % mesh_axis, laid_out_size) for mesh_axis in mesh_axes_allconcat: tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_old[mesh_axis]) assert tensor_axis is not None slices = mesh_impl.allconcat(slices, mesh_axis, tensor_axis) laid_out_size *= mesh_impl.shape[mesh_axis].size lowering.add_counter( "allconcat/%s/reshape_op" % mesh_axis, laid_out_size) # now reshape the slices old_slice_shape = mesh_impl.slice_shape(old_shape) new_slice_shape = mesh_impl.slice_shape(new_shape) if new_slice_shape != old_slice_shape: def reshape_fn(x): return tf.reshape(x, new_slice_shape) slices = mesh_impl.slicewise(reshape_fn, slices) lowering.set_tensor_lowering(self.outputs[0], slices)
[ "def", "lower", "(", "self", ",", "lowering", ")", ":", "old_shape", "=", "self", ".", "inputs", "[", "0", "]", ".", "shape", "new_shape", "=", "self", ".", "outputs", "[", "0", "]", ".", "shape", "mesh_impl", "=", "lowering", ".", "mesh_impl", "(", ...
Lower the ReshapeOperation. Reshaping can require collective communication between processors. We haven't yet implemented all possible reshapes. We try to handle the common cases here - otherwise we raise a NotImplementedError. Args: lowering: a Lowering Raises: NotImplementedError: if we haven't covered this case
[ "Lower", "the", "ReshapeOperation", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L3478-L3558
train
222,774
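The heart of lower() is the per-mesh-axis triage into allsplit/allconcat/alltoall. A minimal sketch of just that step, with the cumprods as plain lists in which None means "tensor is not split along this mesh axis" (standalone; the real values come from mesh_impl.mesh_axis_to_cumprod):

def categorize_mesh_axes(cumprod_old, cumprod_new):
    allsplit, allconcat, alltoall = [], [], []
    for mesh_axis, (old, new) in enumerate(zip(cumprod_old, cumprod_new)):
        if new != old:
            if old is None:
                allsplit.append(mesh_axis)    # newly split: scatter
            elif new is None:
                allconcat.append(mesh_axis)   # no longer split: gather
            else:
                alltoall.append(mesh_axis)    # split differently: alltoall
    return allsplit, allconcat, alltoall

# Axis 0 becomes split, axis 1 is unchanged, axis 2 changes split position:
assert categorize_mesh_axes([None, 1, 8], [4, 1, 2]) == ([0], [], [2])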
tensorflow/mesh
mesh_tensorflow/transformer/utils.py
get_variable_dtype
def get_variable_dtype( master_dtype=tf.bfloat16, slice_dtype=tf.float32, activation_dtype=tf.float32): """Datatypes to use for the run. Args: master_dtype: string, datatype for checkpoints keep this the same between training and eval/inference slice_dtype: string, datatype for variables in memory must be tf.float32 for training activation_dtype: string, datatype for activations less memory usage if tf.bfloat16 but possible numerical issues Returns: a mtf.VariableDType """ return mtf.VariableDType( master_dtype=tf.as_dtype(master_dtype), slice_dtype=tf.as_dtype(slice_dtype), activation_dtype=tf.as_dtype(activation_dtype))
python
def get_variable_dtype( master_dtype=tf.bfloat16, slice_dtype=tf.float32, activation_dtype=tf.float32): """Datatypes to use for the run. Args: master_dtype: string, datatype for checkpoints keep this the same between training and eval/inference slice_dtype: string, datatype for variables in memory must be tf.float32 for training activation_dtype: string, datatype for activations less memory usage if tf.bfloat16 but possible numerical issues Returns: a mtf.VariableDType """ return mtf.VariableDType( master_dtype=tf.as_dtype(master_dtype), slice_dtype=tf.as_dtype(slice_dtype), activation_dtype=tf.as_dtype(activation_dtype))
[ "def", "get_variable_dtype", "(", "master_dtype", "=", "tf", ".", "bfloat16", ",", "slice_dtype", "=", "tf", ".", "float32", ",", "activation_dtype", "=", "tf", ".", "float32", ")", ":", "return", "mtf", ".", "VariableDType", "(", "master_dtype", "=", "tf", ...
Datatypes to use for the run. Args: master_dtype: string, datatype for checkpoints keep this the same between training and eval/inference slice_dtype: string, datatype for variables in memory must be tf.float32 for training activation_dtype: string, datatype for activations less memory usage if tf.bfloat16 but possible numerical issues Returns: a mtf.VariableDType
[ "Datatypes", "to", "use", "for", "the", "run", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/utils.py#L38-L57
train
222,775
tensorflow/mesh
mesh_tensorflow/transformer/utils.py
build_model
def build_model(model_type="bitransformer", input_vocab_size=gin.REQUIRED, output_vocab_size=gin.REQUIRED, layout_rules=None, mesh_shape=None): """Build a transformer model. Currently, three types of models are supported: "bitransformer": The traditional encoder-decoder architecture from "attention is all you need". Requires a non-text2self dataset. "lm": an autoregressive language model (one layer stack). This is similar to the decoder part of a bitransformer, but with no attention over an encoder, since there is no encoder. Requires a text2self dataset, with targets, but no inputs. "aligned": a non-autoregressive single-stack model (like BERT). Requires a non-text2self dataset with inputs and targets. The targets are aligned with the inputs. Args: model_type: a string - "bitransformer", "lm" or "aligned" input_vocab_size: an integer output_vocab_size: an integer layout_rules: optional - an input to mtf.convert_to_layout_rules mesh_shape: optional - an input to mtf.convert_to_shape Returns: a Unitransformer or Bitransformer """ if model_type == "bitransformer": return transformer.make_bitransformer( input_vocab_size=input_vocab_size, output_vocab_size=output_vocab_size, mesh_shape=mesh_shape, layout=layout_rules) elif model_type == "lm" or model_type == "aligned": return transformer.Unitransformer( autoregressive=model_type == "lm", layer_stack=transformer.make_layer_stack(), input_vocab_size=input_vocab_size, output_vocab_size=output_vocab_size, mesh_shape=mesh_shape, layout=layout_rules) else: raise ValueError("unknown model_type")
python
def build_model(model_type="bitransformer", input_vocab_size=gin.REQUIRED, output_vocab_size=gin.REQUIRED, layout_rules=None, mesh_shape=None): """Build a transformer model. Currently, three types of models are supported: "bitransformer": The traditional encoder-decoder architecture from "attention is all you need". Requires a non-text2self dataset. "lm": an autoregressive language model (one layer stack). This is similar to the decoder part of a bitransformer, but with no attention over an encoder, since there is no encoder. Requires a text2self dataset, with targets, but no inputs. "aligned": a non-autoregressive single-stack model (like BERT). Requires a non-text2self dataset with inputs and targets. The targets are aligned with the inputs. Args: model_type: a string - "bitransformer", "lm" or "aligned" input_vocab_size: an integer output_vocab_size: an integer layout_rules: optional - an input to mtf.convert_to_layout_rules mesh_shape: optional - an input to mtf.convert_to_shape Returns: a Unitransformer or Bitransformer """ if model_type == "bitransformer": return transformer.make_bitransformer( input_vocab_size=input_vocab_size, output_vocab_size=output_vocab_size, mesh_shape=mesh_shape, layout=layout_rules) elif model_type == "lm" or model_type == "aligned": return transformer.Unitransformer( autoregressive=model_type == "lm", layer_stack=transformer.make_layer_stack(), input_vocab_size=input_vocab_size, output_vocab_size=output_vocab_size, mesh_shape=mesh_shape, layout=layout_rules) else: raise ValueError("unknown model_type")
[ "def", "build_model", "(", "model_type", "=", "\"bitransformer\"", ",", "input_vocab_size", "=", "gin", ".", "REQUIRED", ",", "output_vocab_size", "=", "gin", ".", "REQUIRED", ",", "layout_rules", "=", "None", ",", "mesh_shape", "=", "None", ")", ":", "if", ...
Build a transformer model. Currently, three types of models are supported: "bitransformer": The traditional encoder-decoder architecture from "attention is all you need". Requires a non-text2self dataset. "lm": an autoregressive language model (one layer stack). This is similar to the decoder part of a bitransformer, but with no attention over an encoder, since there is no encoder. Requires a text2self dataset, with targets, but no inputs. "aligned": a non-autoregressive single-stack model (like BERT). Requires a non-text2self dataset with inputs and targets. The targets are aligned with the inputs. Args: model_type: a string - "bitransformer", "lm" or "aligned" input_vocab_size: an integer output_vocab_size: an integer layout_rules: optional - an input to mtf.convert_to_layout_rules mesh_shape: optional - an input to mtf.convert_to_shape Returns: a Unitransformer or Bitransformer
[ "Build", "a", "transformer", "model", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/utils.py#L94-L139
train
222,776
tensorflow/mesh
mesh_tensorflow/transformer/utils.py
decode_from_file
def decode_from_file(estimator, vocabulary, model_type, batch_size, sequence_length, checkpoint_path="", input_filename=gin.REQUIRED, output_filename=gin.REQUIRED, eos_id=1): """Decode from a text file. Args: estimator: a TPUEstimator vocabulary: a mtf.transformer.vocabulary.Vocabulary model_type: a string batch_size: an integer sequence_length: an integer (maximum decode length) checkpoint_path: an optional string input_filename: a string output_filename: a string eos_id: EOS id """ with tf.gfile.Open(input_filename) as f: text = f.read() records = text.split("\n") inputs = [record.strip() for record in records] # Strip the last empty line. if not inputs[-1]: inputs.pop() n = len(inputs) # encode all inputs all_input_ids = [] for line in inputs: ids = inputs_vocabulary(vocabulary).encode(line.strip()) if model_type != "lm": # for text2self problems, the inputs represent a partial sequence # to be continued, and should not be terminated by EOS. # for sequence-to-sequence problems, the input needs to be EOS-terminated ids += [eos_id] if len(ids) > sequence_length: ids = ids[:sequence_length] else: ids.extend([0] * (sequence_length - len(ids))) all_input_ids.append(ids) # pad to make an integral number of batches all_input_ids.extend([all_input_ids[0]] * (-n % batch_size)) padded_n = len(all_input_ids) all_input_ids = np.array(all_input_ids, dtype=np.int32) def input_fn(params): del params dataset = tf.data.Dataset.from_tensor_slices({"inputs": all_input_ids}) dataset = dataset.batch(batch_size, drop_remainder=True) return dataset result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path) vocab_size = targets_vocabulary(vocabulary).vocab_size decodes = [] for i, result in enumerate(result_iter): output_ids = clean_decodes(list(result["outputs"]), vocab_size) output_string = targets_vocabulary(vocabulary).decode( [int(x) for x in output_ids]) decodes.append(output_string) if i & (i - 1) == 0: if i < len(inputs): # LOG every power of 2, don't log if it's padded input i >= len(inputs) tf.logging.info("decode %d input = %s" % (i, inputs[i])) tf.logging.info(" output = %s" % output_string) # BUG WORKAROUND - on TF1.13 and earlier, the output for each batch is # repeated a number of times equal to the number of cores. if len(decodes) == padded_n: tf.logging.info("number of decodes matches number of inputs") elif len(decodes) % padded_n == 0: num_cores = len(decodes) // padded_n tf.logging.info("output is repeated num_cores times - removing extras") def keep(i): return i % (batch_size * num_cores) < batch_size decodes = [d for i, d in enumerate(decodes) if keep(i)] else: raise ValueError("unexpected number of outputs") output_file = tf.gfile.Open(output_filename, "w") decodes = decodes[:n] for d in decodes: output_file.write(d) output_file.write("\n") output_file.close()
python
def decode_from_file(estimator, vocabulary, model_type, batch_size, sequence_length, checkpoint_path="", input_filename=gin.REQUIRED, output_filename=gin.REQUIRED, eos_id=1): """Decode from a text file. Args: estimator: a TPUEstimator vocabulary: a mtf.transformer.vocabulary.Vocabulary model_type: a string batch_size: an integer sequence_length: an integer (maximum decode length) checkpoint_path: an optional string input_filename: a string output_filename: a string eos_id: EOS id """ with tf.gfile.Open(input_filename) as f: text = f.read() records = text.split("\n") inputs = [record.strip() for record in records] # Strip the last empty line. if not inputs[-1]: inputs.pop() n = len(inputs) # encode all inputs all_input_ids = [] for line in inputs: ids = inputs_vocabulary(vocabulary).encode(line.strip()) if model_type != "lm": # for text2self problems, the inputs represent a partial sequence # to be continued, and should not be terminated by EOS. # for sequence-to-sequence problems, the input needs to be EOS-terminated ids += [eos_id] if len(ids) > sequence_length: ids = ids[:sequence_length] else: ids.extend([0] * (sequence_length - len(ids))) all_input_ids.append(ids) # pad to make an integral number of batches all_input_ids.extend([all_input_ids[0]] * (-n % batch_size)) padded_n = len(all_input_ids) all_input_ids = np.array(all_input_ids, dtype=np.int32) def input_fn(params): del params dataset = tf.data.Dataset.from_tensor_slices({"inputs": all_input_ids}) dataset = dataset.batch(batch_size, drop_remainder=True) return dataset result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path) vocab_size = targets_vocabulary(vocabulary).vocab_size decodes = [] for i, result in enumerate(result_iter): output_ids = clean_decodes(list(result["outputs"]), vocab_size) output_string = targets_vocabulary(vocabulary).decode( [int(x) for x in output_ids]) decodes.append(output_string) if i & (i - 1) == 0: if i < len(inputs): # LOG every power of 2, don't log if it's padded input i >= len(inputs) tf.logging.info("decode %d input = %s" % (i, inputs[i])) tf.logging.info(" output = %s" % output_string) # BUG WORKAROUND - on TF1.13 and earlier, the output for each batch is # repeated a number of times equal to the number of cores. if len(decodes) == padded_n: tf.logging.info("number of decodes matches number of inputs") elif len(decodes) % padded_n == 0: num_cores = len(decodes) // padded_n tf.logging.info("output is repeated num_cores times - removing extras") def keep(i): return i % (batch_size * num_cores) < batch_size decodes = [d for i, d in enumerate(decodes) if keep(i)] else: raise ValueError("unexpected number of outputs") output_file = tf.gfile.Open(output_filename, "w") decodes = decodes[:n] for d in decodes: output_file.write(d) output_file.write("\n") output_file.close()
[ "def", "decode_from_file", "(", "estimator", ",", "vocabulary", ",", "model_type", ",", "batch_size", ",", "sequence_length", ",", "checkpoint_path", "=", "\"\"", ",", "input_filename", "=", "gin", ".", "REQUIRED", ",", "output_filename", "=", "gin", ".", "REQUI...
Decode from a text file. Args: estimator: a TPUEstimator vocabulary: a mtf.transformer.vocabulary.Vocabulary model_type: a string batch_size: an integer sequence_length: an integer (maximum decode length) checkpoint_path: an optional string input_filename: a string output_filename: a string eos_id: EOS id
[ "Decode", "from", "a", "text", "file", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/utils.py#L385-L473
train
222,777
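One detail worth isolating is the padding step: all_input_ids.extend([all_input_ids[0]] * (-n % batch_size)) uses Python's negative-modulo to round the input count up to a whole number of batches. A standalone sketch (the helper name is illustrative):

def pad_to_batches(items, batch_size):
    # -n % batch_size is the number of dummy copies of items[0] needed
    # to make len(items) an exact multiple of batch_size.
    n = len(items)
    padded = items + [items[0]] * (-n % batch_size)
    assert len(padded) % batch_size == 0
    return padded

assert len(pad_to_batches(list(range(5)), batch_size=4)) == 8
assert len(pad_to_batches(list(range(8)), batch_size=4)) == 8  # already aligned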
tensorflow/mesh
mesh_tensorflow/transformer/utils.py
clean_decodes
def clean_decodes(ids, vocab_size, eos_id=1): """Stop at EOS or padding or OOV. Args: ids: a list of integers vocab_size: an integer eos_id: EOS id Returns: a list of integers """ ret = [] for i in ids: if i == eos_id: break if i >= vocab_size: break ret.append(int(i)) return ret
python
def clean_decodes(ids, vocab_size, eos_id=1): """Stop at EOS or padding or OOV. Args: ids: a list of integers vocab_size: an integer eos_id: EOS id Returns: a list of integers """ ret = [] for i in ids: if i == eos_id: break if i >= vocab_size: break ret.append(int(i)) return ret
[ "def", "clean_decodes", "(", "ids", ",", "vocab_size", ",", "eos_id", "=", "1", ")", ":", "ret", "=", "[", "]", "for", "i", "in", "ids", ":", "if", "i", "==", "eos_id", ":", "break", "if", "i", ">=", "vocab_size", ":", "break", "ret", ".", "appen...
Stop at EOS or padding or OOV. Args: ids: a list of integers vocab_size: an integer eos_id: EOS id Returns: a list of integers
[ "Stop", "at", "EOS", "or", "padding", "or", "OOV", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/utils.py#L477-L495
train
222,778
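clean_decodes is pure Python and easy to exercise directly. Note that, despite the docstring, the version above only breaks on EOS and out-of-vocabulary ids; padding zeros that precede the EOS are kept. A compact equivalent plus a few checks:

def clean_decodes(ids, vocab_size, eos_id=1):
    ret = []
    for i in ids:
        if i == eos_id or i >= vocab_size:
            break
        ret.append(int(i))
    return ret

assert clean_decodes([5, 7, 1, 9], vocab_size=100) == [5, 7]   # stops at EOS
assert clean_decodes([5, 120, 7], vocab_size=100) == [5]       # stops at OOV
assert clean_decodes([5, 0, 1], vocab_size=100) == [5, 0]      # padding kept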
tensorflow/mesh
mesh_tensorflow/transformer/utils.py
auto_batch_size
def auto_batch_size(sequence_length, mesh_shape, layout_rules, tokens_per_split=2048): """Automatically compute batch size. Args: sequence_length: an integer mesh_shape: an input to mtf.convert_to_shape() layout_rules: an input to mtf.convert_to_layout_rules() tokens_per_split: an integer Returns: an integer """ num_splits = mtf.tensor_dim_to_mesh_dim_size( layout_rules, mesh_shape, mtf.Dimension("batch", 0)) ret = max(1, tokens_per_split // sequence_length) * num_splits tf.logging.info( "AUTO_BATCH_SIZE tokens_per_split=%s num_splits=%s" " sequence_length=%s batch_size=%s" % (tokens_per_split, num_splits, sequence_length, ret)) return ret
python
def auto_batch_size(sequence_length, mesh_shape, layout_rules, tokens_per_split=2048): """Automatically compute batch size. Args: sequence_length: an integer mesh_shape: an input to mtf.convert_to_shape() layout_rules: an input to mtf.convert_to_layout_rules() tokens_per_split: an integer Returns: an integer """ num_splits = mtf.tensor_dim_to_mesh_dim_size( layout_rules, mesh_shape, mtf.Dimension("batch", 0)) ret = max(1, tokens_per_split // sequence_length) * num_splits tf.logging.info( "AUTO_BATCH_SIZE tokens_per_split=%s num_splits=%s" " sequence_length=%s batch_size=%s" % (tokens_per_split, num_splits, sequence_length, ret)) return ret
[ "def", "auto_batch_size", "(", "sequence_length", ",", "mesh_shape", ",", "layout_rules", ",", "tokens_per_split", "=", "2048", ")", ":", "num_splits", "=", "mtf", ".", "tensor_dim_to_mesh_dim_size", "(", "layout_rules", ",", "mesh_shape", ",", "mtf", ".", "Dimens...
Automatically compute batch size. Args: sequence_length: an integer mesh_shape: an input to mtf.convert_to_shape() layout_rules: an input to mtf.convert_to_layout_rules() tokens_per_split: an integer Returns: an integer
[ "Automatically", "compute", "batch", "size", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/utils.py#L499-L520
train
222,779
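The arithmetic is just max(1, tokens_per_split // sequence_length) * num_splits. A standalone sketch with num_splits passed in directly (in the real function it comes from mtf.tensor_dim_to_mesh_dim_size on the "batch" dimension):

def auto_batch_size_sketch(sequence_length, num_splits, tokens_per_split=2048):
    # At least one example per split, more if sequences are short.
    return max(1, tokens_per_split // sequence_length) * num_splits

assert auto_batch_size_sketch(512, num_splits=8) == 32   # 4 examples per split
assert auto_batch_size_sketch(4096, num_splits=8) == 8   # floor at 1 per split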
tensorflow/mesh
mesh_tensorflow/transformer/utils.py
evaluate
def evaluate(estimator, eval_args): """Runs evaluation on the latest model checkpoint & logs to tensorboard. Args: estimator: A tf.Estimator object. eval_args: Dictionary of {eval_name: (input_fn, eval_steps)} where eval_name is the name of the evaluation set, e.g. "train" or "val", input_fn is an input function returning a tuple (features, labels), and eval_steps is the number of steps for which to evaluate the model. If None, evaluates until input_fn raises an end-of-input exception. Returns: A dict of metric values from the evaluation. May be empty, e.g. if the training job has not yet saved a checkpoint or the checkpoint is deleted by the time the TPU worker initializes. """ values = {} # Default return value if evaluation fails. checkpoint_path = estimator.latest_checkpoint() if not checkpoint_path: # This is expected if the training job has not yet saved a checkpoint. return values tf.logging.info("Starting evaluation on checkpoint %s", checkpoint_path) for eval_name in eval_args: input_fn, eval_steps = eval_args[eval_name] metric_values = estimator.evaluate( input_fn, steps=eval_steps, name=eval_name, checkpoint_path=checkpoint_path) for key, val in metric_values.items(): values[eval_name + "/" + key] = val tf.logging.info(values) return values
python
def evaluate(estimator, eval_args): """Runs evaluation on the latest model checkpoint & logs to tensorboard. Args: estimator: A tf.Estimator object. eval_args: Dictionary of {eval_name: (input_fn, eval_steps)} where eval_name is the name of the evaluation set, e.g. "train" or "val", input_fn is an input function returning a tuple (features, labels), and eval_steps is the number of steps for which to evaluate the model. If None, evaluates until input_fn raises an end-of-input exception. Returns: A dict of metric values from the evaluation. May be empty, e.g. if the training job has not yet saved a checkpoint or the checkpoint is deleted by the time the TPU worker initializes. """ values = {} # Default return value if evaluation fails. checkpoint_path = estimator.latest_checkpoint() if not checkpoint_path: # This is expected if the training job has not yet saved a checkpoint. return values tf.logging.info("Starting evaluation on checkpoint %s", checkpoint_path) for eval_name in eval_args: input_fn, eval_steps = eval_args[eval_name] metric_values = estimator.evaluate( input_fn, steps=eval_steps, name=eval_name, checkpoint_path=checkpoint_path) for key, val in metric_values.items(): values[eval_name + "/" + key] = val tf.logging.info(values) return values
[ "def", "evaluate", "(", "estimator", ",", "eval_args", ")", ":", "values", "=", "{", "}", "# Default return value if evaluation fails.", "checkpoint_path", "=", "estimator", ".", "latest_checkpoint", "(", ")", "if", "not", "checkpoint_path", ":", "# This is expected i...
Runs evaluation on the latest model checkpoint & logs to tensorboard. Args: estimator: A tf.Estimator object. eval_args: Dictionary of {eval_name: (input_fn, eval_steps)} where eval_name is the name of the evaluation set, e.g. "train" or "val", input_fn is an input function returning a tuple (features, labels), and eval_steps is the number of steps for which to evaluate the model. If None, evaluates until input_fn raises an end-of-input exception. Returns: A dict of metric values from the evaluation. May be empty, e.g. if the training job has not yet saved a checkpoint or the checkpoint is deleted by the time the TPU worker initializes.
[ "Runs", "evaluation", "on", "the", "latest", "model", "checkpoint", "&", "logs", "to", "tensorboard", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/utils.py#L715-L750
train
222,780
tensorflow/mesh
mesh_tensorflow/simd_mesh_impl.py
_ring_2d
def _ring_2d(m, n): """Ring-order of an mxn mesh. Args: m: an integer n: an integer Returns: a list of mxn pairs """ if m == 1: return [(0, i) for i in range(n)] if n == 1: return [(i, 0) for i in range(m)] if m % 2 != 0: tf.logging.warning("Odd dimension") return [(i % m, i // m) for i in range(n * m)] ret = [(0, 0)] for i in range(m // 2): for j in range(1, n): ret.append((2 * i, j)) for j in range(n-1, 0, -1): ret.append((2 * i + 1, j)) for i in range(m-1, 0, -1): ret.append((i, 0)) return ret
python
def _ring_2d(m, n): """Ring-order of an mxn mesh. Args: m: an integer n: an integer Returns: a list of mxn pairs """ if m == 1: return [(0, i) for i in range(n)] if n == 1: return [(i, 0) for i in range(m)] if m % 2 != 0: tf.logging.warning("Odd dimension") return [(i % m, i // m) for i in range(n * m)] ret = [(0, 0)] for i in range(m // 2): for j in range(1, n): ret.append((2 * i, j)) for j in range(n-1, 0, -1): ret.append((2 * i + 1, j)) for i in range(m-1, 0, -1): ret.append((i, 0)) return ret
[ "def", "_ring_2d", "(", "m", ",", "n", ")", ":", "if", "m", "==", "1", ":", "return", "[", "(", "0", ",", "i", ")", "for", "i", "in", "range", "(", "n", ")", "]", "if", "n", "==", "1", ":", "return", "[", "(", "i", ",", "0", ")", "for",...
Ring-order of an mxn mesh. Args: m: an integer n: an integer Returns: a list of mxn pairs
[ "Ring", "-", "order", "of", "a", "mxn", "mesh", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/simd_mesh_impl.py#L568-L592
train
222,781
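With _ring_2d as defined above, the returned order visits every chip of the grid exactly once, and (for even m) each step, including the wraparound back to the start, moves to a grid neighbor:

ring = _ring_2d(2, 3)
assert ring == [(0, 0), (0, 1), (0, 2), (1, 2), (1, 1), (1, 0)]
for (a, b), (c, d) in zip(ring, ring[1:] + ring[:1]):
    assert abs(a - c) + abs(b - d) == 1   # Manhattan distance 1 per step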
tensorflow/mesh
mesh_tensorflow/simd_mesh_impl.py
tile_2d
def tile_2d(physical_shape, tile_shape, outer_name="outer", inner_name="inner", cores_name=None): """2D tiling of a 3d physical mesh. The "outer" mesh dimension corresponds to which tile. The "inner" mesh dimension corresponds to the position within a tile of processors. Optionally, if cores_name is specified, then a 3 dimensional logical mesh is returned, with the third dimension representing the two different cores within a chip. If cores_name is not specified, then the cores-in-a-chip dimension is folded into the inner dimension. TODO(noam): explain this better. Example: tile_2d(physical_shape=[8, 16, 2], tile_shape=[4, 4]) The "inner" dimension has size 4x4x2=32 and corresponds to the position within a 4x4 tile of processors. The "outer" dimension has size 8/4 * 16/4 = 8, and corresponds to the 8 tiles in the mesh. Args: physical_shape: a triple of integers [X, Y, cores] tile_shape: a pair outer_name: a string inner_name: a string cores_name: an optional string Returns: mesh_shape: a mtf.Shape logical_to_physical: a list """ logical_to_physical = [] p0, p1, p2 = physical_shape t0, t1 = tile_shape tile_ring = _ring_2d(t0, t1) tiles_ring = _ring_2d(p0 // t0, p1 // t1) for logical_pnum in range(p0 * p1 * p2): core_on_chip = logical_pnum % p2 logical_chip_num = logical_pnum // p2 logical_pos_in_tile = logical_chip_num % (t0 * t1) logical_tile_num = logical_chip_num // (t0 * t1) tile_i, tile_j = tile_ring[logical_pos_in_tile] tiles_i, tiles_j = tiles_ring[logical_tile_num] physical_pnum = core_on_chip + p2 * ( tile_i * p1 + tile_j + tiles_i * p1 * t0 + tiles_j * t1) logical_to_physical.append(physical_pnum) assert sorted(logical_to_physical) == list(range(p0 * p1 * p2)) tile_size = t0 * t1 * p2 num_tiles = p0 * p1 // (t0 * t1) if cores_name: mesh_shape = mtf.Shape( [mtf.Dimension(outer_name, int(num_tiles)), mtf.Dimension(inner_name, int(t0 * t1)), mtf.Dimension(cores_name, int(p2))]) else: mesh_shape = mtf.Shape( [mtf.Dimension(outer_name, int(num_tiles)), mtf.Dimension(inner_name, int(tile_size))]) return mesh_shape, logical_to_physical
python
def tile_2d(physical_shape, tile_shape, outer_name="outer", inner_name="inner", cores_name=None): """2D tiling of a 3d physical mesh. The "outer" mesh dimension corresponds to which tile. The "inner" mesh dimension corresponds to the position within a tile of processors. Optionally, if cores_name is specified, then a 3 dimensional logical mesh is returned, with the third dimension representing the two different cores within a chip. If cores_name is not specified, then the cores-in-a-chip dimension is folded into the inner dimension. TODO(noam): explain this better. Example: tile_2d(physical_shape=[8, 16, 2], tile_shape=[4, 4]) The "inner" dimension has size 4x4x2=32 and corresponds to the position within a 4x4 tile of processors. The "outer" dimension has size 8/4 * 16/4 = 8, and corresponds to the 8 tiles in the mesh. Args: physical_shape: a triple of integers [X, Y, cores] tile_shape: a pair outer_name: a string inner_name: a string cores_name: an optional string Returns: mesh_shape: a mtf.Shape logical_to_physical: a list """ logical_to_physical = [] p0, p1, p2 = physical_shape t0, t1 = tile_shape tile_ring = _ring_2d(t0, t1) tiles_ring = _ring_2d(p0 // t0, p1 // t1) for logical_pnum in range(p0 * p1 * p2): core_on_chip = logical_pnum % p2 logical_chip_num = logical_pnum // p2 logical_pos_in_tile = logical_chip_num % (t0 * t1) logical_tile_num = logical_chip_num // (t0 * t1) tile_i, tile_j = tile_ring[logical_pos_in_tile] tiles_i, tiles_j = tiles_ring[logical_tile_num] physical_pnum = core_on_chip + p2 * ( tile_i * p1 + tile_j + tiles_i * p1 * t0 + tiles_j * t1) logical_to_physical.append(physical_pnum) assert sorted(logical_to_physical) == list(range(p0 * p1 * p2)) tile_size = t0 * t1 * p2 num_tiles = p0 * p1 // (t0 * t1) if cores_name: mesh_shape = mtf.Shape( [mtf.Dimension(outer_name, int(num_tiles)), mtf.Dimension(inner_name, int(t0 * t1)), mtf.Dimension(cores_name, int(p2))]) else: mesh_shape = mtf.Shape( [mtf.Dimension(outer_name, int(num_tiles)), mtf.Dimension(inner_name, int(tile_size))]) return mesh_shape, logical_to_physical
[ "def", "tile_2d", "(", "physical_shape", ",", "tile_shape", ",", "outer_name", "=", "\"outer\"", ",", "inner_name", "=", "\"inner\"", ",", "cores_name", "=", "None", ")", ":", "logical_to_physical", "=", "[", "]", "p0", ",", "p1", ",", "p2", "=", "physical...
2D tiling of a 3d physical mesh. The "outer" mesh dimension corresponds to which tile. The "inner" mesh dimension corresponds to the position within a tile of processors. Optionally, if cores_name is specified, then a 3 dimensional logical mesh is returned, with the third dimension representing the two different cores within a chip. If cores_name is not specified, then the cores-in-a-chip dimension is folded into the inner dimension. TODO(noam): explain this better. Example: tile_2d(physical_shape=[8, 16, 2], tile_shape=[4, 4]) The "inner" dimension has size 4x4x2=32 and corresponds to the position within a 4x4 tile of processors. The "outer" dimension has size 8/4 * 16/4 = 8, and corresponds to the 8 tiles in the mesh. Args: physical_shape: a triple of integers [X, Y, cores] tile_shape: a pair outer_name: a string inner_name: a string cores_name: an optional string Returns: mesh_shape: a mtf.Shape logical_to_physical: a list
[ "2D", "tiling", "of", "a", "3d", "physical", "mesh", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/simd_mesh_impl.py#L595-L661
train
222,782
tensorflow/mesh
mesh_tensorflow/simd_mesh_impl.py
SimdMeshImpl.slice
def slice(self, tf_tensor, tensor_shape): """Slice out the corresponding part of tensor given the pnum variable.""" tensor_layout = self.tensor_layout(tensor_shape) if tensor_layout.is_fully_replicated: return self.LaidOutTensor([tf_tensor]) else: slice_shape = self.slice_shape(tensor_shape) slice_begins = [ self.slice_begin(tensor_shape, pnum) for pnum in xrange(self.size) ] slice_begins_tensor = tf.stack(slice_begins) # slice on source device selected_slice_begin = tf.gather(slice_begins_tensor, self.pnum_tensor) return self.LaidOutTensor( [tf.slice(tf_tensor, selected_slice_begin, slice_shape)])
python
def slice(self, tf_tensor, tensor_shape): """Slice out the corresponding part of tensor given the pnum variable.""" tensor_layout = self.tensor_layout(tensor_shape) if tensor_layout.is_fully_replicated: return self.LaidOutTensor([tf_tensor]) else: slice_shape = self.slice_shape(tensor_shape) slice_begins = [ self.slice_begin(tensor_shape, pnum) for pnum in xrange(self.size) ] slice_begins_tensor = tf.stack(slice_begins) # slice on source device selected_slice_begin = tf.gather(slice_begins_tensor, self.pnum_tensor) return self.LaidOutTensor( [tf.slice(tf_tensor, selected_slice_begin, slice_shape)])
[ "def", "slice", "(", "self", ",", "tf_tensor", ",", "tensor_shape", ")", ":", "tensor_layout", "=", "self", ".", "tensor_layout", "(", "tensor_shape", ")", "if", "tensor_layout", ".", "is_fully_replicated", ":", "return", "self", ".", "LaidOutTensor", "(", "["...
Slice out the corresponding part of tensor given the pnum variable.
[ "Slice", "out", "the", "corresponding", "part", "of", "tensor", "given", "the", "pnum", "variable", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/simd_mesh_impl.py#L443-L458
train
222,783
tensorflow/mesh
examples/mnist_dataset.py
read32
def read32(bytestream): """Read 4 bytes from bytestream as an unsigned 32-bit integer.""" dt = np.dtype(np.uint32).newbyteorder('>') return np.frombuffer(bytestream.read(4), dtype=dt)[0]
python
def read32(bytestream): """Read 4 bytes from bytestream as an unsigned 32-bit integer.""" dt = np.dtype(np.uint32).newbyteorder('>') return np.frombuffer(bytestream.read(4), dtype=dt)[0]
[ "def", "read32", "(", "bytestream", ")", ":", "dt", "=", "np", ".", "dtype", "(", "np", ".", "uint32", ")", ".", "newbyteorder", "(", "'>'", ")", "return", "np", ".", "frombuffer", "(", "bytestream", ".", "read", "(", "4", ")", ",", "dtype", "=", ...
Read 4 bytes from bytestream as an unsigned 32-bit integer.
[ "Read", "4", "bytes", "from", "bytestream", "as", "an", "unsigned", "32", "-", "bit", "integer", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/examples/mnist_dataset.py#L45-L48
train
222,784
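A quick standalone check of read32 against a hand-built big-endian stream (2051 is the MNIST image-file magic number used by check_image_file_header below):

import io
import numpy as np

def read32(bytestream):
    dt = np.dtype(np.uint32).newbyteorder('>')  # big-endian uint32
    return np.frombuffer(bytestream.read(4), dtype=dt)[0]

stream = io.BytesIO((2051).to_bytes(4, 'big') + (60000).to_bytes(4, 'big'))
assert read32(stream) == 2051    # magic
assert read32(stream) == 60000   # num_images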
tensorflow/mesh
examples/mnist_dataset.py
check_image_file_header
def check_image_file_header(filename): """Validate that filename corresponds to images for the MNIST dataset.""" with tf.gfile.Open(filename, 'rb') as f: magic = read32(f) read32(f) # num_images, unused rows = read32(f) cols = read32(f) if magic != 2051: raise ValueError('Invalid magic number %d in MNIST file %s' % (magic, f.name)) if rows != 28 or cols != 28: raise ValueError( 'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' % (f.name, rows, cols))
python
def check_image_file_header(filename): """Validate that filename corresponds to images for the MNIST dataset.""" with tf.gfile.Open(filename, 'rb') as f: magic = read32(f) read32(f) # num_images, unused rows = read32(f) cols = read32(f) if magic != 2051: raise ValueError('Invalid magic number %d in MNIST file %s' % (magic, f.name)) if rows != 28 or cols != 28: raise ValueError( 'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' % (f.name, rows, cols))
[ "def", "check_image_file_header", "(", "filename", ")", ":", "with", "tf", ".", "gfile", ".", "Open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "magic", "=", "read32", "(", "f", ")", "read32", "(", "f", ")", "# num_images, unused", "rows", "="...
Validate that filename corresponds to images for the MNIST dataset.
[ "Validate", "that", "filename", "corresponds", "to", "images", "for", "the", "MNIST", "dataset", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/examples/mnist_dataset.py#L51-L64
train
222,785
tensorflow/mesh
examples/mnist_dataset.py
check_labels_file_header
def check_labels_file_header(filename): """Validate that filename corresponds to labels for the MNIST dataset.""" with tf.gfile.Open(filename, 'rb') as f: magic = read32(f) read32(f) # num_items, unused if magic != 2049: raise ValueError('Invalid magic number %d in MNIST file %s' % (magic, f.name))
python
def check_labels_file_header(filename): """Validate that filename corresponds to labels for the MNIST dataset.""" with tf.gfile.Open(filename, 'rb') as f: magic = read32(f) read32(f) # num_items, unused if magic != 2049: raise ValueError('Invalid magic number %d in MNIST file %s' % (magic, f.name))
[ "def", "check_labels_file_header", "(", "filename", ")", ":", "with", "tf", ".", "gfile", ".", "Open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "magic", "=", "read32", "(", "f", ")", "read32", "(", "f", ")", "# num_items, unused", "if", "magi...
Validate that filename corresponds to labels for the MNIST dataset.
[ "Validate", "that", "filename", "corresponds", "to", "labels", "for", "the", "MNIST", "dataset", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/examples/mnist_dataset.py#L67-L74
train
222,786
tensorflow/mesh
examples/mnist_dataset.py
dataset
def dataset(directory, images_file, labels_file): """Download and parse MNIST dataset.""" images_file = download(directory, images_file) labels_file = download(directory, labels_file) check_image_file_header(images_file) check_labels_file_header(labels_file) def decode_image(image): # Normalize from [0, 255] to [0.0, 1.0] image = tf.decode_raw(image, tf.uint8) image = tf.cast(image, tf.float32) image = tf.reshape(image, [784]) return image / 255.0 def decode_label(label): label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8] label = tf.reshape(label, []) # label is a scalar return tf.to_int32(label) images = tf.data.FixedLengthRecordDataset( images_file, 28 * 28, header_bytes=16).map(decode_image) labels = tf.data.FixedLengthRecordDataset( labels_file, 1, header_bytes=8).map(decode_label) return tf.data.Dataset.zip((images, labels))
python
def dataset(directory, images_file, labels_file): """Download and parse MNIST dataset.""" images_file = download(directory, images_file) labels_file = download(directory, labels_file) check_image_file_header(images_file) check_labels_file_header(labels_file) def decode_image(image): # Normalize from [0, 255] to [0.0, 1.0] image = tf.decode_raw(image, tf.uint8) image = tf.cast(image, tf.float32) image = tf.reshape(image, [784]) return image / 255.0 def decode_label(label): label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8] label = tf.reshape(label, []) # label is a scalar return tf.to_int32(label) images = tf.data.FixedLengthRecordDataset( images_file, 28 * 28, header_bytes=16).map(decode_image) labels = tf.data.FixedLengthRecordDataset( labels_file, 1, header_bytes=8).map(decode_label) return tf.data.Dataset.zip((images, labels))
[ "def", "dataset", "(", "directory", ",", "images_file", ",", "labels_file", ")", ":", "images_file", "=", "download", "(", "directory", ",", "images_file", ")", "labels_file", "=", "download", "(", "directory", ",", "labels_file", ")", "check_image_file_header", ...
Download and parse MNIST dataset.
[ "Download", "and", "parse", "MNIST", "dataset", "." ]
3921196e5e43302e820da0a87329f25d7e2a3016
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/examples/mnist_dataset.py#L95-L120
train
222,787
mattjj/pyhsmm
pyhsmm/util/stats.py
sample_discrete
def sample_discrete(distn,size=[],dtype=np.int32): 'samples from a one-dimensional finite pmf' distn = np.atleast_1d(distn) assert (distn >=0).all() and distn.ndim == 1 if (0 == distn).all(): return np.random.randint(distn.shape[0],size=size) cumvals = np.cumsum(distn) return np.sum(np.array(random(size))[...,na] * cumvals[-1] > cumvals, axis=-1,dtype=dtype)
python
def sample_discrete(distn,size=[],dtype=np.int32): 'samples from a one-dimensional finite pmf' distn = np.atleast_1d(distn) assert (distn >=0).all() and distn.ndim == 1 if (0 == distn).all(): return np.random.randint(distn.shape[0],size=size) cumvals = np.cumsum(distn) return np.sum(np.array(random(size))[...,na] * cumvals[-1] > cumvals, axis=-1,dtype=dtype)
[ "def", "sample_discrete", "(", "distn", ",", "size", "=", "[", "]", ",", "dtype", "=", "np", ".", "int32", ")", ":", "distn", "=", "np", ".", "atleast_1d", "(", "distn", ")", "assert", "(", "distn", ">=", "0", ")", ".", "all", "(", ")", "and", ...
samples from a one-dimensional finite pmf
[ "samples", "from", "a", "one", "-", "dimensional", "finite", "pmf" ]
a9a39c2bfd539048e35877cb13283552eadc24e2
https://github.com/mattjj/pyhsmm/blob/a9a39c2bfd539048e35877cb13283552eadc24e2/pyhsmm/util/stats.py#L116-L123
train
222,788
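A self-contained version plus an empirical sanity check. The record above relies on module-level aliases; here random is np.random.random and na is np.newaxis, which is presumably what the pyhsmm utilities import them as, and the all-zero fallback branch is omitted for brevity:

import numpy as np
na = np.newaxis

def sample_discrete(distn, size=[], dtype=np.int32):
    distn = np.atleast_1d(distn)
    assert (distn >= 0).all() and distn.ndim == 1
    cumvals = np.cumsum(distn)
    # Count how many cumulative values each uniform draw exceeds.
    return np.sum(np.array(np.random.random(size))[..., na] * cumvals[-1] > cumvals,
                  axis=-1, dtype=dtype)

samples = sample_discrete([0.1, 0.2, 0.7], size=10000)
freqs = np.bincount(samples, minlength=3) / 10000.0
print(freqs)  # should be close to [0.1, 0.2, 0.7]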
mattjj/pyhsmm
pyhsmm/models.py
_HMMBase.used_states
def used_states(self): 'a list of the used states in the order they appear' c = itertools.count() canonical_ids = collections.defaultdict(lambda: next(c)) for s in self.states_list: for state in s.stateseq: canonical_ids[state] return list(map(operator.itemgetter(0), sorted(canonical_ids.items(),key=operator.itemgetter(1))))
python
def used_states(self): 'a list of the used states in the order they appear' c = itertools.count() canonical_ids = collections.defaultdict(lambda: next(c)) for s in self.states_list: for state in s.stateseq: canonical_ids[state] return list(map(operator.itemgetter(0), sorted(canonical_ids.items(),key=operator.itemgetter(1))))
[ "def", "used_states", "(", "self", ")", ":", "c", "=", "itertools", ".", "count", "(", ")", "canonical_ids", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "next", "(", "c", ")", ")", "for", "s", "in", "self", ".", "states_list", ":", "f...
a list of the used states in the order they appear
[ "a", "list", "of", "the", "used", "states", "in", "the", "order", "they", "appear" ]
a9a39c2bfd539048e35877cb13283552eadc24e2
https://github.com/mattjj/pyhsmm/blob/a9a39c2bfd539048e35877cb13283552eadc24e2/pyhsmm/models.py#L188-L196
train
222,789
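The defaultdict(lambda: next(c)) trick assigns each state an id in order of first appearance merely by touching the key. A standalone version taking the state sequences directly (they stand in for [s.stateseq for s in self.states_list]):

import collections
import itertools
import operator

def used_states(stateseqs):
    c = itertools.count()
    canonical_ids = collections.defaultdict(lambda: next(c))
    for seq in stateseqs:
        for state in seq:
            canonical_ids[state]   # the lookup alone inserts the next id
    return list(map(operator.itemgetter(0),
                    sorted(canonical_ids.items(), key=operator.itemgetter(1))))

assert used_states([[3, 3, 1], [2, 3]]) == [3, 1, 2]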
mattjj/pyhsmm
pyhsmm/util/plot.py
plot_gaussian_2D
def plot_gaussian_2D(mu, lmbda, color='b', centermarker=True,label='',alpha=1.,ax=None,artists=None): ''' Plots mean and cov ellipsoid into current axes. Must be 2D. lmbda is a covariance matrix. ''' assert len(mu) == 2 ax = ax if ax else plt.gca() # TODO use artists! t = np.hstack([np.arange(0,2*np.pi,0.01),0]) circle = np.vstack([np.sin(t),np.cos(t)]) ellipse = np.dot(np.linalg.cholesky(lmbda),circle) if artists is None: point = ax.scatter([mu[0]],[mu[1]],marker='D',color=color,s=4,alpha=alpha) \ if centermarker else None line, = ax.plot(ellipse[0,:] + mu[0], ellipse[1,:] + mu[1],linestyle='-', linewidth=2,color=color,label=label,alpha=alpha) else: line, point = artists if centermarker: point.set_offsets(np.atleast_2d(mu)) line.set_xdata(ellipse[0,:] + mu[0]) line.set_ydata(ellipse[1,:] + mu[1]) line.set_alpha(alpha) line.set_color(color) return line, point
python
def plot_gaussian_2D(mu, lmbda, color='b', centermarker=True,label='',alpha=1.,ax=None,artists=None): ''' Plots mean and cov ellipsoid into current axes. Must be 2D. lmbda is a covariance matrix. ''' assert len(mu) == 2 ax = ax if ax else plt.gca() # TODO use artists! t = np.hstack([np.arange(0,2*np.pi,0.01),0]) circle = np.vstack([np.sin(t),np.cos(t)]) ellipse = np.dot(np.linalg.cholesky(lmbda),circle) if artists is None: point = ax.scatter([mu[0]],[mu[1]],marker='D',color=color,s=4,alpha=alpha) \ if centermarker else None line, = ax.plot(ellipse[0,:] + mu[0], ellipse[1,:] + mu[1],linestyle='-', linewidth=2,color=color,label=label,alpha=alpha) else: line, point = artists if centermarker: point.set_offsets(np.atleast_2d(mu)) line.set_xdata(ellipse[0,:] + mu[0]) line.set_ydata(ellipse[1,:] + mu[1]) line.set_alpha(alpha) line.set_color(color) return line, point
[ "def", "plot_gaussian_2D", "(", "mu", ",", "lmbda", ",", "color", "=", "'b'", ",", "centermarker", "=", "True", ",", "label", "=", "''", ",", "alpha", "=", "1.", ",", "ax", "=", "None", ",", "artists", "=", "None", ")", ":", "assert", "len", "(", ...
Plots mean and cov ellipsoid into current axes. Must be 2D. lmbda is a covariance matrix.
[ "Plots", "mean", "and", "cov", "ellipsoid", "into", "current", "axes", ".", "Must", "be", "2D", ".", "lmbda", "is", "a", "covariance", "matrix", "." ]
a9a39c2bfd539048e35877cb13283552eadc24e2
https://github.com/mattjj/pyhsmm/blob/a9a39c2bfd539048e35877cb13283552eadc24e2/pyhsmm/util/plot.py#L7-L34
train
222,790
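Given plot_gaussian_2D as defined above (and matplotlib plus numpy available), a minimal usage sketch; the covariance must be positive definite for np.linalg.cholesky to succeed:

import numpy as np
import matplotlib.pyplot as plt

mu = np.array([0.0, 1.0])
lmbda = np.array([[2.0, 0.5],
                  [0.5, 1.0]])   # positive definite covariance
line, point = plot_gaussian_2D(mu, lmbda, color='r', label='component 0')
plt.legend()
plt.show()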
mattjj/pyhsmm
pyhsmm/basic/abstractions.py
DurationDistribution.resample_with_censoring
def resample_with_censoring(self,data=[],censored_data=[]): ''' censored_data is full of observations that were censored, meaning a value of x really could have been anything >= x, so this method samples them out to be at least that large ''' filled_in = self._uncensor_data(censored_data) return self.resample(data=combinedata((data,filled_in)))
python
def resample_with_censoring(self,data=[],censored_data=[]): ''' censored_data is full of observations that were censored, meaning a value of x really could have been anything >= x, so this method samples them out to be at least that large ''' filled_in = self._uncensor_data(censored_data) return self.resample(data=combinedata((data,filled_in)))
[ "def", "resample_with_censoring", "(", "self", ",", "data", "=", "[", "]", ",", "censored_data", "=", "[", "]", ")", ":", "filled_in", "=", "self", ".", "_uncensor_data", "(", "censored_data", ")", "return", "self", ".", "resample", "(", "data", "=", "co...
censored_data is full of observations that were censored, meaning a value of x really could have been anything >= x, so this method samples them out to be at least that large
[ "censored_data", "is", "full", "of", "observations", "that", "were", "censored", "meaning", "a", "value", "of", "x", "really", "could", "have", "been", "anything", ">", "=", "x", "so", "this", "method", "samples", "them", "out", "to", "be", "at", "least", ...
a9a39c2bfd539048e35877cb13283552eadc24e2
https://github.com/mattjj/pyhsmm/blob/a9a39c2bfd539048e35877cb13283552eadc24e2/pyhsmm/basic/abstractions.py#L70-L77
train
222,791
mattjj/pyhsmm
pyhsmm/util/general.py
scoreatpercentile
def scoreatpercentile(data,per,axis=0): 'like the function in scipy.stats but with an axis argument and works on arrays' a = np.sort(data,axis=axis) idx = per/100. * (data.shape[axis]-1) if (idx % 1 == 0): idx = int(idx) return a[tuple(slice(None) if ii != axis else idx for ii in range(a.ndim))] else: lowerweight = 1-(idx % 1) upperweight = (idx % 1) idx = int(np.floor(idx)) return lowerweight * a[tuple(slice(None) if ii != axis else idx for ii in range(a.ndim))] \ + upperweight * a[tuple(slice(None) if ii != axis else idx+1 for ii in range(a.ndim))]
python
def scoreatpercentile(data,per,axis=0): 'like the function in scipy.stats but with an axis argument and works on arrays' a = np.sort(data,axis=axis) idx = per/100. * (data.shape[axis]-1) if (idx % 1 == 0): idx = int(idx) return a[tuple(slice(None) if ii != axis else idx for ii in range(a.ndim))] else: lowerweight = 1-(idx % 1) upperweight = (idx % 1) idx = int(np.floor(idx)) return lowerweight * a[tuple(slice(None) if ii != axis else idx for ii in range(a.ndim))] \ + upperweight * a[tuple(slice(None) if ii != axis else idx+1 for ii in range(a.ndim))]
[ "def", "scoreatpercentile", "(", "data", ",", "per", ",", "axis", "=", "0", ")", ":", "a", "=", "np", ".", "sort", "(", "data", ",", "axis", "=", "axis", ")", "idx", "=", "per", "/", "100.", "*", "(", "data", ".", "shape", "[", "axis", "]", "...
like the function in scipy.stats but with an axis argument and works on arrays
[ "like", "the", "function", "in", "scipy", ".", "stats", "but", "with", "an", "axis", "argument", "and", "works", "on", "arrays" ]
a9a39c2bfd539048e35877cb13283552eadc24e2
https://github.com/mattjj/pyhsmm/blob/a9a39c2bfd539048e35877cb13283552eadc24e2/pyhsmm/util/general.py#L119-L131
train
222,792
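With the tuple-indexing and int-cast fix applied above (the original float/list indexing fails on modern numpy), the function agrees with numpy's default linear-interpolation percentile:

import numpy as np

a = np.arange(12.0).reshape(3, 4)
assert np.allclose(scoreatpercentile(a, 25, axis=0),
                   np.percentile(a, 25, axis=0))   # -> [2. 3. 4. 5.]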
lk-geimfari/mimesis
mimesis/providers/internet.py
Internet.content_type
def content_type(self, mime_type: Optional[MimeType] = None) -> str: """Get a random HTTP content type. :param mime_type: Optional MimeType enum member to limit the type. :return: Content type. :Example: Content-Type: application/json """ fmt = self.__file.mime_type(type_=mime_type) return 'Content-Type: {}'.format(fmt)
python
def content_type(self, mime_type: Optional[MimeType] = None) -> str: """Get a random HTTP content type. :param mime_type: Optional MimeType enum member to limit the type. :return: Content type. :Example: Content-Type: application/json """ fmt = self.__file.mime_type(type_=mime_type) return 'Content-Type: {}'.format(fmt)
[ "def", "content_type", "(", "self", ",", "mime_type", ":", "Optional", "[", "MimeType", "]", "=", "None", ")", "->", "str", ":", "fmt", "=", "self", ".", "__file", ".", "mime_type", "(", "type_", "=", "mime_type", ")", "return", "'Content-Type: {}'", "."...
Get a random HTTP content type. :param mime_type: Optional MimeType enum member to limit the type. :return: Content type. :Example: Content-Type: application/json
[ "Get", "a", "random", "HTTP", "content", "type", "." ]
4b16ee7a8dba6281a904654a88dbb4b052869fc5
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/internet.py#L46-L55
train
222,793
lk-geimfari/mimesis
mimesis/providers/internet.py
Internet.ip_v4
def ip_v4(self, with_port: bool = False) -> str: """Generate a random IPv4 address. :param with_port: Add port to IP. :return: Random IPv4 address. :Example: 19.121.223.58 """ ip = '.'.join(str(self.random.randint(0, 255)) for _ in range(4)) if with_port: ip += ':{}'.format(self.port()) return ip
python
def ip_v4(self, with_port: bool = False) -> str: """Generate a random IPv4 address. :param with_port: Add port to IP. :return: Random IPv4 address. :Example: 19.121.223.58 """ ip = '.'.join(str(self.random.randint(0, 255)) for _ in range(4)) if with_port: ip += ':{}'.format(self.port()) return ip
[ "def", "ip_v4", "(", "self", ",", "with_port", ":", "bool", "=", "False", ")", "->", "str", ":", "ip", "=", "'.'", ".", "join", "(", "str", "(", "self", ".", "random", ".", "randint", "(", "0", ",", "255", ")", ")", "for", "_", "in", "range", ...
Generate a random IPv4 address. :param with_port: Add port to IP. :return: Random IPv4 address. :Example: 19.121.223.58
[ "Generate", "a", "random", "IPv4", "address", "." ]
4b16ee7a8dba6281a904654a88dbb4b052869fc5
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/internet.py#L87-L101
train
222,794
lk-geimfari/mimesis
mimesis/providers/internet.py
Internet.ip_v6
def ip_v6(self) -> str: """Generate a random IPv6 address. :return: Random IPv6 address. :Example: 2001:c244:cf9d:1fb1:c56d:f52c:8a04:94f3 """ ipv6 = IPv6Address( self.random.randint( 0, 2 ** 128 - 1, ), ) return str(ipv6)
python
def ip_v6(self) -> str: """Generate a random IPv6 address. :return: Random IPv6 address. :Example: 2001:c244:cf9d:1fb1:c56d:f52c:8a04:94f3 """ ipv6 = IPv6Address( self.random.randint( 0, 2 ** 128 - 1, ), ) return str(ipv6)
[ "def", "ip_v6", "(", "self", ")", "->", "str", ":", "ipv6", "=", "IPv6Address", "(", "self", ".", "random", ".", "randint", "(", "0", ",", "2", "**", "128", "-", "1", ",", ")", ",", ")", "return", "str", "(", "ipv6", ")" ]
Generate a random IPv6 address. :return: Random IPv6 address. :Example: 2001:c244:cf9d:1fb1:c56d:f52c:8a04:94f3
[ "Generate", "a", "random", "IPv6", "address", "." ]
4b16ee7a8dba6281a904654a88dbb4b052869fc5
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/internet.py#L103-L116
train
222,795
lk-geimfari/mimesis
mimesis/providers/internet.py
Internet.mac_address
def mac_address(self) -> str: """Generate a random MAC address. :return: Random MAC address. :Example: 00:16:3e:25:e7:b1 """ mac_hex = [ 0x00, 0x16, 0x3e, self.random.randint(0x00, 0x7f), self.random.randint(0x00, 0xff), self.random.randint(0x00, 0xff), ] mac = map(lambda x: '%02x' % x, mac_hex) return ':'.join(mac)
python
def mac_address(self) -> str: """Generate a random MAC address. :return: Random MAC address. :Example: 00:16:3e:25:e7:b1 """ mac_hex = [ 0x00, 0x16, 0x3e, self.random.randint(0x00, 0x7f), self.random.randint(0x00, 0xff), self.random.randint(0x00, 0xff), ] mac = map(lambda x: '%02x' % x, mac_hex) return ':'.join(mac)
[ "def", "mac_address", "(", "self", ")", "->", "str", ":", "mac_hex", "=", "[", "0x00", ",", "0x16", ",", "0x3e", ",", "self", ".", "random", ".", "randint", "(", "0x00", ",", "0x7f", ")", ",", "self", ".", "random", ".", "randint", "(", "0x00", "...
Generate a random MAC address. :return: Random MAC address. :Example: 00:16:3e:25:e7:b1
[ "Generate", "a", "random", "MAC", "address", "." ]
4b16ee7a8dba6281a904654a88dbb4b052869fc5
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/internet.py#L118-L133
train
222,796
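The same construction with the stdlib RNG; the fixed 00:16:3e prefix is the OUI commonly associated with Xen virtual interfaces, and only the low three octets (with the fourth byte capped at 0x7f, as above) vary:

import random

def random_mac(rng=random):
    mac_hex = [0x00, 0x16, 0x3e,
               rng.randint(0x00, 0x7f),
               rng.randint(0x00, 0xff),
               rng.randint(0x00, 0xff)]
    return ':'.join('%02x' % x for x in mac_hex)

print(random_mac())  # e.g. 00:16:3e:25:e7:b1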
lk-geimfari/mimesis
mimesis/providers/internet.py
Internet.image_placeholder
def image_placeholder(width: Union[int, str] = 1920, height: Union[int, str] = 1080) -> str: """Generate a link to the image placeholder. :param width: Width of image. :param height: Height of image. :return: URL to image placeholder. """ url = 'http://placehold.it/{width}x{height}' return url.format(width=width, height=height)
python
def image_placeholder(width: Union[int, str] = 1920, height: Union[int, str] = 1080) -> str: """Generate a link to the image placeholder. :param width: Width of image. :param height: Height of image. :return: URL to image placeholder. """ url = 'http://placehold.it/{width}x{height}' return url.format(width=width, height=height)
[ "def", "image_placeholder", "(", "width", ":", "Union", "[", "int", ",", "str", "]", "=", "1920", ",", "height", ":", "Union", "[", "int", ",", "str", "]", "=", "1080", ")", "->", "str", ":", "url", "=", "'http://placehold.it/{width}x{height}'", "return"...
Generate a link to the image placeholder. :param width: Width of image. :param height: Height of image. :return: URL to image placeholder.
[ "Generate", "a", "link", "to", "the", "image", "placeholder", "." ]
4b16ee7a8dba6281a904654a88dbb4b052869fc5
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/internet.py#L146-L155
train
222,797
lk-geimfari/mimesis
mimesis/providers/internet.py
Internet.hashtags
def hashtags(self, quantity: int = 4) -> Union[str, list]: """Generate a list of hashtags. :param quantity: The quantity of hashtags. :return: The list of hashtags, or a single hashtag when quantity is 1. :Example: ['#love', '#sky', '#nice'] """ tags = ['#' + self.random.choice(HASHTAGS) for _ in range(quantity)] if int(quantity) == 1: return tags[0] return tags
python
def hashtags(self, quantity: int = 4) -> Union[str, list]: """Generate a list of hashtags. :param quantity: The quantity of hashtags. :return: The list of hashtags, or a single hashtag when quantity is 1. :Example: ['#love', '#sky', '#nice'] """ tags = ['#' + self.random.choice(HASHTAGS) for _ in range(quantity)] if int(quantity) == 1: return tags[0] return tags
[ "def", "hashtags", "(", "self", ",", "quantity", ":", "int", "=", "4", ")", "->", "Union", "[", "str", ",", "list", "]", ":", "tags", "=", "[", "'#'", "+", "self", ".", "random", ".", "choice", "(", "HASHTAGS", ")", "for", "_", "in", "range", "...
Generate a list of hashtags. :param quantity: The quantity of hashtags. :return: The list of hashtags, or a single hashtag when quantity is 1. :Example: ['#love', '#sky', '#nice']
[ "Generate", "a", "list", "of", "hashtags", "." ]
4b16ee7a8dba6281a904654a88dbb4b052869fc5
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/internet.py#L191-L207
train
222,798
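A standalone sketch with a stand-in tag list (mimesis ships its own HASHTAGS data); note the str-vs-list return depending on quantity:

import random

HASHTAGS = ['love', 'sky', 'nice', 'mimesis']  # illustrative stand-in

def hashtags(quantity=4, rng=random):
    tags = ['#' + rng.choice(HASHTAGS) for _ in range(quantity)]
    return tags[0] if quantity == 1 else tags

assert isinstance(hashtags(1), str)
assert len(hashtags(3)) == 3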
lk-geimfari/mimesis
mimesis/providers/internet.py
Internet.home_page
def home_page(self, tld_type: Optional[TLDType] = None) -> str: """Generate a random home page. :param tld_type: TLD type. :return: Random home page. :Example: http://www.fontir.info """ resource = self.random.choice(USERNAMES) domain = self.top_level_domain( tld_type=tld_type, ) return 'http://www.{}{}'.format( resource, domain)
python
def home_page(self, tld_type: Optional[TLDType] = None) -> str: """Generate a random home page. :param tld_type: TLD type. :return: Random home page. :Example: http://www.fontir.info """ resource = self.random.choice(USERNAMES) domain = self.top_level_domain( tld_type=tld_type, ) return 'http://www.{}{}'.format( resource, domain)
[ "def", "home_page", "(", "self", ",", "tld_type", ":", "Optional", "[", "TLDType", "]", "=", "None", ")", "->", "str", ":", "resource", "=", "self", ".", "random", ".", "choice", "(", "USERNAMES", ")", "domain", "=", "self", ".", "top_level_domain", "(...
Generate a random home page. :param tld_type: TLD type. :return: Random home page. :Example: http://www.fontir.info
[ "Generate", "a", "random", "home", "page", "." ]
4b16ee7a8dba6281a904654a88dbb4b052869fc5
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/internet.py#L209-L224
train
222,799