diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/losses.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..dcedaa21bc91eb125694ed9fd625ff5d90706e8a --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/losses.py @@ -0,0 +1,25 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Loss operations for use in neural networks. + +Note: All the losses are added to the `GraphKeys.LOSSES` collection by default. 
+ +API docstring: tensorflow.losses +""" + +# pylint: disable=wildcard-import +from tensorflow.python.ops.losses.losses_impl import * +from tensorflow.python.ops.losses.util import * +# pylint: enable=wildcard-import diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/losses_impl.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/losses_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..e7a2b27faa46b3f39d17d09f99640a53c9c41aa9 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/losses_impl.py @@ -0,0 +1,1102 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Implementation of Loss operations for use in neural networks.""" + +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import confusion_matrix +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import nn_ops +from tensorflow.python.ops import weights_broadcast_ops +from tensorflow.python.ops.losses import util +from tensorflow.python.util import dispatch +from tensorflow.python.util.deprecation import deprecated_args +from tensorflow.python.util.deprecation import deprecated_argument_lookup +from tensorflow.python.util.tf_export import tf_export + + +@tf_export(v1=["losses.Reduction"]) +class Reduction: + """Types of loss reduction. + + Contains the following values: + + * `NONE`: Un-reduced weighted losses with the same shape as input. + * `SUM`: Scalar sum of weighted losses. + * `MEAN`: Scalar `SUM` divided by sum of weights. DEPRECATED. + * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses. + * `SUM_OVER_NONZERO_WEIGHTS`: Scalar `SUM` divided by number of non-zero + weights. DEPRECATED. + * `SUM_BY_NONZERO_WEIGHTS`: Same as `SUM_OVER_NONZERO_WEIGHTS`. DEPRECATED. 
+ """ + + NONE = "none" + SUM = "weighted_sum" + SUM_OVER_BATCH_SIZE = "weighted_sum_over_batch_size" + MEAN = "weighted_mean" + SUM_BY_NONZERO_WEIGHTS = "weighted_sum_by_nonzero_weights" + SUM_OVER_NONZERO_WEIGHTS = SUM_BY_NONZERO_WEIGHTS + + @classmethod + def all(cls): + return ( + cls.NONE, + cls.SUM, + cls.MEAN, + cls.SUM_OVER_BATCH_SIZE, + cls.SUM_OVER_NONZERO_WEIGHTS, + cls.SUM_BY_NONZERO_WEIGHTS) + + @classmethod + def validate(cls, key): + if key not in cls.all(): + raise ValueError(f"Invalid Reduction Key {key}. Key should be one of " + f"{cls.all()}.") + + +def _safe_mean(losses, num_present): + """Computes a safe mean of the losses. + + Args: + losses: `Tensor` whose elements contain individual loss measurements. + num_present: The number of measurable elements in `losses`. + + Returns: + A scalar representing the mean of `losses`. If `num_present` is zero, + then zero is returned. + """ + total_loss = math_ops.reduce_sum(losses) + return math_ops.div_no_nan(total_loss, num_present, name="value") + + +def _num_present(losses, weights, per_batch=False): + """Computes the number of elements in the loss function induced by `weights`. + + A given weights tensor induces different numbers of usable elements in the + `losses` tensor. The `weights` tensor is broadcast across `losses` for all + possible dimensions. For example, if `losses` is a tensor of dimension + `[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is, + in effect, tiled to match the shape of `losses`. Following this effective + tile, the total number of present elements is the number of non-zero weights. + + Args: + losses: `Tensor` of shape `[batch_size, d1, ... dN]`. + weights: `Tensor` of shape `[]`, `[batch_size]` or + `[batch_size, d1, ... dK]`, where K < N. + per_batch: Whether to return the number of elements per batch or as a sum + total. + + Returns: + The number of present (non-zero) elements in the losses tensor. 
If + `per_batch` is `True`, the value is returned as a tensor of size + `[batch_size]`. Otherwise, a single scalar tensor is returned. + """ + if ((isinstance(weights, float) and weights != 0.0) or + (context.executing_eagerly() and weights._rank() == 0 # pylint: disable=protected-access + and not math_ops.equal(weights, 0.0))): + return _num_elements(losses) + with ops.name_scope(None, "num_present", (losses, weights)) as scope: + weights = math_ops.cast(weights, dtype=dtypes.float32) + present = array_ops.where( + math_ops.equal(weights, 0.0), + array_ops.zeros_like(weights), + array_ops.ones_like(weights)) + present = weights_broadcast_ops.broadcast_weights(present, losses) + if per_batch: + return math_ops.reduce_sum( + present, + axis=math_ops.range(1, array_ops.rank(present)), + keepdims=True, + name=scope) + return math_ops.reduce_sum(present, name=scope) + + +def _num_elements(losses): + """Computes the number of elements in `losses` tensor.""" + with ops.name_scope(None, "num_elements", values=[losses]) as scope: + return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype) + + +@tf_export(v1=["losses.compute_weighted_loss"]) +@dispatch.add_dispatch_support +def compute_weighted_loss( + losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, + reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): + """Computes the weighted loss. + + Args: + losses: `Tensor` of shape `[batch_size, d1, ... dN]`. + weights: Optional `Tensor` whose rank is either 0, or the same rank as + `losses`, and must be broadcastable to `losses` (i.e., all dimensions must + be either `1`, or the same as the corresponding `losses` dimension). + scope: the scope for the operations performed in computing the loss. + loss_collection: the loss will be added to these collections. + reduction: Type of reduction to apply to loss. + + Returns: + Weighted loss `Tensor` of the same type as `losses`. 
If `reduction` is + `NONE`, this has the same shape as `losses`; otherwise, it is scalar. + + Raises: + ValueError: If `weights` is `None` or the shape is not compatible with + `losses`, or if the number of dimensions (rank) of either `losses` or + `weights` is missing. + + Note: + When calculating the gradient of a weighted loss contributions from + both `losses` and `weights` are considered. If your `weights` depend + on some model parameters but you do not want this to affect the loss + gradient, you need to apply `tf.stop_gradient` to `weights` before + passing them to `compute_weighted_loss`. + + @compatibility(eager) + The `loss_collection` argument is ignored when executing eagerly. Consider + holding on to the return value or collecting losses via a `tf.keras.Model`. + @end_compatibility + """ + Reduction.validate(reduction) + with ops.name_scope(scope, "weighted_loss", (losses, weights)): + # Save the `reduction` argument for loss normalization when distributing + # to multiple replicas. Used only for estimator + v1 optimizer flow. 
+ ops.get_default_graph()._last_loss_reduction = reduction # pylint: disable=protected-access + + def compute_loss(losses, weights, loss_collection, reduction): + losses = ops.convert_to_tensor(losses) + input_dtype = losses.dtype + losses = math_ops.cast(losses, dtype=dtypes.float32) + weights = math_ops.cast(weights, dtype=dtypes.float32) + weighted_losses = math_ops.multiply(losses, weights) + if reduction == Reduction.NONE: + loss = weighted_losses + else: + loss = math_ops.reduce_sum(weighted_losses) + if reduction == Reduction.MEAN: + loss = _safe_mean( + loss, math_ops.reduce_sum(array_ops.ones_like(losses) * weights)) + elif (reduction == Reduction.SUM_BY_NONZERO_WEIGHTS or + reduction == Reduction.SUM_OVER_NONZERO_WEIGHTS): + loss = _safe_mean(loss, _num_present(losses, weights)) + elif reduction == Reduction.SUM_OVER_BATCH_SIZE: + loss = _safe_mean(loss, _num_elements(losses)) + + # Convert the result back to the input type. + loss = math_ops.cast(loss, input_dtype) + util.add_loss(loss, loss_collection) + return loss + + # Skip the assert_broadcastable in XLA context because asserts are not + # supported so it only causes unnecessary ops. Also skip it because it uses + # a DenseToDenseSetOperation op that is incompatible with XLA when + # the shape(s) are dynamic. + if control_flow_ops.get_enclosing_xla_context() is not None: + return compute_loss(losses, weights, loss_collection, reduction) + else: + with ops.control_dependencies( + (weights_broadcast_ops.assert_broadcastable(weights, losses),)): + return compute_loss(losses, weights, loss_collection, reduction) + + +@tf_export(v1=["losses.absolute_difference"]) +@dispatch.add_dispatch_support +def absolute_difference( + labels, predictions, weights=1.0, scope=None, + loss_collection=ops.GraphKeys.LOSSES, + reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): + """Adds an Absolute Difference loss to the training procedure. + + `weights` acts as a coefficient for the loss. 
If a scalar is provided, then + the loss is simply scaled by the given value. If `weights` is a `Tensor` of + shape `[batch_size]`, then the total loss for each sample of the batch is + rescaled by the corresponding element in the `weights` vector. If the shape of + `weights` matches the shape of `predictions`, then the loss of each + measurable element of `predictions` is scaled by the corresponding value of + `weights`. + + Args: + labels: The ground truth output tensor, same dimensions as 'predictions'. + predictions: The predicted outputs. + weights: Optional `Tensor` whose rank is either 0, or the same rank as + `labels`, and must be broadcastable to `labels` (i.e., all dimensions must + be either `1`, or the same as the corresponding `losses` dimension). + scope: The scope for the operations performed in computing the loss. + loss_collection: collection to which this loss will be added. + reduction: Type of reduction to apply to loss. + + Returns: + Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same + shape as `labels`; otherwise, it is scalar. + + Raises: + ValueError: If the shape of `predictions` doesn't match that of + `labels` or if the shape of `weights` is invalid or if `labels` + or `predictions` is None. + + @compatibility(eager) + The `loss_collection` argument is ignored when executing eagerly. Consider + holding on to the return value or collecting losses via a `tf.keras.Model`. 
+ @end_compatibility + """ + if labels is None: + raise ValueError("Argument `labels` must not be None.") + if predictions is None: + raise ValueError("Argument `predictions` must not be None.") + with ops.name_scope(scope, "absolute_difference", + (predictions, labels, weights)) as scope: + predictions = math_ops.cast(predictions, dtype=dtypes.float32) + labels = math_ops.cast(labels, dtype=dtypes.float32) + predictions.get_shape().assert_is_compatible_with(labels.get_shape()) + losses = math_ops.abs(math_ops.subtract(predictions, labels)) + return compute_weighted_loss( + losses, weights, scope, loss_collection, reduction=reduction) + + +@tf_export(v1=["losses.cosine_distance"]) +@dispatch.add_dispatch_support +@deprecated_args(None, "dim is deprecated, use axis instead", "dim") +def cosine_distance( + labels, predictions, axis=None, weights=1.0, scope=None, + loss_collection=ops.GraphKeys.LOSSES, + reduction=Reduction.SUM_BY_NONZERO_WEIGHTS, + dim=None): + """Adds a cosine-distance loss to the training procedure. + + Note that the function assumes that `predictions` and `labels` are already + unit-normalized. + + Args: + labels: `Tensor` whose shape matches 'predictions' + predictions: An arbitrary matrix. + axis: The dimension along which the cosine distance is computed. + weights: Optional `Tensor` whose rank is either 0, or the same rank as + `labels`, and must be broadcastable to `labels` (i.e., all dimensions must + be either `1`, or the same as the corresponding `losses` dimension). + scope: The scope for the operations performed in computing the loss. + loss_collection: collection to which this loss will be added. + reduction: Type of reduction to apply to loss. + dim: The old (deprecated) name for `axis`. + + Returns: + Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same + shape as `labels`; otherwise, it is scalar. 
+ + Raises: + ValueError: If `predictions` shape doesn't match `labels` shape, or + `axis`, `labels`, `predictions` or `weights` is `None`. + + @compatibility(eager) + The `loss_collection` argument is ignored when executing eagerly. Consider + holding on to the return value or collecting losses via a `tf.keras.Model`. + @end_compatibility + """ + axis = deprecated_argument_lookup("axis", axis, "dim", dim) + if axis is None: + raise ValueError("You must specify argument `axis`.") + if labels is None: + raise ValueError("Argument `labels` must not be None.") + if predictions is None: + raise ValueError("Argument `predictions` must not be None.") + with ops.name_scope(scope, "cosine_distance_loss", + (predictions, labels, weights)) as scope: + predictions = math_ops.cast(predictions, dtype=dtypes.float32) + labels = math_ops.cast(labels, dtype=dtypes.float32) + predictions.get_shape().assert_is_compatible_with(labels.get_shape()) + + radial_diffs = math_ops.multiply(predictions, labels) + losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keepdims=True) + return compute_weighted_loss( + losses, weights, scope, loss_collection, reduction=reduction) + + +@tf_export(v1=["losses.hinge_loss"]) +@dispatch.add_dispatch_support +def hinge_loss(labels, logits, weights=1.0, scope=None, + loss_collection=ops.GraphKeys.LOSSES, + reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): + """Adds a hinge loss to the training procedure. + + Args: + labels: The ground truth output tensor. Its shape should match the shape of + logits. The values of the tensor are expected to be 0.0 or 1.0. Internally + the {0,1} labels are converted to {-1,1} when calculating the hinge loss. + logits: The logits, a float tensor. Note that logits are assumed to be + unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive + (resp. negative) binary prediction. 
+ weights: Optional `Tensor` whose rank is either 0, or the same rank as + `labels`, and must be broadcastable to `labels` (i.e., all dimensions must + be either `1`, or the same as the corresponding `losses` dimension). + scope: The scope for the operations performed in computing the loss. + loss_collection: collection to which the loss will be added. + reduction: Type of reduction to apply to loss. + + Returns: + Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same + shape as `labels`; otherwise, it is scalar. + + Raises: + ValueError: If the shapes of `logits` and `labels` don't match or + if `labels` or `logits` is None. + + @compatibility(eager) + The `loss_collection` argument is ignored when executing eagerly. Consider + holding on to the return value or collecting losses via a `tf.keras.Model`. + @end_compatibility + """ + if labels is None: + raise ValueError("Argument `labels` must not be None.") + if logits is None: + raise ValueError("Argument `logits` must not be None.") + with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope: + logits = math_ops.cast(logits, dtype=dtypes.float32) + labels = math_ops.cast(labels, dtype=dtypes.float32) + logits.get_shape().assert_is_compatible_with(labels.get_shape()) + # We first need to convert binary labels to -1/1 labels (as floats). + all_ones = array_ops.ones_like(labels) + labels = math_ops.subtract(2 * labels, all_ones) + losses = nn_ops.relu( + math_ops.subtract(all_ones, math_ops.multiply(labels, logits))) + return compute_weighted_loss( + losses, weights, scope, loss_collection, reduction=reduction) + + +@tf_export(v1=["losses.huber_loss"]) +@dispatch.add_dispatch_support +def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None, + loss_collection=ops.GraphKeys.LOSSES, + reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): + """Adds a [Huber Loss](https://en.wikipedia.org/wiki/Huber_loss) term to the training procedure. 
+ + For each value x in `error=labels-predictions`, the following is calculated: + + ``` + 0.5 * x^2 if |x| <= d + 0.5 * d^2 + d * (|x| - d) if |x| > d + ``` + + where d is `delta`. + + `weights` acts as a coefficient for the loss. If a scalar is provided, then + the loss is simply scaled by the given value. If `weights` is a tensor of size + `[batch_size]`, then the total loss for each sample of the batch is rescaled + by the corresponding element in the `weights` vector. If the shape of + `weights` matches the shape of `predictions`, then the loss of each + measurable element of `predictions` is scaled by the corresponding value of + `weights`. + + Args: + labels: The ground truth output tensor, same dimensions as 'predictions'. + predictions: The predicted outputs. + weights: Optional `Tensor` whose rank is either 0, or the same rank as + `labels`, and must be broadcastable to `labels` (i.e., all dimensions must + be either `1`, or the same as the corresponding `losses` dimension). + delta: `float`, the point where the huber loss function changes from a + quadratic to linear. + scope: The scope for the operations performed in computing the loss. + loss_collection: collection to which the loss will be added. + reduction: Type of reduction to apply to loss. + + Returns: + Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same + shape as `labels`; otherwise, it is scalar. + + Raises: + ValueError: If the shape of `predictions` doesn't match that of `labels` or + if the shape of `weights` is invalid. Also if `labels` or + `predictions` is None. + + @compatibility(eager) + The `loss_collection` argument is ignored when executing eagerly. Consider + holding on to the return value or collecting losses via a `tf.keras.Model`. 
+ @end_compatibility + """ + if labels is None: + raise ValueError("Argument `labels` must not be None.") + if predictions is None: + raise ValueError("Argument `predictions` must not be None.") + with ops.name_scope(scope, "huber_loss", + (predictions, labels, weights)) as scope: + predictions = math_ops.cast(predictions, dtype=dtypes.float32) + labels = math_ops.cast(labels, dtype=dtypes.float32) + predictions.get_shape().assert_is_compatible_with(labels.get_shape()) + error = math_ops.subtract(predictions, labels) + abs_error = math_ops.abs(error) + quadratic = math_ops.minimum(abs_error, delta) + # The following expression is the same in value as + # tf.maximum(abs_error - delta, 0), but importantly the gradient for the + # expression when abs_error == delta is 0 (for tf.maximum it would be 1). + # This is necessary to avoid doubling the gradient, since there is already a + # nonzero contribution to the gradient from the quadratic term. + linear = math_ops.subtract(abs_error, quadratic) + losses = math_ops.add( + math_ops.multiply( + ops.convert_to_tensor(0.5, dtype=quadratic.dtype), + math_ops.multiply(quadratic, quadratic)), + math_ops.multiply(delta, linear)) + return compute_weighted_loss( + losses, weights, scope, loss_collection, reduction=reduction) + + +@tf_export(v1=["losses.log_loss"]) +@dispatch.add_dispatch_support +def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None, + loss_collection=ops.GraphKeys.LOSSES, + reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): + """Adds a Log Loss term to the training procedure. + + `weights` acts as a coefficient for the loss. If a scalar is provided, then + the loss is simply scaled by the given value. If `weights` is a tensor of size + `[batch_size]`, then the total loss for each sample of the batch is rescaled + by the corresponding element in the `weights` vector. 
If the shape of + `weights` matches the shape of `predictions`, then the loss of each + measurable element of `predictions` is scaled by the corresponding value of + `weights`. + + Args: + labels: The ground truth output tensor, same dimensions as 'predictions'. + predictions: The predicted outputs. + weights: Optional `Tensor` whose rank is either 0, or the same rank as + `labels`, and must be broadcastable to `labels` (i.e., all dimensions must + be either `1`, or the same as the corresponding `losses` dimension). + epsilon: A small increment to add to avoid taking a log of zero. + scope: The scope for the operations performed in computing the loss. + loss_collection: collection to which the loss will be added. + reduction: Type of reduction to apply to loss. + + Returns: + Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same + shape as `labels`; otherwise, it is scalar. + + Raises: + ValueError: If the shape of `predictions` doesn't match that of `labels` or + if the shape of `weights` is invalid. Also if `labels` or `predictions` + is None. + + @compatibility(eager) + The `loss_collection` argument is ignored when executing eagerly. Consider + holding on to the return value or collecting losses via a `tf.keras.Model`. 
+ @end_compatibility + """ + if labels is None: + raise ValueError("Argument `labels` must not be None.") + if predictions is None: + raise ValueError("Argument `predictions` must not be None.") + with ops.name_scope(scope, "log_loss", + (predictions, labels, weights)) as scope: + predictions = math_ops.cast(predictions, dtype=dtypes.float32) + labels = math_ops.cast(labels, dtype=dtypes.float32) + predictions.get_shape().assert_is_compatible_with(labels.get_shape()) + losses = -math_ops.multiply( + labels, + math_ops.log(predictions + epsilon)) - math_ops.multiply( + (1 - labels), math_ops.log(1 - predictions + epsilon)) + return compute_weighted_loss( + losses, weights, scope, loss_collection, reduction=reduction) + + +# TODO(b/37208492): Add reduction arg. +@tf_export(v1=["losses.mean_pairwise_squared_error"]) +@dispatch.add_dispatch_support +def mean_pairwise_squared_error( + labels, predictions, weights=1.0, scope=None, + loss_collection=ops.GraphKeys.LOSSES): + """Adds a pairwise-errors-squared loss to the training procedure. + + Unlike `mean_squared_error`, which is a measure of the differences between + corresponding elements of `predictions` and `labels`, + `mean_pairwise_squared_error` is a measure of the differences between pairs of + corresponding elements of `predictions` and `labels`. + + For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are + three pairs of differences are summed to compute the loss: + loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3 + + Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the + corresponding pairs are computed within each batch sample but not across + samples within a batch. For example, if `predictions` represents a batch of + 16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs + is drawn from each image, but not across images. + + `weights` acts as a coefficient for the loss. 
If a scalar is provided, then + the loss is simply scaled by the given value. If `weights` is a tensor of size + `[batch_size]`, then the total loss for each sample of the batch is rescaled + by the corresponding element in the `weights` vector. + + Args: + labels: The ground truth output tensor, whose shape must match the shape of + `predictions`. + predictions: The predicted outputs, a tensor of size + `[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in + `predictions`. + weights: Coefficients for the loss a scalar, a tensor of shape + `[batch_size]` or a tensor whose shape matches `predictions`. + scope: The scope for the operations performed in computing the loss. + loss_collection: collection to which the loss will be added. + + Returns: + A scalar `Tensor` that returns the weighted loss. + + Raises: + ValueError: If the shape of `predictions` doesn't match that of `labels` or + if the shape of `weights` is invalid. Also if `labels` or `predictions` + is None. + + @compatibility(eager) + The `loss_collection` argument is ignored when executing eagerly. Consider + holding on to the return value or collecting losses via a `tf.keras.Model`. 
+ @end_compatibility + """ + if labels is None: + raise ValueError("Argument `labels` must not be None.") + if predictions is None: + raise ValueError("Argument `predictions` must not be None.") + with ops.name_scope(scope, "mean_pairwise_squared_error", + (predictions, labels, weights)) as scope: + weights = math_ops.cast(weights, dtype=dtypes.float32) + labels = math_ops.cast(labels, dtype=dtypes.float32) + + def compute_loss(labels, predictions, weights, loss_collection): + predictions = math_ops.cast(predictions, dtype=dtypes.float32) + predictions.get_shape().assert_is_compatible_with(labels.get_shape()) + + diffs = math_ops.subtract(predictions, labels) + + axis = math_ops.range(1, array_ops.rank(diffs)) + + sum_squares_diff_per_batch = math_ops.reduce_sum( + math_ops.square(diffs), axis=axis, keepdims=True) + num_present_per_batch = _num_present(diffs, weights, per_batch=True) + + term1 = 2.0 * math_ops.div_no_nan( + sum_squares_diff_per_batch, + math_ops.maximum(num_present_per_batch - 1, 0), + name="value") + + sum_diff = math_ops.reduce_sum(diffs, axis=axis, keepdims=True) + term2 = 2.0 * math_ops.div_no_nan( + math_ops.square(sum_diff), + math_ops.maximum( + math_ops.multiply(num_present_per_batch, + num_present_per_batch - 1), 0), + name="value") + + weighted_losses = math_ops.multiply(term1 - term2, weights) + loss = math_ops.reduce_sum(weighted_losses) + + mean_loss = array_ops.where( + math_ops.reduce_sum(num_present_per_batch) > 0, + loss, + array_ops.zeros_like(loss), + name="value") + util.add_loss(mean_loss, loss_collection) + return mean_loss + + # Skip the assert_broadcastable in XLA context because asserts are not + # supported so it only causes unnecessary ops. Also skip it because it uses + # a DenseToDenseSetOperation op that is incompatible with XLA when + # the shape(s) are dynamic. 
+ if control_flow_ops.get_enclosing_xla_context() is not None: + return compute_loss(labels, predictions, weights, loss_collection) + else: + with ops.control_dependencies( + (weights_broadcast_ops.assert_broadcastable(weights, labels),)): + return compute_loss(labels, predictions, weights, loss_collection) + + +@tf_export(v1=["losses.mean_squared_error"]) +@dispatch.add_dispatch_support +def mean_squared_error( + labels, predictions, weights=1.0, scope=None, + loss_collection=ops.GraphKeys.LOSSES, + reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): + """Adds a Sum-of-Squares loss to the training procedure. + + `weights` acts as a coefficient for the loss. If a scalar is provided, then + the loss is simply scaled by the given value. If `weights` is a tensor of size + `[batch_size]`, then the total loss for each sample of the batch is rescaled + by the corresponding element in the `weights` vector. If the shape of + `weights` matches the shape of `predictions`, then the loss of each + measurable element of `predictions` is scaled by the corresponding value of + `weights`. + + Args: + labels: The ground truth output tensor, same dimensions as 'predictions'. + predictions: The predicted outputs. + weights: Optional `Tensor` whose rank is either 0, or the same rank as + `labels`, and must be broadcastable to `labels` (i.e., all dimensions must + be either `1`, or the same as the corresponding `losses` dimension). + scope: The scope for the operations performed in computing the loss. + loss_collection: collection to which the loss will be added. + reduction: Type of reduction to apply to loss. + + Returns: + Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same + shape as `labels`; otherwise, it is scalar. + + Raises: + ValueError: If the shape of `predictions` doesn't match that of `labels` or + if the shape of `weights` is invalid. Also if `labels` or `predictions` + is None. 
+ + @compatibility(TF2) + + `tf.compat.v1.losses.mean_squared_error` is mostly compatible with eager + execution and `tf.function`. But, the `loss_collection` argument is + ignored when executing eagerly and no loss will be written to the loss + collections. You will need to either hold on to the return value manually + or rely on `tf.keras.Model` loss tracking. + + + To switch to native TF2 style, instantiate the + `tf.keras.losses.MeanSquaredError` class and call the object instead. + + + #### Structural Mapping to Native TF2 + + Before: + + ```python + loss = tf.compat.v1.losses.mean_squared_error( + labels=labels, + predictions=predictions, + weights=weights, + reduction=reduction) + ``` + + After: + + ```python + loss_fn = tf.keras.losses.MeanSquaredError( + reduction=reduction) + loss = loss_fn( + y_true=labels, + y_pred=predictions, + sample_weight=weights) + ``` + + #### How to Map Arguments + + | TF1 Arg Name | TF2 Arg Name | Note | + | :-------------------- | :--------------- | :------------------------- | + | `labels` | `y_true` | In `__call__()` method | + | `predictions` | `y_pred` | In `__call__()` method | + | `weights` | `sample_weight` | In `__call__()` method. | + : : : The shape requirements for `sample_weight` is different from : + : : : `weights`. Please check the [argument definition][api_docs] for : + : : : details. : + | `scope` | Not supported | - | + | `loss_collection` | Not supported | Losses should be tracked | + : : : explicitly or with Keras APIs, for example, [add_loss][add_loss], : + : : : instead of via collections : + | `reduction` | `reduction` | In constructor. 
Value of | + : : : `tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE`, : + : : : `tf.compat.v1.losses.Reduction.SUM`, : + : : : `tf.compat.v1.losses.Reduction.NONE` in : + : : : `tf.compat.v1.losses.softmax_cross_entropy` correspond to : + : : : `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE`, : + : : : `tf.keras.losses.Reduction.SUM`, : + : : : `tf.keras.losses.Reduction.NONE`, respectively. If you : + : : : used other value for `reduction`, including the default value : + : : : `tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS`, there is : + : : : no directly corresponding value. Please modify the loss : + : : : implementation manually. : + + [add_loss]:https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#add_loss + [api_docs]:https://www.tensorflow.org/api_docs/python/tf/keras/losses/MeanSquaredError#__call__ + + + #### Before & After Usage Example + + Before: + + >>> y_true = [1, 2, 3] + >>> y_pred = [1, 3, 5] + >>> weights = [0, 1, 0.25] + >>> # samples with zero-weight are excluded from calculation when `reduction` + >>> # argument is set to default value `Reduction.SUM_BY_NONZERO_WEIGHTS` + >>> tf.compat.v1.losses.mean_squared_error( + ... labels=y_true, + ... predictions=y_pred, + ... weights=weights).numpy() + 1.0 + + >>> tf.compat.v1.losses.mean_squared_error( + ... labels=y_true, + ... predictions=y_pred, + ... weights=weights, + ... reduction=tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE).numpy() + 0.66667 + + After: + + >>> y_true = [[1.0], [2.0], [3.0]] + >>> y_pred = [[1.0], [3.0], [5.0]] + >>> weights = [1, 1, 0.25] + >>> mse = tf.keras.losses.MeanSquaredError( + ... 
reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE) + >>> mse(y_true=y_true, y_pred=y_pred, sample_weight=weights).numpy() + 0.66667 + + @end_compatibility + """ + if labels is None: + raise ValueError("Argument `labels` must not be None.") + if predictions is None: + raise ValueError("Argument `predictions` must not be None.") + with ops.name_scope(scope, "mean_squared_error", + (predictions, labels, weights)) as scope: + predictions = math_ops.cast(predictions, dtype=dtypes.float32) + labels = math_ops.cast(labels, dtype=dtypes.float32) + predictions.get_shape().assert_is_compatible_with(labels.get_shape()) + losses = math_ops.squared_difference(predictions, labels) + return compute_weighted_loss( + losses, weights, scope, loss_collection, reduction=reduction) + + +@tf_export(v1=["losses.sigmoid_cross_entropy"]) +@dispatch.add_dispatch_support +def sigmoid_cross_entropy( + multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None, + loss_collection=ops.GraphKeys.LOSSES, + reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): + """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits. + + `weights` acts as a coefficient for the loss. If a scalar is provided, + then the loss is simply scaled by the given value. If `weights` is a + tensor of shape `[batch_size]`, then the loss weights apply to each + corresponding sample. + + If `label_smoothing` is nonzero, smooth the labels towards 1/2: + + new_multiclass_labels = multiclass_labels * (1 - label_smoothing) + + 0.5 * label_smoothing + + Args: + multi_class_labels: `[batch_size, num_classes]` target integer labels in + `{0, 1}`. + logits: Float `[batch_size, num_classes]` logits outputs of the network. + weights: Optional `Tensor` whose rank is either 0, or the same rank as + `multi_class_labels`, and must be broadcastable to `multi_class_labels` + (i.e., all dimensions must be either `1`, or the same as the + corresponding `losses` dimension). 
+ label_smoothing: If greater than `0` then smooth the labels. + scope: The scope for the operations performed in computing the loss. + loss_collection: collection to which the loss will be added. + reduction: Type of reduction to apply to loss. + + Returns: + Weighted loss `Tensor` of the same type as `logits`. If `reduction` is + `NONE`, this has the same shape as `logits`; otherwise, it is scalar. + + Raises: + ValueError: If the shape of `logits` doesn't match that of + `multi_class_labels` or if the shape of `weights` is invalid, or if + `weights` is None. Also if `multi_class_labels` or `logits` is None. + + @compatibility(eager) + The `loss_collection` argument is ignored when executing eagerly. Consider + holding on to the return value or collecting losses via a `tf.keras.Model`. + @end_compatibility + """ + if multi_class_labels is None: + raise ValueError("Argument `multi_class_labels` must not be None.") + if logits is None: + raise ValueError("Argument `logits` must not be None.") + with ops.name_scope(scope, "sigmoid_cross_entropy_loss", + (logits, multi_class_labels, weights)) as scope: + logits = ops.convert_to_tensor(logits) + multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype) + logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape()) + + if label_smoothing > 0: + multi_class_labels = (multi_class_labels * (1 - label_smoothing) + + 0.5 * label_smoothing) + + losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels, + logits=logits, + name="xentropy") + return compute_weighted_loss( + losses, weights, scope, loss_collection, reduction=reduction) + + +@tf_export(v1=["losses.softmax_cross_entropy"]) +@dispatch.add_dispatch_support +def softmax_cross_entropy( + onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None, + loss_collection=ops.GraphKeys.LOSSES, + reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): + r"""Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits_v2. 
+ + `weights` acts as a coefficient for the loss. If a scalar is provided, + then the loss is simply scaled by the given value. If `weights` is a + tensor of shape `[batch_size]`, then the loss weights apply to each + corresponding sample. + + If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes: + new_onehot_labels = onehot_labels * (1 - label_smoothing) + + label_smoothing / num_classes + + Note that `onehot_labels` and `logits` must have the same shape, + e.g. `[batch_size, num_classes]`. The shape of `weights` must be + broadcastable to loss, whose shape is decided by the shape of `logits`. + In case the shape of `logits` is `[batch_size, num_classes]`, loss is + a `Tensor` of shape `[batch_size]`. + + Args: + onehot_labels: One-hot-encoded labels. + logits: Logits outputs of the network. + weights: Optional `Tensor` that is broadcastable to loss. + label_smoothing: If greater than 0 then smooth the labels. + scope: the scope for the operations performed in computing the loss. + loss_collection: collection to which the loss will be added. + reduction: Type of reduction to apply to loss. + + Returns: + Weighted loss `Tensor` of the same type as `logits`. If `reduction` is + `NONE`, this has shape `[batch_size]`; otherwise, it is scalar. + + Raises: + ValueError: If the shape of `logits` doesn't match that of `onehot_labels` + or if the shape of `weights` is invalid or if `weights` is None. Also if + `onehot_labels` or `logits` is None. + + @compatibility(TF2) + + `tf.compat.v1.losses.softmax_cross_entropy` is mostly compatible with eager + execution and `tf.function`. But, the `loss_collection` argument is + ignored when executing eagerly and no loss will be written to the loss + collections. You will need to either hold on to the return value manually + or rely on `tf.keras.Model` loss tracking. 
+ + + To switch to native TF2 style, instantiate the + `tf.keras.losses.CategoricalCrossentropy` class with `from_logits` set + as `True` and call the object instead. + + + #### Structural Mapping to Native TF2 + + Before: + + ```python + loss = tf.compat.v1.losses.softmax_cross_entropy( + onehot_labels=onehot_labels, + logits=logits, + weights=weights, + label_smoothing=smoothing) + ``` + + After: + + ```python + loss_fn = tf.keras.losses.CategoricalCrossentropy( + from_logits=True, + label_smoothing=smoothing) + loss = loss_fn( + y_true=onehot_labels, + y_pred=logits, + sample_weight=weights) + ``` + + #### How to Map Arguments + + | TF1 Arg Name | TF2 Arg Name | Note | + | :-------------------- | :--------------- | :------------------------- | + | - | `from_logits` | Set `from_logits` as True | + : : : to have identical behavior : + | `onehot_labels` | `y_true` | In `__call__()` method | + | `logits` | `y_pred` | In `__call__()` method | + | `weights` | `sample_weight` | In `__call__()` method | + | `label_smoothing` | `label_smoothing`| In constructor | + | `scope` | Not supported | - | + | `loss_collection` | Not supported | Losses should be tracked | + : : : explicitly or with Keras : + : : : APIs, for example, : + : : : [add_loss][add_loss], : + : : : instead of via collections : + | `reduction` | `reduction` | In constructor. Value of | + : : : `tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE`, : + : : : `tf.compat.v1.losses.Reduction.SUM`, : + : : : `tf.compat.v1.losses.Reduction.NONE` in : + : : : `tf.compat.v1.losses.softmax_cross_entropy` correspond to : + : : : `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE`, : + : : : `tf.keras.losses.Reduction.SUM`, : + : : : `tf.keras.losses.Reduction.NONE`, respectively. If you : + : : : used other value for `reduction`, including the default value : + : : : `tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS`, there is : + : : : no directly corresponding value. 
Please modify the loss : + : : : implementation manually. : + + [add_loss]:https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#add_loss + + + #### Before & After Usage Example + + Before: + + >>> y_true = [[0, 1, 0], [0, 0, 1]] + >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] + >>> weights = [0.3, 0.7] + >>> smoothing = 0.2 + >>> tf.compat.v1.losses.softmax_cross_entropy(y_true, y_pred, weights=weights, + ... label_smoothing=smoothing).numpy() + 0.57618 + + After: + + >>> cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True, + ... label_smoothing=smoothing) + >>> cce(y_true, y_pred, sample_weight=weights).numpy() + 0.57618 + + @end_compatibility + """ + if onehot_labels is None: + raise ValueError("Argument `onehot_labels` must not be None.") + if logits is None: + raise ValueError("Argument `logits` must not be None.") + with ops.name_scope(scope, "softmax_cross_entropy_loss", + (logits, onehot_labels, weights)) as scope: + logits = ops.convert_to_tensor(logits) + onehot_labels = math_ops.cast(onehot_labels, logits.dtype) + logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape()) + + if label_smoothing > 0: + num_classes = math_ops.cast( + array_ops.shape(onehot_labels)[-1], logits.dtype) + smooth_positives = 1.0 - label_smoothing + smooth_negatives = label_smoothing / num_classes + onehot_labels = onehot_labels * smooth_positives + smooth_negatives + + onehot_labels = array_ops.stop_gradient( + onehot_labels, name="labels_stop_gradient") + losses = nn.softmax_cross_entropy_with_logits_v2( + labels=onehot_labels, logits=logits, name="xentropy") + + return compute_weighted_loss( + losses, weights, scope, loss_collection, reduction=reduction) + + +# TODO(ptucker): Merge this with similar method in metrics_impl. +def _remove_squeezable_dimensions( + labels, predictions, weights=None, expected_rank_diff=0): + """Internal version of _remove_squeezable_dimensions which handles weights. 
+ + Squeezes `predictions` and `labels` if their ranks differ from expected by + exactly 1. + Squeezes `weights` if its rank is 1 more than the new rank of `predictions` + + This will use static shape if available. Otherwise, it will add graph + operations, which could result in a performance hit. + + Args: + labels: Label values, a `Tensor` whose dimensions match `predictions`. + predictions: Predicted values, a `Tensor` of arbitrary dimensions. + weights: Optional weight `Tensor`. It will be squeezed if it's not scalar, + and its rank is 1 more than the new rank of `labels`. + expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`. + + Returns: + Tuple of `predictions`, `labels` and `weights`, possibly with the last + dimension squeezed. + """ + labels, predictions = confusion_matrix.remove_squeezable_dimensions( + labels, predictions, expected_rank_diff=expected_rank_diff) + + if weights is not None: + weights = ops.convert_to_tensor(weights) + labels_rank = labels.get_shape().ndims + weights_shape = weights.get_shape() + weights_rank = weights_shape.ndims + + if (labels_rank is not None) and (weights_rank is not None): + # Use static rank. + rank_diff = weights_rank - labels_rank + if rank_diff == 1: + weights = array_ops.squeeze(weights, [-1]) + return labels, predictions, weights + + # Use dynamic rank. 
+ rank_diff = array_ops.rank(weights) - array_ops.rank(labels) + if (weights_rank is None) or ( + weights_rank > 0 and weights_shape.dims[-1].is_compatible_with(1)): + weights = cond.cond( + math_ops.equal(1, rank_diff), + lambda: array_ops.squeeze(weights, [-1]), + lambda: weights) + + return labels, predictions, weights + + +@tf_export(v1=["losses.sparse_softmax_cross_entropy"]) +@dispatch.add_dispatch_support +def sparse_softmax_cross_entropy( + labels, logits, weights=1.0, scope=None, + loss_collection=ops.GraphKeys.LOSSES, + reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): + """Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`. + + `weights` acts as a coefficient for the loss. If a scalar is provided, + then the loss is simply scaled by the given value. If `weights` is a + tensor of shape `[batch_size]`, then the loss weights apply to each + corresponding sample. + + Args: + labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of + `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` + must be an index in `[0, num_classes)`. Other values will raise an + exception when this op is run on CPU, and return `NaN` for corresponding + loss and gradient rows on GPU. + logits: Unscaled log probabilities of shape + `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32` or + `float64`. + weights: Coefficients for the loss. This must be scalar or broadcastable to + `labels` (i.e. same rank and each dimension is either 1 or the same). + scope: the scope for the operations performed in computing the loss. + loss_collection: collection to which the loss will be added. + reduction: Type of reduction to apply to loss. + + Returns: + Weighted loss `Tensor` of the same type as `logits`. If `reduction` is + `NONE`, this has the same shape as `labels`; otherwise, it is scalar. + + Raises: + ValueError: If the shapes of `logits`, `labels`, and `weights` are + incompatible, or if any of them are None. 
+ + @compatibility(eager) + The `loss_collection` argument is ignored when executing eagerly. Consider + holding on to the return value or collecting losses via a `tf.keras.Model`. + @end_compatibility + """ + if labels is None: + raise ValueError("Argument `labels` must not be None.") + if logits is None: + raise ValueError("Argument `logits` must not be None.") + with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss", + (logits, labels, weights)) as scope: + # As documented above in Args, labels contain class IDs and logits contains + # 1 probability per class ID, so we expect rank(logits) - rank(labels) == 1; + # therefore, expected_rank_diff=1. + labels, logits, weights = _remove_squeezable_dimensions( + labels, logits, weights, expected_rank_diff=1) + losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels, + logits=logits, + name="xentropy") + return compute_weighted_loss( + losses, weights, scope, loss_collection, reduction=reduction) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/util.py new file mode 100644 index 0000000000000000000000000000000000000000..2678b3ee7cad72f1ee537658eb36aa237c19179d --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/util.py @@ -0,0 +1,263 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for manipulating the loss collections.""" + +from tensorflow.python.eager import context +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import confusion_matrix +from tensorflow.python.ops import math_ops +from tensorflow.python.util import tf_contextlib +from tensorflow.python.util.tf_export import tf_export + + +def squeeze_or_expand_dimensions(y_pred, y_true=None, sample_weight=None): + """Squeeze or expand last dimension if needed. + + 1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1 + (using `confusion_matrix.remove_squeezable_dimensions`). + 2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1 + from the new rank of `y_pred`. + If `sample_weight` is scalar, it is kept scalar. + + This will use static shape if available. Otherwise, it will add graph + operations, which could result in a performance hit. + + Args: + y_pred: Predicted values, a `Tensor` of arbitrary dimensions. + y_true: Optional label `Tensor` whose dimensions match `y_pred`. + sample_weight: Optional weight scalar or `Tensor` whose dimensions match + `y_pred`. + + Returns: + Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has + the last dimension squeezed, + `sample_weight` could be extended by one dimension. + If `sample_weight` is None, (y_pred, y_true) is returned. + """ + y_pred_shape = y_pred.shape + y_pred_rank = y_pred_shape.ndims + if y_true is not None: + + # If sparse matrix is provided as `y_true`, the last dimension in `y_pred` + # may be > 1. 
Eg: y_true = [0, 1, 2] (shape=(3,)), + # y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]] (shape=(3, 3)) + # In this case, we should not try to remove squeezable dimension. + y_true_shape = y_true.shape + y_true_rank = y_true_shape.ndims + if (y_true_rank is not None) and (y_pred_rank is not None): + # Use static rank for `y_true` and `y_pred`. + if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1: + y_true, y_pred = confusion_matrix.remove_squeezable_dimensions( + y_true, y_pred) + else: + # Use dynamic rank. + rank_diff = array_ops.rank(y_pred) - array_ops.rank(y_true) + squeeze_dims = lambda: confusion_matrix.remove_squeezable_dimensions( # pylint: disable=g-long-lambda + y_true, y_pred) + is_last_dim_1 = math_ops.equal(1, array_ops.shape(y_pred)[-1]) + maybe_squeeze_dims = lambda: cond.cond( # pylint: disable=g-long-lambda + is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred)) + y_true, y_pred = cond.cond( + math_ops.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims) + + if sample_weight is None: + return y_pred, y_true + + weights_shape = sample_weight.shape + weights_rank = weights_shape.ndims + if weights_rank == 0: # If weights is scalar, do nothing. + return y_pred, y_true, sample_weight + + if (y_pred_rank is not None) and (weights_rank is not None): + # Use static rank. + if weights_rank - y_pred_rank == 1: + sample_weight = array_ops.squeeze(sample_weight, [-1]) + elif y_pred_rank - weights_rank == 1: + sample_weight = array_ops.expand_dims(sample_weight, [-1]) + return y_pred, y_true, sample_weight + + # Use dynamic rank. 
+ weights_rank_tensor = array_ops.rank(sample_weight) + rank_diff = weights_rank_tensor - array_ops.rank(y_pred) + maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1]) + + def _maybe_expand_weights(): + expand_weights = lambda: array_ops.expand_dims(sample_weight, [-1]) + return cond.cond( + math_ops.equal(rank_diff, -1), expand_weights, lambda: sample_weight) + + def _maybe_adjust_weights(): + return cond.cond( + math_ops.equal(rank_diff, 1), maybe_squeeze_weights, + _maybe_expand_weights) + + # squeeze or expand last dim of `sample_weight` if its rank differs by 1 + # from the new rank of `y_pred`. + sample_weight = cond.cond( + math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight, + _maybe_adjust_weights) + return y_pred, y_true, sample_weight + + +def scale_losses_by_sample_weight(losses, sample_weight): + """Scales loss values by the given sample weights. + + `sample_weight` dimensions are updated to match with the dimension of `losses` + if possible by using squeeze/expand/broadcast. + + Args: + losses: Loss tensor. + sample_weight: Sample weights tensor. + + Returns: + `losses` scaled by `sample_weight` with dtype float32. + """ + # TODO(psv): Handle the casting here in a better way, eg. if losses is float64 + # we do not want to lose precision. + losses = math_ops.cast(losses, dtypes.float32) + sample_weight = math_ops.cast(sample_weight, dtypes.float32) + + # Update dimensions of `sample_weight` to match with `losses` if possible. + losses, _, sample_weight = squeeze_or_expand_dimensions( + losses, None, sample_weight) + return math_ops.multiply(losses, sample_weight) + + +@tf_contextlib.contextmanager +def check_per_example_loss_rank(per_example_loss): + """Context manager that checks that the rank of per_example_loss is at least 1. + + Args: + per_example_loss: Per example loss tensor. + + Yields: + A context manager. + """ + loss_rank = per_example_loss.shape.rank + if loss_rank is not None: + # Handle static rank. 
+    if loss_rank == 0:
+      raise ValueError(
+          "Invalid value passed for `per_example_loss`. Expected a tensor with "
+          f"at least rank 1. Received per_example_loss={per_example_loss} with "
+          f"rank {loss_rank}")
+    yield
+  else:
+    # Handle dynamic rank.
+    with ops.control_dependencies([
+        check_ops.assert_greater_equal(
+            array_ops.rank(per_example_loss),
+            math_ops.cast(1, dtype=dtypes.int32),
+            message="Invalid value passed for `per_example_loss`. Expected a "
+            "tensor with at least rank 1.")
+    ]):
+      yield
+
+
+@tf_export(v1=["losses.add_loss"])
+def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
+  """Adds an externally defined loss to the collection of losses.
+
+  Args:
+    loss: A loss `Tensor`.
+    loss_collection: Optional collection to add the loss to.
+  """
+  # Since we have no way of figuring out when a training iteration starts or
+  # ends, holding on to a loss when executing eagerly is indistinguishable from
+  # leaking memory. We instead leave the collection empty.
+  if loss_collection and not context.executing_eagerly():
+    ops.add_to_collection(loss_collection, loss)
+
+
+@tf_export(v1=["losses.get_losses"])
+def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
+  """Gets the list of losses from the loss_collection.
+
+  Args:
+    scope: An optional scope name for filtering the losses to return.
+    loss_collection: Optional losses collection.
+
+  Returns:
+    a list of loss tensors.
+  """
+  return ops.get_collection(loss_collection, scope)
+
+
+@tf_export(v1=["losses.get_regularization_losses"])
+def get_regularization_losses(scope=None):
+  """Gets the list of regularization losses.
+
+  Args:
+    scope: An optional scope name for filtering the losses to return.
+
+  Returns:
+    A list of regularization losses as Tensors. 
+ """ + return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope) + + +@tf_export(v1=["losses.get_regularization_loss"]) +def get_regularization_loss(scope=None, name="total_regularization_loss"): + """Gets the total regularization loss. + + Args: + scope: An optional scope name for filtering the losses to return. + name: The name of the returned tensor. + + Returns: + A scalar regularization loss. + """ + losses = get_regularization_losses(scope) + if losses: + return math_ops.add_n(losses, name=name) + else: + return constant_op.constant(0.0) + + +@tf_export(v1=["losses.get_total_loss"]) +def get_total_loss(add_regularization_losses=True, + name="total_loss", + scope=None): + """Returns a tensor whose value represents the total loss. + + In particular, this adds any losses you have added with `tf.add_loss()` to + any regularization losses that have been added by regularization parameters + on layers constructors e.g. `tf.layers`. Be very sure to use this if you + are constructing a loss_op manually. Otherwise regularization arguments + on `tf.layers` methods will not function. + + Args: + add_regularization_losses: A boolean indicating whether or not to use the + regularization losses in the sum. + name: The name of the returned tensor. + scope: An optional scope name for filtering the losses to return. Note that + this filters the losses added with `tf.add_loss()` as well as the + regularization losses to that scope. + + Returns: + A `Tensor` whose value represents the total loss. + + Raises: + ValueError: if `losses` is not iterable. 
+ """ + losses = get_losses(scope=scope) + if add_regularization_losses: + losses += get_regularization_losses(scope=scope) + return math_ops.add_n(losses, name=name) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..15191f3b9341efd435a3c00d86d21d9fd1fba434 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__init__.py @@ -0,0 +1,171 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""# tf.experimental.numpy: NumPy API on TensorFlow. + +This module provides a subset of NumPy API, built on top of TensorFlow +operations. APIs are based on and have been tested with NumPy 1.16 version. + +The set of supported APIs may be expanded over time. Also future releases may +change the baseline version of NumPy API being supported. A list of some +systematic differences with NumPy is listed later in the "Differences with +NumPy" section. + +## Getting Started + +Please also see [TensorFlow NumPy Guide]( +https://www.tensorflow.org/guide/tf_numpy). 
+ +In the code snippets below, we will assume that `tf.experimental.numpy` is +imported as `tnp` and NumPy is imported as `np` + +```python +print(tnp.ones([2,1]) + np.ones([1, 2])) +``` + +## Types + +The module provides an `ndarray` class which wraps an immutable `tf.Tensor`. +Additional functions are provided which accept array-like objects. Here +array-like objects include `ndarrays` as defined by this module, as well as +`tf.Tensor`, in addition to types accepted by NumPy. + +A subset of NumPy dtypes are supported. Type promotion* follows NumPy +semantics. + +**Note**: A new type promotion that offers a lot of advantages over the old +type promotion is now available. Learn more about enabling the new +type promotion +[here](https://www.tensorflow.org/guide/tf_numpy_type_promotion). + +```python +print(tnp.ones([1, 2], dtype=tnp.int16) + tnp.ones([2, 1], dtype=tnp.uint8)) +``` + +## Array Interface + +The `ndarray` class implements the `__array__` interface. This should allow +these objects to be passed into contexts that expect a NumPy or array-like +object (e.g. matplotlib). + +```python +np.sum(tnp.ones([1, 2]) + np.ones([2, 1])) +``` + + +## TF Interoperability + +The TF-NumPy API calls can be interleaved with TensorFlow calls +without incurring Tensor data copies. This is true even if the `ndarray` or +`tf.Tensor` is placed on a non-CPU device. + +In general, the expected behavior should be on par with that of code involving +`tf.Tensor` and running stateless TensorFlow functions on them. + +```python +tnp.sum(tnp.ones([1, 2]) + tf.ones([2, 1])) +``` + +Note that the `__array_priority__` is currently chosen to be lower than +`tf.Tensor`. Hence the `+` operator above returns a `tf.Tensor`. + +Additional examples of interoperability include: + +* using `with tf.GradientTape()` scope to compute gradients through the + TF-NumPy API calls. 
+* using `tf.distribute.Strategy` scope for distributed execution
+* using `tf.vectorized_map()` for speeding up code using auto-vectorization
+
+
+
+## Device Support
+
+Given that `ndarray` and functions wrap TensorFlow constructs, the code will
+have GPU and TPU support on par with TensorFlow. Device placement can be
+controlled by using `with tf.device` scopes. Note that these devices could
+be local or remote.
+
+```python
+with tf.device("GPU:0"):
+  x = tnp.ones([1, 2])
+print(tf.convert_to_tensor(x).device)
+```
+
+## Graph and Eager Modes
+
+Eager mode execution should typically match NumPy semantics of executing
+op-by-op. However the same code can be executed in graph mode, by putting it
+inside a `tf.function`. The function body can contain NumPy code, and the inputs
+can be `ndarray` as well.
+
+```python
+@tf.function
+def f(x, y):
+  return tnp.sum(x + y)
+
+f(tnp.ones([1, 2]), tf.ones([2, 1]))
+```
+Python control flow based on `ndarray` values will be translated by
+[autograph](https://www.tensorflow.org/code/tensorflow/python/autograph/g3doc/reference/index.md)
+into `tf.cond` and `tf.while_loop` constructs. The code can be XLA compiled
+for further optimizations.
+
+However, note that graph mode execution can change behavior of certain
+operations since symbolic execution may not have information that is computed
+during runtime. Some differences are:
+
+* Shapes can be incomplete or unknown in graph mode. This means that
+  `ndarray.shape`, `ndarray.size` and `ndarray.ndim` can return `ndarray`
+  objects instead of returning integer (or tuple of integer) values.
+* `__len__`, `__iter__` and `__index__` properties of `ndarray`
+  may similarly not be supported in graph mode. Code using these
+  may need to change to explicit shape operations or control flow
+  constructs.
+* Also note the [autograph limitations](
+https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.md). 
+ + +## Mutation and Variables + +`ndarrays` currently wrap immutable `tf.Tensor`. Hence mutation +operations like slice assigns are not supported. This may change in the future. +Note however that one can directly construct a `tf.Variable` and use that with +the TF-NumPy APIs. + +```python +tf_var = tf.Variable(2.0) +tf_var.assign_add(tnp.square(tf_var)) +``` + +## Differences with NumPy + +Here is a non-exhaustive list of differences: + +* Not all dtypes are currently supported. e.g. `np.float96`, `np.float128`. + `np.object_`, `np.str_`, `np.recarray` types are not supported. +* `ndarray` storage is in C order only. Fortran order, views, `stride_tricks` + are not supported. +* Only a subset of functions and modules are supported. This set will be + expanded over time. For supported functions, some arguments or argument + values may not be supported. These differences are generally provided in the + function comments. Full `ufunc` support is also not provided. +* Buffer mutation is currently not supported. `ndarrays` wrap immutable + tensors. This means that output buffer arguments (e.g. `out` in ufuncs) are + not supported. +* NumPy C API is not supported. NumPy's Cython and Swig integration are not + supported. + +API docstring: tensorflow.experimental.numpy +""" +# TODO(wangpeng): Append `tf_export`ed symbols to the comments above. 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fca2d34fa2632228caf3c31939d289fbadfffab2 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_array_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_array_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca493bf052254c148cb40c17af53f7a04ac87012 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_array_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_arrays.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_arrays.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9e3991dc46d9180e47da36f25677434115b7f7d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_arrays.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_config.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_config.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b401202893378e85383a7dee26284a4e34c514ab Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_config.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_dtypes.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_dtypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6cd17c39e2a7c660cd4b37f6e9b3bd7cde1e3a9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_dtypes.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_math_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_math_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e713d8d11f0d63c90c94877373aafc98f81060e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_math_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_random.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c05cf7ae6d2d01f9e7fdccaa39db0e132e62a00 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_random.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35f3cba0ea555de4b9bfcfdb6b179563cfd4483a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/__pycache__/np_utils.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_array_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_array_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..2784104ff94025fcefe46ba8faa13b0af9349ff9 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_array_ops.py @@ -0,0 +1,2116 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Common array methods.""" +# pylint: disable=g-direct-tensorflow-import + +import builtins +import enum +import functools +import math +import numbers + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import clip_ops +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import manip_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import sort_ops +from tensorflow.python.ops.numpy_ops import np_arrays +from tensorflow.python.ops.numpy_ops import np_dtypes +from tensorflow.python.ops.numpy_ops import np_utils +from tensorflow.python.types import core as core_tf_types +from tensorflow.python.util import nest +from tensorflow.python.util import tf_export + + +newaxis = np.newaxis +tf_export.tf_export('experimental.numpy.newaxis', v1=[]).export_constant( + __name__, 'newaxis' +) + + +@tf_export.tf_export('experimental.numpy.empty', v1=[]) +@np_utils.np_doc('empty') +def empty(shape, dtype=float): # pylint: disable=redefined-outer-name + return zeros(shape, dtype) + + +@tf_export.tf_export('experimental.numpy.empty_like', v1=[]) +@np_utils.np_doc('empty_like') +def empty_like(a, dtype=None): + return zeros_like(a, dtype) + + +@tf_export.tf_export('experimental.numpy.zeros', v1=[]) +@np_utils.np_doc('zeros') +def zeros(shape, dtype=float): # pylint: disable=redefined-outer-name + dtype = ( + np_utils.result_type(dtype) if dtype else np_dtypes.default_float_type() + ) + return array_ops.zeros(shape, dtype=dtype) + + 
+@tf_export.tf_export('experimental.numpy.zeros_like', v1=[]) +@np_utils.np_doc('zeros_like') +def zeros_like(a, dtype=None): # pylint: disable=missing-docstring + dtype = np_utils.result_type_unary(a, dtype) + + dtype = dtypes.as_dtype(dtype) # Work around b/149877262 + return array_ops.zeros_like(a, dtype) + + +@tf_export.tf_export('experimental.numpy.ones', v1=[]) +@np_utils.np_doc('ones') +def ones(shape, dtype=float): # pylint: disable=redefined-outer-name + if dtype: + dtype = np_utils.result_type(dtype) + return array_ops.ones(shape, dtype=dtype) + + +@tf_export.tf_export('experimental.numpy.ones_like', v1=[]) +@np_utils.np_doc('ones_like') +def ones_like(a, dtype=None): + dtype = np_utils.result_type_unary(a, dtype) + return array_ops.ones_like(a, dtype) + + +@tf_export.tf_export('experimental.numpy.eye', v1=[]) +@np_utils.np_doc('eye') +def eye(N, M=None, k=0, dtype=float): # pylint: disable=invalid-name,missing-docstring + if dtype: + dtype = np_utils.result_type(dtype) + if not M: + M = N + # Making sure N, M and k are `int` + N = int(N) + M = int(M) + k = int(k) + if k >= M or -k >= N: + # tf.linalg.diag will raise an error in this case + return zeros([N, M], dtype=dtype) + if k == 0: + return linalg_ops.eye(N, M, dtype=dtype) + # We need the precise length, otherwise tf.linalg.diag will raise an error + diag_len = builtins.min(N, M) + if k > 0: + if N >= M: + diag_len -= k + elif N + k > M: + diag_len = M - k + elif k <= 0: + if M >= N: + diag_len += k + elif M - k > N: + diag_len = N + k + diagonal_ = array_ops.ones([diag_len], dtype=dtype) + return array_ops.matrix_diag(diagonal=diagonal_, num_rows=N, num_cols=M, k=k) + + +@tf_export.tf_export('experimental.numpy.identity', v1=[]) +@np_utils.np_doc('identity') +def identity(n, dtype=float): + return eye(N=n, M=n, dtype=dtype) + + +@tf_export.tf_export('experimental.numpy.full', v1=[]) +@np_utils.np_doc('full') +def full(shape, fill_value, dtype=None): # pylint: disable=redefined-outer-name + if not 
isinstance(shape, np_arrays.ndarray): + shape = asarray(np_arrays.convert_to_tensor(shape, dtype_hint=np.int32)) + shape = atleast_1d(shape) + fill_value = asarray(fill_value, dtype=dtype) + return array_ops.broadcast_to(fill_value, shape) + + +# Using doc only here since np full_like signature doesn't seem to have the +# shape argument (even though it exists in the documentation online). +@tf_export.tf_export('experimental.numpy.full_like', v1=[]) +@np_utils.np_doc_only('full_like') +def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): # pylint: disable=missing-docstring,redefined-outer-name + """order, subok and shape arguments mustn't be changed.""" + if order != 'K': + raise ValueError('Non-standard orders are not supported.') + if not subok: + raise ValueError('subok being False is not supported.') + if shape: + raise ValueError('Overriding the shape is not supported.') + + a = asarray(a) + dtype = dtype or np_utils.result_type(a) + fill_value = asarray(fill_value, dtype=dtype) + return array_ops.broadcast_to(fill_value, array_ops.shape(a)) + + +def _array_internal(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name + """Main implementation of np.array().""" + result_t = val + + if not isinstance(result_t, tensor_lib.Tensor): + dtype = np_utils.result_type_unary(result_t, dtype) + # We can't call `convert_to_tensor(result_t, dtype=dtype)` here because + # convert_to_tensor doesn't allow incompatible arguments such as (5.5, int) + # while np.array allows them. We need to convert-then-cast. + + # EagerTensor conversion complains about "mixed types" when converting + # tensors with no dtype information. This is because it infers types based + # on one selected item in the list. So e.g. when converting [2., 2j] + # to a tensor, it will select float32 as the inferred type and not be able + # to convert the list to a float 32 tensor. 
+ # Since we have some information about the final dtype we care about, we + # supply that information so that convert_to_tensor will do best-effort + # conversion to that dtype first. + result_t = np_arrays.convert_to_tensor(result_t, dtype_hint=dtype) + result_t = math_ops.cast(result_t, dtype=dtype) + elif dtype: + result_t = math_ops.cast(result_t, dtype) + + if copy: + result_t = array_ops.identity(result_t) + + max_ndmin = 32 + if ndmin > max_ndmin: + raise ValueError( + f'ndmin bigger than allowable number of dimensions: {max_ndmin}.' + ) + + if ndmin == 0: + return result_t + + ndims = array_ops.rank(result_t) + + def true_fn(): + old_shape = array_ops.shape(result_t) + new_shape = array_ops.concat( + [array_ops.ones(ndmin - ndims, dtypes.int32), old_shape], axis=0 + ) + return array_ops.reshape(result_t, new_shape) + + result_t = np_utils.cond( + np_utils.greater(ndmin, ndims), true_fn, lambda: result_t + ) + return result_t + + +# TODO(wangpeng): investigate whether we can make `copy` default to False. +# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args +@tf_export.tf_export('experimental.numpy.array', v1=[]) +@np_utils.np_doc_only('array') +def array(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name + """Since Tensors are immutable, a copy is made only if val is placed on a + + different device than the current one. Even if `copy` is False, a new Tensor + may need to be built to satisfy `dtype` and `ndim`. This is used only if `val` + is an ndarray or a Tensor. 
+ """ # pylint:disable=g-docstring-missing-newline + if dtype: + dtype = np_utils.result_type(dtype) + return _array_internal(val, dtype, copy, ndmin) + + +# pylint: enable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args + + +@tf_export.tf_export('experimental.numpy.asarray', v1=[]) +@np_utils.np_doc('asarray') +def asarray(a, dtype=None): + if dtype: + dtype = np_utils.result_type(dtype) + if isinstance(a, np_arrays.ndarray) and ( + not dtype or dtype == a.dtype.as_numpy_dtype + ): + return a + return array(a, dtype, copy=False) + + +@tf_export.tf_export('experimental.numpy.asanyarray', v1=[]) +@np_utils.np_doc('asanyarray') +def asanyarray(a, dtype=None): + return asarray(a, dtype) + + +@tf_export.tf_export('experimental.numpy.ascontiguousarray', v1=[]) +@np_utils.np_doc('ascontiguousarray') +def ascontiguousarray(a, dtype=None): + return array(a, dtype, ndmin=1) + + +# Numerical ranges. +@tf_export.tf_export('experimental.numpy.arange', v1=[]) +@np_utils.np_doc('arange') +def arange(start, stop=None, step=1, dtype=None): + """Returns `step`-separated values in the range [start, stop). + + Args: + start: Start of the interval. Included in the range. + stop: End of the interval. If not specified, `start` is treated as 0 and + `start` value is used as `stop`. If specified, it is not included in the + range if `step` is integer. When `step` is floating point, it may or may + not be included. + step: The difference between 2 consecutive values in the output range. It is + recommended to use `linspace` instead of using non-integer values for + `step`. + dtype: Optional. Type of the resulting ndarray. Could be a python type, a + NumPy type or a TensorFlow `DType`. If not provided, the largest type of + `start`, `stop`, `step` is used. + + Raises: + ValueError: If step is zero. 
+ """ + if not step: + raise ValueError('step must be non-zero.') + if dtype: + dtype = np_utils.result_type(dtype) + else: + if stop is None: + dtype = np_utils.result_type(start, step) + else: + dtype = np_utils.result_type(start, step, stop) + if step > 0 and ( + (stop is not None and start > stop) or (stop is None and start < 0) + ): + return array([], dtype=dtype) + if step < 0 and ( + (stop is not None and start < stop) or (stop is None and start > 0) + ): + return array([], dtype=dtype) + # TODO(srbs): There are some bugs when start or stop is float type and dtype + # is integer type. + return math_ops.cast( + math_ops.range(start, limit=stop, delta=step), dtype=dtype + ) + + +# Building matrices. +@tf_export.tf_export('experimental.numpy.diag', v1=[]) +@np_utils.np_doc('diag') +def diag(v, k=0): # pylint: disable=missing-docstring + """Raises an error if input is not 1- or 2-d.""" + v = asarray(v) + v_rank = array_ops.rank(v) + + v.shape.with_rank_at_most(2) + + # TODO(nareshmodi): Consider a np_utils.Assert version that will fail during + # tracing time if the shape is known. 
+ control_flow_assert.Assert( + np_utils.logical_or(math_ops.equal(v_rank, 1), math_ops.equal(v_rank, 2)), + [v_rank], + ) + + def _diag(v, k): + return np_utils.cond( + math_ops.equal(array_ops.size(v), 0), + lambda: array_ops.zeros([abs(k), abs(k)], dtype=v.dtype), + lambda: array_ops.matrix_diag(v, k=k), + ) + + def _diag_part(v, k): + v_shape = array_ops.shape(v) + v, k = np_utils.cond( + np_utils.logical_or( + np_utils.less_equal(k, -1 * np_utils.getitem(v_shape, 0)), + np_utils.greater_equal(k, np_utils.getitem(v_shape, 1)), + ), + lambda: (array_ops.zeros([0, 0], dtype=v.dtype), 0), + lambda: (v, k), + ) + result = array_ops.matrix_diag_part(v, k=k) + return result + + result = np_utils.cond( + math_ops.equal(v_rank, 1), lambda: _diag(v, k), lambda: _diag_part(v, k) + ) + return result + + +@tf_export.tf_export('experimental.numpy.diagonal', v1=[]) +@np_utils.np_doc('diagonal') +def diagonal(a, offset=0, axis1=0, axis2=1): # pylint: disable=missing-docstring + a = asarray(a) + + maybe_rank = a.shape.rank + if ( + maybe_rank is not None + and offset == 0 + and (axis1 == maybe_rank - 2 or axis1 == -2) + and (axis2 == maybe_rank - 1 or axis2 == -1) + ): + return array_ops.matrix_diag_part(a) + + a = moveaxis(a, (axis1, axis2), (-2, -1)) + + a_shape = array_ops.shape(a) + + def _zeros(): # pylint: disable=missing-docstring + return ( + array_ops.zeros( + array_ops.concat([a_shape[:-1], [0]], 0), dtype=a.dtype + ), + 0, + ) + + # All zeros since diag_part doesn't handle all possible k (aka offset). + # Written this way since cond will run shape inference on both branches, + # and diag_part shape inference will fail when offset is out of bounds. 
+ a, offset = np_utils.cond( + np_utils.logical_or( + np_utils.less_equal(offset, -1 * np_utils.getitem(a_shape, -2)), + np_utils.greater_equal(offset, np_utils.getitem(a_shape, -1)), + ), + _zeros, + lambda: (a, offset), + ) + + a = array_ops.matrix_diag_part(a, k=offset) + return a + + +@tf_export.tf_export('experimental.numpy.diagflat', v1=[]) +@np_utils.np_doc('diagflat') +def diagflat(v, k=0): + v = asarray(v) + return diag(array_ops.reshape(v, [-1]), k) + + +def _promote_dtype(*arrays): + dtype = np_utils.result_type(*arrays) + + def _fast_asarray(a): + if isinstance(a, np_arrays.ndarray) and dtype == a.dtype.as_numpy_dtype: + return a + return _array_internal(a, dtype=dtype, copy=False) + + return [_fast_asarray(a) for a in arrays] + + +def _promote_dtype_binary(t1, t2): + dtype = np_utils._result_type_binary(t1, t2) # pylint: disable=protected-access + if not ( + isinstance(t1, np_arrays.ndarray) and dtype == t1.dtype.as_numpy_dtype + ): + t1 = _array_internal(t1, dtype=dtype, copy=False) + if not ( + isinstance(t2, np_arrays.ndarray) and dtype == t2.dtype.as_numpy_dtype + ): + t2 = _array_internal(t2, dtype=dtype, copy=False) + return t1, t2 + + +@tf_export.tf_export('experimental.numpy.all', v1=[]) +@np_utils.np_doc('all') +def all(a, axis=None, keepdims=None): # pylint: disable=redefined-builtin + a = asarray(a, dtype=bool) + return math_ops.reduce_all(input_tensor=a, axis=axis, keepdims=keepdims) + + +@tf_export.tf_export('experimental.numpy.any', v1=[]) +@np_utils.np_doc('any') +def any(a, axis=None, keepdims=None): # pylint: disable=redefined-builtin + a = asarray(a, dtype=bool) + return math_ops.reduce_any(input_tensor=a, axis=axis, keepdims=keepdims) + + +@tf_export.tf_export('experimental.numpy.compress', v1=[]) +@np_utils.np_doc('compress') +def compress(condition, a, axis=None): # pylint: disable=redefined-outer-name,missing-function-docstring + condition = asarray(condition, dtype=bool) + a = asarray(a) + + if condition.ndim != 1: + raise 
ValueError('condition must be a 1-d array.') + # `np.compress` treats scalars as 1-d arrays. + if a.ndim == 0: + a = ravel(a) + + if axis is None: + a = ravel(a) + axis = 0 + + if axis < 0: + axis += a.ndim + + assert axis >= 0 and axis < a.ndim + + # `tf.boolean_mask` requires the first dimensions of array and condition to + # match. `np.compress` pads condition with False when it is shorter. + condition_t = condition + a_t = a + if condition.shape[0] < a.shape[axis]: + padding = array_ops.fill([a.shape[axis] - condition.shape[0]], False) + condition_t = array_ops.concat([condition_t, padding], axis=0) + return array_ops.boolean_mask(tensor=a_t, mask=condition_t, axis=axis) + + +@tf_export.tf_export('experimental.numpy.copy', v1=[]) +@np_utils.np_doc('copy') +def copy(a): + return array(a, copy=True) + + +def _maybe_promote_to_int(a): + if dtypes.as_dtype(a.dtype).is_integer: + # If a is an integer type and its precision is less than that of `int`, + # the output type will be `int`. + a_numpy_dtype = a.dtype.as_numpy_dtype + output_type = np.promote_types(a_numpy_dtype, int) + if output_type != a_numpy_dtype: + a = asarray(a, dtype=output_type) + + return a + + +@tf_export.tf_export('experimental.numpy.cumprod', v1=[]) +@np_utils.np_doc('cumprod') +def cumprod(a, axis=None, dtype=None): # pylint: disable=missing-docstring + a = asarray(a, dtype=dtype) + + if dtype is None: + a = _maybe_promote_to_int(a) + + # If axis is None, the input is flattened. + if axis is None: + a = ravel(a) + axis = 0 + elif axis < 0: + axis += array_ops.rank(a) + return math_ops.cumprod(a, axis) + + +@tf_export.tf_export('experimental.numpy.cumsum', v1=[]) +@np_utils.np_doc('cumsum') +def cumsum(a, axis=None, dtype=None): # pylint: disable=missing-docstring + a = asarray(a, dtype=dtype) + + if dtype is None: + a = _maybe_promote_to_int(a) + + # If axis is None, the input is flattened. 
+ if axis is None: + a = ravel(a) + axis = 0 + elif axis < 0: + axis += array_ops.rank(a) + return math_ops.cumsum(a, axis) + + +@tf_export.tf_export('experimental.numpy.imag', v1=[]) +@np_utils.np_doc('imag') +def imag(val): + val = asarray(val) + # TODO(srbs): np.imag returns a scalar if `val` is a scalar, whereas we always + # return an ndarray. + return math_ops.imag(val) + + +_TO_INT_ = 0 +_TO_FLOAT = 1 + + +def _reduce( + tf_fn, + a, + axis=None, + dtype=None, + keepdims=None, + promote_int=_TO_INT_, + tf_bool_fn=None, + preserve_bool=False, +): + """A general reduction function. + + Args: + tf_fn: the TF reduction function. + a: the array to be reduced. + axis: (optional) the axis along which to do the reduction. If None, all + dimensions are reduced. + dtype: (optional) the dtype of the result. + keepdims: (optional) whether to keep the reduced dimension(s). + promote_int: how to promote integer and bool inputs. There are three + choices. (1) `_TO_INT_` always promotes them to np.int_ or np.uint; (2) + `_TO_FLOAT` always promotes them to a float type (determined by + dtypes.default_float_type); (3) None: don't promote. + tf_bool_fn: (optional) the TF reduction function for bool inputs. It will + only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s dtype + is `np.bool_` and `preserve_bool` is True. + preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s dtype + is `np.bool_` (some reductions such as np.sum convert bools to integers, + while others such as np.max preserve bools. + + Returns: + An ndarray. 
+ """ + if dtype: + dtype = np_utils.result_type(dtype) + if keepdims is None: + keepdims = False + a = asarray(a, dtype=dtype) + if ( + dtype == np.bool_ or preserve_bool and a.dtype == np.bool_ + ) and tf_bool_fn is not None: + return tf_bool_fn(input_tensor=a, axis=axis, keepdims=keepdims) + if dtype is None: + dtype = a.dtype.as_numpy_dtype + if np.issubdtype(dtype, np.integer) or dtype == np.bool_: + if promote_int == _TO_INT_: + # If a is an integer/bool type and whose bit width is less than np.int_, + # numpy up-casts it to np.int_ based on the documentation at + # https://numpy.org/doc/1.18/reference/generated/numpy.sum.html + if dtype == np.bool_: + is_signed = True + width = 8 # We can use any number here that is less than 64 + else: + is_signed = np.issubdtype(dtype, np.signedinteger) + width = np.iinfo(dtype).bits + # Numpy int_ and uint are defined as 'long' and 'unsigned long', so + # should have the same bit width. + if ops.is_auto_dtype_conversion_enabled(): + # We default to 32 bits when using auto dtype conversion semantics. + if width < np.iinfo(np.int32).bits: + if is_signed: + dtype = np.int32 + else: + dtype = np.uint32 + else: + if width < np.iinfo(np.int_).bits: + if is_signed: + dtype = np.int_ + else: + dtype = np.uint + a = math_ops.cast(a, dtype) + elif promote_int == _TO_FLOAT: + # Use a default float type. + a = math_ops.cast(a, np_utils.result_type(float)) + + if isinstance(axis, tensor_lib.Tensor) and axis.dtype not in ( + dtypes.int32, + dtypes.int64, + ): + axis = math_ops.cast(axis, dtypes.int64) + + return tf_fn(input_tensor=a, axis=axis, keepdims=keepdims) + + +# TODO (DarrenZhang01): Add `axis` support to the `size` API. 
+@tf_export.tf_export('experimental.numpy.size', v1=[]) +@np_utils.np_doc('size') +def size(x, axis=None): # pylint: disable=missing-docstring + if axis is not None: + raise NotImplementedError( + 'axis argument is not supported in the current `np.size` implementation' + ) + if isinstance(x, (int, float, np.int32, np.int64, np.float32, np.float64)): + return 1 + x = asarray(x) + if x.shape.is_fully_defined(): + return np.prod(x.shape.as_list(), dtype=int) + else: + return array_ops.size_v2(x) + + +@tf_export.tf_export('experimental.numpy.sum', v1=[]) +@np_utils.np_doc('sum') +def sum(a, axis=None, dtype=None, keepdims=None): # pylint: disable=redefined-builtin + return _reduce( + math_ops.reduce_sum, + a, + axis=axis, + dtype=dtype, + keepdims=keepdims, + tf_bool_fn=math_ops.reduce_any, + ) + + +@tf_export.tf_export('experimental.numpy.prod', v1=[]) +@np_utils.np_doc('prod') +def prod(a, axis=None, dtype=None, keepdims=None): + return _reduce( + math_ops.reduce_prod, + a, + axis=axis, + dtype=dtype, + keepdims=keepdims, + tf_bool_fn=math_ops.reduce_all, + ) + + +@tf_export.tf_export('experimental.numpy.mean', v1=[]) +@np_utils.np_doc('mean', unsupported_params=['out']) +def mean(a, axis=None, dtype=None, out=None, keepdims=None): + if out is not None: + raise ValueError('Setting out is not supported.') + return _reduce( + math_ops.reduce_mean, + a, + axis=axis, + dtype=dtype, + keepdims=keepdims, + promote_int=_TO_FLOAT, + ) + + +@tf_export.tf_export('experimental.numpy.amax', v1=[]) +@np_utils.np_doc('amax', unsupported_params=['out']) +def amax(a, axis=None, out=None, keepdims=None): + if out is not None: + raise ValueError('Setting out is not supported.') + return _reduce( + math_ops.reduce_max, + a, + axis=axis, + dtype=None, + keepdims=keepdims, + promote_int=None, + tf_bool_fn=math_ops.reduce_any, + preserve_bool=True, + ) + + +@tf_export.tf_export('experimental.numpy.amin', v1=[]) +@np_utils.np_doc('amin', unsupported_params=['out']) +def amin(a, axis=None, 
out=None, keepdims=None): + if out is not None: + raise ValueError('Setting out is not supported.') + return _reduce( + math_ops.reduce_min, + a, + axis=axis, + dtype=None, + keepdims=keepdims, + promote_int=None, + tf_bool_fn=math_ops.reduce_all, + preserve_bool=True, + ) + + +@tf_export.tf_export('experimental.numpy.var', v1=[]) +@np_utils.np_doc('var') +def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=None): # pylint: disable=missing-docstring + if dtype: + working_dtype = np_utils.result_type(a, dtype) + else: + working_dtype = None + if out is not None: + raise ValueError('Setting out is not supported.') + if ddof != 0: + # TF reduce_variance doesn't support ddof, so calculate it using raw ops. + def reduce_fn(input_tensor, axis, keepdims): + means = math_ops.reduce_mean(input_tensor, axis=axis, keepdims=True) + centered = input_tensor - means + if input_tensor.dtype in (dtypes.complex64, dtypes.complex128): + centered = math_ops.cast( + math_ops.real(centered * math_ops.conj(centered)), + input_tensor.dtype, + ) + else: + centered = math_ops.square(centered) + squared_deviations = math_ops.reduce_sum( + centered, axis=axis, keepdims=keepdims + ) + + if axis is None: + n = array_ops.size(input_tensor) + else: + if axis < 0: + axis += array_ops.rank(input_tensor) + n = math_ops.reduce_prod( + array_ops.gather(array_ops.shape(input_tensor), axis) + ) + n = math_ops.cast(n - ddof, input_tensor.dtype) + + return math_ops.cast(math_ops.divide(squared_deviations, n), dtype) + + else: + reduce_fn = math_ops.reduce_variance + + result = _reduce( + reduce_fn, + a, + axis=axis, + dtype=working_dtype, + keepdims=keepdims, + promote_int=_TO_FLOAT, + ) + if dtype: + result = math_ops.cast(result, dtype) + return result + + +@tf_export.tf_export('experimental.numpy.std', v1=[]) +@np_utils.np_doc('std') +def std(a, axis=None, keepdims=None): # pylint: disable=missing-function-docstring + return _reduce( + math_ops.reduce_std, + a, + axis=axis, + dtype=None, + 
keepdims=keepdims, + promote_int=_TO_FLOAT, + ) + + +@tf_export.tf_export('experimental.numpy.ravel', v1=[]) +@np_utils.np_doc('ravel') +def ravel(a): # pylint: disable=missing-docstring + a = asarray(a) + return array_ops.reshape(a, [-1]) + + +@tf_export.tf_export('experimental.numpy.real', v1=[]) +@np_utils.np_doc('real') +def real(val): + val = asarray(val) + # TODO(srbs): np.real returns a scalar if val is a scalar, whereas we always + # return an ndarray. + return math_ops.real(val) + + +@tf_export.tf_export('experimental.numpy.repeat', v1=[]) +@np_utils.np_doc('repeat') +def repeat(a, repeats, axis=None): # pylint: disable=missing-docstring + a = asarray(a) + original_shape = a._shape_as_list() # pylint: disable=protected-access + # Best effort recovery of the shape. + known_shape = original_shape is not None and None not in original_shape + if known_shape: + if not original_shape: + original_shape = (repeats,) + else: + repeats_np = np.ravel(np.array(repeats)) + if repeats_np.size == 1: + repeats_np = repeats_np.item() + if axis is None: + original_shape = (repeats_np * np.prod(original_shape),) + else: + original_shape[axis] = repeats_np * original_shape[axis] + else: + if axis is None: + original_shape = (repeats_np.sum(),) + else: + original_shape[axis] = repeats_np.sum() + + repeats = asarray(repeats) + result = array_ops.repeat(a, repeats, axis) + if known_shape: + result.set_shape(original_shape) + + return result + + +@tf_export.tf_export('experimental.numpy.around', v1=[]) +@np_utils.np_doc('around') +def around(a, decimals=0): # pylint: disable=missing-docstring + a = asarray(a) + dtype = a.dtype.as_numpy_dtype + factor = math.pow(10, decimals) + if np.issubdtype(dtype, np.inexact): + factor = math_ops.cast(factor, dtype) + else: + # Use float as the working dtype when a.dtype is exact (e.g. integer), + # because `decimals` can be negative. 
+ float_dtype = np_utils.result_type(float) + a = a.astype(float_dtype) + factor = math_ops.cast(factor, float_dtype) + a = math_ops.multiply(a, factor) + a = math_ops.round(a) + a = math_ops.divide(a, factor) + return a.astype(dtype) + + +setattr(np_arrays.ndarray, '__round__', around) + + +@tf_export.tf_export('experimental.numpy.reshape', v1=[]) +@np_utils.np_doc('reshape') +def reshape(a, newshape, order='C'): + """order argument can only b 'C' or 'F'.""" + if order not in {'C', 'F'}: + raise ValueError('Unsupported order argument {}'.format(order)) + + a = asarray(a) + if isinstance(newshape, int): + newshape = [newshape] + + if order == 'F': + r = array_ops.transpose( + array_ops.reshape(array_ops.transpose(a), newshape[::-1]) + ) + else: + r = array_ops.reshape(a, newshape) + + return r + + +def _reshape_method_wrapper(a, *newshape, **kwargs): + order = kwargs.pop('order', 'C') + if kwargs: + raise ValueError('Unsupported arguments: {}'.format(kwargs.keys())) + + if len(newshape) == 1 and not isinstance(newshape[0], int): + newshape = newshape[0] + + return reshape(a, newshape, order=order) + + +@tf_export.tf_export('experimental.numpy.expand_dims', v1=[]) +@np_utils.np_doc('expand_dims') +def expand_dims(a, axis): + a = asarray(a) + return array_ops.expand_dims(a, axis=axis) + + +@tf_export.tf_export('experimental.numpy.squeeze', v1=[]) +@np_utils.np_doc('squeeze') +def squeeze(a, axis=None): + a = asarray(a) + return array_ops.squeeze(a, axis) + + +@tf_export.tf_export('experimental.numpy.flatten', v1=[]) +@np_utils.np_doc('flatten', link=np_utils.NoLink()) +def flatten(a, order='C'): + a = asarray(a) + if order == 'C' or order == 'A' or order == 'K': + # Row major. + return array_ops.reshape(a, [-1]) + elif order == 'F': + # Column major + return array_ops.reshape(array_ops.transpose(a), [-1]) + else: + raise ValueError( + 'order can only be C, A, K (all row major) or F (column major).' 
+ ) + + +@tf_export.tf_export('experimental.numpy.transpose', v1=[]) +@np_utils.np_doc('transpose') +def transpose(a, axes=None): + a = asarray(a) + if axes is not None: + axes = asarray(axes) + return array_ops.transpose(a=a, perm=axes) + + +@tf_export.tf_export('experimental.numpy.swapaxes', v1=[]) +@np_utils.np_doc('swapaxes') +def swapaxes(a, axis1, axis2): # pylint: disable=missing-docstring + a = asarray(a) + + def adjust_axes(axes, rank): + def f(x): + if isinstance(x, int): + if x < 0: + x = x + rank + else: + x = array_ops.where_v2(x < 0, np_utils.add(x, a_rank), x) + return x + + return nest.map_structure(f, axes) + + if ( + a.shape.rank is not None + and isinstance(axis1, int) + and isinstance(axis2, int) + ): + # This branch makes sure `perm` is statically known, to avoid a + # not-compile-time-constant XLA error. + a_rank = a.shape.rank + axis1, axis2 = adjust_axes((axis1, axis2), a_rank) + perm = list(range(a_rank)) + perm[axis1] = axis2 + perm[axis2] = axis1 + else: + a_rank = array_ops.rank(a) + axis1, axis2 = adjust_axes((axis1, axis2), a_rank) + perm = math_ops.range(a_rank) + perm = array_ops.tensor_scatter_update( + perm, [[axis1], [axis2]], [axis2, axis1] + ) + a = array_ops.transpose(a, perm) + return a + + +@tf_export.tf_export('experimental.numpy.moveaxis', v1=[]) +@np_utils.np_doc('moveaxis') +def moveaxis(a, source, destination): # pylint: disable=missing-docstring + """Raises ValueError if source, destination not in (-ndim(a), ndim(a)).""" + if not source and not destination: + return a + + a = asarray(a) + + if isinstance(source, int): + source = (source,) + if isinstance(destination, int): + destination = (destination,) + if len(source) != len(destination): + raise ValueError('The lengths of source and destination must equal') + + a_rank = np_utils._maybe_static(array_ops.rank(a)) # pylint: disable=protected-access + + def _correct_axis(axis, rank): + if axis < 0: + return axis + rank + return axis + + source = tuple(_correct_axis(axis, 
a_rank) for axis in source) + destination = tuple(_correct_axis(axis, a_rank) for axis in destination) + + if a.shape.rank is not None: + perm = [i for i in range(a_rank) if i not in source] + for dest, src in sorted(zip(destination, source)): + assert dest <= len(perm) + perm.insert(dest, src) + else: + r = math_ops.range(a_rank) + + def _remove_indices(a, b): + """Remove indices (`b`) from `a`.""" + items = array_ops_stack.unstack( + sort_ops.sort(array_ops_stack.stack(b)), num=len(b) + ) + + i = 0 + result = [] + + for item in items: + result.append(a[i:item]) + i = item + 1 + + result.append(a[i:]) + + return array_ops.concat(result, 0) + + minus_sources = _remove_indices(r, source) + minus_dest = _remove_indices(r, destination) + + perm = array_ops.scatter_nd( + array_ops.expand_dims(minus_dest, 1), minus_sources, [a_rank] + ) + perm = array_ops.tensor_scatter_update( + perm, array_ops.expand_dims(destination, 1), source + ) + a = array_ops.transpose(a, perm) + + return a + + +@tf_export.tf_export('experimental.numpy.pad', v1=[]) +@np_utils.np_doc('pad') +def pad(array, pad_width, mode, **kwargs): # pylint: disable=redefined-outer-name + """Only supports modes 'constant', 'reflect' and 'symmetric' currently.""" + constant_values = kwargs.get('constant_values', 0) + if not (mode == 'constant' or mode == 'reflect' or mode == 'symmetric'): + raise ValueError('Unsupported padding mode: ' + mode) + mode = mode.upper() + array = asarray(array) + pad_width = asarray(pad_width, dtype=dtypes.int32) + return array_ops.pad( + tensor=array, + paddings=pad_width, + mode=mode, + constant_values=constant_values, + ) + + +@tf_export.tf_export('experimental.numpy.take', v1=[]) +@np_utils.np_doc('take') +def take(a, indices, axis=None, out=None, mode='clip'): + """out argument is not supported, and default mode is clip.""" + if out is not None: + raise ValueError('out argument is not supported in take.') + + if mode not in {'raise', 'clip', 'wrap'}: + raise ValueError("Invalid 
mode '{}' for take".format(mode)) + + a = asarray(a) + indices = asarray(indices) + + if axis is None: + a = array_ops.reshape(a, [-1]) + axis = 0 + + axis_size = array_ops.shape(a, out_type=indices.dtype)[axis] + if mode == 'clip': + indices = clip_ops.clip_by_value(indices, 0, axis_size - 1) + elif mode == 'wrap': + indices = math_ops.floormod(indices, axis_size) + else: + raise ValueError("The 'raise' mode to take is not supported.") + + return array_ops.gather(a, indices, axis=axis) + + +@tf_export.tf_export('experimental.numpy.where', v1=[]) +@np_utils.np_doc_only('where') +def where(condition, x=None, y=None): + """Raises ValueError if exactly one of x or y is not None.""" + condition = asarray(condition, dtype=np.bool_) + if x is None and y is None: + return nonzero(condition) + elif x is not None and y is not None: + x, y = _promote_dtype(x, y) + return array_ops.where_v2(condition, x, y) + raise ValueError('Both x and y must be ndarrays, or both must be None.') + + +@tf_export.tf_export('experimental.numpy.select', v1=[]) +@np_utils.np_doc('select') +def select(condlist, choicelist, default=0): # pylint: disable=missing-docstring + if len(condlist) != len(choicelist): + msg = 'condlist must have length equal to choicelist ({} vs {})' + raise ValueError(msg.format(len(condlist), len(choicelist))) + if not condlist: + raise ValueError('condlist must be non-empty') + choices = _promote_dtype(default, *choicelist) + choicelist = choices[1:] + output = choices[0] + # The traversal is in reverse order so we can return the first value in + # choicelist where condlist is True. 
+ for cond, choice in zip(condlist[::-1], choicelist[::-1]): + output = where(cond, choice, output) + return output + + +@tf_export.tf_export('experimental.numpy.shape', v1=[]) +@np_utils.np_doc( + 'shape', + link=np_utils.Link( + 'https://numpy.org/doc/1.18/reference/generated/numpy.shape.html' + ), +) +def shape(a): + a = asarray(a) + return a.shape + + +@tf_export.tf_export('experimental.numpy.ndim', v1=[]) +@np_utils.np_doc('ndim', link=np_utils.NoLink()) +def ndim(a): + a = asarray(a) + return a.ndim + + +@tf_export.tf_export('experimental.numpy.isscalar', v1=[]) +@np_utils.np_doc('isscalar') +def isscalar(num): + return ndim(num) == 0 + + +def _boundaries_to_sizes(a, boundaries, axis): + """Converting boundaries of splits to sizes of splits. + + Args: + a: the array to be split. + boundaries: the boundaries, as in np.split. + axis: the axis along which to split. + + Returns: + A list of sizes of the splits, as in tf.split. + """ + if axis >= len(a.shape): + raise ValueError('axis %s is out of bound for shape %s' % (axis, a.shape)) + total_size = a.shape[axis] + sizes = [] + sizes_sum = 0 + prev = 0 + for i, b in enumerate(boundaries): + size = b - prev + if size < 0: + raise ValueError( + 'The %s-th boundary %s is smaller than the previous boundary %s' + % (i, b, prev) + ) + size = builtins.min(size, builtins.max(0, total_size - sizes_sum)) + sizes.append(size) + sizes_sum += size + prev = b + sizes.append(builtins.max(0, total_size - sizes_sum)) + return sizes + + +@tf_export.tf_export('experimental.numpy.split', v1=[]) +@np_utils.np_doc('split') +def split(ary, indices_or_sections, axis=0): + ary = asarray(ary) + if not isinstance(indices_or_sections, int): + indices_or_sections = _boundaries_to_sizes(ary, indices_or_sections, axis) + return array_ops.split(ary, indices_or_sections, axis=axis) + + +def _split_on_axis(np_fun_name, axis): # pylint: disable=missing-function-docstring + @np_utils.np_doc(np_fun_name) + def f(ary, indices_or_sections): + # for 
1-D array, hsplit becomes vsplit + new_axis = np_utils.cond( + math_ops.equal(axis, 1), + lambda: np_utils.cond( # pylint: disable=g-long-lambda + math_ops.equal(array_ops.rank(ary), 1), lambda: 0, lambda: axis + ), + lambda: axis, + ) + if isinstance(indices_or_sections, int): + ary_shape = ary.shape[new_axis] + if ary_shape is not None and ary_shape % indices_or_sections: + raise ValueError('array split does not result in an equal division') + return split(ary, indices_or_sections, axis=new_axis) + + return f + + +vsplit = tf_export.tf_export('experimental.numpy.vsplit', v1=[])( + _split_on_axis('vsplit', axis=0) +) +hsplit = tf_export.tf_export('experimental.numpy.hsplit', v1=[])( + _split_on_axis('hsplit', axis=1) +) +dsplit = tf_export.tf_export('experimental.numpy.dsplit', v1=[])( + _split_on_axis('dsplit', axis=2) +) + + +@tf_export.tf_export('experimental.numpy.broadcast_to', v1=[]) +@np_utils.np_doc('broadcast_to') +def broadcast_to(array, shape): # pylint: disable=redefined-outer-name + return full(shape, array) + + +@tf_export.tf_export('experimental.numpy.stack', v1=[]) +@np_utils.np_doc('stack') +def stack(arrays, axis=0): # pylint: disable=missing-function-docstring + if isinstance(arrays, (np_arrays.ndarray, tensor_lib.Tensor)): + arrays = asarray(arrays) + if axis == 0: + return arrays + else: + return swapaxes(arrays, 0, axis) + arrays = _promote_dtype(*arrays) # pylint: disable=protected-access + unwrapped_arrays = [ + a if isinstance(a, np_arrays.ndarray) else a for a in arrays + ] + return asarray(array_ops_stack.stack(unwrapped_arrays, axis)) + + +@tf_export.tf_export('experimental.numpy.hstack', v1=[]) +@np_utils.np_doc('hstack') +def hstack(tup): + arrays = [atleast_1d(a) for a in tup] + arrays = _promote_dtype(*arrays) # pylint: disable=protected-access + unwrapped_arrays = [ + a if isinstance(a, np_arrays.ndarray) else a for a in arrays + ] + rank = array_ops.rank(unwrapped_arrays[0]) + return np_utils.cond( + math_ops.equal(rank, 1), + 
lambda: array_ops.concat(unwrapped_arrays, axis=0), + lambda: array_ops.concat(unwrapped_arrays, axis=1), + ) + + +@tf_export.tf_export('experimental.numpy.vstack', v1=[]) +@np_utils.np_doc('vstack') +def vstack(tup): + arrays = [atleast_2d(a) for a in tup] + arrays = _promote_dtype(*arrays) # pylint: disable=protected-access + unwrapped_arrays = [ + a if isinstance(a, np_arrays.ndarray) else a for a in arrays + ] + return array_ops.concat(unwrapped_arrays, axis=0) + + +@tf_export.tf_export('experimental.numpy.dstack', v1=[]) +@np_utils.np_doc('dstack') +def dstack(tup): + arrays = [atleast_3d(a) for a in tup] + arrays = _promote_dtype(*arrays) # pylint: disable=protected-access + unwrapped_arrays = [ + a if isinstance(a, np_arrays.ndarray) else a for a in arrays + ] + return array_ops.concat(unwrapped_arrays, axis=2) + + +def _pad_left_to(n, old_shape): + old_shape = asarray(old_shape, dtype=np.int32) + new_shape = array_ops.pad( + old_shape, + [[math_ops.maximum(n - array_ops.size(old_shape), 0), 0]], + constant_values=1, + ) + return asarray(new_shape) + + +def _atleast_nd(n, new_shape, *arys): + """Reshape arrays to be at least `n`-dimensional. + + Args: + n: The minimal rank. + new_shape: a function that takes `n` and the old shape and returns the + desired new shape. + *arys: ndarray(s) to be reshaped. + + Returns: + The reshaped array(s). 
+ """ + + def f(x): + # pylint: disable=g-long-lambda + x = asarray(x) + return asarray( + np_utils.cond( + np_utils.greater(n, array_ops.rank(x)), + lambda: reshape(x, new_shape(n, array_ops.shape(x))), + lambda: x, + ) + ) + + arys = list(map(f, arys)) + if len(arys) == 1: + return arys[0] + else: + return arys + + +@tf_export.tf_export('experimental.numpy.atleast_1d', v1=[]) +@np_utils.np_doc('atleast_1d') +def atleast_1d(*arys): + return _atleast_nd(1, _pad_left_to, *arys) + + +@tf_export.tf_export('experimental.numpy.atleast_2d', v1=[]) +@np_utils.np_doc('atleast_2d') +def atleast_2d(*arys): + return _atleast_nd(2, _pad_left_to, *arys) + + +@tf_export.tf_export('experimental.numpy.atleast_3d', v1=[]) +@np_utils.np_doc('atleast_3d') +def atleast_3d(*arys): # pylint: disable=missing-docstring + def new_shape(_, old_shape): + # pylint: disable=g-long-lambda + ndim_ = array_ops.size(old_shape) + return np_utils.cond( + math_ops.equal(ndim_, 0), + lambda: constant_op.constant([1, 1, 1], dtype=dtypes.int32), + lambda: np_utils.cond( + math_ops.equal(ndim_, 1), + lambda: array_ops.pad(old_shape, [[1, 1]], constant_values=1), + lambda: array_ops.pad(old_shape, [[0, 1]], constant_values=1), + ), + ) + + return _atleast_nd(3, new_shape, *arys) + + +@tf_export.tf_export('experimental.numpy.nonzero', v1=[]) +@np_utils.np_doc('nonzero') +def nonzero(a): + a = atleast_1d(a) + if a.shape.rank is None: + raise ValueError( + "The rank of `a` is unknown, so we can't decide how many " + 'arrays to return.' 
+ ) + return array_ops_stack.unstack( + array_ops.where_v2(math_ops.cast(a, dtypes.bool)), a.shape.rank, axis=1 + ) + + +@tf_export.tf_export('experimental.numpy.diag_indices', v1=[]) +@np_utils.np_doc('diag_indices') +def diag_indices(n, ndim=2): # pylint: disable=missing-docstring,redefined-outer-name + if n < 0: + raise ValueError( + 'n argument to diag_indices must be nonnegative, got {}'.format(n) + ) + if ndim < 0: + raise ValueError( + 'ndim argument to diag_indices must be nonnegative, got {}'.format(ndim) + ) + + return (math_ops.range(n),) * ndim + + +@tf_export.tf_export('experimental.numpy.tri', v1=[]) +@np_utils.np_doc('tri') +def tri(N, M=None, k=0, dtype=None): # pylint: disable=invalid-name,missing-docstring + M = M if M is not None else N + if dtype is not None: + dtype = np_utils.result_type(dtype) + else: + # Use a default float type. + dtype = np_utils.result_type(float) + + if k < 0: + lower = -k - 1 + if lower > N: + r = array_ops.zeros([N, M], dtype) + else: + # Keep as tf bool, since we create an upper triangular matrix and invert + # it. + o = array_ops.ones([N, M], dtype=dtypes.bool) + r = math_ops.cast( + math_ops.logical_not(array_ops.matrix_band_part(o, lower, -1)), dtype + ) + else: + o = array_ops.ones([N, M], dtype) + if k > M: + r = o + else: + r = array_ops.matrix_band_part(o, -1, k) + return r + + +@tf_export.tf_export('experimental.numpy.tril', v1=[]) +@np_utils.np_doc('tril') +def tril(m, k=0): # pylint: disable=missing-docstring + m = asarray(m) + if m.shape.ndims is None: + raise ValueError('Argument to tril should have known rank') + m_shape = m.shape.as_list() + + if len(m_shape) < 2: + raise ValueError('Argument to tril must have rank at least 2') + + if m_shape[-1] is None or m_shape[-2] is None: + raise ValueError( + 'Currently, the last two dimensions of the input array ' + 'need to be known.' 
+ ) + + z = constant_op.constant(0, m.dtype) + + mask = tri(*m_shape[-2:], k=k, dtype=bool) + return array_ops.where_v2( + array_ops.broadcast_to(mask, array_ops.shape(m)), m, z + ) + + +@tf_export.tf_export('experimental.numpy.triu', v1=[]) +@np_utils.np_doc('triu') +def triu(m, k=0): # pylint: disable=missing-docstring + m = asarray(m) + if m.shape.ndims is None: + raise ValueError('Argument to triu should have known rank') + m_shape = m.shape.as_list() + + if len(m_shape) < 2: + raise ValueError('Argument to triu must have rank at least 2') + + if m_shape[-1] is None or m_shape[-2] is None: + raise ValueError( + 'Currently, the last two dimensions of the input array ' + 'need to be known.' + ) + + z = constant_op.constant(0, m.dtype) + + mask = tri(*m_shape[-2:], k=k - 1, dtype=bool) + return array_ops.where_v2( + array_ops.broadcast_to(mask, array_ops.shape(m)), z, m + ) + + +@tf_export.tf_export('experimental.numpy.flip', v1=[]) +@np_utils.np_doc('flip') +def flip(m, axis=None): # pylint: disable=missing-docstring + m = asarray(m) + + if axis is None: + return array_ops.reverse(m, math_ops.range(array_ops.rank(m))) + + axis = np_utils._canonicalize_axis(axis, array_ops.rank(m)) # pylint: disable=protected-access + + return array_ops.reverse(m, [axis]) + + +@tf_export.tf_export('experimental.numpy.flipud', v1=[]) +@np_utils.np_doc('flipud') +def flipud(m): # pylint: disable=missing-docstring + return flip(m, 0) + + +@tf_export.tf_export('experimental.numpy.fliplr', v1=[]) +@np_utils.np_doc('fliplr') +def fliplr(m): # pylint: disable=missing-docstring + return flip(m, 1) + + +@tf_export.tf_export('experimental.numpy.roll', v1=[]) +@np_utils.np_doc('roll') +def roll(a, shift, axis=None): # pylint: disable=missing-docstring + a = asarray(a) + + if axis is not None: + return manip_ops.roll(a, shift, axis) + + # If axis is None, the roll happens as a 1-d tensor. 
+ original_shape = array_ops.shape(a) + a = manip_ops.roll(array_ops.reshape(a, [-1]), shift, 0) + return array_ops.reshape(a, original_shape) + + +@tf_export.tf_export('experimental.numpy.rot90', v1=[]) +@np_utils.np_doc('rot90') +def rot90(m, k=1, axes=(0, 1)): # pylint: disable=missing-docstring + m_rank = array_ops.rank(m) + ax1, ax2 = np_utils._canonicalize_axes(axes, m_rank) # pylint: disable=protected-access + + k = k % 4 + if k == 0: + return m + elif k == 2: + return flip(flip(m, ax1), ax2) + else: + perm = math_ops.range(m_rank) + perm = array_ops.tensor_scatter_update(perm, [[ax1], [ax2]], [ax2, ax1]) + + if k == 1: + return transpose(flip(m, ax2), perm) + else: + return flip(transpose(m, perm), ax2) + + +@tf_export.tf_export('experimental.numpy.vander', v1=[]) +@np_utils.np_doc('vander') +def vander(x, N=None, increasing=False): # pylint: disable=missing-docstring,invalid-name + x = asarray(x) + + x_shape = array_ops.shape(x) + if N is None: + N = x_shape[0] + + N_temp = np_utils.get_static_value(N) # pylint: disable=invalid-name + if N_temp is not None: + N = N_temp + if N < 0: + raise ValueError('N must be nonnegative') + else: + control_flow_assert.Assert(N >= 0, [N]) + + rank = array_ops.rank(x) + rank_temp = np_utils.get_static_value(rank) + if rank_temp is not None: + rank = rank_temp + if rank != 1: + raise ValueError('x must be a one-dimensional array') + else: + control_flow_assert.Assert(math_ops.equal(rank, 1), [rank]) + + if increasing: + start = 0 + limit = N + delta = 1 + else: + start = N - 1 + limit = -1 + delta = -1 + + x = array_ops.expand_dims(x, -1) + return math_ops.pow( + x, math_ops.cast(math_ops.range(start, limit, delta), dtype=x.dtype) + ) + + +@tf_export.tf_export('experimental.numpy.ix_', v1=[]) +@np_utils.np_doc('ix_') +def ix_(*args): # pylint: disable=missing-docstring + n = len(args) + output = [] + for i, a in enumerate(args): + a = asarray(a) + a_rank = array_ops.rank(a) + a_rank_temp = np_utils.get_static_value(a_rank) 
+ if a_rank_temp is not None: + a_rank = a_rank_temp + if a_rank != 1: + raise ValueError( + 'Arguments must be 1-d, got arg {} of rank {}'.format(i, a_rank) + ) + else: + control_flow_assert.Assert(math_ops.equal(a_rank, 1), [a_rank]) + + new_shape = [1] * n + new_shape[i] = -1 + dtype = a.dtype + if dtype == dtypes.bool: + output.append(array_ops.reshape(nonzero(a)[0], new_shape)) + elif dtype.is_integer: + output.append(array_ops.reshape(a, new_shape)) + else: + raise ValueError( + 'Only integer and bool dtypes are supported, got {}'.format(dtype) + ) + + return output + + +@tf_export.tf_export('experimental.numpy.broadcast_arrays', v1=[]) +@np_utils.np_doc('broadcast_arrays') +def broadcast_arrays(*args, **kwargs): # pylint: disable=missing-docstring + subok = kwargs.pop('subok', False) + if subok: + raise ValueError('subok=True is not supported.') + if kwargs: + raise ValueError('Received unsupported arguments {}'.format(kwargs.keys())) + + args = [asarray(arg) for arg in args] + return np_utils.tf_broadcast(*args) + + +@tf_export.tf_export('experimental.numpy.sign', v1=[]) +@np_utils.np_doc_only('sign') +def sign(x, out=None, where=None, **kwargs): # pylint: disable=missing-docstring,redefined-outer-name + if out: + raise ValueError('tf.numpy doesnt support setting out.') + if where: + raise ValueError('tf.numpy doesnt support setting where.') + if kwargs: + raise ValueError('tf.numpy doesnt support setting {}'.format(kwargs.keys())) + + x = asarray(x) + + # Numpy 2.x and later uses the same definition of sign. + if np.lib.NumpyVersion(np.__version__) >= '2.0.0.dev0': + return math_ops.sign(x) + + dtype = x.dtype.as_numpy_dtype + if np.issubdtype(dtype, np.complexfloating): + result = math_ops.cast(math_ops.sign(math_ops.real(x)), dtype) + else: + result = math_ops.sign(x) + + return result + + +# Note that np.take_along_axis may not be present in some supported versions of +# numpy. 
+@tf_export.tf_export('experimental.numpy.take_along_axis', v1=[]) +@np_utils.np_doc('take_along_axis') +def take_along_axis(arr, indices, axis): # pylint: disable=missing-docstring + arr = asarray(arr) + indices = asarray(indices) + + if axis is None: + return take_along_axis(arr.ravel(), indices, 0) + + rank = array_ops.rank(arr) + axis = axis + rank if axis < 0 else axis + + # Broadcast shapes to match, ensure that the axis of interest is not + # broadcast. + arr_shape_original = array_ops.shape(arr, out_type=indices.dtype) + indices_shape_original = array_ops.shape(indices, out_type=indices.dtype) + arr_shape = array_ops.tensor_scatter_update(arr_shape_original, [[axis]], [1]) + indices_shape = array_ops.tensor_scatter_update( + indices_shape_original, [[axis]], [1] + ) + broadcasted_shape = array_ops.broadcast_dynamic_shape( + arr_shape, indices_shape + ) + arr_shape = array_ops.tensor_scatter_update( + broadcasted_shape, [[axis]], [arr_shape_original[axis]] + ) + indices_shape = array_ops.tensor_scatter_update( + broadcasted_shape, [[axis]], [indices_shape_original[axis]] + ) + arr = array_ops.broadcast_to(arr, arr_shape) + indices = array_ops.broadcast_to(indices, indices_shape) + + # Save indices shape so we can restore it later. + possible_result_shape = indices.shape + + # Correct indices since gather doesn't correctly handle negative indices. 
+ indices = array_ops.where_v2(indices < 0, indices + arr_shape[axis], indices) + + swapaxes_ = lambda t: swapaxes(t, axis, -1) + + dont_move_axis_to_end = math_ops.equal(axis, np_utils.subtract(rank, 1)) + arr = np_utils.cond( + dont_move_axis_to_end, lambda: arr, lambda: swapaxes_(arr) + ) + indices = np_utils.cond( + dont_move_axis_to_end, lambda: indices, lambda: swapaxes_(indices) + ) + + arr_shape = array_ops.shape(arr) + arr = array_ops.reshape(arr, [-1, arr_shape[-1]]) + + indices_shape = array_ops.shape(indices) + indices = array_ops.reshape(indices, [-1, indices_shape[-1]]) + + result = array_ops.gather(arr, indices, batch_dims=1) + result = array_ops.reshape(result, indices_shape) + result = np_utils.cond( + dont_move_axis_to_end, lambda: result, lambda: swapaxes_(result) + ) + result.set_shape(possible_result_shape) + + return result + + +# pylint: disable=redefined-builtin,undefined-variable +@tf_export.tf_export('experimental.numpy.max', v1=[]) +@np_utils.np_doc('max', link=np_utils.AliasOf('amax')) +def max(a, axis=None, keepdims=None): + return amax(a, axis=axis, keepdims=keepdims) + + +@tf_export.tf_export('experimental.numpy.min', v1=[]) +@np_utils.np_doc('min', link=np_utils.AliasOf('amin')) +def min(a, axis=None, keepdims=None): + return amin(a, axis=axis, keepdims=keepdims) + + +@tf_export.tf_export('experimental.numpy.round', v1=[]) +@np_utils.np_doc('round', link=np_utils.AliasOf('around')) +def round(a, decimals=0): + return around(a, decimals=decimals) + + +# pylint: enable=redefined-builtin,undefined-variable + + +_SLICE_ERROR = ( + 'only integers, slices (`:`), ellipsis (`...`), ' + 'numpy.newaxis (`None`) and integer or boolean arrays are valid indices' +) + + +def _as_index(idx, need_scalar=True): + """Helper function to parse idx as an index. + + Args: + idx: index + need_scalar: If idx needs to be a scalar value. + + Returns: + A pair, (indx, bool). First one is the parsed index and can be a tensor, + or scalar integer / Dimension. 
Second one is True if rank is known to be 0. + + Raises: + IndexError: For incorrect indices. + """ + if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)): + return idx, True + data = asarray(idx) + if data.dtype == dtypes.bool: + if data.shape.ndims != 1: + # TODO(agarwal): handle higher rank boolean masks. + raise NotImplementedError('Need rank 1 for bool index %s' % idx) + data = array_ops.where_v2(data) + data = array_ops.reshape(data, [-1]) + if need_scalar and data.shape.rank not in (None, 0): + raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx)) + np_dtype = data.dtype.as_numpy_dtype + if not np.issubdtype(np_dtype, np.integer): + raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx)) + if data.dtype not in (dtypes.int64, dtypes.int32): + # TF slicing can only handle int32/int64. So we need to cast. + promoted_dtype = np.promote_types(np.int32, np_dtype) + if promoted_dtype == np.int32: + data = math_ops.cast(data, dtypes.int32) + elif promoted_dtype == np.int64: + data = math_ops.cast(data, dtypes.int64) + else: + raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx)) + return data, data.shape.rank == 0 + + +class _UpdateMethod(enum.Enum): + UPDATE = 0 + ADD = 1 + MIN = 2 + MAX = 3 + + +def _slice_helper(tensor, slice_spec, update_method=None, updates=None): + """Helper function for __getitem__ and _with_index_update_helper. + + This function collects the indices in `slice_spec` into two buckets, which we + can call "idx1" and "idx2" here. idx1 is intended for `strided_slice`, idx2 + `gather`. They also correspond to "basic indices" and "advanced indices" in + numpy. This function supports both reading and writing at the indices. The + reading path can be summarized as `gather(stride_slice(tensor, idx1), + idx2)`. The writing path can be summarized as `strided_slice_update(tensor, + idx1, scatter(strided_slice(tensor, idx1), idx2, updates))`. 
(`gather` here + means `tf.gather` or `tf.gather_nd`; `scatter` here means + `tf.tensor_scatter_update`.) The writing path is inefficient because it needs + to first read out a portion (probably much larger than `updates`) of `tensor` + using `strided_slice`, update it, and then write the portion back. An + alternative approach is to only use `scatter`, which amounts to using the + indexing mechanism of gather/scatter to implement + strided_slice/strided_slice_update. This is feasible for XLA Gather/Scatter + because they support spans (e.g. `2:5`) in indices (as begin/end pairs), but + not TF gather/scatter because they don't support spans (except those that + cover entire dimensions, i.e. `:`). If we materialize spans into individual + indices, the size of the index tensor would explode. (Note that XLA + Gather/Scatter have a similar problem for stride > 1 because they don't + support strides. Indices such as `1:2:8` will need to be materialized into + individual indices such as [1, 3, 5, 7].) + + Args: + tensor: the tensor to be read from or write into. + slice_spec: the indices. + update_method: (optional) a member of `_UpdateMethod`, indicating how to + update the values (replacement, add, etc.). `None` indicates just reading. + updates: (optional) the new values to write into `tensor`. It must have the + same dtype as `tensor`. + + Returns: + The result of reading (if `update_method` is `None`) or the updated `tensor` + after writing. 
+  """
+  begin, end, strides = [], [], []
+  new_axis_mask, shrink_axis_mask = 0, 0
+  begin_mask, end_mask = 0, 0
+  ellipsis_mask = 0
+  advanced_indices = []
+  shrink_indices = []
+  for index, s in enumerate(slice_spec):
+    if isinstance(s, slice):
+      if s.start is not None:
+        begin.append(_as_index(s.start)[0])
+      else:
+        begin.append(0)
+        begin_mask |= 1 << index
+      if s.stop is not None:
+        end.append(_as_index(s.stop)[0])
+      else:
+        end.append(0)
+        end_mask |= 1 << index
+      if s.step is not None:
+        strides.append(_as_index(s.step)[0])
+      else:
+        strides.append(1)
+    elif s is Ellipsis:
+      begin.append(0)
+      end.append(0)
+      strides.append(1)
+      ellipsis_mask |= 1 << index
+    elif s is array_ops.newaxis:
+      begin.append(0)
+      end.append(0)
+      strides.append(1)
+      new_axis_mask |= 1 << index
+    else:
+      s, is_scalar = _as_index(s, False)
+      if is_scalar:
+        begin.append(s)
+        end.append(s + 1)
+        strides.append(1)
+        shrink_axis_mask |= 1 << index
+        shrink_indices.append(index)
+      else:
+        begin.append(0)
+        end.append(0)
+        strides.append(1)
+        begin_mask |= 1 << index
+        end_mask |= 1 << index
+        advanced_indices.append((index, s, ellipsis_mask != 0))
+
+  # stack possibly involves no tensors, so we must use op_scope to pick the correct graph.
+ with ops.name_scope( + None, + 'strided_slice', + [tensor] + begin + end + strides, + skip_on_eager=False, + ) as name: + if begin: + packed_begin, packed_end, packed_strides = ( + array_ops_stack.stack(begin), + array_ops_stack.stack(end), + array_ops_stack.stack(strides), + ) + if ( + packed_begin.dtype == dtypes.int64 + or packed_end.dtype == dtypes.int64 + or packed_strides.dtype == dtypes.int64 + ): + if packed_begin.dtype != dtypes.int64: + packed_begin = math_ops.cast(packed_begin, dtypes.int64) + if packed_end.dtype != dtypes.int64: + packed_end = math_ops.cast(packed_end, dtypes.int64) + if packed_strides.dtype != dtypes.int64: + packed_strides = math_ops.cast(packed_strides, dtypes.int64) + else: + var_empty = constant_op.constant([], dtype=dtypes.int32) + packed_begin = packed_end = packed_strides = var_empty + if update_method == _UpdateMethod.UPDATE and not advanced_indices: + return array_ops.tensor_strided_slice_update( + tensor, + packed_begin, + packed_end, + packed_strides, + updates, + begin_mask=begin_mask, + end_mask=end_mask, + shrink_axis_mask=shrink_axis_mask, + new_axis_mask=new_axis_mask, + ellipsis_mask=ellipsis_mask, + name=name, + ) + else: + # TODO(b/164251540): Find a better way to support update that does not + # involve one read + two writes. + if updates is not None: + original_tensor = tensor + # TODO(agarwal): set_shape on tensor to set rank. + tensor = array_ops.strided_slice( + tensor, + packed_begin, + packed_end, + packed_strides, + begin_mask=begin_mask, + end_mask=end_mask, + shrink_axis_mask=shrink_axis_mask, + new_axis_mask=new_axis_mask, + ellipsis_mask=ellipsis_mask, + name=name, + ) + if not advanced_indices: + if update_method is None: + return tensor + assert update_method != _UpdateMethod.UPDATE + # TF lacks TensorStridedSliceAdd and alike, so we need to do + # read+add+update. 
+ if update_method == _UpdateMethod.ADD: + update_op = math_ops.add + elif update_method == _UpdateMethod.MIN: + update_op = math_ops.minimum + elif update_method == _UpdateMethod.MAX: + update_op = math_ops.maximum + return array_ops.tensor_strided_slice_update( + original_tensor, + packed_begin, + packed_end, + packed_strides, + update_op(tensor, updates), + begin_mask=begin_mask, + end_mask=end_mask, + shrink_axis_mask=shrink_axis_mask, + new_axis_mask=new_axis_mask, + ellipsis_mask=ellipsis_mask, + name=name + '_2', + ) + advanced_indices_map = {} + for index, data, had_ellipsis in advanced_indices: + if had_ellipsis: + num_shrink = len([x for x in shrink_indices if x > index]) + dim = index - len(slice_spec) + num_shrink + else: + num_shrink = len([x for x in shrink_indices if x < index]) + dim = index - num_shrink + advanced_indices_map[dim] = data + dims = sorted(advanced_indices_map.keys()) + dims_contiguous = True + if len(dims) > 1: + if dims[0] < 0 and dims[-1] >= 0: # not all same sign + dims_contiguous = False + else: + for i in range(len(dims) - 1): + if dims[i] + 1 != dims[i + 1]: + dims_contiguous = False + break + indices = [advanced_indices_map[x] for x in dims] + indices = _promote_dtype(*indices) + indices = np_utils.tf_broadcast(*indices) + stacked_indices = array_ops_stack.stack(indices, axis=-1) + # Skip the contiguous-dims optimization for update because there is no + # tf.*scatter* op that supports the `axis` argument. 
+    if not dims_contiguous or updates is not None:
+      if range(len(dims)) != dims:
+        tensor = moveaxis(tensor, dims, range(len(dims)))
+      tensor_shape_prefix = array_ops.shape(
+          tensor, out_type=stacked_indices.dtype
+      )[: len(dims)]
+      stacked_indices = array_ops.where_v2(
+          stacked_indices < 0,
+          stacked_indices + tensor_shape_prefix,
+          stacked_indices,
+      )
+      if updates is None:
+        return array_ops.gather_nd(tensor, stacked_indices)
+      else:
+        # We only need to move-axis `updates` in the contiguous case because
+        # only in this case the result dimensions of advanced indexing are in
+        # the middle of `updates`. In the non-contiguous case, those dimensions
+        # are always at the front.
+        if dims_contiguous:
+          # TODO(wangpeng): Support unknown rank (e.g. by partially flattening
+          # `updates`)
+          if stacked_indices.shape.rank is None:
+            raise NotImplementedError(
+                'Rank of the advanced indices must currently be known'
+            )
+          batch_size = stacked_indices.shape.rank - 1
+          batch_start = dims[0]
+          if batch_start < 0:
+            batch_start += len(dims) - batch_size
+
+          def range_(start, length):
+            return range(start, start + length)
+
+          updates = moveaxis(
+              updates, range_(batch_start, batch_size), range(batch_size)
+          )
+        if update_method == _UpdateMethod.UPDATE:
+          update_op = array_ops.tensor_scatter_update
+        elif update_method == _UpdateMethod.ADD:
+          update_op = array_ops.tensor_scatter_add
+        elif update_method == _UpdateMethod.MIN:
+          update_op = array_ops.tensor_scatter_min
+        elif update_method == _UpdateMethod.MAX:
+          update_op = array_ops.tensor_scatter_max
+        tensor = update_op(tensor, stacked_indices, updates)
+        if range(len(dims)) != dims:
+          tensor = moveaxis(tensor, range(len(dims)), dims)
+        return array_ops.tensor_strided_slice_update(
+            original_tensor,
+            packed_begin,
+            packed_end,
+            packed_strides,
+            tensor,
+            begin_mask=begin_mask,
+            end_mask=end_mask,
+            shrink_axis_mask=shrink_axis_mask,
+            new_axis_mask=new_axis_mask,
+            ellipsis_mask=ellipsis_mask,
+            name=name + '_2',
+        )
+ # Note that gather_nd does not support gathering from inside the array. + # To avoid shuffling data back and forth, we transform the indices and + # do a gather instead. + rank = np_utils._maybe_static(array_ops.rank(tensor)) # pylint: disable=protected-access + dims = [(x + rank if x < 0 else x) for x in dims] + shape_tensor = array_ops.shape(tensor) + dim_sizes = array_ops.gather(shape_tensor, dims) + if len(dims) == 1: + stacked_indices = indices[0] + stacked_indices = math_ops.cast(stacked_indices, dtypes.int32) + stacked_indices = array_ops.where_v2( + stacked_indices < 0, stacked_indices + dim_sizes, stacked_indices + ) + axis = dims[0] + if len(dims) > 1: + index_scaling = math_ops.cumprod(dim_sizes, reverse=True, exclusive=True) + + def _tensordot(a, b): + # TODO(b/168657656): This function should be replaced by + # tensordot(axis=1) once MatMul has int32 XLA kernel. + b = array_ops.broadcast_to(b, array_ops.shape(a)) + return math_ops.reduce_sum(a * b, axis=-1) + + stacked_indices = _tensordot(stacked_indices, index_scaling) + flat_shape = array_ops.concat( + [shape_tensor[:axis], [-1], shape_tensor[axis + len(dims) :]], axis=0 + ) + tensor = array_ops.reshape(tensor, flat_shape) + + return array_ops.gather(tensor, stacked_indices, axis=axis) + + +def _as_spec_tuple(slice_spec): + """Convert slice_spec to tuple.""" + if isinstance(slice_spec, (list, tuple)) and not isinstance( + slice_spec, np.ndarray + ): + is_index = True + for s in slice_spec: + if s is None or s is Ellipsis or isinstance(s, (list, tuple, slice)): + is_index = False + break + elif isinstance(s, (np_arrays.ndarray, np.ndarray)) and s.ndim != 0: + is_index = False + break + if not is_index: + return tuple(slice_spec) + return (slice_spec,) + + +def _getitem(self, slice_spec): + """Implementation of ndarray.__getitem__.""" + if ( + isinstance(slice_spec, bool) + or ( + isinstance(slice_spec, core_tf_types.Tensor) + and slice_spec.dtype == dtypes.bool + ) + or ( + isinstance(slice_spec, 
(np.ndarray, np_arrays.ndarray)) + and slice_spec.dtype == np.bool_ + ) + ): + return array_ops.boolean_mask(tensor=self, mask=slice_spec) + + if not isinstance(slice_spec, tuple): + slice_spec = _as_spec_tuple(slice_spec) + + result_t = _slice_helper(self, slice_spec) + return result_t + + +def _with_index_update_helper(update_method, a, slice_spec, updates): + """Implementation of ndarray._with_index_*.""" + if ( + isinstance(slice_spec, bool) + or ( + isinstance(slice_spec, core_tf_types.Tensor) + and slice_spec.dtype == dtypes.bool + ) + or ( + isinstance(slice_spec, (np.ndarray, np_arrays.ndarray)) + and slice_spec.dtype == np.bool_ + ) + ): + slice_spec = nonzero(slice_spec) + + if not isinstance(slice_spec, tuple): + slice_spec = _as_spec_tuple(slice_spec) + + a_dtype = a.dtype + a, updates = _promote_dtype_binary(a, updates) + result_t = _slice_helper(a, slice_spec, update_method, updates) + return result_t.astype(a_dtype) + + +setattr(np_arrays.ndarray, '_numpy_style_getitem', _getitem) +setattr( + np_arrays.ndarray, + '_with_index_update', + functools.partial(_with_index_update_helper, _UpdateMethod.UPDATE), +) +setattr( + np_arrays.ndarray, + '_with_index_add', + functools.partial(_with_index_update_helper, _UpdateMethod.ADD), +) +setattr( + np_arrays.ndarray, + '_with_index_min', + functools.partial(_with_index_update_helper, _UpdateMethod.MIN), +) +setattr( + np_arrays.ndarray, + '_with_index_max', + functools.partial(_with_index_update_helper, _UpdateMethod.MAX), +) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_arrays.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_arrays.py new file mode 100644 index 0000000000000000000000000000000000000000..78257ae37ec66b9fe872bef42c17a2b6e317e0ea --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_arrays.py @@ -0,0 +1,50 @@ +# Copyright 2020 
The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ndarray class.""" + +# pylint: disable=g-direct-tensorflow-import + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import tensor +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.ops.numpy_ops import np_dtypes + + +def convert_to_tensor(value, dtype=None, dtype_hint=None): + """Wrapper over `tf.convert_to_tensor`. + + Args: + value: value to convert + dtype: (optional) the type we would like it to be converted to. + dtype_hint: (optional) soft preference for the type we would like it to be + converted to. `tf.convert_to_tensor` will attempt to convert value to this + type first, but will not fail if conversion is not possible falling back + to inferring the type instead. + + Returns: + Value converted to tf.Tensor. + """ + # A safer version of `tf.convert_to_tensor` to work around b/149876037. + # TODO(wangpeng): Remove this function once the bug is fixed. 
+ if (dtype is None and isinstance(value, int) and + value >= 2**63): + dtype = dtypes.uint64 + elif dtype is None and dtype_hint is None and isinstance(value, float): + dtype = np_dtypes.default_float_type() + return tensor_conversion.convert_to_tensor_v2_with_dispatch( + value, dtype=dtype, dtype_hint=dtype_hint) + + +ndarray = tensor.Tensor diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_config.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_config.py new file mode 100644 index 0000000000000000000000000000000000000000..c0a80c38e2975b61fb0b3b5fc4a81a1194934803 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_config.py @@ -0,0 +1,58 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ==============================================================================
"""Config functions for TF NumPy."""

from tensorflow.python.framework import ops
from tensorflow.python.ops import weak_tensor_ops  # pylint: disable=unused-import
from tensorflow.python.ops.numpy_ops import np_dtypes
from tensorflow.python.ops.numpy_ops import np_math_ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import tf_export


@tf_export.tf_export(
    "experimental.numpy.experimental_enable_numpy_behavior", v1=[]
)
def enable_numpy_behavior(prefer_float32=False, dtype_conversion_mode="legacy"):
  """Enable NumPy behavior on Tensors.

  Enabling NumPy behavior has three effects:
  * It adds to `tf.Tensor` some common NumPy methods such as `T`,
    `reshape` and `ravel`.
  * It changes dtype promotion in `tf.Tensor` operators to be
    compatible with NumPy. For example,
    `tf.ones([], tf.int32) + tf.ones([], tf.float32)` used to throw a
    "dtype incompatible" error, but after this it will return a
    float64 tensor (obeying NumPy's promotion rules).
  * It enhances `tf.Tensor`'s indexing capability to be on par with
    [NumPy's](https://numpy.org/doc/stable/reference/arrays.indexing.html).

  Args:
    prefer_float32: Controls whether dtype inference will use float32 for
      Python floats, or float64 (the default and the NumPy-compatible
      behavior).
    dtype_conversion_mode: a string that specifies promotion mode. This string
      corresponds to a PromoMode Enum and can be 'off', 'legacy', 'safe', or
      'all'. 'safe' or 'all' mode enables the auto dtype conversion semantics.
  """
  if dtype_conversion_mode in ("safe", "all"):
    # The new promotion semantics are only sound when enabled before any
    # dtype has been inferred, so warn if enabled mid-program.
    tf_logging.warning(
        "UserWarning: enabling the new type promotion must happen at the"
        " beginning of the program. Please ensure no TF APIs have been used"
        " yet."
    )
  ops.set_dtype_conversion_mode(dtype_conversion_mode)
  ops.enable_numpy_style_slicing()
  np_math_ops.enable_numpy_methods_on_tensor()
  np_dtypes.set_prefer_float32(prefer_float32)

# --- original diff header (non-Python residue, preserved as a comment) ---
# diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_dtypes.py
#          b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_dtypes.py
# new file mode 100644
# index 0000000000000000000000000000000000000000..3000e1c6e747799e6a4a8bed8323da77f38e34ae
# --- /dev/null
# +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_dtypes.py
# @@ -0,0 +1,216 @@
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dtypes and dtype utilities."""

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.util import tf_export


# We use numpy's dtypes instead of TF's, because the user expects to use them
# with numpy facilities such as `np.dtype(np.int64)` and
# `if x.dtype.type is np.int64`.
# Re-export the NumPy scalar types under `tf.experimental.numpy.*` so users
# can write e.g. `tnp.int64` exactly as they would `np.int64`.
bool_ = np.bool_
tf_export.tf_export('experimental.numpy.bool_', v1=[]).export_constant(
    __name__, 'bool_'
)
complex128 = np.complex128
tf_export.tf_export('experimental.numpy.complex128', v1=[]).export_constant(
    __name__, 'complex128'
)
complex64 = np.complex64
tf_export.tf_export('experimental.numpy.complex64', v1=[]).export_constant(
    __name__, 'complex64'
)
float16 = np.float16
tf_export.tf_export('experimental.numpy.float16', v1=[]).export_constant(
    __name__, 'float16'
)
float32 = np.float32
tf_export.tf_export('experimental.numpy.float32', v1=[]).export_constant(
    __name__, 'float32'
)
float64 = np.float64
tf_export.tf_export('experimental.numpy.float64', v1=[]).export_constant(
    __name__, 'float64'
)
inexact = np.inexact
tf_export.tf_export('experimental.numpy.inexact', v1=[]).export_constant(
    __name__, 'inexact'
)
int_ = np.int_
tf_export.tf_export('experimental.numpy.int_', v1=[]).export_constant(
    __name__, 'int_'
)
int16 = np.int16
tf_export.tf_export('experimental.numpy.int16', v1=[]).export_constant(
    __name__, 'int16'
)
int32 = np.int32
tf_export.tf_export('experimental.numpy.int32', v1=[]).export_constant(
    __name__, 'int32'
)
int64 = np.int64
tf_export.tf_export('experimental.numpy.int64', v1=[]).export_constant(
    __name__, 'int64'
)
int8 = np.int8
tf_export.tf_export('experimental.numpy.int8', v1=[]).export_constant(
    __name__, 'int8'
)
object_ = np.object_
tf_export.tf_export('experimental.numpy.object_', v1=[]).export_constant(
    __name__, 'object_'
)
# np.string_ is aliased to np.bytes_ and deprecated in numpy 2.0.
string_ = np.bytes_
tf_export.tf_export('experimental.numpy.string_', v1=[]).export_constant(
    __name__, 'string_'
)
uint16 = np.uint16
tf_export.tf_export('experimental.numpy.uint16', v1=[]).export_constant(
    __name__, 'uint16'
)
uint32 = np.uint32
tf_export.tf_export('experimental.numpy.uint32', v1=[]).export_constant(
    __name__, 'uint32'
)
uint64 = np.uint64
tf_export.tf_export('experimental.numpy.uint64', v1=[]).export_constant(
    __name__, 'uint64'
)
uint8 = np.uint8
tf_export.tf_export('experimental.numpy.uint8', v1=[]).export_constant(
    __name__, 'uint8'
)
# np.unicode_ is aliased to np.str_ and deprecated in numpy 2.0.
unicode_ = np.str_
tf_export.tf_export('experimental.numpy.unicode_', v1=[]).export_constant(
    __name__, 'unicode_'
)
if int(np.__version__.split('.')[0]) < 2:
  complex_ = np.complex_
  float_ = np.float_
else:
  # Aliases np.complex_ and np.float_ have been removed in Numpy 2.0. Use
  # np.complex128 and np.float64 instead.
  complex_ = np.complex128
  float_ = np.float64
tf_export.tf_export('experimental.numpy.complex_', v1=[]).export_constant(
    __name__, 'complex_'
)
tf_export.tf_export('experimental.numpy.float_', v1=[]).export_constant(
    __name__, 'float_'
)


iinfo = np.iinfo
tf_export.tf_export('experimental.numpy.iinfo', v1=[]).export_constant(
    __name__, 'iinfo'
)


issubdtype = tf_export.tf_export('experimental.numpy.issubdtype', v1=[])(
    np.issubdtype
)


# Downcast map used by `canonicalize_dtype` when float64 is disallowed.
_to_float32 = {
    np.dtype('float64'): np.dtype('float32'),
    np.dtype('complex128'): np.dtype('complex64'),
}


# Cache of TF DType -> np.dtype, filled lazily by `_get_cached_dtype`.
_cached_np_dtypes = {}


# Difference between is_prefer_float32 and is_allow_float64: is_prefer_float32
# only decides which dtype to use for Python floats; is_allow_float64 decides
# whether float64 dtypes can ever appear in programs. The latter is more
# restrictive than the former.
_prefer_float32 = False


# TODO(b/178862061): Consider removing this knob
_allow_float64 = True


def is_prefer_float32():
  """Returns whether Python floats are inferred as float32 (vs float64)."""
  return _prefer_float32


def set_prefer_float32(b):
  """Sets whether Python floats should be inferred as float32."""
  global _prefer_float32
  _prefer_float32 = b


def is_allow_float64():
  """Returns whether float64/complex128 dtypes may appear in programs."""
  return _allow_float64


def set_allow_float64(b):
  """Sets whether float64/complex128 dtypes may appear in programs."""
  global _allow_float64
  _allow_float64 = b


def canonicalize_dtype(dtype):
  """Downcasts 64-bit float/complex dtypes when float64 is disallowed."""
  if not _allow_float64:
    try:
      return _to_float32[dtype]
    except KeyError:
      pass
  # Either float64 is allowed, or `dtype` has no 32-bit counterpart.
  return dtype


def _result_type(*arrays_and_dtypes):
  """Returns the resulting type given a set of arrays."""

  def preprocess_float(x):
    # When preferring float32, pre-narrow Python scalars so that
    # `np.result_type` does not promote everything to 64 bits.
    if is_prefer_float32():
      if isinstance(x, float):
        return np.float32(x)
      elif isinstance(x, complex):
        return np.complex64(x)
    return x

  arrays_and_dtypes = [preprocess_float(x) for x in arrays_and_dtypes]
  dtype = np.result_type(*arrays_and_dtypes)
  return dtypes.as_dtype(canonicalize_dtype(dtype))


def _get_cached_dtype(dtype):
  """Returns an np.dtype for the TensorFlow DType, memoized in a dict."""
  # NOTE: no `global` statement needed — the dict is mutated, never rebound.
  try:
    return _cached_np_dtypes[dtype]
  except KeyError:
    pass
  cached_dtype = np.dtype(dtype.as_numpy_dtype)
  _cached_np_dtypes[dtype] = cached_dtype
  return cached_dtype


def default_float_type():
  """Gets the default float type.

  Returns:
    If `is_prefer_float32()` is false and `is_allow_float64()` is true, returns
    float64; otherwise returns float32.
  """
  if not is_prefer_float32() and is_allow_float64():
    return float64
  else:
    return float32

# --- original diff header (non-Python residue, preserved as a comment) ---
# diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_math_ops.py
#          b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_math_ops.py
# new file mode 100644
# index 0000000000000000000000000000000000000000..aa014ddb2c9232b273c8fcafee51a469d6120a45
# --- /dev/null
# +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_math_ops.py
# @@ -0,0 +1,1652 @@
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+# ============================================================================== +"""Mathematical operations.""" +# pylint: disable=g-direct-tensorflow-import + +import numbers +import sys + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import bitwise_ops +from tensorflow.python.ops import clip_ops +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn_ops +from tensorflow.python.ops import sort_ops +from tensorflow.python.ops import special_math_ops +from tensorflow.python.ops import while_loop +from tensorflow.python.ops.numpy_ops import np_array_ops +from tensorflow.python.ops.numpy_ops import np_arrays +from tensorflow.python.ops.numpy_ops import np_dtypes +from tensorflow.python.ops.numpy_ops import np_utils +from tensorflow.python.util import tf_export + + +pi = np.pi +tf_export.tf_export('experimental.numpy.pi', v1=[]).export_constant( + __name__, 'pi' +) +e = np.e +tf_export.tf_export('experimental.numpy.e', v1=[]).export_constant( + __name__, 'e' +) +inf = np.inf +tf_export.tf_export('experimental.numpy.inf', v1=[]).export_constant( + __name__, 'inf' +) + + +@tf_export.tf_export('experimental.numpy.dot', v1=[]) +@np_utils.np_doc_only('dot') +def dot(a, b): # pylint: disable=missing-docstring + def f(a, b): # pylint: disable=missing-docstring + return np_utils.cond( + np_utils.logical_or( + math_ops.equal(array_ops.rank(a), 0), + math_ops.equal(array_ops.rank(b), 0), + ), + lambda: a * b, + lambda: np_utils.cond( # pylint: disable=g-long-lambda + math_ops.equal(array_ops.rank(b), 1), + lambda: 
math_ops.tensordot(a, b, axes=[[-1], [-1]]), + lambda: math_ops.tensordot(a, b, axes=[[-1], [-2]]), + ), + ) + + return _bin_op(f, a, b) + + +# TODO(wangpeng): Make element-wise ops `ufunc`s +def _bin_op(tf_fun, a, b, promote=True): + if promote: + a, b = np_array_ops._promote_dtype_binary(a, b) # pylint: disable=protected-access + else: + a = np_array_ops.array(a) + b = np_array_ops.array(b) + return tf_fun(a, b) + + +@tf_export.tf_export('experimental.numpy.add', v1=[]) +@np_utils.np_doc('add') +def add(x1, x2): + def add_or_or(x1, x2): + if x1.dtype == dtypes.bool: + assert x2.dtype == dtypes.bool + return math_ops.logical_or(x1, x2) + return math_ops.add(x1, x2) + + return _bin_op(add_or_or, x1, x2) + + +@tf_export.tf_export('experimental.numpy.subtract', v1=[]) +@np_utils.np_doc('subtract') +def subtract(x1, x2): + return _bin_op(math_ops.subtract, x1, x2) + + +@tf_export.tf_export('experimental.numpy.multiply', v1=[]) +@np_utils.np_doc('multiply') +def multiply(x1, x2): + def mul_or_and(x1, x2): + if x1.dtype == dtypes.bool: + assert x2.dtype == dtypes.bool + return math_ops.logical_and(x1, x2) + return math_ops.multiply(x1, x2) + + return _bin_op(mul_or_and, x1, x2) + + +@tf_export.tf_export('experimental.numpy.true_divide', v1=[]) +@np_utils.np_doc('true_divide') +def true_divide(x1, x2): # pylint: disable=missing-function-docstring + def _avoid_float64(x1, x2): + if x1.dtype == x2.dtype and x1.dtype in (dtypes.int32, dtypes.int64): + x1 = math_ops.cast(x1, dtype=dtypes.float32) + x2 = math_ops.cast(x2, dtype=dtypes.float32) + return x1, x2 + + def f(x1, x2): + if x1.dtype == dtypes.bool: + assert x2.dtype == dtypes.bool + float_ = np_utils.result_type(float) + x1 = math_ops.cast(x1, float_) + x2 = math_ops.cast(x2, float_) + if not np_dtypes.is_allow_float64(): + # math_ops.truediv in Python3 produces float64 when both inputs are int32 + # or int64. We want to avoid that when is_allow_float64() is False. 
+ x1, x2 = _avoid_float64(x1, x2) + return math_ops.truediv(x1, x2) + + return _bin_op(f, x1, x2) + + +@tf_export.tf_export('experimental.numpy.divide', v1=[]) +@np_utils.np_doc('divide') +def divide(x1, x2): # pylint: disable=missing-function-docstring + return true_divide(x1, x2) + + +@tf_export.tf_export('experimental.numpy.floor_divide', v1=[]) +@np_utils.np_doc('floor_divide') +def floor_divide(x1, x2): # pylint: disable=missing-function-docstring + def f(x1, x2): + if x1.dtype == dtypes.bool: + assert x2.dtype == dtypes.bool + x1 = math_ops.cast(x1, dtypes.int8) + x2 = math_ops.cast(x2, dtypes.int8) + return math_ops.floordiv(x1, x2) + + return _bin_op(f, x1, x2) + + +@tf_export.tf_export('experimental.numpy.mod', v1=[]) +@np_utils.np_doc('mod') +def mod(x1, x2): # pylint: disable=missing-function-docstring + def f(x1, x2): + if x1.dtype == dtypes.bool: + assert x2.dtype == dtypes.bool + x1 = math_ops.cast(x1, dtypes.int8) + x2 = math_ops.cast(x2, dtypes.int8) + return math_ops.mod(x1, x2) + + return _bin_op(f, x1, x2) + + +@tf_export.tf_export('experimental.numpy.remainder', v1=[]) +@np_utils.np_doc('remainder') +def remainder(x1, x2): # pylint: disable=missing-function-docstring + return mod(x1, x2) + + +@tf_export.tf_export('experimental.numpy.divmod', v1=[]) +@np_utils.np_doc('divmod') +def divmod(x1, x2): # pylint: disable=redefined-builtin + return floor_divide(x1, x2), mod(x1, x2) + + +@tf_export.tf_export('experimental.numpy.maximum', v1=[]) +@np_utils.np_doc('maximum') +def maximum(x1, x2): # pylint: disable=missing-function-docstring + # Fast path for when maximum is used as relu. 
+ if ( + isinstance(x2, numbers.Real) + and not isinstance(x2, bool) + and x2 == 0 + and isinstance(x1, np_arrays.ndarray) + and x1.dtype != dtypes.bool + ): + return nn_ops.relu(np_array_ops.asarray(x1)) + + def max_or_or(x1, x2): + if x1.dtype == dtypes.bool: + assert x2.dtype == dtypes.bool + return math_ops.logical_or(x1, x2) + return math_ops.maximum(x1, x2) + + return _bin_op(max_or_or, x1, x2) + + +@tf_export.tf_export('experimental.numpy.minimum', v1=[]) +@np_utils.np_doc('minimum') +def minimum(x1, x2): + def min_or_and(x1, x2): + if x1.dtype == dtypes.bool: + assert x2.dtype == dtypes.bool + return math_ops.logical_and(x1, x2) + return math_ops.minimum(x1, x2) + + return _bin_op(min_or_and, x1, x2) + + +@tf_export.tf_export('experimental.numpy.clip', v1=[]) +@np_utils.np_doc('clip') +def clip(a, a_min, a_max): # pylint: disable=missing-docstring + if a_min is None and a_max is None: + raise ValueError('Not more than one of `a_min` and `a_max` may be `None`.') + if a_min is None: + return minimum(a, a_max) + elif a_max is None: + return maximum(a, a_min) + else: + a, a_min, a_max = np_array_ops._promote_dtype(a, a_min, a_max) # pylint: disable=protected-access + return clip_ops.clip_by_value(*np_utils.tf_broadcast(a, a_min, a_max)) + + +@tf_export.tf_export('experimental.numpy.matmul', v1=[]) +@np_utils.np_doc('matmul') +def matmul(x1, x2): # pylint: disable=missing-docstring + def f(x1, x2): + try: + if x1._rank() == 2 and x2._rank() == 2: # pylint: disable=protected-access + # Fast path for known ranks. 
+ return gen_math_ops.mat_mul(x1, x2) + return np_utils.cond( + math_ops.equal(np_utils.tf_rank(x2), 1), + lambda: math_ops.tensordot(x1, x2, axes=1), + lambda: np_utils.cond( # pylint: disable=g-long-lambda + math_ops.equal(np_utils.tf_rank(x1), 1), + lambda: math_ops.tensordot( # pylint: disable=g-long-lambda + x1, x2, axes=[[0], [-2]] + ), + lambda: math_ops.matmul(x1, x2), + ), + ) + except errors.InvalidArgumentError as err: + raise ValueError(str(err)).with_traceback(sys.exc_info()[2]) + + return _bin_op(f, x1, x2) + + +# Exported so it can be called from Tensor.__matmul__. NumPy's matmul handles +# batched matmul as well, so simply including promotion in TF's current +# __matmul__ implementation was not sufficient. +setattr(np_arrays.ndarray, '_matmul', matmul) + + +@tf_export.tf_export('experimental.numpy.tensordot', v1=[]) +@np_utils.np_doc('tensordot') +def tensordot(a, b, axes=2): + return _bin_op(lambda a, b: math_ops.tensordot(a, b, axes=axes), a, b) + + +@tf_export.tf_export('experimental.numpy.inner', v1=[]) +@np_utils.np_doc_only('inner') +def inner(a, b): # pylint: disable=missing-function-docstring + def f(a, b): + return np_utils.cond( + np_utils.logical_or( + math_ops.equal(array_ops.rank(a), 0), + math_ops.equal(array_ops.rank(b), 0), + ), + lambda: a * b, + lambda: math_ops.tensordot(a, b, axes=[[-1], [-1]]), + ) + + return _bin_op(f, a, b) + + +@tf_export.tf_export('experimental.numpy.cross', v1=[]) +@np_utils.np_doc('cross') +def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): # pylint: disable=missing-docstring + def f(a, b): # pylint: disable=missing-docstring + # We can't assign to captured variable `axisa`, so make a new variable + if axis is None: + axis_a = axisa + axis_b = axisb + axis_c = axisc + else: + axis_a = axis + axis_b = axis + axis_c = axis + if axis_a < 0: + axis_a = np_utils.add(axis_a, array_ops.rank(a)) + if axis_b < 0: + axis_b = np_utils.add(axis_b, array_ops.rank(b)) + + def maybe_move_axis_to_last(a, axis): + 
def move_axis_to_last(a, axis): + return array_ops.transpose( + a, + array_ops.concat( + [ + math_ops.range(axis), + math_ops.range(axis + 1, array_ops.rank(a)), + [axis], + ], + axis=0, + ), + ) + + return np_utils.cond( + axis == np_utils.subtract(array_ops.rank(a), 1), + lambda: a, + lambda: move_axis_to_last(a, axis), + ) + + a = maybe_move_axis_to_last(a, axis_a) + b = maybe_move_axis_to_last(b, axis_b) + a_dim = np_utils.getitem(array_ops.shape(a), -1) + b_dim = np_utils.getitem(array_ops.shape(b), -1) + + def maybe_pad_0(a, size_of_last_dim): + def pad_0(a): + return array_ops.pad( + a, + array_ops.concat( + [ + array_ops.zeros([array_ops.rank(a) - 1, 2], dtypes.int32), + constant_op.constant([[0, 1]], dtypes.int32), + ], + axis=0, + ), + ) + + return np_utils.cond( + math_ops.equal(size_of_last_dim, 2), lambda: pad_0(a), lambda: a + ) + + a = maybe_pad_0(a, a_dim) + b = maybe_pad_0(b, b_dim) + c = math_ops.cross(*np_utils.tf_broadcast(a, b)) + if axis_c < 0: + axis_c = np_utils.add(axis_c, array_ops.rank(c)) + + def move_last_to_axis(a, axis): + r = array_ops.rank(a) + return array_ops.transpose( + a, + array_ops.concat( + [math_ops.range(axis), [r - 1], math_ops.range(axis, r - 1)], + axis=0, + ), + ) + + c = np_utils.cond( + (a_dim == 2) & (b_dim == 2), + lambda: c[..., 2], + lambda: np_utils.cond( # pylint: disable=g-long-lambda + axis_c == np_utils.subtract(array_ops.rank(c), 1), + lambda: c, + lambda: move_last_to_axis(c, axis_c), + ), + ) + return c + + return _bin_op(f, a, b) + + +@tf_export.tf_export('experimental.numpy.vdot', v1=[]) +@np_utils.np_doc_only('vdot') +def vdot(a, b): # pylint: disable=missing-docstring + a, b = np_array_ops._promote_dtype(a, b) # pylint: disable=protected-access + a = np_array_ops.reshape(a, [-1]) + b = np_array_ops.reshape(b, [-1]) + if a.dtype == np_dtypes.complex128 or a.dtype == np_dtypes.complex64: + a = conj(a) + return dot(a, b) + + +@tf_export.tf_export('experimental.numpy.power', v1=[]) 
+@np_utils.np_doc('power') +def power(x1, x2): + return _bin_op(math_ops.pow, x1, x2) + + +@tf_export.tf_export('experimental.numpy.float_power', v1=[]) +@np_utils.np_doc('float_power') +def float_power(x1, x2): + return power(x1, x2) + + +@tf_export.tf_export('experimental.numpy.arctan2', v1=[]) +@np_utils.np_doc('arctan2') +def arctan2(x1, x2): + return _bin_op(math_ops.atan2, x1, x2) + + +@tf_export.tf_export('experimental.numpy.nextafter', v1=[]) +@np_utils.np_doc('nextafter') +def nextafter(x1, x2): + return _bin_op(math_ops.nextafter, x1, x2) + + +@tf_export.tf_export('experimental.numpy.heaviside', v1=[]) +@np_utils.np_doc('heaviside') +def heaviside(x1, x2): # pylint: disable=missing-function-docstring + def f(x1, x2): + return array_ops.where_v2( + x1 < 0, + constant_op.constant(0, dtype=x2.dtype), + array_ops.where_v2(x1 > 0, constant_op.constant(1, dtype=x2.dtype), x2), + ) + + y = _bin_op(f, x1, x2) + if not np.issubdtype(y.dtype.as_numpy_dtype, np.inexact): + y = y.astype(np_utils.result_type(float)) + return y + + +@tf_export.tf_export('experimental.numpy.hypot', v1=[]) +@np_utils.np_doc('hypot') +def hypot(x1, x2): + return sqrt(square(x1) + square(x2)) + + +@tf_export.tf_export('experimental.numpy.kron', v1=[]) +@np_utils.np_doc('kron') +def kron(a, b): # pylint: disable=missing-function-docstring + # pylint: disable=protected-access,g-complex-comprehension + a, b = np_array_ops._promote_dtype(a, b) + t_a = np_utils.cond( + a.shape.rank < b.shape.rank, + lambda: np_array_ops.reshape( # pylint: disable=g-long-lambda + a, np_array_ops._pad_left_to(b.shape.rank, a.shape) + ), + lambda: a, + ) + t_b = np_utils.cond( + b.shape.rank < a.shape.rank, + lambda: np_array_ops.reshape( # pylint: disable=g-long-lambda + b, np_array_ops._pad_left_to(a.shape.rank, b.shape) + ), + lambda: b, + ) + + def _make_shape(shape, prepend): + ones = array_ops.ones_like(shape) + if prepend: + shapes = [ones, shape] + else: + shapes = [shape, ones] + return 
array_ops.reshape(array_ops_stack.stack(shapes, axis=1), [-1]) + + a_shape = array_ops.shape(t_a) + b_shape = array_ops.shape(t_b) + a_reshaped = np_array_ops.reshape(t_a, _make_shape(a_shape, False)) + b_reshaped = np_array_ops.reshape(t_b, _make_shape(b_shape, True)) + out_shape = a_shape * b_shape + return np_array_ops.reshape(a_reshaped * b_reshaped, out_shape) + + +@tf_export.tf_export('experimental.numpy.outer', v1=[]) +@np_utils.np_doc('outer') +def outer(a, b): + def f(a, b): + return array_ops.reshape(a, [-1, 1]) * array_ops.reshape(b, [-1]) + + return _bin_op(f, a, b) + + +# This can also be implemented via tf.reduce_logsumexp +@tf_export.tf_export('experimental.numpy.logaddexp', v1=[]) +@np_utils.np_doc('logaddexp') +def logaddexp(x1, x2): + amax = maximum(x1, x2) + delta = x1 - x2 + return np_array_ops.where( + isnan(delta), + x1 + x2, # NaNs or infinities of the same sign. + amax + log1p(exp(-abs(delta))), + ) + + +@tf_export.tf_export('experimental.numpy.logaddexp2', v1=[]) +@np_utils.np_doc('logaddexp2') +def logaddexp2(x1, x2): + amax = maximum(x1, x2) + delta = x1 - x2 + return np_array_ops.where( + isnan(delta), + x1 + x2, # NaNs or infinities of the same sign. + amax + log1p(exp2(-abs(delta))) / np.log(2), + ) + + +@tf_export.tf_export('experimental.numpy.polyval', v1=[]) +@np_utils.np_doc('polyval') +def polyval(p, x): # pylint: disable=missing-function-docstring + def f(p, x): + if p.shape.rank == 0: + p = array_ops.reshape(p, [1]) + p = array_ops_stack.unstack(p) + # TODO(wangpeng): Make tf version take a tensor for p instead of a list. + y = math_ops.polyval(p, x) + # If the polynomial is 0-order, numpy requires the result to be broadcast to + # `x`'s shape. 
+ if len(p) == 1: + y = array_ops.broadcast_to(y, x.shape) + return y + + return _bin_op(f, p, x) + + +@tf_export.tf_export('experimental.numpy.isclose', v1=[]) +@np_utils.np_doc('isclose') +def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): # pylint: disable=missing-docstring + def f(a, b): # pylint: disable=missing-docstring + dtype = a.dtype + if np.issubdtype(dtype.as_numpy_dtype, np.inexact): + rtol_ = ops.convert_to_tensor(rtol, dtype.real_dtype) + atol_ = ops.convert_to_tensor(atol, dtype.real_dtype) + result = math_ops.abs(a - b) <= atol_ + rtol_ * math_ops.abs(b) + if equal_nan: + result = result | (math_ops.is_nan(a) & math_ops.is_nan(b)) + return result + else: + return a == b + + return _bin_op(f, a, b) + + +@tf_export.tf_export('experimental.numpy.allclose', v1=[]) +@np_utils.np_doc('allclose') +def allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): + return np_array_ops.all( + isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) + ) + + +def _tf_gcd(x1, x2): # pylint: disable=missing-function-docstring + def _gcd_cond_fn(_, x2): + return math_ops.reduce_any(x2 != 0) + + def _gcd_body_fn(x1, x2): + # math_ops.mod will raise an error when any element of x2 is 0. To avoid + # that, we change those zeros to ones. Their values don't matter because + # they won't be used. 
+ x2_safe = array_ops.where_v2(x2 != 0, x2, constant_op.constant(1, x2.dtype)) + x1, x2 = ( + array_ops.where_v2(x2 != 0, x2, x1), + array_ops.where_v2( + x2 != 0, + math_ops.mod(x1, x2_safe), + constant_op.constant(0, x2.dtype), + ), + ) + return ( + array_ops.where_v2(x1 < x2, x2, x1), + array_ops.where_v2(x1 < x2, x1, x2), + ) + + if not np.issubdtype( + x1.dtype.as_numpy_dtype, np.integer + ) or not np.issubdtype(x2.dtype.as_numpy_dtype, np.integer): + raise ValueError('Arguments to gcd must be integers.') + shape = array_ops.broadcast_dynamic_shape( + array_ops.shape(x1), array_ops.shape(x2) + ) + x1 = array_ops.broadcast_to(x1, shape) + x2 = array_ops.broadcast_to(x2, shape) + value, _ = while_loop.while_loop( + _gcd_cond_fn, _gcd_body_fn, (math_ops.abs(x1), math_ops.abs(x2)) + ) + return value + + +# Note that np.gcd may not be present in some supported versions of numpy. +@tf_export.tf_export('experimental.numpy.gcd', v1=[]) +@np_utils.np_doc('gcd') +def gcd(x1, x2): + return _bin_op(_tf_gcd, x1, x2) + + +# Note that np.lcm may not be present in some supported versions of numpy. 
+@tf_export.tf_export('experimental.numpy.lcm', v1=[]) +@np_utils.np_doc('lcm') +def lcm(x1, x2): # pylint: disable=missing-function-docstring + def f(x1, x2): + d = _tf_gcd(x1, x2) + # Same as the `x2_safe` trick above + d_safe = array_ops.where_v2( + math_ops.equal(d, 0), constant_op.constant(1, d.dtype), d + ) + x1 = math_ops.abs(x1) + x2 = math_ops.abs(x2) + return array_ops.where_v2( + math_ops.equal(d, 0), + constant_op.constant(0, d.dtype), + x1 * (x2 // d_safe), + ) + + return _bin_op(f, x1, x2) + + +def _bitwise_binary_op(tf_fn, x1, x2): # pylint: disable=missing-function-docstring + def f(x1, x2): + is_bool = x1.dtype == dtypes.bool + if is_bool: + assert x2.dtype == dtypes.bool + x1 = math_ops.cast(x1, dtypes.int8) + x2 = math_ops.cast(x2, dtypes.int8) + r = tf_fn(x1, x2) + if is_bool: + r = math_ops.cast(r, dtypes.bool) + return r + + return _bin_op(f, x1, x2) + + +@tf_export.tf_export('experimental.numpy.bitwise_and', v1=[]) +@np_utils.np_doc('bitwise_and') +def bitwise_and(x1, x2): + return _bitwise_binary_op(bitwise_ops.bitwise_and, x1, x2) + + +@tf_export.tf_export('experimental.numpy.bitwise_or', v1=[]) +@np_utils.np_doc('bitwise_or') +def bitwise_or(x1, x2): + return _bitwise_binary_op(bitwise_ops.bitwise_or, x1, x2) + + +@tf_export.tf_export('experimental.numpy.bitwise_xor', v1=[]) +@np_utils.np_doc('bitwise_xor') +def bitwise_xor(x1, x2): + return _bitwise_binary_op(bitwise_ops.bitwise_xor, x1, x2) + + +@tf_export.tf_export('experimental.numpy.bitwise_not', v1=[]) +@np_utils.np_doc('bitwise_not', link=np_utils.AliasOf('invert')) +def bitwise_not(x): + def f(x): + if x.dtype == dtypes.bool: + return math_ops.logical_not(x) + return bitwise_ops.invert(x) + + return _scalar(f, x) + + +def _scalar(tf_fn, x, promote_to_float=False): + """Computes the tf_fn(x) for each element in `x`. + + Args: + tf_fn: function that takes a single Tensor argument. + x: array_like. 
Could be an ndarray, a Tensor or any object that can be + converted to a Tensor using `ops.convert_to_tensor`. + promote_to_float: whether to cast the argument to a float dtype if it is not + already. + + Returns: + An ndarray with the same shape as `x`. The default output dtype is + determined by `np_utils.result_type(float)`, unless x is an ndarray with a + floating point type, in which case the output type is same as x.dtype. + """ + x = np_array_ops.asarray(x) + if promote_to_float and not np.issubdtype(x.dtype.as_numpy_dtype, np.inexact): + x = x.astype(np_utils.result_type(float)) + return tf_fn(x) + + +@tf_export.tf_export('experimental.numpy.log', v1=[]) +@np_utils.np_doc('log') +def log(x): + return _scalar(math_ops.log, x, True) + + +@tf_export.tf_export('experimental.numpy.exp', v1=[]) +@np_utils.np_doc('exp') +def exp(x): + return _scalar(math_ops.exp, x, True) + + +@tf_export.tf_export('experimental.numpy.sqrt', v1=[]) +@np_utils.np_doc('sqrt') +def sqrt(x): + return _scalar(math_ops.sqrt, x, True) + + +@tf_export.tf_export('experimental.numpy.abs', v1=[]) +@np_utils.np_doc('abs', link=np_utils.AliasOf('absolute')) +def abs(x): # pylint: disable=redefined-builtin + return _scalar(math_ops.abs, x) + + +@tf_export.tf_export('experimental.numpy.absolute', v1=[]) +@np_utils.np_doc('absolute') +def absolute(x): + return abs(x) + + +@tf_export.tf_export('experimental.numpy.fabs', v1=[]) +@np_utils.np_doc('fabs') +def fabs(x): + return abs(x) + + +@tf_export.tf_export('experimental.numpy.ceil', v1=[]) +@np_utils.np_doc('ceil') +def ceil(x): + return _scalar(math_ops.ceil, x, True) + + +@tf_export.tf_export('experimental.numpy.floor', v1=[]) +@np_utils.np_doc('floor') +def floor(x): + return _scalar(math_ops.floor, x, True) + + +@tf_export.tf_export('experimental.numpy.conj', v1=[]) +@np_utils.np_doc('conj') +def conj(x): + return _scalar(math_ops.conj, x) + + +@tf_export.tf_export('experimental.numpy.negative', v1=[]) +@np_utils.np_doc('negative') +def 
negative(x): + return _scalar(math_ops.negative, x) + + +@tf_export.tf_export('experimental.numpy.reciprocal', v1=[]) +@np_utils.np_doc('reciprocal') +def reciprocal(x): + return _scalar(math_ops.reciprocal, x) + + +@tf_export.tf_export('experimental.numpy.signbit', v1=[]) +@np_utils.np_doc('signbit') +def signbit(x): + def f(x): + if x.dtype == dtypes.bool: + return array_ops.fill(array_ops.shape(x), False) + return x < 0 + + return _scalar(f, x) + + +@tf_export.tf_export('experimental.numpy.sin', v1=[]) +@np_utils.np_doc('sin') +def sin(x): + return _scalar(math_ops.sin, x, True) + + +@tf_export.tf_export('experimental.numpy.cos', v1=[]) +@np_utils.np_doc('cos') +def cos(x): + return _scalar(math_ops.cos, x, True) + + +@tf_export.tf_export('experimental.numpy.tan', v1=[]) +@np_utils.np_doc('tan') +def tan(x): + return _scalar(math_ops.tan, x, True) + + +@tf_export.tf_export('experimental.numpy.sinh', v1=[]) +@np_utils.np_doc('sinh') +def sinh(x): + return _scalar(math_ops.sinh, x, True) + + +@tf_export.tf_export('experimental.numpy.cosh', v1=[]) +@np_utils.np_doc('cosh') +def cosh(x): + return _scalar(math_ops.cosh, x, True) + + +@tf_export.tf_export('experimental.numpy.tanh', v1=[]) +@np_utils.np_doc('tanh') +def tanh(x): + return _scalar(math_ops.tanh, x, True) + + +@tf_export.tf_export('experimental.numpy.arcsin', v1=[]) +@np_utils.np_doc('arcsin') +def arcsin(x): + return _scalar(math_ops.asin, x, True) + + +@tf_export.tf_export('experimental.numpy.arccos', v1=[]) +@np_utils.np_doc('arccos') +def arccos(x): + return _scalar(math_ops.acos, x, True) + + +@tf_export.tf_export('experimental.numpy.arctan', v1=[]) +@np_utils.np_doc('arctan') +def arctan(x): + return _scalar(math_ops.atan, x, True) + + +@tf_export.tf_export('experimental.numpy.arcsinh', v1=[]) +@np_utils.np_doc('arcsinh') +def arcsinh(x): + return _scalar(math_ops.asinh, x, True) + + +@tf_export.tf_export('experimental.numpy.arccosh', v1=[]) +@np_utils.np_doc('arccosh') +def arccosh(x): + return 
_scalar(math_ops.acosh, x, True) + + +@tf_export.tf_export('experimental.numpy.arctanh', v1=[]) +@np_utils.np_doc('arctanh') +def arctanh(x): + return _scalar(math_ops.atanh, x, True) + + +@tf_export.tf_export('experimental.numpy.deg2rad', v1=[]) +@np_utils.np_doc('deg2rad') +def deg2rad(x): + def f(x): + return x * (np.pi / 180.0) + + return _scalar(f, x, True) + + +@tf_export.tf_export('experimental.numpy.rad2deg', v1=[]) +@np_utils.np_doc('rad2deg') +def rad2deg(x): + return x * (180.0 / np.pi) + + +_tf_float_types = [ + dtypes.bfloat16, + dtypes.float16, + dtypes.float32, + dtypes.float64, +] + + +@tf_export.tf_export('experimental.numpy.angle', v1=[]) +@np_utils.np_doc('angle') +def angle(z, deg=False): # pylint: disable=missing-function-docstring + def f(x): + if x.dtype in _tf_float_types: + # Workaround for b/147515503 + return array_ops.where_v2(x < 0, np.pi, 0) + else: + return math_ops.angle(x) + + y = _scalar(f, z, True) + if deg: + y = rad2deg(y) + return y + + +@tf_export.tf_export('experimental.numpy.cbrt', v1=[]) +@np_utils.np_doc('cbrt') +def cbrt(x): + def f(x): + # __pow__ can't handle negative base, so we use `abs` here. 
+ rt = math_ops.abs(x) ** (1.0 / 3) + return array_ops.where_v2(x < 0, -rt, rt) + + return _scalar(f, x, True) + + +@tf_export.tf_export('experimental.numpy.conjugate', v1=[]) +@np_utils.np_doc('conjugate', link=np_utils.AliasOf('conj')) +def conjugate(x): + return _scalar(math_ops.conj, x) + + +@tf_export.tf_export('experimental.numpy.exp2', v1=[]) +@np_utils.np_doc('exp2') +def exp2(x): + def f(x): + return 2**x + + return _scalar(f, x, True) + + +@tf_export.tf_export('experimental.numpy.expm1', v1=[]) +@np_utils.np_doc('expm1') +def expm1(x): + return _scalar(math_ops.expm1, x, True) + + +@tf_export.tf_export('experimental.numpy.fix', v1=[]) +@np_utils.np_doc('fix') +def fix(x): + def f(x): + return array_ops.where_v2(x < 0, math_ops.ceil(x), math_ops.floor(x)) + + return _scalar(f, x, True) + + +@tf_export.tf_export('experimental.numpy.iscomplex', v1=[]) +@np_utils.np_doc('iscomplex') +def iscomplex(x): + return np_array_ops.imag(x) != 0 + + +@tf_export.tf_export('experimental.numpy.isreal', v1=[]) +@np_utils.np_doc('isreal') +def isreal(x): + return np_array_ops.imag(x) == 0 + + +@tf_export.tf_export('experimental.numpy.iscomplexobj', v1=[]) +@np_utils.np_doc('iscomplexobj') +def iscomplexobj(x): + x = np_array_ops.array(x) + return np.issubdtype(x.dtype.as_numpy_dtype, np.complexfloating) + + +@tf_export.tf_export('experimental.numpy.isrealobj', v1=[]) +@np_utils.np_doc('isrealobj') +def isrealobj(x): + return not iscomplexobj(x) + + +@tf_export.tf_export('experimental.numpy.isnan', v1=[]) +@np_utils.np_doc('isnan') +def isnan(x): + return _scalar(math_ops.is_nan, x, True) + + +def _make_nan_reduction(np_fun_name, reduction, init_val): + """Helper to generate nan* functions.""" + + @np_utils.np_doc(np_fun_name) + def nan_reduction(a, axis=None, dtype=None, keepdims=False): + a = np_array_ops.array(a) + v = np_array_ops.array(init_val, dtype=a.dtype) + return reduction( + np_array_ops.where(isnan(a), v, a), + axis=axis, + dtype=dtype, + keepdims=keepdims, + ) 
+ + return nan_reduction + + +nansum = tf_export.tf_export('experimental.numpy.nansum', v1=[])( + _make_nan_reduction('nansum', np_array_ops.sum, 0) +) +nanprod = tf_export.tf_export('experimental.numpy.nanprod', v1=[])( + _make_nan_reduction('nanprod', np_array_ops.prod, 1) +) + + +@tf_export.tf_export('experimental.numpy.nanmean', v1=[]) +@np_utils.np_doc('nanmean') +def nanmean(a, axis=None, dtype=None, keepdims=None): # pylint: disable=missing-docstring + a = np_array_ops.array(a) + if np.issubdtype(a.dtype.as_numpy_dtype, np.bool_) or np.issubdtype( + a.dtype.as_numpy_dtype, np.integer + ): + return np_array_ops.mean(a, axis=axis, dtype=dtype, keepdims=keepdims) + nan_mask = logical_not(isnan(a)) + if dtype is None: + dtype = a.dtype.as_numpy_dtype + normalizer = np_array_ops.sum( + nan_mask, axis=axis, dtype=dtype, keepdims=keepdims + ) + return nansum(a, axis=axis, dtype=dtype, keepdims=keepdims) / normalizer + + +@tf_export.tf_export('experimental.numpy.isfinite', v1=[]) +@np_utils.np_doc('isfinite') +def isfinite(x): + return _scalar(math_ops.is_finite, x, True) + + +@tf_export.tf_export('experimental.numpy.isinf', v1=[]) +@np_utils.np_doc('isinf') +def isinf(x): + if x.dtype.is_floating: + return _scalar(math_ops.is_inf, x, True) + return False + + +@tf_export.tf_export('experimental.numpy.isneginf', v1=[]) +@np_utils.np_doc('isneginf') +def isneginf(x): + if x.dtype.is_floating: + return x == np_array_ops.full_like(x, -np.inf) + return False + + +@tf_export.tf_export('experimental.numpy.isposinf', v1=[]) +@np_utils.np_doc('isposinf') +def isposinf(x): + if x.dtype.is_floating: + return x == np_array_ops.full_like(x, np.inf) + return False + + +@tf_export.tf_export('experimental.numpy.log2', v1=[]) +@np_utils.np_doc('log2') +def log2(x): + return log(x) / np.log(2) + + +@tf_export.tf_export('experimental.numpy.log10', v1=[]) +@np_utils.np_doc('log10') +def log10(x): + return log(x) / np.log(10) + + +@tf_export.tf_export('experimental.numpy.log1p', v1=[]) 
+@np_utils.np_doc('log1p') +def log1p(x): + return _scalar(math_ops.log1p, x, True) + + +@tf_export.tf_export('experimental.numpy.positive', v1=[]) +@np_utils.np_doc('positive') +def positive(x): + return _scalar(lambda x: x, x) + + +@tf_export.tf_export('experimental.numpy.sinc', v1=[]) +@np_utils.np_doc('sinc') +def sinc(x): + def f(x): + pi_x = x * np.pi + return array_ops.where_v2( + x == 0, array_ops.ones_like(x), math_ops.sin(pi_x) / pi_x + ) + + return _scalar(f, x, True) + + +@tf_export.tf_export('experimental.numpy.square', v1=[]) +@np_utils.np_doc('square') +def square(x): + return _scalar(math_ops.square, x) + + +@tf_export.tf_export('experimental.numpy.diff', v1=[]) +@np_utils.np_doc('diff') +def diff(a, n=1, axis=-1): # pylint: disable=missing-function-docstring + def f(a): + # TODO(agarwal): transpose and reshape to N, H, 1 and do a 1D convolution + # TODO(agarwal): avoid depending on static rank. + nd = a.shape.rank + if nd is None: + raise ValueError( + 'Function `diff` currently requires a known rank for input `a`. ' + f'Received: a={a} (unknown rank)' + ) + if (axis + nd if axis < 0 else axis) >= nd: + raise ValueError( + f'Argument `axis` (received axis={axis}) is out of bounds ' + f'for input {a} of rank {nd}.' + ) + if n < 0: + raise ValueError( + f'Argument `order` must be a non-negative integer. 
Received: axis={n}' + ) + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1 = tuple(slice1) + slice2 = tuple(slice2) + op = math_ops.not_equal if a.dtype == dtypes.bool else math_ops.subtract + for _ in range(n): + a = op(a[slice1], a[slice2]) + return a + + return _scalar(f, a) + + +def _wrap(f, reverse=False): + """Wraps binary ops so they can be added as operator overloads on ndarray.""" + + def _f(a, b): + if reverse: + a, b = b, a + + if ( + getattr(b, '__array_priority__', 0) + > np_arrays.ndarray.__array_priority__ + ): + return NotImplemented + + return f(a, b) + + return _f + + +def _comparison(tf_fun, x1, x2, cast_bool_to_int=False): + """Helper function for comparision.""" + dtype = np_utils.result_type(x1, x2) + # Cast x1 and x2 to the result_type if needed. + x1 = np_array_ops.array(x1, dtype=dtype) + x2 = np_array_ops.array(x2, dtype=dtype) + if cast_bool_to_int and x1.dtype == dtypes.bool: + x1 = math_ops.cast(x1, dtypes.int32) + x2 = math_ops.cast(x2, dtypes.int32) + return tf_fun(x1, x2) + + +@tf_export.tf_export('experimental.numpy.equal', v1=[]) +@np_utils.np_doc('equal') +def equal(x1, x2): + return _comparison(math_ops.equal, x1, x2) + + +@tf_export.tf_export('experimental.numpy.not_equal', v1=[]) +@np_utils.np_doc('not_equal') +def not_equal(x1, x2): + return _comparison(math_ops.not_equal, x1, x2) + + +@tf_export.tf_export('experimental.numpy.greater', v1=[]) +@np_utils.np_doc('greater') +def greater(x1, x2): + return _comparison(math_ops.greater, x1, x2, True) + + +@tf_export.tf_export('experimental.numpy.greater_equal', v1=[]) +@np_utils.np_doc('greater_equal') +def greater_equal(x1, x2): + return _comparison(math_ops.greater_equal, x1, x2, True) + + +@tf_export.tf_export('experimental.numpy.less', v1=[]) +@np_utils.np_doc('less') +def less(x1, x2): + return _comparison(math_ops.less, x1, x2, True) + + +@tf_export.tf_export('experimental.numpy.less_equal', 
v1=[]) +@np_utils.np_doc('less_equal') +def less_equal(x1, x2): + return _comparison(math_ops.less_equal, x1, x2, True) + + +@tf_export.tf_export('experimental.numpy.array_equal', v1=[]) +@np_utils.np_doc('array_equal') +def array_equal(a1, a2): # pylint: disable=missing-function-docstring + def f(x1, x2): + return np_utils.cond( + math_ops.equal(array_ops.rank(x1), array_ops.rank(x2)), + lambda: np_utils.cond( # pylint: disable=g-long-lambda + np_utils.reduce_all( + math_ops.equal(array_ops.shape(x1), array_ops.shape(x2)) + ), + lambda: math_ops.reduce_all(math_ops.equal(x1, x2)), + lambda: constant_op.constant(False), + ), + lambda: constant_op.constant(False), + ) + + return _comparison(f, a1, a2) + + +def _logical_binary_op(tf_fun, x1, x2): + x1 = np_array_ops.array(x1, dtype=np.bool_) + x2 = np_array_ops.array(x2, dtype=np.bool_) + return tf_fun(x1, x2) + + +@tf_export.tf_export('experimental.numpy.logical_and', v1=[]) +@np_utils.np_doc('logical_and') +def logical_and(x1, x2): + return _logical_binary_op(math_ops.logical_and, x1, x2) + + +@tf_export.tf_export('experimental.numpy.logical_or', v1=[]) +@np_utils.np_doc('logical_or') +def logical_or(x1, x2): + return _logical_binary_op(math_ops.logical_or, x1, x2) + + +@tf_export.tf_export('experimental.numpy.logical_xor', v1=[]) +@np_utils.np_doc('logical_xor') +def logical_xor(x1, x2): + return _logical_binary_op(math_ops.logical_xor, x1, x2) + + +@tf_export.tf_export('experimental.numpy.logical_not', v1=[]) +@np_utils.np_doc('logical_not') +def logical_not(x): + x = np_array_ops.array(x, dtype=np.bool_) + return math_ops.logical_not(x) + + +@tf_export.tf_export('experimental.numpy.linspace', v1=[]) +@np_utils.np_doc('linspace') +def linspace( # pylint: disable=missing-docstring + start, stop, num=50, endpoint=True, retstep=False, dtype=float, axis=0 +): + if dtype: + # In numpy 2.x, the result type of np.linspace is based off of `start` and + # `end`. We mimic the behavior. 
+ if np.lib.NumpyVersion(np.__version__) >= '2.0.0.dev0': + dtype = np_utils.result_type([start * 1.0, stop * 1.0]) + else: + dtype = np_utils.result_type(dtype) + start = np_array_ops.array(start, dtype=dtype) + stop = np_array_ops.array(stop, dtype=dtype) + if num < 0: + raise ValueError( + 'Argument `num` (number of samples) must be a non-negative integer. ' + f'Received: num={num}' + ) + step = ops.convert_to_tensor(np.nan) + if endpoint: + result = math_ops.linspace(start, stop, num, axis=axis) + if num > 1: + step = (stop - start) / (num - 1) + else: + # math_ops.linspace does not support endpoint=False so we manually handle it + # here. + if num > 0: + step = (stop - start) / num + if num > 1: + new_stop = math_ops.cast(stop, step.dtype) - step + start = math_ops.cast(start, new_stop.dtype) + result = math_ops.linspace(start, new_stop, num, axis=axis) + else: + result = math_ops.linspace(start, stop, num, axis=axis) + if dtype: + if dtype.is_integer: + # Since numpy 1.20, linspace's rounding is towards -inf instead of 0 + result = math_ops.floor(result) + result = math_ops.cast(result, dtype) + if retstep: + return (result, step) + else: + return result + + +@tf_export.tf_export('experimental.numpy.logspace', v1=[]) +@np_utils.np_doc('logspace') +def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0): + # In numpy 2.x, the result type of np.logspace is based off of `start` and + # `end`. We mimic the behavior. 
+ if np.lib.NumpyVersion(np.__version__) >= '2.0.0.dev0': + dtype = np_utils.result_type([start * 1.0, stop * 1.0]) + else: + dtype = np_utils.result_type(start, stop, dtype) + result = linspace( + start, stop, num=num, endpoint=endpoint, dtype=dtype, axis=axis + ) + result = math_ops.pow(math_ops.cast(base, result.dtype), result) + if dtype: + result = math_ops.cast(result, dtype) + return result + + +@tf_export.tf_export('experimental.numpy.geomspace', v1=[]) +@np_utils.np_doc('geomspace') +def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): # pylint: disable=missing-docstring + dtype = ( + dtypes.as_dtype(dtype) # pylint: disable=g-long-ternary + if dtype + else np_utils.result_type( + start, stop, float(num), np_array_ops.zeros((), dtype) + ) + ) + computation_dtype = np.promote_types(dtype.as_numpy_dtype, np.float32) + start = np_array_ops.asarray(start, dtype=computation_dtype) + stop = np_array_ops.asarray(stop, dtype=computation_dtype) + # follow the numpy geomspace convention for negative and complex endpoints + start_sign = 1 - np_array_ops.sign(np_array_ops.real(start)) + stop_sign = 1 - np_array_ops.sign(np_array_ops.real(stop)) + signflip = 1 - start_sign * stop_sign // 2 + res = signflip * logspace( + log10(signflip * start), + log10(signflip * stop), + num, + endpoint=endpoint, + base=10.0, + dtype=computation_dtype, + axis=0, + ) + if axis != 0: + res = np_array_ops.moveaxis(res, 0, axis) + return math_ops.cast(res, dtype) + + +@tf_export.tf_export('experimental.numpy.ptp', v1=[]) +@np_utils.np_doc('ptp') +def ptp(a, axis=None, keepdims=None): + return np_array_ops.amax(a, axis=axis, keepdims=keepdims) - np_array_ops.amin( + a, axis=axis, keepdims=keepdims + ) + + +@tf_export.tf_export('experimental.numpy.concatenate', v1=[]) +@np_utils.np_doc_only('concatenate') +def concatenate(arys, axis=0): # pylint: disable=missing-function-docstring + if not isinstance(arys, (list, tuple)): + arys = [arys] + if not arys: + raise ValueError( 
+ 'Need at least one array to concatenate. Received empty ' + f'input: arys={arys}' + ) + dtype = np_utils.result_type(*arys) + arys = [np_array_ops.array(array, dtype=dtype) for array in arys] + return array_ops.concat(arys, axis) + + +@tf_export.tf_export('experimental.numpy.tile', v1=[]) +@np_utils.np_doc_only('tile') +def tile(a, reps): # pylint: disable=missing-function-docstring + a = np_array_ops.array(a) + reps = array_ops.reshape(np_array_ops.array(reps, dtype=dtypes.int32), [-1]) + + a_rank = array_ops.rank(a) + reps_size = array_ops.size(reps) + reps = array_ops.pad( + reps, [[math_ops.maximum(a_rank - reps_size, 0), 0]], constant_values=1 + ) + a_shape = array_ops.pad( + array_ops.shape(a), + [[math_ops.maximum(reps_size - a_rank, 0), 0]], + constant_values=1, + ) + a = array_ops.reshape(a, a_shape) + + return array_ops.tile(a, reps) + + +@tf_export.tf_export('experimental.numpy.count_nonzero', v1=[]) +@np_utils.np_doc('count_nonzero') +def count_nonzero(a, axis=None): + return math_ops.count_nonzero(np_array_ops.array(a), axis) + + +@tf_export.tf_export('experimental.numpy.argsort', v1=[]) +@np_utils.np_doc('argsort') +def argsort(a, axis=-1, kind='quicksort', order=None): # pylint: disable=missing-docstring + # TODO(nareshmodi): make string tensors also work. + if kind not in ('quicksort', 'stable'): + raise ValueError( + 'Invalid value for argument `kind`. ' + 'Only kind="quicksort" and kind="stable" are supported. ' + f'Received: kind={kind}' + ) + if order is not None: + raise ValueError('The `order` argument is not supported. 
Pass order=None') + stable = kind == 'stable' + + a = np_array_ops.array(a) + + def _argsort(a, axis, stable): + if axis is None: + a = array_ops.reshape(a, [-1]) + axis = 0 + + return sort_ops.argsort(a, axis, stable=stable) + + tf_ans = np_utils.cond( + math_ops.equal(array_ops.rank(a), 0), + lambda: constant_op.constant([0]), + lambda: _argsort(a, axis, stable), + ) + + if ops.is_auto_dtype_conversion_enabled(): + return np_array_ops.array(tf_ans, dtype=int) + else: + return np_array_ops.array(tf_ans, dtype=np.intp) + + +@tf_export.tf_export('experimental.numpy.sort', v1=[]) +@np_utils.np_doc('sort') +def sort(a, axis=-1, kind='quicksort', order=None): # pylint: disable=missing-docstring + if kind != 'quicksort': + raise ValueError( + 'Invalid value for argument `kind`. ' + 'Only kind="quicksort" is supported. ' + f'Received: kind={kind}' + ) + if order is not None: + raise ValueError('The `order` argument is not supported. Pass order=None') + + a = np_array_ops.array(a) + + if axis is None: + return sort_ops.sort(array_ops.reshape(a, [-1]), 0) + else: + return sort_ops.sort(a, axis) + + +def _argminmax(fn, a, axis=None): + a = np_array_ops.array(a) + if axis is None: + # When axis is None numpy flattens the array. 
+ a_t = array_ops.reshape(a, [-1]) + else: + a_t = np_array_ops.atleast_1d(a) + return fn(input=a_t, axis=axis) + + +@tf_export.tf_export('experimental.numpy.argmax', v1=[]) +@np_utils.np_doc('argmax') +def argmax(a, axis=None): + return _argminmax(math_ops.argmax, a, axis) + + +@tf_export.tf_export('experimental.numpy.argmin', v1=[]) +@np_utils.np_doc('argmin') +def argmin(a, axis=None): + return _argminmax(math_ops.argmin, a, axis) + + +@tf_export.tf_export('experimental.numpy.append', v1=[]) +@np_utils.np_doc('append') +def append(arr, values, axis=None): + if axis is None: + return concatenate([np_array_ops.ravel(arr), np_array_ops.ravel(values)], 0) + else: + return concatenate([arr, values], axis=axis) + + +@tf_export.tf_export('experimental.numpy.average', v1=[]) +@np_utils.np_doc('average') +def average(a, axis=None, weights=None, returned=False): # pylint: disable=missing-docstring + if axis is not None and not isinstance(axis, int): + # TODO(wangpeng): Support tuple of ints as `axis` + raise ValueError( + 'Argument `axis` must be an integer. 
' + f'Received axis={axis} (of type {type(axis)})' + ) + a = np_array_ops.array(a) + default_float_type = np_utils.result_type(float) + if weights is None: # Treat all weights as 1 + if not np.issubdtype(a.dtype.as_numpy_dtype, np.inexact): + a = a.astype(np_utils.result_type(a.dtype, default_float_type)) + avg = math_ops.reduce_mean(a, axis=axis) + if returned: + if axis is None: + weights_sum = array_ops.size(a) + else: + weights_sum = array_ops.shape(a)[axis] + weights_sum = math_ops.cast(weights_sum, a.dtype) + else: + if np.issubdtype(a.dtype.as_numpy_dtype, np.inexact): + out_dtype = np_utils.result_type(a.dtype, weights) + else: + out_dtype = np_utils.result_type(a.dtype, weights, default_float_type) + a = np_array_ops.array(a, out_dtype) + weights = np_array_ops.array(weights, out_dtype) + + def rank_equal_case(): + control_flow_assert.Assert( + math_ops.reduce_all(array_ops.shape(a) == array_ops.shape(weights)), + [array_ops.shape(a), array_ops.shape(weights)], + ) + weights_sum = math_ops.reduce_sum(weights, axis=axis) + avg = math_ops.reduce_sum(a * weights, axis=axis) / weights_sum + return avg, weights_sum + + if axis is None: + avg, weights_sum = rank_equal_case() + else: + + def rank_not_equal_case(): + control_flow_assert.Assert( + array_ops.rank(weights) == 1, [array_ops.rank(weights)] + ) + weights_sum = math_ops.reduce_sum(weights) + axes = ops.convert_to_tensor([[axis], [0]]) + avg = math_ops.tensordot(a, weights, axes) / weights_sum + return avg, weights_sum + + # We condition on rank rather than shape equality, because if we do the + # latter, when the shapes are partially unknown but the ranks are known + # and different, np_utils.cond will run shape checking on the true branch, + # which will raise a shape-checking error. 
+ avg, weights_sum = np_utils.cond( + math_ops.equal(array_ops.rank(a), array_ops.rank(weights)), + rank_equal_case, + rank_not_equal_case, + ) + + avg = np_array_ops.array(avg) + if returned: + weights_sum = np_array_ops.broadcast_to(weights_sum, array_ops.shape(avg)) + return avg, weights_sum + return avg + + +@tf_export.tf_export('experimental.numpy.trace', v1=[]) +@np_utils.np_doc('trace') +def trace(a, offset=0, axis1=0, axis2=1, dtype=None): # pylint: disable=missing-docstring + if dtype: + dtype = np_utils.result_type(dtype) + a = np_array_ops.asarray(a, dtype) + + if offset == 0: + a_shape = a.shape + if a_shape.rank is not None: + rank = len(a_shape) + if (axis1 == -2 or axis1 == rank - 2) and ( + axis2 == -1 or axis2 == rank - 1 + ): + return math_ops.trace(a) + + a = np_array_ops.diagonal(a, offset, axis1, axis2) + return np_array_ops.sum(a, -1, dtype) + + +@tf_export.tf_export('experimental.numpy.meshgrid', v1=[]) +@np_utils.np_doc('meshgrid') +def meshgrid(*xi, **kwargs): + """This currently requires copy=True and sparse=False.""" + sparse = kwargs.get('sparse', False) + if sparse: + raise ValueError( + 'Function `meshgrid` does not support returning sparse arrays yet. ' + f'Received: sparse={sparse}' + ) + + copy = kwargs.get('copy', True) + if not copy: + raise ValueError( + f'Function `meshgrid` only supports copy=True. Received: copy={copy}' + ) + + indexing = kwargs.get('indexing', 'xy') + + xi = [np_array_ops.asarray(arg) for arg in xi] + kwargs = {'indexing': indexing} + + outputs = array_ops.meshgrid(*xi, **kwargs) + + return outputs + + +# Uses np_doc_only here because np.einsum (in 1.16) doesn't have argument +# `subscripts`, even though the doc says it has. 
+@tf_export.tf_export('experimental.numpy.einsum', v1=[]) +@np_utils.np_doc_only('einsum') +def einsum(subscripts, *operands, **kwargs): # pylint: disable=missing-docstring + casting = kwargs.get('casting', 'safe') + optimize = kwargs.get('optimize', False) + if casting == 'safe': + operands = np_array_ops._promote_dtype(*operands) # pylint: disable=protected-access + elif casting == 'no': + operands = [np_array_ops.asarray(x) for x in operands] + else: + raise ValueError( + 'Invalid value for argument `casting`. ' + f'Expected casting="safe" or casting="no". Received: casting={casting}' + ) + if not optimize: + # TF doesn't have a "no optimization" option. + # TODO(wangpeng): Print a warning that np and tf use different + # optimizations. + tf_optimize = 'greedy' + elif optimize == True: # pylint: disable=singleton-comparison,g-explicit-bool-comparison + tf_optimize = 'greedy' + elif optimize == 'greedy': + tf_optimize = 'greedy' + elif optimize == 'optimal': + tf_optimize = 'optimal' + else: + raise ValueError( + 'Invalid value for argument `optimize`. ' + 'Expected one of {True, "greedy", "optimal"}. 
' + f'Received: optimize={optimize}' + ) + + res = special_math_ops.einsum(subscripts, *operands, optimize=tf_optimize) + return res + + +def _tensor_t(self): + """Returns a Tensor which is the transpose of this Tensor.""" + return self.transpose() + + +def _tensor_ndim(self): + """Returns the rank of the Tensor.""" + return self.shape.ndims + + +def _tensor_pos(self): + """Returns self, for unary operator `+`.""" + return self + + +def _tensor_size(self): + """Returns the number of elements in this Tensor, if fully known.""" + if not self.shape.is_fully_defined(): + return None + return np.prod(self.shape.as_list()) + + +def _tensor_tolist(self): + if ops.is_symbolic_tensor(self): + raise ValueError('Symbolic Tensors do not support the tolist API.') + + return self._numpy().tolist() # pylint: disable=protected-access + + +def _enable_numpy_methods(tensor_class): + """A helper method for adding additional NumPy methods.""" + t = property(_tensor_t) + setattr(tensor_class, 'T', t) + + ndim = property(_tensor_ndim) + setattr(tensor_class, 'ndim', ndim) + + size = property(_tensor_size) + setattr(tensor_class, 'size', size) + + setattr(tensor_class, '__pos__', _tensor_pos) + setattr(tensor_class, 'tolist', _tensor_tolist) + + # TODO(b/178540516): Make a custom `setattr` that changes the method's + # docstring to the TF one. 
+ setattr(tensor_class, 'transpose', np_array_ops.transpose) + setattr(tensor_class, 'flatten', np_array_ops.flatten) + setattr(tensor_class, 'reshape', np_array_ops._reshape_method_wrapper) # pylint: disable=protected-access + setattr(tensor_class, 'ravel', np_array_ops.ravel) + setattr(tensor_class, 'clip', clip) + setattr(tensor_class, 'astype', math_ops.cast) + setattr(tensor_class, '__round__', np_array_ops.around) + setattr(tensor_class, 'max', np_array_ops.amax) + setattr(tensor_class, 'mean', np_array_ops.mean) + setattr(tensor_class, 'min', np_array_ops.amin) + + # TODO(wangpeng): Remove `data` when all uses of it are removed + data = property(lambda self: self) + setattr(tensor_class, 'data', data) + + +def enable_numpy_methods_on_tensor(): + """Adds additional NumPy methods on tf.Tensor class.""" + _enable_numpy_methods(tensor.Tensor) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_random.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_random.py new file mode 100644 index 0000000000000000000000000000000000000000..5c4c6661d007e6e31be85f1c9e80f85e71024607 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_random.py @@ -0,0 +1,137 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Random functions.""" + +# pylint: disable=g-direct-tensorflow-import + +import numpy as onp + +from tensorflow.python.framework import random_seed +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.numpy_ops import np_array_ops +from tensorflow.python.ops.numpy_ops import np_dtypes +from tensorflow.python.ops.numpy_ops import np_utils +from tensorflow.python.util import tf_export + +# TODO(agarwal): deprecate this. +DEFAULT_RANDN_DTYPE = onp.float32 + + +@tf_export.tf_export('experimental.numpy.random.seed', v1=[]) +@np_utils.np_doc('random.seed') +def seed(s): + """Sets the seed for the random number generator. + + Uses `tf.set_random_seed`. + + Args: + s: an integer. + """ + try: + s = int(s) + except TypeError: + # TODO(wangpeng): support this? + raise ValueError( + f'Argument `s` got an invalid value {s}. Only integers are supported.' + ) + random_seed.set_seed(s) + + +@tf_export.tf_export('experimental.numpy.random.randn', v1=[]) +@np_utils.np_doc('random.randn') +def randn(*args): + """Returns samples from a normal distribution. + + Uses `tf.random_normal`. + + Args: + *args: The shape of the output array. + + Returns: + An ndarray with shape `args` and dtype `float64`. 
+ """ + return standard_normal(size=args) + + +@tf_export.tf_export('experimental.numpy.random.standard_normal', v1=[]) +@np_utils.np_doc('random.standard_normal') +def standard_normal(size=None): + # TODO(wangpeng): Use new stateful RNG + if size is None: + size = () + elif np_utils.isscalar(size): + size = (size,) + dtype = np_utils.result_type(float) + return random_ops.random_normal(size, dtype=dtype) + + +@tf_export.tf_export('experimental.numpy.random.uniform', v1=[]) +@np_utils.np_doc('random.uniform') +def uniform(low=0.0, high=1.0, size=None): + dtype = np_utils.result_type(float) + low = np_array_ops.asarray(low, dtype=dtype) + high = np_array_ops.asarray(high, dtype=dtype) + if size is None: + size = array_ops.broadcast_dynamic_shape(low.shape, high.shape) + return random_ops.random_uniform( + shape=size, minval=low, maxval=high, dtype=dtype + ) + + +@tf_export.tf_export('experimental.numpy.random.poisson', v1=[]) +@np_utils.np_doc('random.poisson') +def poisson(lam=1.0, size=None): + if size is None: + size = () + elif np_utils.isscalar(size): + size = (size,) + return random_ops.random_poisson(shape=size, lam=lam, dtype=np_dtypes.int_) + + +@tf_export.tf_export('experimental.numpy.random.random', v1=[]) +@np_utils.np_doc('random.random') +def random(size=None): + return uniform(0.0, 1.0, size) + + +@tf_export.tf_export('experimental.numpy.random.rand', v1=[]) +@np_utils.np_doc('random.rand') +def rand(*size): + return uniform(0.0, 1.0, size) + + +@tf_export.tf_export('experimental.numpy.random.randint', v1=[]) +@np_utils.np_doc('random.randint') +def randint(low, high=None, size=None, dtype=onp.int64): # pylint: disable=missing-function-docstring + low = int(low) + if high is None: + high = low + low = 0 + if size is None: + size = () + elif isinstance(size, int): + size = (size,) + dtype_orig = dtype + dtype = np_utils.result_type(dtype) + accepted_dtypes = (onp.int32, onp.int64) + if dtype not in accepted_dtypes: + raise ValueError( + f'Argument 
`dtype` got an invalid value {dtype_orig}. Only those ' + f'convertible to {accepted_dtypes} are supported.' + ) + return random_ops.random_uniform( + shape=size, minval=low, maxval=high, dtype=dtype + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d523ec5128d49df1592362eb06fa3c1e1e4e3a2b --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/numpy_ops/np_utils.py @@ -0,0 +1,715 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utility functions for internal use.""" +# pylint: disable=g-direct-tensorflow-import + +import inspect +import numbers +import os +import re + +import numpy as np + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import flexible_dtypes +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond as tf_cond +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.numpy_ops import np_arrays +from tensorflow.python.ops.numpy_ops import np_dtypes +from tensorflow.python.types import core +from tensorflow.python.util import nest +from tensorflow.python.util import tf_export + + +def _canonicalize_axis(axis, rank): + return _canonicalize_axes([axis], rank)[0] + + +def _canonicalize_axes(axes, rank): + rank = _maybe_static(rank) + + if isinstance(rank, core.Tensor): + canonicalizer = lambda axis: cond( # pylint: disable=g-long-lambda + axis < 0, lambda: axis + rank, lambda: axis + ) + else: + canonicalizer = lambda axis: axis + rank if axis < 0 else axis + + return [canonicalizer(axis) for axis in axes] + + +def _supports_signature(): + return hasattr(inspect, 'signature') + + +def _to_tf_type(dtype): + """Converts a native python or numpy type to TF DType. + + Args: + dtype: Could be a python type, a numpy type or a TF DType. + + Returns: + A tensorflow `DType`. + """ + return dtypes.as_dtype(dtype) + + +def _to_numpy_type(dtype): + """Converts a native python or TF DType to numpy type. + + Args: + dtype: Could be a python type, a numpy type or a TF DType. + + Returns: + A NumPy `dtype`. 
+ """ + if isinstance(dtype, dtypes.DType): + return dtype.as_numpy_dtype + return np.dtype(dtype) + + +def isscalar(val): + """Returns whether `val` is a scalar value or scalar Tensor.""" + if isinstance(val, np_arrays.ndarray): + val = val.data + if isinstance(val, core.Tensor): + ndims = val.shape.ndims + if ndims is not None: + return ndims == 0 + else: + return math_ops.equal(array_ops.rank(val), 0) + else: + return np.isscalar(val) + + +def _has_docstring(f): + return ( + f and hasattr(f, '__doc__') and isinstance(f.__doc__, str) and f.__doc__ + ) + + +def _add_blank_line(s): + if s.endswith('\n'): + return s + '\n' + else: + return s + '\n\n' + + +def _np_signature(f): + """An enhanced inspect.signature that can handle numpy.ufunc.""" + # TODO(wangpeng): consider migrating away from inspect.signature. + # inspect.signature is supported in Python 3.3. + if not hasattr(inspect, 'signature'): + return None + if f is None: + return None + if not isinstance(f, np.ufunc): + try: + return inspect.signature(f) + except ValueError: + return None + + def names_from_num(prefix, n): + if n <= 0: + return [] + elif n == 1: + return [prefix] + else: + return [prefix + str(i + 1) for i in range(n)] + + input_names = names_from_num('x', f.nin) + output_names = names_from_num('out', f.nout) + keyword_only_params = [ + ('where', True), + ('casting', 'same_kind'), + ('order', 'K'), + ('dtype', None), + ('subok', True), + ('signature', None), + ('extobj', None), + ] + params = [] + params += [ + inspect.Parameter(name, inspect.Parameter.POSITIONAL_ONLY) + for name in input_names + ] + if f.nout > 1: + params += [ + inspect.Parameter(name, inspect.Parameter.POSITIONAL_ONLY, default=None) + for name in output_names + ] + params += [ + inspect.Parameter( + 'out', + inspect.Parameter.POSITIONAL_OR_KEYWORD, + default=None if f.nout == 1 else (None,) * f.nout, + ) + ] + params += [ + inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=default) + for name, default in 
keyword_only_params + ] + return inspect.Signature(params) + + +# Python 2 doesn't allow keyword-only argument. Python prior to 3.8 doesn't +# allow positional-only argument. So we conflate positional-only, keyword-only +# and positional-or-keyword arguments here. +def _is_compatible_param_kind(a, b): + def relax(k): + if k in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.KEYWORD_ONLY): + return inspect.Parameter.POSITIONAL_OR_KEYWORD + return k + + return relax(a) == relax(b) + + +def _prepare_np_fun_name_and_fun(np_fun_name, np_fun): + """Mutually propagates information between `np_fun_name` and `np_fun`. + + If one is None and the other is not, we'll try to make the former not None in + a best effort. + + Args: + np_fun_name: name for the np_fun symbol. At least one of np_fun or + np_fun_name shoud be set. + np_fun: the numpy function whose docstring will be used. + + Returns: + Processed `np_fun_name` and `np_fun`. + """ + if np_fun_name is not None: + assert isinstance(np_fun_name, str) + if np_fun is not None: + assert not isinstance(np_fun, str) + if np_fun is None: + assert np_fun_name is not None + try: + np_fun = getattr(np, str(np_fun_name)) + except AttributeError: + np_fun = None + if np_fun_name is None: + assert np_fun is not None + np_fun_name = np_fun.__name__ + return np_fun_name, np_fun + + +def _np_doc_helper( + f, np_f, np_fun_name=None, unsupported_params=None, link=None +): + """Helper to get docs.""" + assert np_f or np_fun_name + if not np_fun_name: + np_fun_name = np_f.__name__ + doc = "TensorFlow variant of NumPy's `%s`.\n\n" % np_fun_name + if unsupported_params: + doc += ( + 'Unsupported arguments: ' + + ', '.join('`' + name + '`' for name in unsupported_params) + + '.\n\n' + ) + if _has_docstring(f): + doc += f.__doc__ + doc = _add_blank_line(doc) + # TODO(wangpeng): Re-enable the following and choose inlined vs. link to numpy + # doc according to some global switch. 
+ doc = _add_np_doc(doc, np_fun_name, np_f, link=link) + return doc + + +_np_doc_form = os.getenv('TF_NP_DOC_FORM', 'stable') + + +def get_np_doc_form(): + """Gets the form of the original numpy docstrings. + + Returns: + See `set_np_doc_form` for the list of valid values. + """ + return _np_doc_form + + +def set_np_doc_form(value): + r"""Selects the form of the original numpy docstrings. + + This function sets a global variable that controls how a tf-numpy symbol's + docstring should refer to the original numpy docstring. If `value` is + `'inlined'`, the numpy docstring will be verbatim copied into the tf-numpy + docstring. Otherwise, a link to the original numpy docstring will be + added. Which numpy version the link points to depends on `value`: + * `'stable'`: the current stable version; + * `'dev'`: the current development version; + * pattern `\d+(\.\d+(\.\d+)?)?`: `value` will be treated as a version number, + e.g. '1.16'. + + Args: + value: the value to set the global variable to. + """ + global _np_doc_form + _np_doc_form = value + + +class Link: + + def __init__(self, v): + self.value = v + + +class AliasOf: + + def __init__(self, v): + self.value = v + + +class NoLink: + pass + + +def generate_link(flag, np_fun_name): + """Generates link from numpy function name. + + Args: + flag: the flag to control link form. See `set_np_doc_form`. + np_fun_name: the numpy function name. + + Returns: + A string. 
+ """ + # Only adds link in this case + if flag == 'dev': + template = 'https://numpy.org/devdocs/reference/generated/numpy.%s.html' + elif flag == 'stable': + template = 'https://numpy.org/doc/stable/reference/generated/numpy.%s.html' + elif re.match(r'\d+(\.\d+(\.\d+)?)?$', flag): + # `flag` is the version number + template = f'https://numpy.org/doc/{flag}/reference/generated/numpy.%s.html' + else: + return None + return template % np_fun_name + + +_is_check_link = os.getenv('TF_NP_CHECK_LINK', 'False') in ('True', 'true', '1') + + +def is_check_link(): + return _is_check_link + + +def set_check_link(value): + global _is_check_link + _is_check_link = value + + +def _add_np_doc(doc, np_fun_name, np_f, link): + """Appends the numpy docstring to `doc`, according to `set_np_doc_form`. + + See `set_np_doc_form` for how it controls the form of the numpy docstring. + + Args: + doc: the docstring to be appended to. + np_fun_name: the name of the numpy function. + np_f: (optional) the numpy function. + link: (optional) which link to use. See `np_doc` for details. + + Returns: + `doc` with numpy docstring appended. + """ + flag = get_np_doc_form() + if flag == 'inlined': + if _has_docstring(np_f): + doc += 'Documentation for `numpy.%s`:\n\n' % np_fun_name + # TODO(wangpeng): It looks like code snippets in numpy doc don't work + # correctly with doctest. Fix that and remove the reformatting of the np_f + # comment. 
+ doc += np_f.__doc__.replace('>>>', '>') + elif isinstance(flag, str): + if link is None: + url = generate_link(flag, np_fun_name) + elif isinstance(link, AliasOf): + url = generate_link(flag, link.value) + elif isinstance(link, Link): + url = link.value + else: + url = None + if url is not None: + if is_check_link(): + # Imports locally because some builds may not have `requests` + import requests # pylint: disable=g-import-not-at-top + + r = requests.head(url) + if r.status_code != 200: + raise ValueError( + f'Check link failed at [{url}] with status code {r.status_code}. ' + f'Argument `np_fun_name` is {np_fun_name}.' + ) + doc += 'See the NumPy documentation for [`numpy.%s`](%s).' % ( + np_fun_name, + url, + ) + return doc + + +_is_sig_mismatch_an_error = os.getenv( + 'TF_NP_SIG_MISMATCH_IS_ERROR', 'False' +) in ('True', 'true', '1') + + +def is_sig_mismatch_an_error(): + return _is_sig_mismatch_an_error + + +def set_is_sig_mismatch_an_error(value): + global _is_sig_mismatch_an_error + _is_sig_mismatch_an_error = value + + +def np_doc(np_fun_name, np_fun=None, unsupported_params=None, link=None): + """Attachs numpy docstring to a function. + + Args: + np_fun_name: name for the np_fun symbol. At least one of np_fun or + np_fun_name shoud be set. + np_fun: (optional) the numpy function whose docstring will be used. + unsupported_params: (optional) the list of parameters not supported by + tf.numpy. + link: (optional) which link to use. If `None`, a default link generated from + `np_fun_name` will be used. If an instance of `AliasOf`, `link.value` will + be used in place of `np_fun_name` for the link generation. If an instance + of `Link`, `link.value` will be used as the whole link. If an instance of + `NoLink`, no link will be added. + + Returns: + A function decorator that attaches the docstring from `np_fun` to the + decorated function. 
+ """ + np_fun_name_orig, np_fun_orig = np_fun_name, np_fun + np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun) + np_sig = _np_signature(np_fun) + if unsupported_params is None: + unsupported_params = [] + + def decorator(f): + """The decorator.""" + if hasattr(inspect, 'signature') and np_sig is not None: + try: + sig = inspect.signature(f) + except ValueError: + sig = None + if sig is not None: + for name, param in sig.parameters.items(): + np_param = np_sig.parameters.get(name) + if np_param is None: + if is_sig_mismatch_an_error(): + raise TypeError( + f"Cannot find parameter {name} in the numpy function's " + 'signature (which has these parameters: ' + f'{list(np_sig.parameters.keys())}). Argument `np_fun_name` ' + f'is {np_fun_name_orig}. Argument `np_fun` is {np_fun_orig}.' + ) + else: + continue + if is_sig_mismatch_an_error() and not _is_compatible_param_kind( + param.kind, np_param.kind + ): + raise TypeError( + f'Parameter {name} is of kind {param.kind} while in numpy it ' + f'is of kind {np_param.kind}. Argument `np_fun_name` is ' + f'{np_fun_name_orig}. Argument `np_fun` is {np_fun_orig}.' + ) + has_default = param.default != inspect.Parameter.empty + np_has_default = np_param.default != inspect.Parameter.empty + if is_sig_mismatch_an_error() and has_default != np_has_default: + raise TypeError( + 'Parameter {} should{} have a default value. Argument ' + '`np_fun_name` is {}. Argument `np_fun` is {}.'.format( + name, + '' if np_has_default else ' not', + np_fun_name_orig, + np_fun_orig, + ) + ) + for name in np_sig.parameters: + if name not in sig.parameters: + unsupported_params.append(name) + f.__doc__ = _np_doc_helper( + f, + np_fun, + np_fun_name=np_fun_name, + unsupported_params=unsupported_params, + link=link, + ) + return f + + return decorator + + +def np_doc_only(np_fun_name, np_fun=None): + """Attachs numpy docstring to a function. + + This differs from np_doc in that it doesn't check for a match in signature. 
+ + Args: + np_fun_name: name for the np_fun symbol. At least one of np_fun or + np_fun_name shoud be set. + np_fun: (optional) the numpy function whose docstring will be used. + + Returns: + A function decorator that attaches the docstring from `np_fun` to the + decorated function. + """ + np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun) + + def decorator(f): + f.__doc__ = _np_doc_helper(f, np_fun, np_fun_name=np_fun_name) + return f + + return decorator + + +# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-docstring-missing-newline,g-doc-return-or-yield,g-doc-args +@tf_export.tf_export('experimental.numpy.finfo', v1=[]) +@np_doc('finfo') +def finfo(dtype): + """Note that currently it just forwards to the numpy namesake, while + + tensorflow and numpy dtypes may have different properties. + """ + return np.finfo(_to_numpy_type(dtype)) + + +# pylint: enable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-docstring-missing-newline,g-doc-return-or-yield,g-doc-args + + +def _maybe_get_dtype(x): + """Returns a numpy type if available from x. Skips if x is numpy.ndarray.""" + # Don't put np.ndarray in this list, because np.result_type looks at the + # value (not just dtype) of np.ndarray to decide the result type. + if isinstance(x, numbers.Real): + return x + if isinstance(x, indexed_slices.IndexedSlices) or tensor_util.is_tf_type(x): + return _to_numpy_type(x.dtype) + if isinstance(x, dtypes.DType): + return x.as_numpy_dtype + if isinstance(x, (list, tuple)): + raise ValueError( + 'Cannot find dtype for type inference from argument `x` of a sequence ' + f'type {type(x)}. For sequences, please call this function on each ' + 'element individually.' + ) + return x + + +@tf_export.tf_export('experimental.numpy.result_type', v1=[]) +# Can't use np_doc because np.result_type is a builtin function. 
+@np_doc_only('result_type') +def result_type(*arrays_and_dtypes): # pylint: disable=missing-function-docstring + if ops.is_auto_dtype_conversion_enabled(): + # Use auto dtype conversion semantics for type inference. + dtype, _ = flexible_dtypes.result_type(*arrays_and_dtypes) + return dtype + arrays_and_dtypes = [ + _maybe_get_dtype(x) for x in nest.flatten(arrays_and_dtypes) + ] + if not arrays_and_dtypes: + # If arrays_and_dtypes is an empty list, let numpy decide what the dtype is. + arrays_and_dtypes = [np.asarray([])] + return np_dtypes._result_type(*arrays_and_dtypes) # pylint: disable=protected-access + + +def result_type_unary(a, dtype): # pylint: disable=missing-function-docstring + """Find the result type from a single input and a dtype.""" + if dtype: + # We need to let np_utils.result_type decide the dtype, not tf.zeros_like + return result_type(dtype) + + # np_utils.result_type treats string inputs as dtype strings, not as strings. + # but for unary we want to treat it as a string input. + if isinstance(a, str): + return np.str_ + elif isinstance(a, bytes): + return np.bytes_ + + # TF and numpy has different interpretations of Python types such as + # `float`, so we let `np_utils.result_type` decide. + return result_type(a) + + +def _result_type_binary(t1, t2): # pylint: disable=missing-function-docstring + """A specialization of result_type for 2 arguments for performance reasons.""" + try: + return np_dtypes._result_type( # pylint: disable=protected-access + _maybe_get_dtype(t1), + _maybe_get_dtype(t2), + ) + except ValueError: + return result_type(t1, t2) + + +@tf_export.tf_export('experimental.numpy.promote_types', v1=[]) +@np_doc('promote_types') +def promote_types(type1, type2): # pylint: disable=missing-function-docstring + type1 = _to_numpy_type(type1) + type2 = _to_numpy_type(type2) + return np_dtypes.canonicalize_dtype(np.promote_types(type1, type2)) + + +def tf_broadcast(*args): + """Broadcast tensors. 
+ + Args: + *args: a list of tensors whose shapes are broadcastable against each other. + + Returns: + Tensors broadcasted to the common shape. + """ + if len(args) <= 1: + return args + sh = array_ops.shape(args[0]) + for arg in args[1:]: + sh = array_ops.broadcast_dynamic_shape(sh, array_ops.shape(arg)) + return [array_ops.broadcast_to(arg, sh) for arg in args] + + +# TODO(wangpeng): Move the following functions to a separate file and check for +# float dtypes in each of them. + + +def get_static_value(x): + """A version of tf.get_static_value that returns None on float dtypes. + + It returns None on float dtypes in order to avoid breaking gradients. + + Args: + x: a tensor. + + Returns: + Same as `tf.get_static_value`, except that it returns None when `x` has a + float dtype. + """ + if isinstance(x, core.Tensor) and (x.dtype.is_floating or x.dtype.is_complex): + return None + return tensor_util.constant_value(x) + + +def _maybe_static(x): + value = get_static_value(x) + if value is None: + return x + else: + return value + + +# All the following functions exist becaues get_static_value can't handle +# their TF counterparts. 
+ + +def cond(pred, true_fn, false_fn): + """A version of tf.cond that tries to evaluate the condition.""" + v = get_static_value(pred) + if v is None: + return tf_cond.cond(pred, true_fn, false_fn) + if v: + return true_fn() + else: + return false_fn() + + +def add(a, b): + """A version of tf.add that eagerly evaluates if possible.""" + return _maybe_static(a) + _maybe_static(b) + + +def subtract(a, b): + """A version of tf.subtract that eagerly evaluates if possible.""" + return _maybe_static(a) - _maybe_static(b) + + +def greater(a, b): + """A version of tf.greater that eagerly evaluates if possible.""" + return _maybe_static(a) > _maybe_static(b) + + +def greater_equal(a, b): + """A version of tf.greater_equal that eagerly evaluates if possible.""" + return _maybe_static(a) >= _maybe_static(b) + + +def less_equal(a, b): + """A version of tf.less_equal that eagerly evaluates if possible.""" + return _maybe_static(a) <= _maybe_static(b) + + +def logical_and(a, b): + """A version of tf.logical_and that eagerly evaluates if possible.""" + a_value = get_static_value(a) + if a_value is not None: + if np.isscalar(a_value): + if a_value: + return _maybe_static(b) + else: + return a_value + else: + return a_value & _maybe_static(b) + else: + return a & _maybe_static(b) + + +def logical_or(a, b): + """A version of tf.logical_or that eagerly evaluates if possible.""" + a_value = get_static_value(a) + if a_value is not None: + if np.isscalar(a_value): + if a_value: + return a_value + else: + return _maybe_static(b) + else: + return a_value | _maybe_static(b) + else: + return a | _maybe_static(b) + + +def getitem(a, slice_spec): + """A version of __getitem__ that eagerly evaluates if possible.""" + return _maybe_static(a)[slice_spec] + + +def reduce_all(input_tensor, axis=None, keepdims=False): + """A version of tf.reduce_all that eagerly evaluates if possible.""" + v = get_static_value(input_tensor) + if v is None: + return math_ops.reduce_all(input_tensor, axis=axis, 
keepdims=keepdims) + else: + return v.all(axis=axis, keepdims=keepdims) + + +def reduce_any(input_tensor, axis=None, keepdims=False): + """A version of tf.reduce_any that eagerly evaluates if possible.""" + v = get_static_value(input_tensor) + if v is None: + return math_ops.reduce_any(input_tensor, axis=axis, keepdims=keepdims) + else: + return v.any(axis=axis, keepdims=keepdims) + + +def tf_rank(t): + r = t.shape.rank + if r is not None: + return r + return array_ops.rank(t) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..457e54641c6953a605ba89b462d6e896b9792c1e --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Ragged Tensors. + +This package defines ops for manipulating ragged tensors (`tf.RaggedTensor`), +which are tensors with non-uniform shapes. In particular, each `RaggedTensor` +has one or more *ragged dimensions*, which are dimensions whose slices may have +different lengths. 
For example, the inner (column) dimension of +`rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices +(`rt[0, :]`, ..., `rt[4, :]`) have different lengths. For a more detailed +description of ragged tensors, see the `tf.RaggedTensor` class documentation +and the [Ragged Tensor Guide](/guide/ragged_tensor). + +API docstring: tensorflow.ragged +""" +from tensorflow.python.ops.ragged import ragged_tensor diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d151940b127e521367edcd0a41ac159575b9b5a9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/dynamic_ragged_shape.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/dynamic_ragged_shape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9325a1a298ed2bc2dec92293f541d59514acb5b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/dynamic_ragged_shape.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_array_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_array_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e9a74dbe92cd666279271c91c8b4d95add1b557 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_array_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_autograph.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_autograph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d34600d0b84b4b9bcd32cfb69c40902cbb25f7eb Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_autograph.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_batch_gather_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_batch_gather_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cb0ba2f9573a319daf6aec13e481be4046c1c2f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_batch_gather_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_batch_gather_with_default_op.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_batch_gather_with_default_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e758a773de8a7e68bd26a461d6373a8323141aa Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_batch_gather_with_default_op.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_bincount_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_bincount_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bde3ceb281c6a5fcb9e60d356e42648497e928d2 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_bincount_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_check_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_check_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30a02512f8574418cfdbb7f0ce7f4af8057d9c79 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_check_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_concat_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_concat_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75ab36ad33214a4bf311a328c088d43ccee6f507 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_concat_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_config.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_config.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8f3d3b4e1ed6c04ca83740327b5b0384ccc56e7a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_config.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_conversion_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_conversion_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e836f34daa8ac41e76c977db197be18101a324c9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_conversion_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_dispatch.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_dispatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ce05f340e9aaca10d3d50d8e69ffd3f92f0c2fa Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_dispatch.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_embedding_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_embedding_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f757527ab41edafe0e7d458feda8f69ba6ec6f92 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_embedding_ops.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_factory_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_factory_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cc1f16a0bd645539be4e4677fc63beaf8a27376 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_factory_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_functional_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_functional_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..808d33381e7296e25fb1cb34d305e564bf0d7ddd Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_functional_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_gather_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_gather_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80e69ee0e543506ea2fe435128cdd6a89b700e92 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_gather_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_getitem.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_getitem.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..9a22d21c3448c2a995dd93591cb8823ed3b95d54 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_getitem.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_image_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_image_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66174ba5170c7f8d11fafdd06a96dc5c9d75e2b2 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_image_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_map_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_map_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bece84207b0040c0085587bdcbf124b386e67996 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_map_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_math_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_math_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51a830896f6ea60689418ad4ac04bda1264d21fd Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_math_ops.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_operators.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_operators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08d06418d87ea3c6fa2d1bcc79043be2700a783a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_operators.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dfbeaeee0823a0e701a2044235a603ffe045be9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_squeeze_op.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_squeeze_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa4012d909e885736d85b8d9cb0bf4c47ffa8e7d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_squeeze_op.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_string_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_string_ops.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c318174c962d7d29dd729a3b27419b5cace23bc5 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_string_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02a3d99b2a1b7db857b3571249d3493acdbcfd4d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor_shape.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor_shape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b64cbe790d73b62e9088eadd96a42887c32ae3f0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor_shape.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor_test_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor_test_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72b083055c0c9a1016d026c1ae7fd798a0ca3d16 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor_test_ops.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor_value.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor_value.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0681f2c96e30fddebe5eae7efc03d0d3320293b8 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor_value.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d32dce689f01b59c0bc24e3d5a792e2c815978cc Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_where_op.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_where_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b44738bf83cbacec7900c60e89ac0fb9914dc1e0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_where_op.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/row_partition.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/row_partition.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8ca74ddfa0291d826cc6ec73fadb5ec496b7d2a9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/row_partition.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/segment_id_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/segment_id_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4231bb3e841ce27fac7da977c31ae5eec419d09b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/segment_id_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/dynamic_ragged_shape.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/dynamic_ragged_shape.py new file mode 100644 index 0000000000000000000000000000000000000000..30b0534accb95d353baa4b2ad54edcf7d030ba07 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/dynamic_ragged_shape.py @@ -0,0 +1,3292 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Shapes & broadcasting for RaggedTensors. + +TODO(martinz): make this suitable for output for tf.shape +TODO(martinz): replace ragged_tensor_shape with this. +""" + +import abc +from typing import Any, Iterable, Optional, Sequence, Tuple, Union + +import numpy as np +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import extension_type +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged.row_partition import RowPartition +from tensorflow.python.ops.ragged.row_partition import RowPartitionSpec +from tensorflow.python.types import core +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +class _DynamicRaggedShapeBatchEncoder(extension_type.ExtensionTypeBatchEncoder): + """A batch encoder for DynamicRaggedShape below.""" + + def batch(self, spec: "DynamicRaggedShape.Spec", + batch_size) -> "DynamicRaggedShape.Spec": + if spec.num_row_partitions: + new_head = _batch_rp_spec_head(spec._row_partitions[0], batch_size) # pylint:disable=protected-access + new_tail = [_batch_rp_spec(rp, batch_size) for rp in spec._row_partitions] # pylint:disable=protected-access + new_rp = [new_head] + new_tail + new_static_inner_shape = _batch_static_inner_shape( + spec._static_inner_shape, batch_size) # pylint:disable=protected-access + + return 
DynamicRaggedShape.Spec( + row_partitions=new_rp, + static_inner_shape=new_static_inner_shape, + dtype=spec.dtype) + elif batch_size is None: + if spec.inner_rank == 0: + return DynamicRaggedShape.Spec._from_tensor_shape( # pylint:disable=protected-access + [None], + 0, + dtype=spec.dtype) + else: + # Might be None + new_head = RowPartitionSpec( + uniform_row_length=spec._dimension(0), # pylint:disable=protected-access + dtype=spec.dtype) + new_static_inner_shape = _batch_static_inner_shape( + spec._static_inner_shape, batch_size) # pylint:disable=protected-access + return DynamicRaggedShape.Spec( + row_partitions=[new_head], + static_inner_shape=new_static_inner_shape, + dtype=spec.dtype) + else: + + return DynamicRaggedShape.Spec( + row_partitions=[], + static_inner_shape=_batch_tensor_shape( + spec._static_inner_shape, # pylint:disable=protected-access + batch_size), + dtype=spec.dtype) + + def unbatch(self, + spec: "DynamicRaggedShape.Spec") -> "DynamicRaggedShape.Spec": + if spec.num_row_partitions: + result = [] + head = spec._row_partitions[0] # pylint:disable=protected-access + scale = None if head.uniform_row_length is None else head.nrows + + for rp in spec._row_partitions[1:]: # pylint:disable=protected-access + if scale is None: + result.append( + RowPartitionSpec( + nrows=None, + nvals=None, + uniform_row_length=rp.uniform_row_length, + dtype=spec.dtype)) + else: + nrows = None if rp.nrows is None else rp.nrows // scale + if rp.uniform_row_length is None: + scale = None + result.append( + RowPartitionSpec( + nrows=nrows, + nvals=None, + uniform_row_length=None, + dtype=spec.dtype)) + else: + result.append( + RowPartitionSpec( + nrows=nrows, + nvals=rp.nvals // scale, + uniform_row_length=rp.uniform_row_length, + dtype=spec.dtype)) + return DynamicRaggedShape.Spec( + row_partitions=result, + static_inner_shape=_unbatch_static_inner_shape( + spec._static_inner_shape, scale), # pylint:disable=protected-access + dtype=spec.dtype) + else: # 
spec.num_row_partitions == 0 + return DynamicRaggedShape.Spec( + row_partitions=[], + static_inner_shape=spec._static_inner_shape[1:], # pylint:disable=protected-access + dtype=spec.dtype) + + def decode(self, spec: "DynamicRaggedShape.Spec", + encoding) -> "DynamicRaggedShape": + return DynamicRaggedShape.from_tensor(encoding, dtype=spec.dtype) + + def encode( + self, + spec: "DynamicRaggedShape.Spec", + value, + minimum_rank=0) -> Union[ragged_tensor.RaggedTensor, tensor_lib.Tensor]: + return ones(value, dtype=dtypes.bool) + + def encoding_specs( + self, spec: "DynamicRaggedShape.Spec" + ) -> Union[ragged_tensor.RaggedTensorSpec, tensor_lib.TensorSpec]: + if spec.rank != 0: + ragged_rank = spec.num_row_partitions + else: + # special case: need to unbatch twice to get ragged tensor. + ragged_rank = -1 + return ragged_tensor.RaggedTensorSpec( + shape=spec._to_tensor_shape(), # pylint:disable=protected-access + dtype=dtypes.bool, + ragged_rank=ragged_rank, + row_splits_dtype=spec.dtype) + + +# TODO(martinz): allow inner_shape to be a fully defined TensorShape. +# A "fully defined TensorShape" means one where the rank and all dimensions are +# known. +# Allowing inner_shape might mean allowing inner_shape to be initialized by +# a fully defined TensorShape, or it might mean that you can actually store +# TensorShape in the inner_shape field. This could conceivably construct +# a DynamicRaggedShape that was dtype agnostic. +# +# TODO(martinz): unify the impl of the determination of index type across +# RowPartition and DynamicRaggedShape. +@tf_export("experimental.DynamicRaggedShape") +class DynamicRaggedShape(extension_type.BatchableExtensionType): + """The shape of a ragged or dense tensor. + + Ragged shapes are encoded using two fields: + + * `inner_shape`: An integer vector giving the shape of a dense tensor. + * `row_partitions`: A list of `RowPartition` objects, describing how + that flat shape should be partitioned to add ragged axes. 
+ + If a DynamicRaggedShape is the shape of a RaggedTensor rt, then: + 1. row_partitions = rt._nested_row_partitions + (and thus len(row_partitions) > 0) + 2. inner_shape is the shape of rt.flat_values + + If a DynamicRaggedShape is the shape of a dense tensor t, then: + 1. row_partitions = [] + 2. inner_shape is the shape of t. + + Examples: + + The following table gives a few examples (where `RP(lengths)` is short + for `RowPartition.from_lengths(lengths)`): + + Row Partitions | Inner Shape | Example Tensor + --------------------------- | ------------ | ---------------------------- + [] | [2, 3] | `[[1, 2, 3], [4, 5, 6]]` + [RP([2, 0, 3])] | [5] | `[[1, 2], [], [3, 4, 5]]` + [RP([2, 1])] | [3, 2] | `[[[1, 2], [3, 4]], [[5, 6]]]` + [RP([2, 1]), RP([2, 1, 2])] | [5] | `[[[1, 2], [3]], [[4, 5]]]` + """ + _row_partitions: Tuple[RowPartition, ...] + _inner_shape: tensor_lib.Tensor + _static_inner_shape: tensor_shape.TensorShape + __batch_encoder__ = _DynamicRaggedShapeBatchEncoder() + __name__ = "tf.DynamicRaggedShape" + + def __init__(self, + row_partitions: Sequence[RowPartition], + inner_shape: core.TensorLike, + dtype: Optional[dtypes.DType] = None, + validate: bool = False, + static_inner_shape: ... = None): + """Core constructor for a DynamicRaggedShape. + + Create a DynamicRaggedShape. This can be used to construct a + DynamicRaggedShape representing a ragged or dense shape. If row_partitions + is an empty list, then this is equivalent to a dense shape. + + If row_partitions is specified, then the num_row_partitions will be equal + to len(row_partitions). There are several checks made. + Specifically: + 1. Consecutive row_partitions must have consistent nvals and nrows. + 2. The last row_partitions must have nvals equal to the first element of + inner_shape. + + The inner_shape is converted to a tensor. + All row_partitions and the inner_shape are converted to the same dtype + (int64 or int32). + + Args: + row_partitions: the row_partitions of the shape. 
+ inner_shape: if len(row_partitions) > 0, the shape of the flat_values. + Otherwise, the shape of the tensor. + dtype: tf.int64, tf.int32, or None representing the preferred dtype. + validate: if true, dynamic validation is applied to the shape. + static_inner_shape: if len(row_partitions) > 0, the static shape of the + flat_values. Otherwise, the static shape of the tensor. Should be + convertible to a TensorShape. + """ + if not isinstance(row_partitions, Iterable): + raise TypeError( + "row_partitions should be a list of row partitions. Instead, got " + + str(row_partitions)) + for x in row_partitions: + if not isinstance(x, RowPartition): + raise TypeError("row_partitions contains " + str(x) + + " which is not a RowPartition") + dtype = _find_dtype_iterable(row_partitions, dtype) + dtype = _find_dtype(inner_shape, dtype) + if (isinstance(inner_shape, np.ndarray) and + inner_shape.dtype == np.int32 and dtype is None): + dtype = dtypes.int32 + dtype = _find_dtype(dtypes.int64, dtype) + + row_partitions = tuple([rp.with_dtype(dtype) for rp in row_partitions]) + self._row_partitions = row_partitions + self._inner_shape = ops.convert_to_tensor( + inner_shape, dtype_hint=dtype, name="inner_dim_sizes") + if self._inner_shape.dtype != dtype: + self._inner_shape = math_ops.cast(self._inner_shape, dtype) + + checks = [] + # Validate shapes. 
+ if self._row_partitions: + for axis, rp in enumerate(self._row_partitions): + if axis > 0: + previous_row_partition = self._row_partitions[axis - 1] + msg = ("RowPartitions in DynamicRaggedShape do not align " + f"between {axis - 1} and {axis}") + static_nrows = rp.static_nrows + static_nvals = previous_row_partition.static_nvals + if (static_nrows is not None) and (static_nvals is not None): + if static_nrows != static_nvals: + raise ValueError(msg) + else: + continue + if validate: + checks.append( + check_ops.assert_equal( + previous_row_partition.nvals(), rp.nrows(), message=msg)) + + self._inner_shape.shape.assert_has_rank(1) + + self._static_inner_shape = tensor_util.constant_value_as_shape( + self._inner_shape) + if static_inner_shape is not None: + self._static_inner_shape = self._static_inner_shape.merge_with( + static_inner_shape) + + if row_partitions: + last_row_partition = row_partitions[-1] + static_nvals = last_row_partition.static_nvals + static_inner_shape_nvals = tensor_shape.dimension_value( + self._static_inner_shape[0]) + if static_nvals is not None and static_inner_shape_nvals is not None: + if static_nvals != static_inner_shape_nvals: + raise ValueError("Last row partition does not match inner_shape.") + elif validate: + checks.append( + check_ops.assert_equal( + last_row_partition.nvals(), + self._inner_shape[0], + message="Last row partition does not match inner_shape.")) + if checks: + self._inner_shape = control_flow_ops.with_dependencies( + checks, self._inner_shape, name="inner_shape_validated") + self._row_partitions = [ + rp._with_dependencies(checks) for rp in self._row_partitions # pylint: disable=protected-access + ] + + @classmethod + def from_lengths(cls, + lengths: Sequence[Union[Sequence[int], int]], + num_row_partitions=None, + dtype=dtypes.int64): + """Creates a shape with the given lengths and num_row_partitions. + + The lengths can either be a nonnegative int or a list of nonnegative ints. 
+ + If num_row_partitions is None, then the minimal num_row_partitions is used. + + For example, [2, (3, 2)] is the shape of [[0, 0, 0], [0, 0]], and + [2, 2] is the shape of [[0, 0], [0, 0]] + + This chooses the minimal num_row_partitions required (including zero). + + The following table gives a few examples (where `RP(lengths)` is short + for `RowPartition.from_lengths(lengths)`): + + For example: + from_lengths | row_partitions | inner_shape + ---------------------- | --------------------------| ------------- + [] | [] | [] + [2, (3, 2)] | [RP([3, 2])] | [5] + [2, 2] | [] | [2, 2] + [2, (3, 2), 7] | [RP([3, 2])] | [5, 7] + [2, (2, 2), 3] | [RP([2, 2])] | [4, 3] + [2, 2, 3] | [] | [2, 2, 3] + [2, (2, 1), (2, 0, 3)] | [RP(2, 1), RP([2, 0, 3])] | [5] + + If we want the row partitions to end with uniform row partitions, then + we can set num_row_partitions. + + For example, + below URP(3, 12) is RowPartition.from_uniform_row_length(3, 12) + + from_lengths | num_row_partitions | row_partitions | inner_shape + ---------------| -------------------|--------------------------|------------ + [2, (3, 2), 2] | 2 | [RP([3, 2]), URP(2, 10)] | [10] + [2, 2] | 1 | [URP(2, 4)] | [4] + [2, 2, 3] | 0 | [] | [2, 2, 3] + [2, 2, 3] | 1 | [URP(2, 4)] | [4, 3] + [2, 2, 3] | 2 | [URP(2, 4), URP(3, 12)] | [12] + + + + Representing the shapes from init(): + + from_lengths | Tensor Example + ------------------------ | ------------------------------ + `[2, 3]` | `[[1, 2, 3], [4, 5, 6]]` + `[3, (2, 0, 3)]` | `[[1, 2], [], [3, 4, 5]]` + `[2, (2, 1), 2]` | `[[[1, 2], [3, 4]], [[5, 6]]]` + `[2, (2, 1), (2, 1, 2)]` | `[[[1, 2], [3]], [[4, 5]]]` + + Args: + lengths: the lengths of sublists along each axis. + num_row_partitions: the num_row_partitions of the result or None + indicating the minimum number of row_partitions. + dtype: the dtype of the shape (tf.int32 or tf.int64). 
+ + Returns: + a new DynamicRaggedShape + """ + if not isinstance(lengths, list): + raise ValueError("lengths should be a list") + for x in lengths: + if not _is_int_or_tuple_of_ints(x): + raise ValueError( + "element of lengths should be int or tuple of ints: instead %r" % + (x,)) + + if num_row_partitions is None: + # Calculate the minimal num_row_partitions. + is_list = [not isinstance(x, int) for x in lengths] + if any(is_list): + # Last index when not a list. + num_row_partitions = len(is_list) - is_list[-1::-1].index(True) - 1 + else: + num_row_partitions = 0 + + if not isinstance(num_row_partitions, int): + raise ValueError("num_row_partitions should be an int or None") + + if not lengths: + if num_row_partitions > 0: + raise ValueError("num_row_partitions==0 for a scalar shape") + return DynamicRaggedShape([], [], dtype=dtype) + + if not num_row_partitions < len(lengths): + raise ValueError("num_row_partitions should be less than `len(lengths)` " + "if shape is not scalar.") + + if num_row_partitions > 0: + (row_partitions, nvals) = _to_row_partitions_and_nvals_from_lengths( + lengths[:num_row_partitions + 1]) + inner_shape = [nvals] + lengths[num_row_partitions + 1:] + return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype) + else: + return DynamicRaggedShape([], lengths, dtype=dtype) + + @classmethod + def from_row_partitions(cls, row_partitions, dtype=None): + """Create a shape from row_partitions. + + Args: + row_partitions: a nonempty list of RowPartition objects. + dtype: the dtype to use, or None to use the row_partitions dtype. + + Returns: + a DynamicRaggedShape with inner_rank==1. 
+ """ + if not row_partitions: + raise ValueError("row_partitions cannot be empty") + inner_shape = [row_partitions[-1].nvals()] + return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype) + + @classmethod + def _from_inner_shape(cls, inner_shape, dtype=None): + """Create a shape from inner_shape, where num_row_partitions == 0.""" + return DynamicRaggedShape([], inner_shape, dtype=dtype) + + # pylint: disable=protected-access + @classmethod + def from_tensor(cls, t, dtype=None): + """Constructs a ragged shape for a potentially ragged tensor.""" + if ragged_tensor.is_ragged(t): + return DynamicRaggedShape( + t._nested_row_partitions, _flat_values_shape(t), dtype=dtype) + else: + return DynamicRaggedShape._from_inner_shape( + array_ops.shape(t), dtype=dtype) + + @property + def row_partitions(self): + """The row_partitions of the shape.""" + return self._row_partitions + + @property + def num_row_partitions(self): + """The number of row_partitions of the shape.""" + return len(self._row_partitions) + + @property + def dtype(self): + """The dtype of the shape -- one of tf.int32 or tf.int64.""" + return self._inner_shape.dtype + + def _static_inner_shape_as_list(self, truncate_first): + """Returns the lengths of the inner shape (if rank known), or [...].""" + if self._static_inner_shape.rank is None: + return [...] + result = self._static_inner_shape.as_list() + if truncate_first: + return result[1:] + return result + + def static_lengths(self, ragged_lengths=True): + """Returns a list of statically known axis lengths. + + This represents what values are known. For each row partition, it presents + either the uniform row length (if statically known), + the list of row lengths, or none if it is not statically known. + For the inner shape, if the rank is known, then each dimension is reported + if known, and None otherwise. If the rank of the inner shape is not known, + then the returned list ends with an ellipsis. 
+ + Args: + ragged_lengths: If false, returns None for all ragged dimensions. + + Returns: + A Sequence[Union[Sequence[int],int, None]] of lengths, with a possible + Ellipsis at the end. + """ + if self.num_row_partitions == 0: + return self._static_inner_shape_as_list(False) + first_dim = self.row_partitions[0].static_nrows + if isinstance(first_dim, tensor_shape.Dimension): + first_dim = first_dim.value + rp_dims = [first_dim] + for rp in self.row_partitions: + if rp.is_uniform(): + rp_dims.append(rp.static_uniform_row_length) + elif ragged_lengths: + const_vals = tensor_util.constant_value(rp.row_lengths()) + if const_vals is None: + rp_dims.append(None) + else: + rp_dims.append(tuple(const_vals.tolist())) + else: + rp_dims.append(None) + + return rp_dims + self._static_inner_shape_as_list(True) + + def __repr__(self): + lengths = _list_with_ellipsis_to_str(self.static_lengths()) + return ("" % + (lengths, self.num_row_partitions)) + + def _to_tensor_shape(self) -> tensor_shape.TensorShape: + """Returns a TensorShape representation of the shape.""" + lengths = self.static_lengths(ragged_lengths=False) + if not lengths: + return tensor_shape.TensorShape(()) + if lengths[-1] == Ellipsis: + return tensor_shape.TensorShape(None) + return tensor_shape.TensorShape(lengths) + + def _slice_shape(self, start, stop): + """Returns a shape self[start:stop]. + + If start == 0, then this truncates dimensions after stop. + If start != 0, then this will return a shape with num_row_partitions == 0. + + See __getitem__. + + Args: + start: the first dimension. 0 <= start <= rank + stop: the last dimension (exclusive). 
0 <= stop <= rank + """ + if stop <= start: + return DynamicRaggedShape._from_inner_shape([]) + elif start == 0: + if stop <= self.num_row_partitions: + if stop == 1: + return DynamicRaggedShape._from_inner_shape( + [self.row_partitions[0].nrows()]) + new_row_partitions = self.row_partitions[:stop - 1] + new_inner_shape = [new_row_partitions[-1].nvals()] + return DynamicRaggedShape(new_row_partitions, new_inner_shape) + else: + if self.rank is None: + new_inner_rank = stop - self.num_row_partitions + new_inner_shape = self.inner_shape[:new_inner_rank] + return DynamicRaggedShape( + row_partitions=self.row_partitions, + inner_shape=new_inner_shape, + static_inner_shape=None, + validate=False) + + elif self.rank <= stop: + return self + new_inner_rank = stop - self.num_row_partitions + new_inner_shape = self.inner_shape[:new_inner_rank] + return DynamicRaggedShape( + row_partitions=self.row_partitions, + inner_shape=new_inner_shape, + static_inner_shape=tensor_shape.TensorShape([None] * + new_inner_rank), + validate=False) + else: + if self.rank is None or stop < self.rank: + partial = self._slice_shape(0, stop) + else: + partial = self + + for x in partial.row_partitions: + if not x.is_uniform(): + raise ValueError("All relevant dimensions must be uniform") + if partial.rank is None: + # TODO(martinz): Implement _with_num_row_partitions(0) if rank is + # unknown, and remove. + raise NotImplementedError( + "__getitem__[start:stop] where start > 0 not implemented") + + return DynamicRaggedShape._from_inner_shape( + partial._with_num_row_partitions(0).inner_shape[start:]) + + def _dimension(self, index): + """Return a dimension, if the dimension is not ragged (see __getitem__).""" + rank = self.rank + if not isinstance(index, int): + raise TypeError("index should be an int") + if (self.num_row_partitions == 0 or index > self.num_row_partitions + 1): + # If num_row_partitions > 0 and index <= num_row_partitions + 1, then + # we are safe. 
+ if rank is None: + raise ValueError( + "Rank must be known to use __getitem__ on a large index.") + if index >= rank: + raise IndexError("Index is too big: " + str(index) + ">=" + str(rank)) + if index < 0: + raise IndexError("Index must be non-negative: " + str(index)) + elif not self.is_uniform(index): + raise ValueError("Index " + str(index) + " is not uniform") + elif index == 0 and self.num_row_partitions > 0: + static_nrows = self.row_partitions[0].static_nrows + if static_nrows is not None: + return constant_op.constant(static_nrows, dtype=self.dtype) + return self.row_partitions[0].nrows() + elif self.num_row_partitions == 0: + static_result = tensor_shape.dimension_value( + self._static_inner_shape[index]) + if static_result is not None: + return constant_op.constant(static_result, dtype=self.dtype) + return self.inner_shape[index] + elif index > self.num_row_partitions: + static_result = tensor_shape.dimension_value( + self._static_inner_shape[index - self.num_row_partitions]) + if static_result is not None: + return constant_op.constant(static_result, dtype=self.dtype) + + return self.inner_shape[index - self.num_row_partitions] + else: + return self.row_partitions[index - 1].uniform_row_length() + + def __getitem__(self, index): + """Returns a dimension or a slice of the shape. + + Ragged shapes can have ragged dimensions that depend upon other dimensions. + Therefore, if you ask for a dimension that is ragged, this function returns + a ValueError. For similar reasons, if a slice is selected that includes + a ragged dimension without including the zero dimension, then this fails. + + Any slice that does not start at zero will return a shape + with num_row_partitions == 0. + + Args: + index: the index: can be an int or a slice. + + Raises: + IndexError: if the index is not in range. + ValueError: if the rank is unknown, or a ragged rank is requested + incorrectly. 
+ """ + rank = self.rank + if isinstance(index, slice): + + if (index.step is not None) and (index.step != 1): + raise IndexError("Cannot stride through a shape") + start = index.start + stop = index.stop + if start is None: + start = 0 + start = _fix_start_index(start, rank, self.num_row_partitions) + stop = _fix_stop_index(stop, rank) + return self._slice_shape(start, stop) + elif isinstance(index, int): + if index < 0: + if rank is None: + raise ValueError( + "Rank must be known to use __getitem__ with a negative index.") + return self._dimension(rank + index) + return self._dimension(index) + else: + raise TypeError("Argument is not an int or a slice") + + def _num_elements(self): + """Number of elements in a shape. + + Returns: + The number of elements in the shape. + + """ + return math_ops.reduce_prod(self.inner_shape) + + def _num_slices_in_dimension(self, axis): + """The total size of a dimension (like nvals). + + Effectively, this is self[:axis+1]._num_elements() + + Example: + shape = DynamicRaggedShape._from_inner_shape([2, 3, 4]) + shape._num_slices_in_dimension(0) = 2 + shape._num_slices_in_dimension(1) = 6 + shape._num_slices_in_dimension(2) = 24 + shape._num_slices_in_dimension(-1) = 24 + shape._num_slices_in_dimension(-2) = 6 + shape._num_slices_in_dimension(-2) = 2 + + Args: + axis: the last axis to include in the number of elements. If negative, + then axis = axis + rank. + + Returns: + The number of elements in the shape. 
+ """ + if not isinstance(axis, int): + raise TypeError("axis must be an integer") + if axis < 0: + rank = self.rank + if rank is None: + raise ValueError( + "You can't use negative values if the rank is undefined") + axis = axis + rank + if axis == 0: + return self._dimension(0) + if axis <= self.num_row_partitions: + return self.row_partitions[axis - 1].nvals() + # If self.num_row_partitions = 1, and + # self.inner_shape=[3,5,6], and axis=2, then you want: + # 15 = 3 * 5 = math_ops.reduce_prod(self.inner_shape[:2]) + # 2 = axis - (self.num_row_partitions - 1) + # If num_row_partitions=0, and + # self.inner_shape=[3,5,6] and axis=2, then you want: + # 90 = 3 * 5 * 6 = math_ops.reduce_prod(self.inner_shape[:3]) + # 3 = axis - (self.num_row_partitions - 1) + remainder = axis - (self.num_row_partitions - 1) + return _reduce_prod_patch(self.inner_shape[:remainder]) + + def is_uniform(self, axis): + """Returns true if the indicated dimension is uniform.""" + if not isinstance(axis, int): + raise TypeError("axis must be an integer") + rank = self.rank + if axis < 0: + raise IndexError("Negative axis values are not supported") + elif rank is not None and axis >= rank: + raise IndexError("Expected axis=%s < rank=%s" % (axis, rank)) + else: + return ((axis == 0 or axis > len(self._row_partitions)) # pylint:disable=superfluous-parens + or self._row_partitions[axis - 1].is_uniform()) + + @property + def rank(self): + """The number of dimensions in this shape, or None if unknown.""" + inner_rank = self.inner_rank + if inner_rank is None: + return None + else: + return self.num_row_partitions + inner_rank + + @property + def inner_shape(self): + """The inner dimension sizes for this shape. + + Returns: + A 1-D integer `Tensor`. 
+ """ + return self._inner_shape + + @property + def inner_rank(self): + """The rank of inner_shape.""" + return tensor_shape.dimension_value(self._static_inner_shape.rank) + + def _alt_inner_shape(self, new_inner_rank): + """Get an alternative inner shape with higher or lower rank. + + For the rank of the inner shape to be be higher, the last few ragged + dimensions must have uniform_row_length. + + Args: + new_inner_rank: the new rank of the inner_shape + + Returns: + A new inner_shape of rank new_inner_rank. + """ + if new_inner_rank == 0: + raise ValueError("new_inner_rank cannot be zero") + elif self.inner_rank == 0: + raise ValueError("old inner_rank cannot be zero") + elif new_inner_rank == self.inner_rank: + return self.inner_shape + elif new_inner_rank < self.inner_rank: + if self._static_inner_shape.is_fully_defined(): + return _alt_inner_shape_from_tensor_shape(self._static_inner_shape, + self.dtype, new_inner_rank) + first_dimension = self._num_slices_in_dimension(-new_inner_rank) + if new_inner_rank == 1: + return array_ops.expand_dims(first_dimension, 0) + remaining_dimensions = self.inner_shape[1 - new_inner_rank:] + return array_ops.concat( + [array_ops.expand_dims(first_dimension, 0), remaining_dimensions], + axis=0) + else: + assert new_inner_rank > self.inner_rank + new_dimensions = new_inner_rank - self.inner_rank + if any( + [not x.is_uniform() for x in self.row_partitions[-new_dimensions:]]): + raise ValueError("Cannot get an inner shape over a ragged dimension") + first_dimension = self._num_slices_in_dimension(-new_inner_rank) + new_dimensions = new_inner_rank - self.inner_rank + new_dims = [first_dimension] + [ + x.uniform_row_length() for x in self.row_partitions[-new_dimensions:] + ] + return array_ops.concat( + [array_ops_stack.stack(new_dims), self.inner_shape[1:]], axis=0) + + def _inner_shape_dim(self, dimension): + """Returns an int or a tensor representing _inner_shape[dimension].""" + result = 
tensor_shape.dimension_value(self._static_inner_shape[dimension]) + return self._inner_shape[dimension] if result is None else result + + def _with_inner_rank(self, inner_rank): + """Returns the same shape but a different inner_rank. + + All dimensions that are to be represented in the inner_shape must be dense. + See inner_rank. + + Args: + inner_rank: the new inner_rank of the shape. + + Returns: + the same shape but a different inner_rank + + Raises: + ValueError if the new dense rank is invalid, or the old rank is unknown. + """ + rank = self.rank + if rank is None: + raise ValueError("Rank must be known to adjust inner_rank") + elif rank < 2: + if inner_rank == rank: + return self + raise ValueError("Cannot change inner_rank if rank < 2") + else: + # When self.rank is not None: + # self.rank = self.inner_rank + self.num_row_partitions + new_num_row_partitions = rank - inner_rank + return self._with_num_row_partitions(new_num_row_partitions) + + def _with_num_row_partitions(self, num_row_partitions): + """Creates an identical shape with the given num_row_partitions. + + Note that the shape must be statically refactorable to this rank. + In particular: + * rank must be known. + * num_row_partitions must be a nonnegative int. + * num_row_partitions must be less than the rank of the shape + * num_row_partitions must be greater or equal to the index of any ragged + dimension. + + Note that if the num_row_partitions is the same, self is returned. + + Args: + num_row_partitions: the target num_row_partitions (must be a nonnegative + int). + + Returns: + a shape with a (possibly) different num_row_partitions. + + Raises: + ValueError: if the rank is unknown, the argument is not a nonnegative int, + or there is a dimension that is nonuniform. 
+ """ + rank = self.rank + if rank is None: + raise ValueError("Rank must be known to adjust num_row_partitions") + if not isinstance(num_row_partitions, int): + raise ValueError("num_row_partitions must be an int") + if num_row_partitions < 0: + raise ValueError("num_row_partitions must be nonnegative") + if num_row_partitions == self.num_row_partitions: + return self + if num_row_partitions >= rank: + raise ValueError("num_row_partitions must be less than rank") + if num_row_partitions > self.num_row_partitions: + num_row_partitions_diff = num_row_partitions - self.num_row_partitions + new_inner_rank = self.rank - num_row_partitions + nvals = self._inner_shape_dim(0) + more_rp = [] + for i in range(num_row_partitions_diff): + nrows = nvals + row_length = self._inner_shape_dim(i + 1) + nvals = nrows * row_length + rp = RowPartition.from_uniform_row_length( + row_length, nrows=nrows, dtype=self.dtype) + more_rp.append(rp) + alt_inner = self._alt_inner_shape(new_inner_rank) + return DynamicRaggedShape(list(self.row_partitions) + more_rp, alt_inner) + else: + assert num_row_partitions < self.num_row_partitions + return DynamicRaggedShape( + self.row_partitions[:num_row_partitions], + self._alt_inner_shape(self.rank - num_row_partitions)) + + def _merge_dims(self, outer_axis: int, + inner_axis: int) -> "DynamicRaggedShape": + """Merges outer_axis...inner_axis into a single dimension. + + Returns a copy of this shape with the specified range of dimensions + flattened into a single dimension, with elements in row-major order. + + #### Examples: + + >>> tf.experimental.DynamicRaggedShape.from_lengths([2, (2,1), + ... (1,2,3)])._merge_dims(0, 1) + + >>> tf.experimental.DynamicRaggedShape.from_lengths([2, (2,1), + ... (1,2,3)])._merge_dims(1, 2) + + >>> tf.experimental.DynamicRaggedShape.from_lengths([2, (2,1), + ... (1,2,3)])._merge_dims(0, 2) + + + To mimic the behavior of `np.flatten` (which flattens all dimensions), use + `rt.merge_dims(0, -1). 
To mimic the behavior of `tf.layers.Flatten` (which + flattens all dimensions except the outermost batch dimension), use + `rt.merge_dims(1, -1)`. + + Args: + outer_axis: `int`: The first dimension in the range of dimensions to + merge. May be negative if `self.shape.rank` is statically known. + inner_axis: `int`: The last dimension in the range of dimensions to merge. + May be negative if `self.shape.rank` is statically known. + + Returns: + A copy of this shape, with the specified dimensions merged into a + single dimension. The returned shape will be + `self.shape[:outer_axis] + [N] + self.shape[inner_axis + 1:]`, where `N` + is the total number of slices in the merged dimensions. + """ + outer_axis = array_ops.get_positive_axis( + outer_axis, self.rank, axis_name="outer_axis", ndims_name="rank(self)") + inner_axis = array_ops.get_positive_axis( + inner_axis, self.rank, axis_name="inner_axis", ndims_name="rank(self)") + if not outer_axis <= inner_axis: + raise ValueError(f"Expected outer_axis ({outer_axis}) to be less than or " + f"equal to inner_axis ({inner_axis}).") + if outer_axis == inner_axis: + return self + if self.num_row_partitions == 0: + # A dense tensor. + (new_inner_shape, + new_static_inner_shape) = _merge_inner_shape(self._inner_shape, + self._static_inner_shape, + outer_axis, inner_axis) + return DynamicRaggedShape([], + new_inner_shape, + dtype=self.dtype, + static_inner_shape=new_static_inner_shape) + if inner_axis <= self.num_row_partitions: + # Here, we are merging the row_partitions, + # but the inner_shape is unchanged. + if outer_axis == 0: + # There is no need to merge axes before the first, just truncate them. 
+ return DynamicRaggedShape( + self._row_partitions[inner_axis:], + self.inner_shape, + dtype=self.dtype, + static_inner_shape=self._static_inner_shape) + prefix_rp = self._row_partitions[:outer_axis - 1] + suffix_rp = self._row_partitions[inner_axis:] + internal_rp = self._row_partitions[outer_axis - 1:inner_axis] + new_rp = prefix_rp + (_merge_row_partitions(internal_rp),) + suffix_rp + + return DynamicRaggedShape( + new_rp, + self.inner_shape, + dtype=self.dtype, + static_inner_shape=self._static_inner_shape) + elif outer_axis > self.num_row_partitions: + # In this scenario, only the inner_shape is changed. + # Example #1: + # if [2, (1, 2), 5, 3], num_row_partitions=1, outer_axis=2, inner_axis=3. + # Result: [2, (1, 2), 15], num_row_partitions=1, outer_axis=2, + # inner_axis=3. + (new_inner_shape, new_static_inner_shape) = _merge_inner_shape( + self._inner_shape, self._static_inner_shape, + outer_axis - self.num_row_partitions, + inner_axis - self.num_row_partitions) + return DynamicRaggedShape( + self._row_partitions, + new_inner_shape, + dtype=self.dtype, + static_inner_shape=new_static_inner_shape) + else: + # Here, both inner_shape and row_partitions are changed. 
+ rank = self.rank + if rank is None: + raise ValueError("Cannot merge_dims of the inner shape if the " + + "dimension of inner_shape is unknown") + if outer_axis == 0: + new_inner_shape = self._alt_inner_shape(rank - inner_axis) + return DynamicRaggedShape._from_inner_shape(new_inner_shape) + else: + prefix = self._row_partitions[:outer_axis - 1] + suffix = _merge_row_partitions(self._row_partitions[outer_axis - 1:]) + new_inner_shape = self._alt_inner_shape(rank - inner_axis) + num_merged_inner = inner_axis - self.num_row_partitions + prod = _reduce_prod_patch(self._inner_shape[1:num_merged_inner + 1]) + tail_suffix = RowPartition.from_row_splits(suffix.row_splits() * prod) + return DynamicRaggedShape(prefix + (tail_suffix,), new_inner_shape) + + def with_dtype(self, dtype): + """Change the dtype of the shape.""" + if dtype == self.dtype: + return self + else: + return DynamicRaggedShape( + self.row_partitions, self.inner_shape, dtype=dtype) + + def _merge_with(self, other: "DynamicRaggedShape") -> "DynamicRaggedShape": + """Merge two shapes that are equal modulo num_row_partitions. + + The resulting num_row_partitions is the maximum of the two + num_row_partitions. + + Args: + other: a DynamicRaggedShape representing the same shape with a possibly + different number of row partitions. + + Returns: + A DynamicRaggedShape with the same shape and the maximum of the + num_row_partitions of the two shapes. 
+ """ + max_num_row_partitions = max(self.num_row_partitions, + other.num_row_partitions) + a = self._with_num_row_partitions(max_num_row_partitions) + b = other._with_num_row_partitions(max_num_row_partitions) + new_row_partitions = [ + rp_a._merge_precomputed_encodings(rp_b) + for (rp_a, rp_b) in zip(a._row_partitions, b._row_partitions) + ] + new_dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64 + + new_static_inner_shape = a._static_inner_shape.merge_with( + b._static_inner_shape) + new_inner_shape = a._inner_shape + return DynamicRaggedShape(new_row_partitions, new_inner_shape, new_dtype, + True, new_static_inner_shape) + + def _merge_with_spec( + self, other: "DynamicRaggedShape.Spec") -> "DynamicRaggedShape": + """Merge a spec with a DynamicRaggedShape.""" + # TODO(martinz): add tests for dynamic inconsistencies. + max_num_row_partitions = max(self.num_row_partitions, + other.num_row_partitions) + a = self._with_num_row_partitions(max_num_row_partitions) + b = other._with_num_row_partitions(max_num_row_partitions) + new_row_partitions = [ + rp_a._merge_with_spec(rp_b) + for (rp_a, rp_b) in zip(a._row_partitions, b._row_partitions) + ] + new_dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64 + + new_static_inner_shape = a._static_inner_shape.merge_with( + b._static_inner_shape) + new_inner_shape = a._inner_shape + return DynamicRaggedShape(new_row_partitions, new_inner_shape, new_dtype, + True, new_static_inner_shape) + + def _as_row_partitions(self): + """Returns row partitions representing this shape. + + In order to represent a shape as row partitions, the rank of the shape + must be known, and the shape must have rank at least one. + + Returns: + A list of RowPartition objects. + Raises: + ValueError, if the shape cannot be represented by RowPartitions. 
+ """ + rank = self.rank + if rank is None: + raise ValueError("rank must be known for _as_row_partitions") + elif rank < 1: + raise ValueError("rank must be >= 1 for _as_row_partitions") + fully_ragged = self._with_num_row_partitions(rank - 1) + return fully_ragged.row_partitions + + def _validate_flat_values_dynamically(self, flat_values): + """Test if flat_values have the right nvals dynamically.""" + if self.row_partitions: + assert_op = check_ops.assert_equal( + self.row_partitions[-1].nvals(), + array_ops.shape(flat_values, out_type=self.dtype)[0], + message="Last row partition does not match flat_values.") + return control_flow_ops.with_dependencies([assert_op], flat_values) + return flat_values + + def _validate_flat_values(self, flat_values): + """Test if flat_values have the right nvals.""" + if not isinstance(flat_values, tensor_lib.Tensor): + return flat_values + if self.row_partitions: + last_row_partition = self.row_partitions[-1] + flat_values_shape = flat_values.shape + if flat_values_shape is None: + return self._validate_flat_values_dynamically(flat_values) + first_dim_flat_values = flat_values_shape[0] + if isinstance(first_dim_flat_values, tensor_shape.Dimension): + first_dim_flat_values = first_dim_flat_values.value + if first_dim_flat_values is None: + return self._validate_flat_values_dynamically(flat_values) + static_nvals = last_row_partition.static_nvals + if static_nvals is None: + return self._validate_flat_values_dynamically(flat_values) + if first_dim_flat_values != static_nvals: + raise ValueError("Last row partition does not match flat_values.") + return flat_values + + def _add_row_partitions(self, flat_values, validate=False): + """Add row partitions to flat_values, if necessary. + + If the shape is truly ragged, then this adds the row_partitions. + + The shape is dense, then this just returns flat_values. + + Args: + flat_values: the flat_values of a ragged tensor with this shape, or a + dense tensor with this shape. 
+ validate: validate the flat_values have the right first dimension. + + Returns: + flat_values reshaped to have row_partitions. + """ + if self.row_partitions: + if validate: + flat_values = self._validate_flat_values(flat_values) + return ragged_tensor.RaggedTensor._from_nested_row_partitions( + flat_values, self.row_partitions, validate=False) + else: + return flat_values + + class Spec: + """A Spec for DynamicRaggedShape: similar to a static shape.""" + + def __init__(self, row_partitions: Tuple[RowPartitionSpec, ...], + static_inner_shape: tensor_shape.TensorShape, + dtype: dtypes.DType): + """Create a Spec given row partitions, a static inner shape, and a dtype. + + Args: + row_partitions: A sequence of `RowPartitionSpec`s describing how the + ragged shape is partitioned. + static_inner_shape: The static shape of the flat_values. + dtype: The DType used to encode the shape (tf.int64 or tf.int32). + """ + # Independent validation and coercion of each argument. + if not isinstance(row_partitions, Iterable): + raise TypeError("row_partitions should be an Iterable") + + row_partitions = tuple(row_partitions) + + static_inner_shape = tensor_shape.as_shape(static_inner_shape) + + dtype = dtypes.as_dtype(dtype) + + if not all(isinstance(rp, RowPartitionSpec) for rp in row_partitions): + raise TypeError( + "row_partitions should be an Iterable of RowPartitionSpecs") + + if dtype != dtypes.int32 and dtype != dtypes.int64: + raise ValueError("dtype must be tf.int32 or tf.int64") + + # All fields are now typechecked and internally consistent. 
+ for spec in row_partitions: + if spec.dtype != dtype: + raise ValueError( + f"dtype of {spec!r} is {spec.dtype!r}: expected {dtype!r}") + + row_partitions = tuple(row_partitions) + + inner_rank = static_inner_shape.rank + + if inner_rank == 0: + if row_partitions: + raise ValueError( + "If row_partitions are provided, must have inner_rank > 0") + else: + num_slices_in_dimension = [] # type: Sequence[tensor_shape.Dimension] + + # We first attempt to calculate num_slices_in_dimension through a + # forward pass, using nrows[k] = nrows[k-1] * uniform_row_length + # and other tricks. + for i in range(len(row_partitions)): + rp = row_partitions[i] + result = tensor_shape.Dimension(rp.nrows) + if i > 0: + previous_rp = row_partitions[i - 1] + result = result.merge_with(previous_rp.nvals) + result = result.merge_with(num_slices_in_dimension[-1] * + previous_rp.uniform_row_length) + num_slices_in_dimension.append(result) + # In the last step of the forward pass, + # we combine nvals and the first dimension in static_inner_shape. + if row_partitions: + last_rp = row_partitions[-1] + result = (num_slices_in_dimension[-1] * + last_rp.uniform_row_length).merge_with(last_rp.nvals) + if inner_rank is not None: + result = result.merge_with( + tensor_shape.dimension_at_index(static_inner_shape, 0)) + static_inner_shape = result + static_inner_shape[1:] + num_slices_in_dimension.append(result) + + # Now, we start a backward pass. + for i in range(len(num_slices_in_dimension) - 1, 0, -1): + num_slices_in_dimension[i - 1] = num_slices_in_dimension[ + i - 1].merge_with( + _safe_floor_div(num_slices_in_dimension[i], + row_partitions[i - 1].uniform_row_length)) + + # Finally, we construct the partitions. 
+ row_partitions = [ + RowPartitionSpec( # pylint: disable=g-complex-comprehension + nrows=num_slices_in_dimension[i].value, + uniform_row_length=rp.uniform_row_length, + nvals=num_slices_in_dimension[i + 1].value, + dtype=rp.dtype) for i, rp in enumerate(row_partitions) + ] + + self._static_inner_shape = static_inner_shape + self._inner_shape = tensor_lib.TensorSpec([inner_rank], dtype=dtype) + self._row_partitions = row_partitions + + def __repr__(self): + return ( + f"DynamicRaggedShape.Spec(row_partitions={self._row_partitions!r}, " + + f"static_inner_shape={self._static_inner_shape!r}, " + + f"dtype={self.dtype!r})") + + @classmethod + def from_value(cls, value: Any) -> "DynamicRaggedShape.Spec": + """Create a Spec from a DynamicRaggedShape.""" + # super().from_value(...) creates an object, but there is no validation. + # No methods can be trusted on the object, just the properties. + initial = super(DynamicRaggedShape.Spec, cls).from_value(value) + + # However, since value is a DynamicRaggedShape, we + # can guarantee that initial._inner_shape.shape.rank == 1 + + # Moreover, if inner_shape.shape[0] is not None, then + # static_inner_shape.rank is not None. + + return DynamicRaggedShape.Spec( + row_partitions=initial._row_partitions, + static_inner_shape=initial._static_inner_shape, + dtype=initial._inner_shape.dtype) + + # TODO(martinz): it is unclear what the default uniformity of RowPartitions + # should be, so I am moving this to experimental until we figure it out. + # Also, while I have specified this is meant to represent a shape of a + # proper Tensor instead of a RaggedTensor, this is also subject to + # interpretation. + @classmethod + def _from_tensor_shape(cls, shape: Any, num_row_partitions: int, + dtype: dtypes.DType) -> "DynamicRaggedShape.Spec": + """Creates a `DynamicRaggedShape.Spec` corresponding to a `tf.TensorShape`. + + It is assumed that this is a `tf.TensorShape` coming from a + `tf.TensorSpec`, not from `RaggedTensor.shape`. 
+ + In addition to the shape, we need to know the number of row partitions, + and the dtype used in the shape (tf.int32 or tf.int64). + + Within the dimensions that are partitioned, all dimensions are assumed + to be uniform. + + Args: + shape: a TensorShape. + num_row_partitions: the ragged rank of the RaggedShape. + dtype: the dtype of the shape (not the tensor); tf.int64 or tf.int32. + + Returns: + a DynamicRaggedShape.Spec representing a TensorShape. + """ + if dtype != dtypes.int32 and dtype != dtypes.int64: + raise ValueError("dtype must be tf.int32 or tf.int64") + + shape = tensor_shape.as_shape(shape) + if shape.rank is None: + row_partitions = [ + RowPartitionSpec(dtype=dtype) for _ in range(num_row_partitions) + ] + return DynamicRaggedShape.Spec( + row_partitions=row_partitions, + static_inner_shape=tensor_shape.TensorShape(None), + dtype=dtype) + + if shape.rank <= 1: + # Create a scalar or vector shape. + if num_row_partitions: + raise ValueError("num_row_partitions should be zero " + + "if shape is a scalar or vector.") + return DynamicRaggedShape.Spec( + row_partitions=[], static_inner_shape=shape, dtype=dtype) + + if shape.rank <= num_row_partitions: + raise ValueError("num_row_partitions must be less than rank") + + num_elements_so_far = tensor_shape.dimension_value(shape[0]) + rp_specs = [] + for i in range(num_row_partitions): + current_dim = tensor_shape.dimension_value(shape[i + 1]) + if current_dim is None or num_elements_so_far is None: + nvals = None + else: + nvals = num_elements_so_far * current_dim + rp_specs.append( + RowPartitionSpec( + nrows=num_elements_so_far, + nvals=nvals, + uniform_row_length=current_dim, + dtype=dtype)) + num_elements_so_far = nvals + + static_inner_shape = tensor_shape.TensorShape( + [num_elements_so_far]) + shape[num_row_partitions + 1:] + return DynamicRaggedShape.Spec( + row_partitions=rp_specs, + static_inner_shape=static_inner_shape, + dtype=dtype) + + @classmethod + def _from_spec( + cls, + spec: 
Union["DynamicRaggedShape.Spec", ragged_tensor.RaggedTensorSpec, + tensor_lib.TensorSpec], + dtype: dtypes.DType = dtypes.int64) -> "DynamicRaggedShape.Spec": + """Create a TypeSpec for the shape of an object with a given TypeSpec. + + I.e., if `x_spec = tf.type_spec_from_value(x)`, then + `DynamicRaggedShape.from_spec(x_spec)` returns a TypeSpec compatible with + `tf.type_spec_from_value(tf.shape(x))`. + + >>> rt = tf.ragged.constant([[1, 2], [3], [4, 5, 6]]) + >>> rt_spec = tf.type_spec_from_value(rt) + >>> rt_shape = DynamicRaggedShape.from_tensor(rt) + + >>> shape_spec_1 = tf.type_spec_from_value(rt_shape) + >>> shape_spec_2 = DynamicRaggedShape.Spec._from_spec(rt_spec) + >>> assert shape_spec_1.is_compatible_with(shape_spec_2) + + Args: + spec: a Spec of a Tensor or RaggedTensor. + dtype: the default dtype (if necessary). + + Returns: + A Spec of the shape of a Tensor or RaggedTensor. + + """ + # TODO(martinz): Add StructuredTensor.Spec when its easy. + if isinstance(spec, DynamicRaggedShape.Spec): + return spec + elif isinstance(spec, ragged_tensor.RaggedTensorSpec): + return cls._from_tensor_shape(spec.shape, spec.ragged_rank, + spec.row_splits_dtype) + elif isinstance(spec, tensor_lib.TensorSpec): + return cls._from_tensor_shape( + shape=spec.shape, num_row_partitions=0, dtype=dtype) + + @property + def dtype(self) -> dtypes.DType: + return self._inner_shape.dtype + + @property + def inner_rank(self) -> Optional[int]: + if self._static_inner_shape.rank is not None: + return self._static_inner_shape.rank + if self._inner_shape.shape.rank is None: + return None + return tensor_shape.dimension_value(self._inner_shape.shape[0]) + + @property + def num_row_partitions(self) -> int: + return len(self._row_partitions) + + @property + def rank(self) -> Optional[int]: + inner_rank = self.inner_rank + return None if inner_rank is None else inner_rank + self.num_row_partitions + + def _dimension(self, index: int) -> Optional[int]: + """Get the size of dimension index, 
if known statically.""" + if index == 0: + if self._row_partitions: + return self._row_partitions[0].nrows + elif self.inner_rank is None: + return None + elif self.inner_rank == 0: + raise ValueError("Index out of range: 0.") + else: + return tensor_shape.dimension_value(self._static_inner_shape[0]) + if index <= len(self._row_partitions): + return self._row_partitions[index - 1].uniform_row_length + + relative_index = index - self.num_row_partitions + + if self.inner_rank is None: + return None + elif self.inner_rank <= relative_index: + raise ValueError(f"Index out of range: {index}.") + else: + return tensor_shape.dimension_value( + self._static_inner_shape[relative_index]) + + def _num_slices_in_dimension(self, axis: int) -> Optional[int]: + """The total size of a dimension (like nvals). + + This is a static version of DynamicRaggedShape._num_slices_in_dimension() + + Example: + + ``` + shape = DynamicRaggedShape.Spec( + _row_partitions=[ + RowPartitionSpec(nrows=3, nvals=14, dtype=tf.int32) + RowPartitionSpec(nrows=14, nvals=25, dtype=tf.int32) + + ], + _static_inner_shape=tf.TensorShape([25, 3, 4]), + _inner_shape=tf.TensorSpec(tf.TensorShape([3]), dtype=tf.int32)) + shape._num_slices_in_dimension(0) = 3 + shape._num_slices_in_dimension(1) = 14 + shape._num_slices_in_dimension(2) = 25 + shape._num_slices_in_dimension(3) = 3 + shape._num_slices_in_dimension(4) = 4 + shape._num_slices_in_dimension(-2) = 3 + ``` + + Args: + axis: the last dimension to include. + + Returns: + the number of values in a dimension. + """ + if not isinstance(axis, int): + raise TypeError("axis must be an integer") + axis = array_ops.get_positive_axis(axis, self.rank, ndims_name="rank") + + if axis == 0: + return self._dimension(0) + if axis <= self.num_row_partitions: + # TODO(martinz): use nvals OR nrows, whichever is defined. 
        return self._row_partitions[axis - 1].nvals
      # axis indexes into the inner (dense) shape; count elements of the
      # static prefix up to and including that dimension.
      remainder = axis - (self.num_row_partitions - 1)
      head_inner_shape = self._static_inner_shape[:remainder]
      return head_inner_shape.num_elements()

    def with_dtype(self, dtype: dtypes.DType) -> "DynamicRaggedShape.Spec":
      """Return the same spec, but with a different DType."""
      new_rp_specs = [rp.with_dtype(dtype) for rp in self._row_partitions]
      return DynamicRaggedShape.Spec(
          row_partitions=new_rp_specs,
          static_inner_shape=self._static_inner_shape,
          dtype=dtype)

    def _merge_with(
        self, other: "DynamicRaggedShape.Spec") -> "DynamicRaggedShape.Spec":
      """Merges all information between two specs.

      Specs are expected to represent the same information modulo
      num_row_partitons.

      If the specs are of different ranks, then fail.

      Args:
        other: another Spec of the same rank.

      Returns:
        a Spec with the union of information.
      """
      # Normalize both specs to the same number of row partitions so their
      # row partitions and inner shapes can be merged position-by-position.
      max_num_row_partitions = max(self.num_row_partitions,
                                   other.num_row_partitions)
      a = self._with_num_row_partitions(max_num_row_partitions)
      b = other._with_num_row_partitions(max_num_row_partitions)

      new_rp = [
          a._merge_with(b)
          for (a, b) in zip(a._row_partitions, b._row_partitions)
      ]

      new_static_inner_shape = a._static_inner_shape.merge_with(
          b._static_inner_shape)

      # Result is int32 only when both inputs are int32; otherwise int64.
      dtype = b.dtype if (a.dtype == dtypes.int32) else dtypes.int64

      return DynamicRaggedShape.Spec(
          new_rp, new_static_inner_shape, dtype=dtype)

    def _with_num_row_partitions(
        self, new_num_row_partitions: int) -> "DynamicRaggedShape.Spec":
      """Change the number of row partitions in the spec."""
      rank = self.rank
      if rank is None:
        raise ValueError(
            "Changing num_row_partitions with unknown rank unsupported")
      if new_num_row_partitions > max(rank - 1, 0):
        raise ValueError("Number of row partitions too large")
      if new_num_row_partitions < 0:
        raise ValueError("Number of row partitions negative")
      if self.num_row_partitions == new_num_row_partitions:
        return self
      elif self.num_row_partitions < new_num_row_partitions:
        # TODO(martinz): Consider swapping.
        # Carve additional row partitions out of the static inner shape.
        rp_delta = new_num_row_partitions - self.num_row_partitions
        tail_shape = DynamicRaggedShape.Spec._from_tensor_shape(
            self._static_inner_shape, rp_delta, self.dtype)
        return DynamicRaggedShape.Spec(
            row_partitions=self._row_partitions + tail_shape._row_partitions,
            static_inner_shape=tail_shape._static_inner_shape,
            dtype=self.dtype)
      else:
        # Fold trailing (necessarily uniform) row partitions back into the
        # static inner shape.
        assert self.num_row_partitions > new_num_row_partitions
        new_row_partitions = self._row_partitions[:new_num_row_partitions]
        last_row_partition = new_row_partitions[-1]
        old_row_partitions = self._row_partitions[new_num_row_partitions:]
        new_static_inner_shape = (
            tensor_shape.TensorShape(
                [last_row_partition.nvals] +
                [x.uniform_row_length for x in old_row_partitions]) +
            self._static_inner_shape[1:])
        return DynamicRaggedShape.Spec(new_row_partitions,
                                       new_static_inner_shape, self.dtype)

    def _set_rank_if_unknown(self, new_rank: int) -> "DynamicRaggedShape.Spec":
      """Ensures this has a known rank at least new_rank."""
      if new_rank is None:
        raise TypeError("new_rank is None, but expected int")
      if new_rank < 0:
        raise ValueError("Rank must be non-negative")
      current_rank = self.rank
      if current_rank is not None and current_rank < new_rank:
        raise ValueError(
            "Rank is {current_rank}, expected at least {new_rank}.".format(
                current_rank=current_rank, new_rank=new_rank))

      # Rank already known (and >= new_rank): nothing to do.
      if current_rank is not None:
        return self

      if self._row_partitions:
        new_inner_rank = max(new_rank - self.num_row_partitions, 1)
        # The leading inner dimension is pinned by the last row partition.
        first_dim = self._row_partitions[-1].nvals
        static_inner_shape = tensor_shape.TensorShape([first_dim] + [None] *
                                                      (new_inner_rank - 1))
      else:
        static_inner_shape = tensor_shape.TensorShape([None] * new_rank)

      return DynamicRaggedShape.Spec(
          row_partitions=self._row_partitions,
          static_inner_shape=static_inner_shape,
          dtype=self.dtype)

    def _truncate(self, new_rank: int) -> "DynamicRaggedShape.Spec":
      """Truncate a ragged shape spec.

      For example, if the original spec s was for a shape:
      [3, [4, 1], 2, 7]

      Then truncate_dynamic_ragged_shape_spec(s, 3) is a spec for:
      [3, [4, 1], 2]

      Args:
        new_rank: the new rank

      Returns:
        A truncated DynamicRaggedShape.Spec.
      """
      if self.rank is None:
        # Pin the rank first, then truncate the now-known-rank spec.
        return self._set_rank_if_unknown(new_rank)._truncate(new_rank)

      if new_rank == 0:
        return DynamicRaggedShape.Spec._from_tensor_shape([], 0, self.dtype)

      if new_rank == 1:
        vector_size = self._dimension(0)
        return DynamicRaggedShape.Spec._from_tensor_shape([vector_size], 0,
                                                          self.dtype)

      if new_rank < self.num_row_partitions + 1:
        # Truncation falls inside the ragged dimensions: drop the trailing
        # row partitions and rebuild the inner shape from the last kept one.
        new_row_partitions = self._row_partitions[:new_rank - 1]
        new_static_inner_shape = tensor_shape.TensorShape(
            [new_row_partitions[-1].nvals])
        return DynamicRaggedShape.Spec(
            row_partitions=new_row_partitions,
            static_inner_shape=new_static_inner_shape,
            dtype=self.dtype)
      else:
        # Truncation falls inside the dense inner shape.
        remainder = new_rank - self.num_row_partitions
        new_static_inner_shape = self._static_inner_shape[:remainder]
        return DynamicRaggedShape.Spec(
            row_partitions=self._row_partitions,
            static_inner_shape=new_static_inner_shape,
            dtype=self.dtype)

    def _to_tensor_shape(self):
      """Get a tensor shape corresponding to this type."""
      alt = self
      if alt._static_inner_shape.rank is None:
        return tensor_shape.TensorShape(None)
      if alt._static_inner_shape.rank == 0:
        assert not alt._row_partitions
        return alt._static_inner_shape
      # Ragged dimensions surface as their uniform_row_length (None if truly
      # ragged), followed by the dense inner dimensions.
      prefix = [alt._dimension(0)]
      prefix.extend([rp.uniform_row_length for rp in alt._row_partitions])
      suffix = alt._static_inner_shape[1:]
      return tensor_shape.TensorShape(prefix) + suffix


def broadcast_dynamic_shape(shape_x: DynamicRaggedShape,
                            shape_y: DynamicRaggedShape) -> DynamicRaggedShape:
  """Returns the shape formed by broadcasting two shapes to be compatible.

  1. If shape_x and shape_y both have row_partitions, then fail if their dtypes
     don't match.
  2.
     If neither has row_partitions and they have different dtypes,
     go with int64.
  3. If one has row_partitions, go with that dtype.

  Args:
    shape_x: A `DynamicRaggedShape`
    shape_y: A `DynamicRaggedShape`

  Returns:
    A `DynamicRaggedShape`.
  Raises:
    ValueError: If `shape_x` and `shape_y` are not broadcast-compatible.
  """
  if not isinstance(shape_x, DynamicRaggedShape):
    raise TypeError("shape_x must be a DynamicRaggedShape")
  if not isinstance(shape_y, DynamicRaggedShape):
    raise TypeError("shape_y must be a DynamicRaggedShape")

  # The extended variant also builds the two broadcasters; only the resulting
  # shape is needed here.
  return broadcast_dynamic_shape_extended(shape_x, shape_y)[0]


def broadcast_to(rt_input, shape: DynamicRaggedShape):
  """Broadcasts a potentially ragged tensor to a ragged shape.

  Tiles `rt_input` as necessary to match the given shape.

  Behavior is undefined if `rt_input` is not broadcast-compatible with `shape`.

  Args:
    rt_input: The potentially ragged tensor to broadcast.
    shape: A `DynamicRaggedShape`

  Returns:
    A potentially ragged tensor whose values are taken from
    `rt_input`, and whose shape matches `shape`.
  """
  if not isinstance(shape, DynamicRaggedShape):
    raise TypeError("shape must be a DynamicRaggedShape")
  rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)
  origin_shape = None
  if ragged_tensor.is_ragged(rt_input):
    # Ragged input: the input's row_splits dtype must agree with (or, when
    # the target is fully dense, dictate) the target shape's dtype.
    if shape.num_row_partitions != 0:
      if rt_input.row_splits.dtype != shape.dtype:
        raise ValueError("Cannot coerce row_splits.dtype")
    else:
      shape = shape.with_dtype(rt_input.row_splits.dtype)
    origin_shape = DynamicRaggedShape.from_tensor(rt_input)
  else:
    # Dense input: adopt the target's dtype, defaulting both to int64 when
    # the target has no row partitions.
    if shape.num_row_partitions != 0:
      origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=shape.dtype)
    else:
      origin_shape = DynamicRaggedShape.from_tensor(
          rt_input, dtype=dtypes.int64)
      shape = shape.with_dtype(dtype=dtypes.int64)

  broadcaster = _get_broadcaster(origin_shape, shape)
  return broadcaster.broadcast(rt_input)


def broadcast_dynamic_shape_extended(
    a: DynamicRaggedShape, b: DynamicRaggedShape
):  # -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]
  """Gets the smallest shape to which a and b can broadcast.

  In order to create the smallest shape, one must also do most of the
  work to figure out how to transform from the shapes given. Thus, in addition
  to returning the shape, it also creates transformations from the
  original shapes to the result.

  This is the equivalent of:

  c = broadcast_dynamic_shape(a, b)
  ac = get_broadcaster(a, c)
  bc = get_broadcaster(b, c)
  return (c, ac, bc)

  Args:
    a: a DynamicRaggedShape
    b: a DynamicRaggedShape

  Returns:
    A triple of a shape and two broadcasters.
  """
  # Reconcile row-split dtypes before any shape work (see the function
  # docstring rules 1-3).
  if a.row_partitions and b.row_partitions:
    if a.dtype != b.dtype:
      raise ValueError("Dtypes don't match")
  elif a.dtype != b.dtype:
    if a.row_partitions:
      b = b.with_dtype(a.dtype)
    elif b.row_partitions:
      a = a.with_dtype(b.dtype)
    else:
      a = a.with_dtype(dtypes.int64)
      b = b.with_dtype(dtypes.int64)

  if (a.rank is None or b.rank is None):
    raise ValueError("Unable to broadcast: unknown rank")
  elif a.rank == 0:
    # A scalar broadcasts to the other shape unchanged.
    return (b, _Broadcaster(a, b, []), _get_identity_broadcaster(b))
  elif b.rank == 0:
    return (a, _get_identity_broadcaster(a), _Broadcaster(b, a, []))
  elif a.rank == 1 and b.rank == 1:
    [a_layer, b_layer,
     target] = _broadcast_dynamic_shape_one_layer(a.inner_shape, b.inner_shape)
    target_shape = DynamicRaggedShape._from_inner_shape(target)  # pylint: disable=protected-access
    return (target_shape, _Broadcaster(a, target_shape, [a_layer]),
            _Broadcaster(b, target_shape, [b_layer]))

  if a.rank > b.rank:
    # The helper requires rank(a) <= rank(b); swap and un-swap the result.
    (c, bc, ac) = _broadcast_dynamic_shape_extended_helper(b, a)  # pylint: disable=arguments-out-of-order

    return (c, ac, bc)

  return _broadcast_dynamic_shape_extended_helper(a, b)


def _row_partitions_identical(shape_a, shape_b):
  """Returns True iff all row_partitions in shapes are identical."""
  return ((shape_a.num_row_partitions == shape_b.num_row_partitions) and all(
      a is b for a, b in zip(shape_a.row_partitions, shape_b.row_partitions)))


# TODO(martinz): Preserve shapes better (see CL/414806185)
@dispatch.dispatch_for_binary_elementwise_apis(ragged_tensor.RaggedOrDense,
                                               ragged_tensor.RaggedOrDense)
def ragged_binary_elementwise_op_impl(op, x, y):
  """Binary elementwise api handler for RaggedTensors."""
  x_is_ragged = ragged_tensor.is_ragged(x)
  y_is_ragged = ragged_tensor.is_ragged(y)

  # Convert args to tensors.
  x = ragged_tensor.convert_to_tensor_or_ragged_tensor(
      x, preferred_dtype=(y.dtype if y_is_ragged else None))
  y = ragged_tensor.convert_to_tensor_or_ragged_tensor(
      y, preferred_dtype=x.dtype)

  if x_is_ragged and y_is_ragged:
    x, y = ragged_tensor.match_row_splits_dtypes(x, y)

  # Full shape-aware broadcasting is needed when both are ragged, or when the
  # dense operand's rank reaches into the ragged operand's partitioned dims.
  if ((x_is_ragged and y_is_ragged) or
      (x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or
      (y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)):
    shape_x = DynamicRaggedShape.from_tensor(x)
    shape_y = DynamicRaggedShape.from_tensor(y)
    if shape_x.dtype != shape_y.dtype:
      # Only coerce the dense side's shape dtype; ragged dtypes were already
      # matched above.
      if not x_is_ragged:
        shape_x = shape_x.with_dtype(shape_y.dtype)
      elif not y_is_ragged:
        shape_y = shape_y.with_dtype(shape_x.dtype)

    if _row_partitions_identical(shape_x, shape_y):
      # At this point, both x and y must be ragged.
      # Fast path: identical partitions mean the op can be applied directly
      # to the flat values without re-validation.
      return shape_x._add_row_partitions(  # pylint: disable=protected-access
          op(x.flat_values, y.flat_values),
          validate=False)

    (shape_z, bcast_xz,
     bcast_yz) = broadcast_dynamic_shape_extended(shape_x, shape_y)
    x_new_flat = bcast_xz.broadcast_flat_values(x, inner_dimensions=False)
    y_new_flat = bcast_yz.broadcast_flat_values(y, inner_dimensions=False)
    z_flat = op(x_new_flat, y_new_flat)
    return shape_z._add_row_partitions(z_flat, validate=True)  # pylint: disable=protected-access

  # Cheap path: apply the op to flat values and reuse the ragged structure.
  x_values = x.flat_values if ragged_tensor.is_ragged(x) else x
  y_values = y.flat_values if ragged_tensor.is_ragged(y) else y
  mapped_values = op(x_values, y_values)
  if isinstance(mapped_values, bool):
    return mapped_values  # Special case for tensor_equals.
  if ragged_tensor.is_ragged(x):
    return x.with_flat_values(mapped_values)
  else:
    return y.with_flat_values(mapped_values)


@dispatch.dispatch_for_binary_elementwise_assert_apis(
    ragged_tensor.RaggedOrDense, ragged_tensor.RaggedOrDense)
def ragged_binary_elementwise_assert_op_impl(op, x, y):
  """Binary elementwise assert api handler for RaggedTensors.

  This handles binary assert operations for ragged tensors. Compared with
  `ragged_binary_elementwise_op_impl`, this handler does not compute a ragged
  tensor as output. Instead, it applies the assert operation `op` to input
  tensors based on their ragged shapes and flat_values, and returns the result
  of the assertion operation.

  Args:
    op: a binary assert operation on Tensors.
    x: something that can be coerced to a Tensor or RaggedTensor.
    y: something that can be coerced to a Tensor or RaggedTensor.

  Returns:
    the result of the assertion operation.

  """
  x_is_ragged = ragged_tensor.is_ragged(x)
  y_is_ragged = ragged_tensor.is_ragged(y)

  # Convert args to tensors.
  x = ragged_tensor.convert_to_tensor_or_ragged_tensor(
      x, preferred_dtype=(y.dtype if y_is_ragged else None))
  y = ragged_tensor.convert_to_tensor_or_ragged_tensor(
      y, preferred_dtype=x.dtype)

  if x_is_ragged and y_is_ragged:
    x, y = ragged_tensor.match_row_splits_dtypes(x, y)

  # Same branching structure as ragged_binary_elementwise_op_impl, but the
  # result of `op` is returned directly (no ragged structure is rebuilt).
  if ((x_is_ragged and y_is_ragged) or
      (x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or
      (y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)):
    shape_x = DynamicRaggedShape.from_tensor(x)
    shape_y = DynamicRaggedShape.from_tensor(y)
    if shape_x.dtype != shape_y.dtype:
      if not x_is_ragged:
        shape_x = shape_x.with_dtype(shape_y.dtype)
      elif not y_is_ragged:
        shape_y = shape_y.with_dtype(shape_x.dtype)

    if _row_partitions_identical(shape_x, shape_y):
      # At this point, both x and y must be ragged.
      return op(x.flat_values, y.flat_values)

    (_, bcast_xz, bcast_yz) = broadcast_dynamic_shape_extended(shape_x, shape_y)
    x_new_flat = bcast_xz.broadcast_flat_values(x, inner_dimensions=False)
    y_new_flat = bcast_yz.broadcast_flat_values(y, inner_dimensions=False)
    return op(x_new_flat, y_new_flat)

  x_values = x.flat_values if ragged_tensor.is_ragged(x) else x
  y_values = y.flat_values if ragged_tensor.is_ragged(y) else y
  return op(x_values, y_values)


def _find_dtype_helper(value, preferred):
  """Helper for _find_dtype."""
  if preferred is not None:
    return preferred
  elif isinstance(value, RowPartition):
    return value.dtype
  elif isinstance(value, dtypes.DType):
    return value
  elif isinstance(value, int):
    return None
  elif isinstance(value, list):
    return None
  elif isinstance(value, tuple):
    return None
  elif isinstance(value, core.Tensor):
    return value.dtype
  # Fallback: anything else is expected to carry a .dtype attribute
  # (e.g. a DynamicRaggedShape).
  return value.dtype


def _find_dtype(value, preferred):
  """Returns the preferred dtype of value or preferred if preferred != None.

  This is used as an operator to pass over multiple objects in decreasing order
  of priority until there is a preferred dtype for one. For example, if you were
  adding three tensor-ish things (some tensors, some lists), and needed a
  preferred dtype, you could use this as:

  def adding(a, b, c, dtype = None):
    dtype = _find_dtype(a, dtype)
    dtype = _find_dtype(b, dtype)
    dtype = _find_dtype(c, dtype)
    if dtype is None:
      dtype = tf.float32
    ...Code continues here...

  Args:
    value: a list, value, RowPartition, or tensor.
    preferred: a given dtype. If not None, this will be returned.

  Returns:
    an optional dtype.
  """
  result = _find_dtype_helper(value, preferred)
  # Row-partition dtypes are restricted to int32/int64 throughout this module.
  if (result == dtypes.int64 or result == dtypes.int32 or result is None):
    return result
  raise ValueError("Illegal dtype: " + str(result))


def _find_dtype_iterable(
    iterable: Iterable[Any],
    dtype: Optional[dtypes.DType]) -> Optional[dtypes.DType]:
  """Find the preferred dtype of a list of objects.

  This will go over the iterable, and use the first object with a preferred
  dtype. The dtype passed has highest priority if it is not None.

  Args:
    iterable: an iterable with things that might have a dtype.
    dtype: an overriding dtype, or None.

  Returns:
    an optional dtype.
  """
  if dtype is not None:
    return dtype
  for x in iterable:
    dtype = _find_dtype(x, dtype)
  return dtype


class _LayerBroadcaster(abc.ABC):
  """A broadcaster of a single layer.

  Although this class does not literally contain a gather_index, the reference
  implementation is defined through a gather_index. Thus, any subclasses should
  first define the gather_index property. Other functions can be overridden
  for optimization, but it should not change the behavior.
  """

  @property
  @abc.abstractmethod
  def gather_index(self):
    """Returns a 1D tensor.

    The size of the 1D tensor is equal to the destination size.

    The ith element of the result is the index of the source of the ith element.
    """
    pass

  @property
  def dtype(self):
    """Returns the dtype of the broadcast."""
    return self.gather_index.dtype

  @abc.abstractmethod
  def with_dtype(self, dtype):
    """Returns an identical _LayerBroadcaster with a different dtype."""
    pass

  def __repr__(self):
    return str(self.gather_index)

  @classmethod
  def from_gather_index(cls, gather_index):
    """Create a broadcaster from a gather_index."""
    return _GatherLayerBroadcaster(gather_index)

  @classmethod
  def first_layer(cls, nrows_source, nrows_target):
    """Create a broadcaster from a gather_index."""
    gather_index = _first_layer_gather_index(nrows_source, nrows_target)
    return _LayerBroadcaster.from_gather_index(gather_index)

  @classmethod
  def get_singleton_broadcaster(cls, target_size):
    """Broadcast from 1 element to target_size elements."""
    # Every destination element maps back to source index 0.
    return _LayerBroadcaster.from_gather_index(
        array_ops.zeros(target_size, dtype=target_size.dtype))

  @abc.abstractmethod
  def with_dependencies(self, checks):
    """Add dependencies to a _LayerBroadcaster.

    Args:
      checks: a list of ops that need to be run before any tensors from the
        Broadcaster are used.

    Returns:
      a copy of this _LayerBroadcaster with dependencies added.
    """
    pass

  @classmethod
  def get_identity_broadcaster(cls, nvals, dtype=None):
    """Create an identity broadcaster.

    TODO(martinz): an identity broadcaster can be far more efficient than a
    generic broadcaster. Add an optimized implementation.
    Args:
      nvals: the number of values for the broadcaster.
      dtype: the dtype of the broadcaster, or None to use the dtype of nvals.

    Returns:
      an identity broadcaster from [0....nvals-1] to [0...nvals-1]
    """
    return _GatherLayerBroadcaster(math_ops.range(nvals, dtype=dtype))

  def broadcast_tensor(self, tensor):
    """Broadcast from a dense tensor.
    It is assumed that the first axis of the dense tensor is indexed by the
    source shape, and at the end, the first axis of the dense tensor is
    indexed by the destination shape.

    Args:
      tensor: a dense tensor.

    Returns:
      A dense tensor.
    """
    return array_ops.gather(tensor, self.gather_index)

  def dest_nrows(self):
    """Return the number of rows in the resulting gather, or None if tiling."""
    return math_ops.cast(
        array_ops.shape(self.gather_index)[0], dtype=self.dtype)

  def broadcast_row_partition(self, rp):
    """Return a new shape where the rows are broadcasted.

        *--self--->*
        |          |
        rp       result
        |          |
        V          V
        *--------->*

    This is equivalent to:
      return RowPartition.from_row_lengths(self.broadcast(rp.row_lengths()))

    However, if the shape has uniform row length, then that property is
    maintained.

    Args:
      rp: a row partition.

    Returns:
      a RowPartition representing a broadcast version of this row partition.
    """
    if not rp.is_uniform():
      return RowPartition.from_row_lengths(
          self.broadcast_tensor(rp.row_lengths()))
    else:
      # Preserve uniformity rather than degrading to row_lengths.
      return RowPartition.from_uniform_row_length(
          rp.uniform_row_length(),
          nvals=rp.uniform_row_length() * self.dest_nrows(),
          nrows=self.dest_nrows())

  def next_layer(self, original_rp, broadcast_rp):
    r"""Create the next layer gather_index whether or not a broadcast happens.

       *---------self------->*
       |                     |
    original_rp           broadcast_rp
       |                     |
      \|/                   \|/
       *--next_broadcaster-->*
    Args:
      original_rp: the original row partition.
      broadcast_rp: the target row partition.

    Returns:
      the gather_index for next_broadcaster.

    """
    gather_index = _next_layer_gather_index(self, original_rp, broadcast_rp)
    return _LayerBroadcaster.from_gather_index(gather_index)


class _GatherLayerBroadcaster(_LayerBroadcaster):
  """Implements _LayerBroadcaster with an explicit gather_index.

  For example, suppose that the source shape is:
  [*],[*,*]
  And the target shape is:
  [*],[*,*],[*],[*,*]
  Then, this can be represented with a map:
  [0,1,2,0,1,2]

  """

  def __init__(self, gather_index):
    gather_index = ops.convert_to_tensor(gather_index)
    if (gather_index.dtype != dtypes.int64 and
        gather_index.dtype != dtypes.int32):
      raise ValueError("gather_index must be int64 or int32")
    self._gather_index = gather_index

  @property
  def gather_index(self):
    return self._gather_index

  def with_dtype(self, dtype):
    return _GatherLayerBroadcaster(math_ops.cast(self._gather_index, dtype))

  def with_dependencies(self, checks):
    new_gather_index = control_flow_ops.with_dependencies(
        checks, self._gather_index)
    return _GatherLayerBroadcaster(new_gather_index)


class _Broadcaster:
  """A _Broadcaster represents a transformation from one shape to another.

  It provides a transform for each axis of the source shape to the
  corresponding axis of the destination shape.

  """

  def __init__(self,
               source_shape,
               target_shape,
               layer_broadcasters,
               dtype=None):
    """Create a broadcaster.

    Do not call directly.
    The source_shape, target_shape, and layer_broadcasters are converted
    to have the same dtype.

    Note: source_shape.rank and target_shape.rank must be known.
    Args:
      source_shape: the source DynamicRaggedShape
      target_shape: the target DynamicRaggedShape
      layer_broadcasters: List[_LayerBroadcaster] of length source_shape.rank.
      dtype: the preferred dtype of the broadcaster.

    Raises:
      TypeError: if the input types don't match.
    """
    if not isinstance(source_shape, DynamicRaggedShape):
      raise TypeError("source_shape is not a DynamicRaggedShape")
    if not isinstance(target_shape, DynamicRaggedShape):
      raise TypeError("target_shape is not a DynamicRaggedShape")
    if not isinstance(layer_broadcasters, list):
      raise TypeError("layer_broadcasters not a list: " +
                      str(layer_broadcasters))
    for bc in layer_broadcasters:
      if not isinstance(bc, _LayerBroadcaster):
        raise TypeError("Not a LayerBroadcaster: " + str(bc))

    # Resolve a single dtype (falling back to int64) and coerce all parts
    # of the broadcaster to it.
    dtype = _find_dtype(source_shape, dtype)
    dtype = _find_dtype(target_shape, dtype)
    dtype = _find_dtype_iterable(layer_broadcasters, dtype)
    dtype = _find_dtype(dtypes.int64, dtype)
    self._source_shape = source_shape.with_dtype(dtype)
    self._target_shape = target_shape.with_dtype(dtype)
    self._layer_broadcasters = [x.with_dtype(dtype) for x in layer_broadcasters]

  def __repr__(self):
    return ("{src_shape:" + str(self._source_shape) + ", target_shape:" +
            str(self._target_shape) + " layer_broadcasters: " +
            str(self._layer_broadcasters) + "}")

  def with_dtype(self, dtype):
    """Return a copy of this Broadcaster with a different dtype."""
    return _Broadcaster(self._source_shape, self._target_shape,
                        self._layer_broadcasters, dtype)

  @property
  def source_shape(self):
    return self._source_shape

  @property
  def target_shape(self):
    return self._target_shape

  @property
  def dtype(self):
    return self._source_shape.dtype

  def _target_inner_shape_int32(self):
    # broadcast_to requires an int32 shape in some paths; downcast if needed.
    new_inner_shape = self.target_shape.inner_shape
    if new_inner_shape.dtype == dtypes.int64:
      new_inner_shape = math_ops.cast(new_inner_shape, dtype=dtypes.int32)
    return new_inner_shape

  # pylint:disable=protected-access
  def broadcast_flat_values(self, rt, inner_dimensions=True):
    """flat_values of a ragged tensor broadcast to target_shape.

    If inner_dimensions==True, then the result is a dense tensor with shape
    target_shape.inner_shape, the flat values of the broadcasted shape.

    If you add target_shape.row_partitions, you will get the full broadcasted
    shape.

    If inner_dimensions==False, the result is a dense tensor that satsifies
    certain properties:
    1. broadcast_to(result, target_shape.inner_shape) will give the result
       if inner_dimensions==True.
    2. Either (a) (result.rank < target_shape.inner_rank)
       or (b) (result.shape[0] == target_shape.inner_shape[0]).
    3. result.rank = min(target_shape.inner_rank, rt.rank)
    4. For i < target_shape.inner_rank - 1, and i < rt.rank,
       and if rt.shape[-i]!=1, then result.shape[-i]=target_shape[-i].
    Args:
      rt: a ragged or dense tensor.
      inner_dimensions: if true, broadcast the inner dimensions as well.

    Returns:
      a dense tensor
    """
    if ragged_tensor.is_ragged(rt):
      rt = rt.flat_values
    # If rt was a regular tensor, it is its own flat_values.
    if self.target_shape.rank == 0:
      return rt
    inner_rank = self.target_shape.inner_rank
    if inner_rank > self._source_shape.rank:
      # The dense rank is larger than the whole shape. So, we make the shape
      # dense.
      if self.source_shape.num_row_partitions > 0:
        rt = array_ops.reshape(
            rt, self.source_shape._alt_inner_shape(self.source_shape.rank))
      # rt.rank == self._source_shape.rank < inner_rank
      # Here, property 2a holds.
      if inner_dimensions:
        return array_ops.broadcast_to(rt, self._target_inner_shape_int32())
      return rt
    else:
      if self._source_shape.inner_rank != inner_rank:
        rt = array_ops.reshape(rt,
                               self._source_shape._alt_inner_shape(inner_rank))  # pylint:disable=protected-access
      # After the reshape, rt is flat_values with inner_rank.
      flat_broadcaster = self._layer_broadcasters[-inner_rank]
      rt = flat_broadcaster.broadcast_tensor(rt)
      # Here, property 2b holds.
      if inner_dimensions:
        rt = array_ops.broadcast_to(rt, self._target_inner_shape_int32())
      return rt

  def broadcast(self, rt):
    """Broadcast a tensor of source_shape to target_shape."""
    flat_values = self.broadcast_flat_values(rt)
    return self.target_shape._add_row_partitions(flat_values)  # pylint:disable=protected-access


def _get_layer_broadcasters_from_rps(zero_broadcaster, source_rps, target_rps):
  """Get LayerBroadcasters from RowPartitions.

      *--zero_broadcaster->*
      |                    |
  source_rps[0]     target_rps[0]
      |                    |
      V                    V
      *---result[1]------->*
      |                    |
  source_rps[1]     target_rps[1]
      |                    |
      V                    V
      *---result[2]------->*
             .
             .
             .
      *---result[k-1]----->*
      |                    |
  source_rps[k]     target_rps[k]
      |                    |
      V                    V
      *---result[k]------->*

  Note: result[0] = zero_broadcaster

  Args:
    zero_broadcaster: a broadcaster between the source and target row
      partitions' rows, and equal to result[0].
    source_rps: source row partitions.
    target_rps: target row partitions (same length as source_rps).

  Returns:
    result: a list of LayerBroadcasters.
  """
  if not isinstance(zero_broadcaster, _LayerBroadcaster):
    raise TypeError("Not a _LayerBroadcaster: " + str(zero_broadcaster))
  assert len(source_rps) == len(target_rps)
  if not source_rps:
    return [zero_broadcaster]
  # Recursively derive each layer's broadcaster from the one above it.
  next_broadcaster = zero_broadcaster.next_layer(source_rps[0], target_rps[0])
  tail_broadcasters = _get_layer_broadcasters_from_rps(next_broadcaster,
                                                       source_rps[1:],
                                                       target_rps[1:])
  return [zero_broadcaster] + tail_broadcasters


def _get_broadcaster(source_shape, target_shape):
  """Get a _Broadcaster from source_shape to target_shape."""
  if source_shape.dtype != target_shape.dtype:
    raise ValueError("The source and target row_split dtypes should be equal")

  if (source_shape.rank is None or target_shape.rank is None):
    raise ValueError("Rank of source and target must be statically known")
  elif source_shape.rank > target_shape.rank:
    raise ValueError("Cannot broadcast to a shape with smaller rank")
  elif source_shape.rank == 0:
    return _Broadcaster(source_shape, target_shape, [])
  elif target_shape.rank == 1:
    assert source_shape.rank == 1
    layer = _LayerBroadcaster.first_layer(source_shape.inner_shape[0],
                                          target_shape.inner_shape[0])
    return _Broadcaster(source_shape, target_shape, [layer])

  assert source_shape.rank <= target_shape.rank
  assert target_shape.rank >= 2
  assert source_shape.rank >= 1

  source_rps = source_shape._as_row_partitions()  # pylint: disable=protected-access

  target_rps = target_shape._as_row_partitions()  # pylint: disable=protected-access

  assert len(target_rps) >= 1
  assert len(source_rps) <= len(target_rps)
  source_nrows = source_shape[0]
  if len(source_rps) < len(target_rps):
    # Note: this includes the case where len(source_rps)==0.
    # Here we begin at -1, one dimension before source_rps[0].
    # neg_one_source_rp  | neg_one_target_rp=target_rps[-(len(source_rps)+1)]
    # source_rps[0]      | target_rps[-len(source_rps)]
    # source_rps[1]      | target_rps[1-len(source_rps)]
    # ...                | ...
    # source_rps[-1]     | target_rps[-1]
    neg_one_source_rp = RowPartition.from_uniform_row_length(
        uniform_row_length=source_nrows, nrows=1, nvals=source_nrows)
    neg_one_target_rp = target_rps[-(len(source_rps) + 1)]
    neg_one_broadcaster = _LayerBroadcaster.get_singleton_broadcaster(
        neg_one_target_rp.nrows())
    zeroth_broadcaster = neg_one_broadcaster.next_layer(neg_one_source_rp,
                                                        neg_one_target_rp)
    target_rps_tail = target_rps[-len(source_rps):] if len(
        source_rps) >= 1 else []

    layers = _get_layer_broadcasters_from_rps(zeroth_broadcaster, source_rps,
                                              target_rps_tail)
    return _Broadcaster(source_shape, target_shape, layers)
  else:
    assert len(target_rps) == len(source_rps)
    zeroth_broadcaster = _LayerBroadcaster.first_layer(source_rps[0].nrows(),
                                                       target_rps[0].nrows())
    layers = _get_layer_broadcasters_from_rps(zeroth_broadcaster, source_rps,
                                              target_rps)

    return _Broadcaster(source_shape, target_shape, layers)


def _get_identity_broadcaster(shape):
  """Gets a Broadcaster for two identical shapes."""
  if shape.rank is None:
    raise ValueError("Shape must have a defined rank")
  layers = [
      _LayerBroadcaster.get_identity_broadcaster(
          shape._num_slices_in_dimension(i)) for i in range(shape.rank)  # pylint: disable=protected-access
  ]
  return _Broadcaster(shape, shape, layers)


def _broadcast_dynamic_shape_one_layer(a, b):
  """Broadcast two vectors, given their shapes.

  Args:
    a: the number of rows in a.
    b: the number of rows in b.

  Returns:
    (layer_a, layer_b, target_shape)
    layer_a is a _LayerBroadcaster from a to the target_shape.
    layer_b is a _LayerBroadcaster from b to the target_shape.
    target_shape is the target_shape

  Raises:
    InvalidArgumentError if the shapes are not consistent.
  """
  a_0 = a[0]
  b_0 = b[0]

  def broadcast_from_a():
    # Assumes a_0 == 1
    a_layer = array_ops.zeros(b_0, dtype=b_0.dtype)
    b_layer = math_ops.range(b_0)
    target = b
    return [a_layer, b_layer, target]

  # Fast path: statically known singleton dimension on either side avoids the
  # graph-mode cond below.
  a_static = tensor_util.constant_value(a)
  if a_static is not None and a_static[0] == 1:
    [a_gi, b_gi, target] = broadcast_from_a()
    a_layer = _LayerBroadcaster.from_gather_index(a_gi)
    b_layer = _LayerBroadcaster.from_gather_index(b_gi)
    return [a_layer, b_layer, target]

  def broadcast_from_b():
    # Assumes b_0 == 1
    a_layer = math_ops.range(a_0)
    b_layer = array_ops.zeros(a_0, dtype=a_0.dtype)
    target = a
    return [a_layer, b_layer, target]

  b_static = tensor_util.constant_value(b)
  if b_static is not None and b_static[0] == 1:
    [a_gi, b_gi, target] = broadcast_from_b()
    a_layer = _LayerBroadcaster.from_gather_index(a_gi)
    b_layer = _LayerBroadcaster.from_gather_index(b_gi)
    return [a_layer, b_layer, target]

  def broadcast_noop():
    # Assumes a_0 == b_0 (no-op path: both sides keep their own indices).
    a_layer = math_ops.range(a_0)
    b_layer = math_ops.range(b_0)
    target = b
    return [a_layer, b_layer, target]

  can_broadcast_from_a = math_ops.equal(a_0, 1)
  can_broadcast_from_b = math_ops.equal(b_0, 1)

  def broadcast_not_from_a():
    return cond.cond(
        can_broadcast_from_b, true_fn=broadcast_from_b, false_fn=broadcast_noop)

  nrows_equal = math_ops.equal(a_0, b_0)
  can_broadcast = math_ops.logical_or(
      can_broadcast_from_a,
      math_ops.logical_or(can_broadcast_from_b, nrows_equal))

  check_can_broadcast = check_ops.assert_equal(
      can_broadcast, True, message="Cannot broadcast")

  results = cond.cond(
      can_broadcast_from_a,
      true_fn=broadcast_from_a,
      false_fn=broadcast_not_from_a)

  # Gate every output on the runtime compatibility check.
  results = [
      control_flow_ops.with_dependencies([check_can_broadcast], x)
      for x in results
  ]
  [a_gi, b_gi, target] = results
  a_layer = _LayerBroadcaster.from_gather_index(a_gi)
  b_layer = _LayerBroadcaster.from_gather_index(b_gi)
  return [a_layer, b_layer, target]
def _broadcast_dynamic_shape_first_layer(a_0, b_0):
  """Broadcast the first layer of two dynamic shapes given the dimensions.

  Args:
    a_0: the number of rows in a.
    b_0: the number of rows in b.

  Returns:
    (use_a, layer_a, layer_b)
    where use_a is true if the target provably equals a, false otherwise.
    layer_a is a _LayerBroadcaster from a to the target.
    layer_b is a _LayerBroadcaster from b to the target.
  """

  def broadcast_from_a():
    # Assumes a_0 == 1
    a_layer = array_ops.zeros(b_0, dtype=b_0.dtype)
    b_layer = math_ops.range(b_0)
    return [a_layer, b_layer]

  # Fast paths when the dimensions are statically known.
  static_a_0 = tensor_util.constant_value(a_0)
  static_b_0 = tensor_util.constant_value(b_0)
  if static_a_0 is not None:
    if static_a_0 == static_b_0:
      id_broadcaster = _LayerBroadcaster.get_identity_broadcaster(
          static_a_0, dtype=a_0.dtype)
      return [id_broadcaster, id_broadcaster]
    elif static_a_0 == 1:
      return [
          _LayerBroadcaster.get_singleton_broadcaster(b_0),
          _LayerBroadcaster.get_identity_broadcaster(b_0)
      ]

  if static_b_0 == 1:
    return [
        _LayerBroadcaster.get_identity_broadcaster(a_0),
        _LayerBroadcaster.get_singleton_broadcaster(a_0)
    ]

  def broadcast_from_b():
    # Assumes b_0 == 1
    a_layer = math_ops.range(a_0)
    b_layer = array_ops.zeros(a_0, dtype=a_0.dtype)
    return [a_layer, b_layer]

  def broadcast_noop():
    # Assumes a_0 == b_0
    a_layer = math_ops.range(a_0)
    b_layer = math_ops.range(b_0)
    return [a_layer, b_layer]

  can_broadcast_from_a = math_ops.equal(a_0, constant_op.constant(1, a_0.dtype))
  can_broadcast_from_b = math_ops.equal(b_0, constant_op.constant(1, b_0.dtype))

  def broadcast_not_from_a():
    return cond.cond(
        can_broadcast_from_b, true_fn=broadcast_from_b, false_fn=broadcast_noop)

  # Ideally, this would only block control flow on broadcast_noop, but
  # the control flow doesn't seem to work.
  can_broadcast = math_ops.logical_or(
      math_ops.logical_or(can_broadcast_from_a, can_broadcast_from_b),
      math_ops.equal(a_0, b_0))

  result = cond.cond(
      can_broadcast_from_a,
      true_fn=broadcast_from_a,
      false_fn=broadcast_not_from_a)

  return [
      _LayerBroadcaster.from_gather_index(
          control_flow_ops.with_dependencies(
              [check_ops.assert_equal(can_broadcast, True)], x)) for x in result
  ]


def _broadcast_half(
    ac_0: _LayerBroadcaster,
    a_1: RowPartition) -> Tuple[_LayerBroadcaster, RowPartition]:
  """Does a NOOP broadcast of a_1.

     *-ac_0-->*
     |        |
    a_1      c_1
     |        |
     V        V
     *-ac_1-->*

  Note that by definition this cannot fail: there is always a well-defined
  NOOP broadcast. This is usually intended as half of broadcasting two shapes
  together.
  Args:
    ac_0: previous LayerBroadcaster
    a_1: previous RowPartition

  Returns:
    [ac_1, c_1] where ac_1 is the next LayerBroadcaster, and c_1 is the
    broadcast RowPartition
  """
  c_1 = ac_0.broadcast_row_partition(a_1)
  # Each destination value's gather index = start of its source row plus its
  # offset within that row.
  old_value_rowids = array_ops.gather(ac_0.gather_index, c_1.value_rowids())
  old_row_starts = array_ops.gather(a_1.row_splits(), old_value_rowids)
  gather_index = old_row_starts + c_1.offsets_in_rows()
  return [_LayerBroadcaster.from_gather_index(gather_index), c_1]


def _broadcast_dynamic_shape_next_layer_half_ragged(
    ac_0: _LayerBroadcaster, bc_0: _LayerBroadcaster, a_1: RowPartition,
    b_1: RowPartition
) -> Tuple[RowPartition, _LayerBroadcaster, _LayerBroadcaster]:
  r"""Broadcast target and next layer broadcaster of two dynamic shapes.

  a_1 is uniform, and b_1 is ragged.
     *--ac_0-->*<--bc_0--*
     |         |         |
    a_1       c_1       b_1
     |         |         |
     V         V         V
     *--ac_1-->*<--bc_1--*

  Args:
    ac_0: _LayerBroadcaster from a to c in the previous layer.
    bc_0: _LayerBroadcaster from b to c in the previous layer.
    a_1: a uniform RowPartition for the next layer of a.
    b_1: a ragged RowPartition for the next layer of b.

  Returns:
    (c_1, ac_1, bc_1)
    c_1: a RowPartition for the next layer of the dynamic shape.
    ac_1: _LayerBroadcaster from a to c in the next layer.
    bc_1: _LayerBroadcaster from b to c in the next layer.
  """
  if not isinstance(ac_0, _LayerBroadcaster):
    raise TypeError("ac_0 should be a _LayerBroadcaster")
  if not isinstance(bc_0, _LayerBroadcaster):
    raise TypeError("bc_0 should be a _LayerBroadcaster")
  if not isinstance(a_1, RowPartition):
    raise TypeError("a_1 should be a RowPartition")
  if not isinstance(b_1, RowPartition):
    raise TypeError("b_1 should be a RowPartition")

  assert a_1.is_uniform()
  assert not b_1.is_uniform()

  # Fast path: a_1 statically has row length 1, so b's ragged structure wins.
  static_a_1 = tensor_util.constant_value(a_1.uniform_row_length())
  if static_a_1 == 1:
    [bc_1, c_1b] = _broadcast_half(bc_0, b_1)
    ac_1_gather_index = array_ops.gather(ac_0.gather_index, c_1b.value_rowids())
    c_1 = RowPartition.from_row_splits(c_1b.row_splits())
    ac_1 = _LayerBroadcaster.from_gather_index(ac_1_gather_index)
    bc_1 = _LayerBroadcaster.from_gather_index(bc_1.gather_index)
    return [c_1, ac_1, bc_1]

  def broadcast_noop():
    # The sides must be "equal".
    [ac_1, c_1a] = _broadcast_half(ac_0, a_1)
    [bc_1, c_1b] = _broadcast_half(bc_0, b_1)
    checks = [check_ops.assert_equal(c_1a.row_splits(), c_1b.row_splits())]
    return [
        control_flow_ops.with_dependencies(checks, x)
        for x in [a_1.row_splits(), ac_1.gather_index, bc_1.gather_index]
    ]

  def broadcast_a():
    [bc_1, c_1b] = _broadcast_half(bc_0, b_1)
    ac_1_gather_index = array_ops.gather(ac_0.gather_index, c_1b.value_rowids())
    return [
        c_1b.row_splits(),
        ac_1_gather_index,
        bc_1.gather_index,
    ]

  can_broadcast_a = math_ops.equal(a_1.uniform_row_length(), 1)

  [c_1_row_splits, ac_1_gather_index,
   bc_1_gather_index] = cond.cond(
       can_broadcast_a, true_fn=broadcast_a, false_fn=broadcast_noop)

  c_1 = RowPartition.from_row_splits(c_1_row_splits)
  ac_1 = _LayerBroadcaster.from_gather_index(ac_1_gather_index)
  bc_1 = _LayerBroadcaster.from_gather_index(bc_1_gather_index)
  return [c_1, ac_1, bc_1]


def _broadcast_dynamic_shape_next_layer_both_uniform(
    ac_0: _LayerBroadcaster, bc_0: _LayerBroadcaster, a_1: RowPartition,
    b_1: RowPartition
) -> Tuple[RowPartition, _LayerBroadcaster, _LayerBroadcaster]:
  r"""Broadcast target and next layer broadcaster of two uniform dynamic shapes.

     *--ac_0-->*<--bc_0--*
     |         |         |
    a_1       c_1       b_1
     |         |         |
     V         V         V
     *--ac_1-->*<--bc_1--*

  Args:
    ac_0: _LayerBroadcaster from a to c in the previous layer.
    bc_0: _LayerBroadcaster from b to c in the previous layer.
    a_1: a RowPartition for the next layer of a.
    b_1: a RowPartition for the next layer of b.

  Returns:
    (c_1, ac_1, bc_1)
    c_1: a RowPartition for the next layer of the dynamic shape.
    ac_1: _LayerBroadcaster from a to c in the next layer.
    bc_1: _LayerBroadcaster from b to c in the next layer.
+ """ + if not isinstance(ac_0, _LayerBroadcaster): + raise TypeError("ac_0 should be a _LayerBroadcaster") + if not isinstance(bc_0, _LayerBroadcaster): + raise TypeError("bc_0 should be a _LayerBroadcaster") + if not isinstance(a_1, RowPartition): + raise TypeError("a_1 should be a RowPartition") + if not isinstance(b_1, RowPartition): + raise TypeError("b_1 should be a RowPartition") + assert a_1.is_uniform() + assert b_1.is_uniform() + + static_a_1 = tensor_util.constant_value(a_1.uniform_row_length()) + static_b_1 = tensor_util.constant_value(b_1.uniform_row_length()) + + if static_a_1 is not None: + if static_a_1 == static_b_1: + # Here, this dimension is the same, but we may have to broadcast previous + # dimensions. + [ac_1, _] = _broadcast_half(ac_0, a_1) + [bc_1, _] = _broadcast_half(bc_0, b_1) + c_1 = RowPartition.from_uniform_row_length( + static_a_1, nrows=ac_0.dest_nrows()) + return [c_1, ac_1, bc_1] + elif static_a_1 == 1: + [bc_1, c_1b] = _broadcast_half(bc_0, b_1) + ac_1 = _LayerBroadcaster.from_gather_index( + array_ops.gather(ac_0.gather_index, c_1b.value_rowids())) + c_1 = RowPartition.from_uniform_row_length( + b_1.uniform_row_length(), nrows=bc_0.dest_nrows()) + return [c_1, ac_1, bc_1] + + if static_b_1 == 1: + [ac_1, c_1a] = _broadcast_half(ac_0, a_1) + bc_1 = _LayerBroadcaster.from_gather_index( + array_ops.gather(bc_0.gather_index, c_1a.value_rowids())) + c_1 = RowPartition.from_uniform_row_length( + a_1.uniform_row_length(), nrows=ac_0.dest_nrows()) + return [c_1, ac_1, bc_1] + + def broadcast_noop(): + # Assumes a_1.uniform_row_length() == b_1.uniform_row_length() + # Both sides broadcast to a single shape. 
+ [ac_1, _] = _broadcast_half(ac_0, a_1) + [bc_1, _] = _broadcast_half(bc_0, b_1) + return [a_1.uniform_row_length(), ac_1.gather_index, bc_1.gather_index] + + def broadcast_a(): + [bc_1, c_1b] = _broadcast_half(bc_0, b_1) + ac_1_gather_index = array_ops.gather(ac_0.gather_index, c_1b.value_rowids()) + return [ + b_1.uniform_row_length(), + ac_1_gather_index, + bc_1.gather_index, + ] + + def broadcast_b(): + [ac_1, c_1a] = _broadcast_half(ac_0, a_1) + bc_1_gather_index = array_ops.gather(bc_0.gather_index, c_1a.value_rowids()) + return [a_1.uniform_row_length(), ac_1.gather_index, bc_1_gather_index] + + can_broadcast_b = math_ops.equal(b_1.uniform_row_length(), 1) + + def no_broadcast_a(): + return cond.cond( + can_broadcast_b, true_fn=broadcast_b, false_fn=broadcast_noop) + + can_broadcast_a = math_ops.equal(a_1.uniform_row_length(), 1) + + broadcast_asserts = [ + check_ops.assert_equal( + math_ops.logical_or( + math_ops.logical_or(can_broadcast_a, can_broadcast_b), + math_ops.equal(a_1.uniform_row_length(), + b_1.uniform_row_length())), True) + ] + + result = cond.cond( + can_broadcast_a, true_fn=broadcast_a, false_fn=no_broadcast_a) + + [c_1_uniform_row_length, ac_1_gather_index, bc_1_gather_index] = [ + control_flow_ops.with_dependencies(broadcast_asserts, x) for x in result + ] + + c_1 = RowPartition.from_uniform_row_length( + c_1_uniform_row_length, + nvals=c_1_uniform_row_length * ac_0.dest_nrows(), + nrows=ac_0.dest_nrows()) + ac_1 = _LayerBroadcaster.from_gather_index(ac_1_gather_index) + bc_1 = _LayerBroadcaster.from_gather_index(bc_1_gather_index) + return [c_1, ac_1, bc_1] + + +def _broadcast_dynamic_shape_next_layer( + ac_0: _LayerBroadcaster, bc_0: _LayerBroadcaster, a_1: RowPartition, + b_1: RowPartition +) -> Tuple[RowPartition, _LayerBroadcaster, _LayerBroadcaster]: + r"""Broadcast target and next layer broadcaster of two dynamic shapes. 
+ + *--ac_0-->*<--bc_0--* + | | | + a_1 c_1 b_1 + | | | + V V V + *--ac_1-->*<--bc_1--* + + Args: + ac_0: _LayerBroadcaster from a to c in the previous layer. + bc_0: _LayerBroadcaster from b to c in the previous layer. + a_1: a RowPartition for the next layer of a. + b_1: a RowPartition for the next layer of b. + + Returns: + (c_1, ac_1, bc_1) + c_1: a RowPartition for the next layer of the dynamic shape. + ac_1: _LayerBroadcaster from a to c in the next layer. + bc_1: _LayerBroadcaster from b to c in the next layer. + """ + if not isinstance(ac_0, _LayerBroadcaster): + raise TypeError("ac_0 should be a _LayerBroadcaster") + if not isinstance(bc_0, _LayerBroadcaster): + raise TypeError("bc_0 should be a _LayerBroadcaster") + if not isinstance(a_1, RowPartition): + raise TypeError("a_1 should be a RowPartition") + if not isinstance(b_1, RowPartition): + raise TypeError("b_1 should be a RowPartition") + + if a_1.is_uniform(): + if b_1.is_uniform(): + return _broadcast_dynamic_shape_next_layer_both_uniform( + ac_0, bc_0, a_1, b_1) + else: + return _broadcast_dynamic_shape_next_layer_half_ragged( + ac_0, bc_0, a_1, b_1) + else: + if b_1.is_uniform(): + [c_1, bc_1, ac_1] = _broadcast_dynamic_shape_next_layer_half_ragged( # pylint: disable=arguments-out-of-order + bc_0, ac_0, b_1, a_1) + return (c_1, ac_1, bc_1) + else: + # If neither shape is uniform, we cannot broadcast the dimension. 
+ [ac_1, c_1a] = _broadcast_half(ac_0, a_1) + [bc_1, c_1b] = _broadcast_half(bc_0, b_1) + check_valid = [ + check_ops.assert_equal(c_1a.row_splits(), c_1b.row_splits()) + ] + return ( + c_1a._with_dependencies(check_valid), # pylint: disable=protected-access + ac_1.with_dependencies(check_valid), + bc_1.with_dependencies(check_valid)) + + +def _broadcast_dynamic_shape_from_rps( + a_zero: _LayerBroadcaster, b_zero: _LayerBroadcaster, + a_rps: Sequence[RowPartition], b_rps: Sequence[RowPartition] +) -> Tuple[Sequence[RowPartition], Sequence[_LayerBroadcaster], + Sequence[_LayerBroadcaster]]: + """Create BroadcastLayers from two shapes to a target shape. + + + *--a_zero->*<-b_zero-* + | | | + a_rps[0] c_rps[0] b_rps[0] + | | | + V V V + *--ac[1]-->*<-bc[1]--* + | | | + a_rps[1] c_rps[0] b_rps[1] + | | | + V V V + *--ac[2]-->*<-bc[2]--* + + Note: ac[0]=a_zero, and bc[0]=b_zero. + Args: + a_zero: broadcaster from rows of a_rps[0] to target shape. + b_zero: broadcaster from rows of b_rps[0] to target shape. + a_rps: RowPartitions of first shape. + b_rps: RowPartitions of second shape, equal in length to a_rps. + + Returns: + (c_rps, ac, bc) where: + c_rps: RowPartitions of target shape. + ac: layers broadcasting from the first shape. + bc: layers broadcasting from the second shape. + """ + assert len(a_rps) == len(b_rps) + if a_rps: + (c_1, ac_1, + bc_1) = _broadcast_dynamic_shape_next_layer(a_zero, b_zero, a_rps[0], + b_rps[0]) + (c_suffix, a_layers, + b_layers) = _broadcast_dynamic_shape_from_rps(ac_1, bc_1, a_rps[1:], + b_rps[1:]) + + return ([c_1] + c_suffix, [ac_1] + a_layers, [bc_1] + b_layers) + else: + return ([], [], []) + + +def _get_broadcast_num_row_partitions(a: DynamicRaggedShape, + b: DynamicRaggedShape): + """Returns broadcast_dynamic_shape(a, b).num_row_partitions.""" + # Assumes rank and num_row_partitions are not None. 
if (a.num_row_partitions == 0 and b.num_row_partitions == 0):
    return 0
  # num_row_partitions each input would have once notionally padded with
  # leading dimensions up to the common (maximum) rank: each added leading
  # dimension contributes one row partition.
  expanded_num_row_partitions_a = a.num_row_partitions + max(0, b.rank - a.rank)
  expanded_num_row_partitions_b = b.num_row_partitions + max(0, a.rank - b.rank)

  # If only one input carries row partitions, the broadcast result takes that
  # input's expanded count; otherwise the larger expanded count wins.
  if a.num_row_partitions == 0:
    return expanded_num_row_partitions_b

  if b.num_row_partitions == 0:
    return expanded_num_row_partitions_a

  return max(expanded_num_row_partitions_a, expanded_num_row_partitions_b)


# pylint: disable=protected-access
def _broadcast_dynamic_shape_extended_complete(
    a: DynamicRaggedShape, b: DynamicRaggedShape, b_rps: Sequence[RowPartition],
    c_suffix: Sequence[RowPartition], ac: Sequence[_LayerBroadcaster],
    bc_suffix: Sequence[_LayerBroadcaster]
) -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]:
  """Helper for broadcast_dynamic_shape_extended.

  Assembles the final broadcast shape `c` and the two `_Broadcaster`s from
  the per-layer pieces computed by the callers. The leading layers of `b`
  that are not covered by `bc_suffix` are mapped to `c` with identity
  broadcasters (i.e. they carry over unchanged).
  """
  # Row partitions of b that precede the computed suffix become the prefix of
  # the result's row partitions.
  c_prefix = b_rps[:-len(c_suffix)]
  bc_prefix_length = b.rank - len(bc_suffix)
  # Identity broadcasters for the unchanged leading dimensions of b, sized by
  # the number of slices in each such dimension.
  bc_prefix = [
      _LayerBroadcaster.get_identity_broadcaster(b._num_slices_in_dimension(i))
      for i in range(bc_prefix_length)
  ]
  c_num_row_partitions = _get_broadcast_num_row_partitions(a, b)

  # NOTE(review): assumes b_rps (and hence c_prefix) is a tuple — confirm
  # against _as_row_partitions' return type.
  c_raw = DynamicRaggedShape.from_row_partitions(c_prefix + tuple(c_suffix))
  # Normalize how many of c's dimensions are encoded as row partitions vs.
  # folded into the inner shape.
  c = c_raw._with_num_row_partitions(c_num_row_partitions)
  return (c, _Broadcaster(a, c, ac), _Broadcaster(b, c, bc_prefix + bc_suffix))


def _broadcast_dynamic_shape_extended_helper(
    a: DynamicRaggedShape, b: DynamicRaggedShape
) -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]:
  """Helper for broadcast_dynamic_shape_extended.

  Here, we force:
    a.rank <= b.rank
    2 <= b.rank
    1 <= a.rank
  Args:
    a: a DynamicRaggedShape
    b: a DynamicRaggedShape

  Returns:
    A triple of a shape and two broadcasters.
+ """ + assert a.rank <= b.rank + assert 2 <= b.rank + assert 1 <= a.rank + a_rps = a._as_row_partitions() # pylint: disable=protected-access + b_rps = b._as_row_partitions() # pylint: disable=protected-access + + if len(a_rps) < len(b_rps): + # Note: this includes the case where len(a_rps)==0. + # Here we begin at -1, one dimension before a_rps[0]. + # neg_one_a_rp | b_rps[-(len(a_rps)+1)] + # a_rps[0] | b_rps[-len(a_rps)] + # a_rps[1] | b_rps[1-len(a_rps)] + # ... | ... + # a_rps[-1] | b_rps[-1] + + a_nrows = a[0] + a_nrows_static = tensor_util.constant_value(a_nrows) + if a_nrows_static is not None: + a_nrows = a_nrows_static + + neg_one_a_rp = RowPartition.from_uniform_row_length( + uniform_row_length=a_nrows, nrows=1, nvals=a_nrows) + neg_one_b_rp = b_rps[-(len(a_rps) + 1)] + (neg_one_ac, neg_one_bc) = _broadcast_dynamic_shape_first_layer( + constant_op.constant(1, dtype=b_rps[0].dtype), neg_one_b_rp.nrows()) + + # The first part of the solution. + (c_zero, ac_zero, + bc_zero) = _broadcast_dynamic_shape_next_layer(neg_one_ac, neg_one_bc, + neg_one_a_rp, neg_one_b_rp) + b_rps_tail = b_rps[-len(a_rps):] if len(a_rps) >= 1 else [] + + (c_suffix, ac_layers, + bc_layers) = _broadcast_dynamic_shape_from_rps(ac_zero, bc_zero, a_rps, + b_rps_tail) + + return _broadcast_dynamic_shape_extended_complete( + a=a, + b=b, + b_rps=b_rps, + c_suffix=[c_zero] + c_suffix, + ac=[ac_zero] + ac_layers, + bc_suffix=[neg_one_bc, bc_zero] + bc_layers) + + else: + assert len(a_rps) == len(b_rps) + (ac_zero, + bc_zero) = _broadcast_dynamic_shape_first_layer(a_rps[0].nrows(), + b_rps[0].nrows()) + + (c_rps, a_layers, + b_layers) = _broadcast_dynamic_shape_from_rps(ac_zero, bc_zero, a_rps, + b_rps) + return _broadcast_dynamic_shape_extended_complete( + a=a, + b=b, + b_rps=b_rps, + c_suffix=c_rps, + ac=[ac_zero] + a_layers, + bc_suffix=[bc_zero] + b_layers) + + +def _fix_start_index(index, rank, num_row_partitions): + """Slice indexes are always silently truncated.""" + if index < 0: + if 
rank is None:
      raise ValueError(
          "Rank must be known to use __getitem__ on a negative index.")
    index = rank + index
  if index < 0:
    index = 0
  if (num_row_partitions > 0 and index <= num_row_partitions + 1):
    # The rank is always >= num_row_partitions + 1 if num_row_partitions > 0.
    return index
  if index == 0:
    return index
  if rank is None:
    raise ValueError("Rank must be known to use __getitem__ on a large index.")
  if index >= rank:
    # Clamp rather than raise: slice starts are silently truncated.
    index = rank
  return index


def _fix_stop_index(index, rank):
  """Normalize a slice stop index, silently truncating out-of-range values.

  A `None` stop and a negative stop both require a known rank; when the rank
  is known the result is clamped to at most `rank` (and at least 0).
  """
  if index is None:
    if rank is None:
      raise ValueError("Rank must be known to use __getitem__ without a stop.")
    index = rank
  if index < 0:
    if rank is None:
      raise ValueError(
          "Rank must be known to use __getitem__ on a negative index.")
    # Convert a negative stop to its non-negative equivalent.
    index = rank + index
  if index < 0:
    index = 0
  if rank is not None:
    index = min(rank, index)
  return index


def _first_layer_gather_index(nrows_source, nrows_target):
  """Return the first layer gather_index.

  Args:
    nrows_source: the number of rows in the source.
    nrows_target: the number of rows in the target.

  Returns:
    A tensor, usable as a gather_index for a _LayerBroadcaster.
+ """ + + def gi_broadcast_first(): + return array_ops.zeros(nrows_target, dtype=nrows_target.dtype) + + def gi_no_broadcast_first(): + gather_index = math_ops.range(nrows_target, dtype=nrows_target.dtype) + return gather_index + + do_broadcast = math_ops.equal(nrows_source, + constant_op.constant(1, nrows_source.dtype)) + nrows_equal = math_ops.equal(nrows_source, nrows_target) + can_broadcast = check_ops.assert_equal( + math_ops.logical_or(do_broadcast, nrows_equal), + True, + message="Cannot broadcast") + + gather_index = cond.cond( + do_broadcast, true_fn=gi_broadcast_first, false_fn=gi_no_broadcast_first) + + return control_flow_ops.with_dependencies([can_broadcast], gather_index) + + +def _next_layer_gather_index(bc, original_rp, broadcast_rp): + r"""Create the next layer gather_index whether or not a broadcast happens. + + *----------bc-------->* + | | + original_rp broadcast_rp + | | + \|/ \|/ + *--next_broadcaster-->* + + Args: + bc: the old broadcaster. + original_rp: the original row partition. + broadcast_rp: the target row partition. + + Returns: + the gather_index for next_broadcaster. + Raises: + InvalidArgumentError if the shapes are incompatible. + """ + old_value_rowids = array_ops.gather(bc.gather_index, + broadcast_rp.value_rowids()) + + def gi_no_broadcast(): + # TODO(martinz): decide if row_splits or row_starts should be used here. + old_row_starts = array_ops.gather(original_rp.row_splits(), + old_value_rowids) + expected_row_lengths = array_ops.gather( + params=original_rp.row_lengths(), indices=bc.gather_index) + actual_row_lengths = broadcast_rp.row_lengths() + check_valid = check_ops.assert_equal( + expected_row_lengths, actual_row_lengths, message="Cannot broadcast") + gather_index = old_row_starts + broadcast_rp.offsets_in_rows() + return control_flow_ops.with_dependencies([check_valid], gather_index) + + def gi_broadcast(): + # Several optimizations can occur here. 
+ # old_row_starts == old_value_rowids, because: + # if you are broadcasting, then the source has uniform row length of 1, + # implying original_rp.row_splits == tf.range(orgininal_rp.nvals + 1) + # When broadcasting, there is no need to add offsets to the + # source, because the source has size 1. + # Also, this is always valid, because we enforce source and destination + # have uniform_row_length. + return old_value_rowids + + if not original_rp.is_uniform(): + return gi_no_broadcast() + + do_broadcast = math_ops.equal(original_rp.uniform_row_length(), + constant_op.constant(1, original_rp.dtype)) + gather_index = cond.cond( + do_broadcast, true_fn=gi_broadcast, false_fn=gi_no_broadcast) + + return gather_index + + +def _flat_values_shape(rt): + if isinstance(rt, ragged_tensor.RaggedTensor): + return array_ops.shape(rt.flat_values) + return rt.flat_values.shape + + +def _to_row_partitions_and_nvals_from_lengths( + lengths: Sequence[Union[int, Sequence[int]]], + dtype=None) -> Tuple[Sequence[RowPartition], int]: + """Allow ragged and uniform shapes to be specified. + + For example, [2, [2,1], 2] represents a shape like: + [[[0, 0], [0, 0]], [[0, 0]]] + + Args: + lengths: a list of integers and lists of integers. + dtype: dtype of the shape (tf.int32 or tf.int64) + + Returns: + a sequence of RowPartitions, and the number of values of the last partition. 
+ """ + size_so_far = lengths[0] + result = [] + for current_lengths in lengths[1:]: + if isinstance(current_lengths, int): + nrows = size_so_far + nvals = current_lengths * nrows + size_so_far = nvals + result.append( + RowPartition.from_uniform_row_length( + current_lengths, nvals, nrows=nrows, dtype_hint=dtype)) + else: + if size_so_far != len(current_lengths): + raise ValueError("Shape not consistent.") + result.append( + RowPartition.from_row_lengths(current_lengths, dtype_hint=dtype)) + size_so_far = sum(current_lengths) + return (result, size_so_far) + + +def _element_to_string(x): + """element to a string within a list.""" + if x is Ellipsis: + return "..." + if isinstance(x, str): + return "'" + x + "'" + return str(x) + + +def _list_tail_with_ellipsis(arr): + """Print the tail of a list where the list might have an ellipsis.""" + if not arr: + return "]" + else: + return ", " + _element_to_string(arr[0]) + _list_tail_with_ellipsis(arr[1:]) + + +def _list_with_ellipsis_to_str(arr): + """Print a list that might have ellipsis.""" + if not arr: + return "[]" + return "[" + _element_to_string(arr[0]) + _list_tail_with_ellipsis(arr[1:]) + + +def _is_int_or_tuple_of_ints(x): + if isinstance(x, int): + return True + if not isinstance(x, tuple): + return False + for y in x: + if not isinstance(y, int): + return False + return True + + +def _alt_inner_shape_from_tensor_shape(shape, dtype, new_inner_rank): + """Helper for _alt_inner_shape, used directly in _with_num_row_partitions.""" + if new_inner_rank == 1: + return constant_op.constant([shape.num_elements()], dtype=dtype) + new_inner_rank_tail_length = new_inner_rank - 1 + inner_shape_tail = shape[-new_inner_rank_tail_length:].as_list() + first_dim = shape[:-new_inner_rank_tail_length].num_elements() + return constant_op.constant([first_dim] + inner_shape_tail, dtype=dtype) + + +def _safe_floor_div(dividend: tensor_shape.Dimension, + divisor: tensor_shape.Dimension) -> tensor_shape.Dimension: + if 
tensor_shape.dimension_value(divisor) == 0:
    # A statically-known zero divisor: report "unknown" (None) instead of
    # letting Dimension division raise.
    return None
  return dividend // divisor


# TODO(b/218932570)
def _reduce_prod_patch(x):
  # Workaround (see the bug above): compute reduce_prod of int64 input in
  # int32, casting in and back out.
  if x.dtype == dtypes.int64:
    return math_ops.cast(
        math_ops.reduce_prod(math_ops.cast(x, dtypes.int32)), dtypes.int64)
  return math_ops.reduce_prod(x)


# Type alias for shape encoded as a DynamicRaggedShape or a Tensor.
DenseOrRaggedShape = Union[DynamicRaggedShape, core.TensorLike]


def _merge_row_partitions(
    row_partitions: Sequence[RowPartition]) -> RowPartition:
  """Compose a stack of RowPartitions into a single RowPartition."""
  # TODO(martinz): handle uniform splits.
  # TODO(martinz): consider using value_row_ids if present.
  # Note: this probably won't be called with len(row_partitions)==1, so no
  # need to optimize.
  # Successive gathers map each outer layer's splits through the next layer's
  # splits, producing splits directly into the innermost values.
  row_splits = row_partitions[0].row_splits()
  for rp in row_partitions[1:]:
    row_splits = array_ops.gather(rp.row_splits(), row_splits)
  return RowPartition.from_row_splits(row_splits)


def _merge_inner_shape(
    inner_shape: tensor_lib.Tensor,
    static_inner_shape: tensor_shape.TensorShape,
    outer_axis: int,
    inner_axis: int) -> Tuple[tensor_lib.Tensor, tensor_shape.TensorShape]:
  """Merge the inner shape of a DynamicRaggedShape.

  Collapses axes [outer_axis, inner_axis] (inclusive) into one axis whose
  size is the product of the collapsed dimensions, in both the dynamic and
  the static shape. Returns (new_inner_shape, new_static_inner_shape).
  """
  prefix = inner_shape[:outer_axis]
  suffix = inner_shape[inner_axis + 1:]

  internal = inner_shape[outer_axis:inner_axis + 1]
  internal_value = [_reduce_prod_patch(internal)]
  new_internal = array_ops.concat([prefix, internal_value, suffix], axis=0)
  prefix_static = static_inner_shape[:outer_axis]
  suffix_static = static_inner_shape[inner_axis + 1:]
  internal_static = static_inner_shape[outer_axis:inner_axis + 1]
  # NOTE(review): num_elements() is presumably None when any collapsed dim is
  # unknown, which makes the merged static dim unknown — confirm.
  internal_value_static = tensor_shape.TensorShape(
      [internal_static.num_elements()])
  new_internal_static = prefix_static + internal_value_static + suffix_static

  return (new_internal, new_internal_static)


def _batch_rp_spec(rp_spec: RowPartitionSpec,
                   batch_size: Optional[int]) -> RowPartitionSpec:
  """Batches a RowPartitionSpec.

  Given a RowPartitionSpec and a batch_size, create a RowPartitionSpec that
  will be the spec for the concatenation of batch_size RowPartitions.

  A RowPartition can be considered a transformation from a list of a given
  length to a list of lists. Assume rp_a is a map from list_a to nlist_a,
  And rp_b is a map from list_b to nlist_b. concat(rp_a, rp_b) is a
  transform of concat(list_a, list_b) to concat(nlist_a, nlist_b).

  If batch_size is None, then have the spec be able to handle an arbitrary
  number of RowPartitions.

  Args:
    rp_spec: a RowPartitionSpec for all the RowPartitions to be concatenated.
    batch_size: the number of rp_specs to be concatenated.

  Returns:
    a batched RowPartitionSpec.
  """
  if batch_size is None:
    # Unknown batch size: only the per-row structure (uniform_row_length)
    # survives; nrows and nvals are left unknown.
    return RowPartitionSpec(
        uniform_row_length=rp_spec.uniform_row_length, dtype=rp_spec.dtype)
  # Concatenating batch_size copies scales both the row count and the value
  # count, when they are statically known.
  nrows = None if rp_spec.nrows is None else rp_spec.nrows * batch_size
  nvals = None if rp_spec.nvals is None else rp_spec.nvals * batch_size
  return RowPartitionSpec(
      nrows=nrows,
      nvals=nvals,
      uniform_row_length=rp_spec.uniform_row_length,
      dtype=rp_spec.dtype)


def _batch_rp_spec_head(old_head: RowPartitionSpec,
                        batch_size: Optional[int]) -> RowPartitionSpec:
  """Creates a RowPartitionSpec representing the new dimension created."""
  # The new outermost partition has batch_size rows, each spanning one
  # original group of old_head.nrows values (hence uniform_row_length below).
  nvals = None if (old_head.nrows is None or
                   batch_size is None) else batch_size * old_head.nrows
  return RowPartitionSpec(
      nrows=batch_size,
      nvals=nvals,
      uniform_row_length=old_head.nrows,
      dtype=old_head.dtype)


def _batch_static_inner_shape(
    old_shape: tensor_shape.TensorShape,
    batch_size: Optional[int]) -> tensor_shape.TensorShape:
  """Returns a copy of old_shape with axis=0 multiplied by batch_size.

  Only use if this is the inner_shape of a DynamicRaggedShape.Spec with one
  or more row partitions.

  Args:
    old_shape: the original inner_shape.
    batch_size: the batch size.

  Returns:
    a new shape.
+ """ + head_dim = tensor_shape.dimension_at_index(old_shape, 0) * batch_size + return head_dim + old_shape[1:] + + +def _batch_tensor_shape(old_shape: tensor_shape.TensorShape, + batch_size: int) -> tensor_shape.TensorShape: + return tensor_shape.TensorShape([batch_size]) + old_shape + + +def _unbatch_static_inner_shape( + old_shape: tensor_shape.TensorShape, + batch_size: Optional[int]) -> tensor_shape.TensorShape: + """Unbatch a static_inner_shape when num_row_partitions > 0.""" + head_dim = tensor_shape.dimension_at_index(old_shape, 0) // batch_size + return head_dim + old_shape[1:] + + +# Copied from ragged_array_ops.py +def ones(shape: DynamicRaggedShape, + dtype=dtypes.float32, + name: Optional[str] = None) -> ragged_tensor.RaggedOrDense: + """Returns ones shaped like x.""" + flat_values = array_ops.ones(shape.inner_shape, dtype=dtype, name=name) + return ragged_tensor.RaggedTensor._from_nested_row_partitions( # pylint: disable=protected-access + flat_values, shape.row_partitions) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_array_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_array_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..befa309e169ce535ba93d806fb702e9d05e78811 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_array_ops.py @@ -0,0 +1,1300 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Array operations for RaggedTensors.""" + +from typing import Optional +from typing import Union + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import data_flow_ops +from tensorflow.python.ops import gen_ragged_array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import sort_ops +from tensorflow.python.ops.ragged import dynamic_ragged_shape +from tensorflow.python.ops.ragged import ragged_functional_ops +from tensorflow.python.ops.ragged import ragged_math_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged import ragged_util +from tensorflow.python.ops.ragged import segment_id_ops +from tensorflow.python.types import core as core_types +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + +# =============================================================================== +# Masking +# =============================================================================== + + 
+@tf_export('ragged.boolean_mask') +@dispatch.add_dispatch_support +def boolean_mask(data, mask, name=None): + """Applies a boolean mask to `data` without flattening the mask dimensions. + + Returns a potentially ragged tensor that is formed by retaining the elements + in `data` where the corresponding value in `mask` is `True`. + + * `output[a1...aA, i, b1...bB] = data[a1...aA, j, b1...bB]` + + Where `j` is the `i`th `True` entry of `mask[a1...aA]`. + + Note that `output` preserves the mask dimensions `a1...aA`; this differs + from `tf.boolean_mask`, which flattens those dimensions. + + Args: + data: A potentially ragged tensor. + mask: A potentially ragged boolean tensor. `mask`'s shape must be a prefix + of `data`'s shape. `rank(mask)` must be known statically. + name: A name prefix for the returned tensor (optional). + + Returns: + A potentially ragged tensor that is formed by retaining the elements in + `data` where the corresponding value in `mask` is `True`. + + * `rank(output) = rank(data)`. + * `output.ragged_rank = max(data.ragged_rank, rank(mask) - 1)`. + + Raises: + ValueError: if `rank(mask)` is not known statically; or if `mask.shape` is + not a prefix of `data.shape`. + + #### Examples: + + >>> # Aliases for True & False so data and mask line up. + >>> T, F = (True, False) + + >>> tf.ragged.boolean_mask( # Mask a 2D Tensor. + ... data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], + ... mask=[[T, F, T], [F, F, F], [T, F, F]]).to_list() + [[1, 3], [], [7]] + + >>> tf.ragged.boolean_mask( # Mask a 2D RaggedTensor. + ... tf.ragged.constant([[1, 2, 3], [4], [5, 6]]), + ... tf.ragged.constant([[F, F, T], [F], [T, T]])).to_list() + [[3], [], [5, 6]] + + >>> tf.ragged.boolean_mask( # Mask rows of a 2D RaggedTensor. + ... tf.ragged.constant([[1, 2, 3], [4], [5, 6]]), + ... tf.ragged.constant([True, False, True])).to_list() + [[1, 2, 3], [5, 6]] + """ + with ops.name_scope(name, 'RaggedMask', [data, mask]): + # Convert inputs to tensors. 
+ data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data') + mask = ragged_tensor.convert_to_tensor_or_ragged_tensor( + mask, dtypes.bool, name='mask') + row_splits_dtype, (data, mask) = ragged_tensor.match_row_splits_dtypes( + data, mask, return_dtype=True) + + # Get static rank of mask. + if mask.shape.ndims is None: + raise ValueError('mask.shape.ndims must be known statically.') + elif mask.shape.ndims == 0: + raise ValueError('mask cannot be scalar.') + + # If mask is ragged, then recurse with a non-ragged mask. + if ragged_tensor.is_ragged(mask): + if not ragged_tensor.is_ragged(data): + data = ragged_tensor.RaggedTensor.from_tensor( + data, + ragged_rank=mask.ragged_rank, + row_splits_dtype=mask.row_splits.dtype) + # Check that mask.nested_row_splits is a prefix of + # data.nested_row_splits. + splits_list = [ + mask.nested_row_splits, data.nested_row_splits[:mask.ragged_rank] + ] + with ops.control_dependencies( + ragged_util.assert_splits_match(splits_list)): + # Strip off ragged `splits` until `mask` is non-ragged. Keep the splits + # that we strip off in `splits`, so we can add them back on after + # we recursively mask the non-ragged data. + splits = [] + while ragged_tensor.is_ragged(mask): + if mask.shape.ndims > 2: + splits.append(mask.row_splits) + else: + # Count the number of True mask values in each row to find the + # lengths of the filtered rows; then convert to splits. + int_mask = ragged_functional_ops.map_flat_values( + math_ops.cast, mask, dtype=row_splits_dtype) + masked_row_lengths = ragged_math_ops.reduce_sum(int_mask, axis=1) + splits.append(ragged_util.lengths_to_splits(masked_row_lengths)) + mask = mask.values + data = data.values + + # Recursively apply the nested non-ragged mask to the nested data. + masked_values = boolean_mask(data, mask) + + # Add the ragged `splits` back to the result. 
+ masked_values = ragged_tensor.RaggedTensor.from_nested_row_splits( + masked_values, splits, validate=False) + + return masked_values + + # If mask is non-ragged and has rank 1, and data is ragged, then build a + # ragged tensor with the indicated rows. + elif ragged_tensor.is_ragged(data) and mask.shape.ndims == 1: + # Get the masked splits: first get the length of each row, then filter + # out the rows that we are deleting, and convert that filtered set of + # masks back to a splits tensor. + lengths = data.row_lengths() + masked_lengths = array_ops.boolean_mask(lengths, mask) + masked_splits = ragged_util.lengths_to_splits(masked_lengths) + + # Get the masked values: first get row ids corresponding to each + # value, then use tf.gather to build a boolean mask that's false for + # values that come from rows that we are deleting, and use that mask to + # construct the masked values tensor. + segment_ids = segment_id_ops.row_splits_to_segment_ids(data.row_splits) + segment_mask = array_ops.gather(mask, segment_ids) + masked_values = boolean_mask(data.values, segment_mask) + + return ragged_tensor.RaggedTensor.from_row_splits( + masked_values, masked_splits, validate=False) + + # If mask is non-ragged and has rank>1, then convert it to be ragged, + # with a ragged rank matching data. + if ragged_tensor.is_ragged(data): + mask = ragged_tensor.RaggedTensor.from_tensor( + mask, + ragged_rank=min(data.ragged_rank, mask.shape.ndims - 1), + row_splits_dtype=data.row_splits.dtype) + return boolean_mask(data, mask) + + # Otherwise, data and mask are both `Tensor`s. + else: + # Apply `boolean_mask` to get the masked values. + masked_values = array_ops.boolean_mask(data, mask) + + if mask.shape.ndims >= 2: + # Add the innermost ragged dimension. For each innermost cell, get the + # number of values it contains. Then flatten that to get a list of + # cell lengths, and convert it to splits. Finally, combine the splits + # and values to get the innermost ragged tensor. 
+ masked_lengths = math_ops.count_nonzero( + mask, axis=-1, dtype=row_splits_dtype) + flattened_masked_lengths = array_ops.reshape(masked_lengths, [-1]) + masked_values = ragged_tensor.RaggedTensor.from_row_lengths( + masked_values, flattened_masked_lengths, validate=False) + + # Wrap remaining ragged dimensions. + if mask.shape.ndims > 2: + mask_shape = array_ops.shape(mask, out_type=row_splits_dtype) + split_size = math_ops.cumprod(mask_shape) + 1 + for dim in range(mask.shape.ndims - 3, -1, -1): + elt_size = mask_shape[dim + 1] + masked_splits = math_ops.range(split_size[dim]) * elt_size + masked_values = ragged_tensor.RaggedTensor.from_row_splits( + masked_values, masked_splits, validate=False) + + return masked_values + + +# =============================================================================== +# Tiling +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.tile) +def tile(input: ragged_tensor.Ragged, multiples, name=None): # pylint: disable=redefined-builtin + """Constructs a `RaggedTensor` by tiling a given `RaggedTensor`. + + The values of `input` are replicated `multiples[i]` times along the + `i`th dimension (for each dimension `i`). For every dimension `axis` in + `input`, the length of each output element in that dimension is the + length of corresponding input element multiplied by `multiples[axis]`. + + Args: + input: A `RaggedTensor`. + multiples: A 1-D integer `Tensor`. Length must be the same as the number of + dimensions in `input`. + name: A name for the operation (optional). + + Returns: + A `RaggedTensor` with the same type, rank, and ragged_rank as `input`. 
+ + #### Example: + + >>> rt = tf.ragged.constant([[1, 2], [3]]) + >>> tf.tile(rt, [3, 2]).to_list() + [[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]] + """ + with ops.name_scope(name, 'RaggedTile', [input, multiples]): + input = ragged_tensor.convert_to_tensor_or_ragged_tensor( + input, name='input') + if not ragged_tensor.is_ragged(input): + return array_ops.tile(input, multiples, name) + multiples = ragged_util.convert_to_int_tensor( + multiples, name='multiples', dtype=input.row_splits.dtype) + multiples.shape.assert_has_rank(1) + + # If the constant value of `multiples` is available, then we can use it + # to skip tiling dimensions where `multiples=1`. + const_multiples = tensor_util.constant_value(multiples) + + return ragged_tensor.RaggedTensor.from_nested_row_splits( + _tile_ragged_values(input, multiples, const_multiples), + _tile_ragged_splits(input, multiples, const_multiples), + validate=False) + + +def _tile_ragged_values(rt_input, multiples, const_multiples=None): + """Builds flat_values tensor for a tiled `RaggedTensor`. + + Returns a tensor that repeats the values in + `rt_input.flat_values` in the + appropriate pattern to construct a `RaggedTensor` that tiles `rt_input` as + specified by `multiples`. + + Args: + rt_input: The `RaggedTensor` whose values should be repeated. + multiples: A 1-D integer `tensor`, indicating how many times each dimension + should be repeated. + const_multiples: Optional constant value for multiples. Used to skip tiling + dimensions where `multiples=1`. + + Returns: + A `Tensor` with the same type and rank as `rt_input.flat_values`. + + #### Example: + + >>> rt = tf.ragged.constant([[1, 2], [3]]) + >>> _tile_ragged_values(rt, tf.constant([3, 2])).numpy() + array([1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3], dtype=int32) + """ + ragged_rank = rt_input.ragged_rank + nested_splits = rt_input.nested_row_splits + + # Pointers to the values in `rt_input.flat_values`. 
+ inner_value_ids = math_ops.range(nested_splits[-1][-1]) + + # For each ragged dimension (working from the innermost to outermost), + # expand `inner_value_ids` as necessary to tile that dimension. + prev_splits = None + for axis in range(ragged_rank, 0, -1): + # Ragged splits for this dimension. + splits = nested_splits[axis - 1] + + # Adjust splits so they point into `inner_value_ids` (instead of just + # pointing into the next dimension's values). + if prev_splits is not None: # Not the first pass through the loop. + splits = array_ops.gather(prev_splits * multiples[axis + 1], splits) + + # Repeat each element in this ragged dimension `multiples[axis]` times. + if const_multiples is None or const_multiples[axis] != 1: + inner_value_ids = ragged_util.repeat_ranges(inner_value_ids, splits, + multiples[axis]) + + prev_splits = splits + + # Gather the tiled inner values. + ragged_tiled_values = array_ops.gather(rt_input.flat_values, inner_value_ids) + + # Tile the flat_values for the uniform dimensions (i.e., for `axis=0` plus + # `axis=range(ragged_rank, rank)`). + inner_repeats = array_ops.concat([multiples[:1], multiples[ragged_rank + 1:]], + axis=0) + return array_ops.tile(ragged_tiled_values, inner_repeats) + + +def _tile_ragged_splits(rt_input, multiples, const_multiples=None): + """Builds nested_split tensors for a tiled `RaggedTensor`. + + Returns a list of split tensors that can be used to construct the + `RaggedTensor` that tiles `rt_input` as specified by `multiples`. + + Args: + rt_input: The `RaggedTensor` that is being tiled. + multiples: A 1-D integer `tensor`, indicating how many times each dimension + should be repeated. + const_multiples: Optional constant value for multiples. Used to skip tiling + dimensions where `multiples=1`. + + Returns: + A list of 1-D integer `Tensor`s (one for each ragged dimension in + `rt_input`). 
+ + #### Example: + + >>> rt = tf.ragged.constant([[1, 2], [3]]) + >>> _tile_ragged_splits(rt, [3, 2]) + [] + """ + ragged_rank = rt_input.ragged_rank + nested_splits = rt_input.nested_row_splits + + # projected_splits[src_axis, dst_axis] contains the split points that divide + # the rows from src_axis in the list of dst_axis values. E.g., + # projected_splits[i, i] = nested_splits[i], and + # projected_splits[i, i+1] = gather(nested_splits[i+1], nested_splits[i]). + projected_splits = [{i: nested_splits[i]} for i in range(ragged_rank)] + for src_axis in range(ragged_rank): + for dst_axis in range(src_axis + 1, ragged_rank - 1): + projected_splits[src_axis][dst_axis] = array_ops.gather( + nested_splits[dst_axis], projected_splits[src_axis][dst_axis - 1]) + + # For each ragged dimension: nested_splits[axis] -> result_splits[axis]. + result_splits = [] + for axis in range(ragged_rank): + # Get the length of each row for the input tensor for this dimension. + input_lengths = nested_splits[axis][1:] - nested_splits[axis][:-1] + + # Multiply those lengths by the `multiples` of dimension axis+1, since + # each value will be repeated that number of times. + output_lengths = input_lengths * multiples[axis + 1] + + # Repeat ranges of the row lengths as necessary for them to be tiled in + # each ragged dimension `d < axis`. (Start with dimension d=axis-1, and + # work our way up to dimension d=0.) + repeats = 1 + for d in range(axis - 1, -1, -1): + if const_multiples is None or const_multiples[d + 1] != 1: + splits = projected_splits[d][axis - 1] * repeats + output_lengths = ragged_util.repeat_ranges(output_lengths, splits, + multiples[d + 1]) + repeats *= multiples[d + 1] + + # Tile splits for the outermost (uniform) dimension. + output_lengths = array_ops.tile(output_lengths, multiples[:1]) + + # Convert to splits. 
+ result_splits.append(ragged_util.lengths_to_splits(output_lengths)) + + return result_splits + + +# =============================================================================== +# Reshaping +# =============================================================================== + + +@dispatch.dispatch_for_api(array_ops.expand_dims_v2) +def expand_dims(input: ragged_tensor.Ragged, axis, name=None): # pylint: disable=redefined-builtin + """Inserts a dimension with shape 1 into a potentially ragged tensor's shape. + + Given a potentially ragged tenor `input`, this operation inserts a + dimension with size 1 at the dimension `axis` of `input`'s shape. + + The following table gives some examples showing how `ragged.expand_dims` + impacts the shapes of different input tensors. Ragged dimensions are + indicated by enclosing them in parentheses. + + input.shape | axis | result.shape + ----------------------- | ---- | ----------------------------- + `[D1, D2]` | `0` | `[1, D1, D2]` + `[D1, D2]` | `1` | `[D1, 1, D2]` + `[D1, D2]` | `2` | `[D1, D2, 1]` + `[D1, (D2), (D3), D4]` | `0` | `[1, D1, (D2), (D3), D4]` + `[D1, (D2), (D3), D4]` | `1` | `[D1, 1, (D2), (D3), D4]` + `[D1, (D2), (D3), D4]` | `2` | `[D1, (D2), 1, (D3), D4]` + `[D1, (D2), (D3), D4]` | `3` | `[D1, (D2), (D3), 1, D4]` + `[D1, (D2), (D3), D4]` | `4` | `[D1, (D2), (D3), D4, 1]` + + Args: + input: The potentially tensor that should be expanded with a new dimension. + axis: An integer constant indicating where the new dimension should be + inserted. + name: A name for the operation (optional). + + Returns: + A tensor with the same values as `input`, with an added dimension of + size 1 at `axis`. 
+ + #### Examples: + + >>> rt = tf.ragged.constant([[1, 2], [3]]) + >>> print(rt.shape) + (2, None) + + >>> expanded = tf.expand_dims(rt, axis=0) + >>> print(expanded.shape, expanded) + (1, 2, None) + + >>> expanded = tf.expand_dims(rt, axis=1) + >>> print(expanded.shape, expanded) + (2, 1, None) + + >>> expanded = tf.expand_dims(rt, axis=2) + >>> print(expanded.shape, expanded) + (2, None, 1) + """ + with ops.name_scope(name, 'RaggedExpandDims', [input]): + input = ragged_tensor.convert_to_tensor_or_ragged_tensor( + input, name='input') + + if not ragged_tensor.is_ragged(input): + return array_ops.expand_dims(input, axis) + + ndims = None if input.shape.ndims is None else input.shape.ndims + 1 + axis = array_ops.get_positive_axis(axis, ndims, ndims_name='rank(input)') + + if axis == 0: + return ragged_tensor.RaggedTensor.from_uniform_row_length( + input, uniform_row_length=input.nrows(), nrows=1, validate=False) + elif axis == 1: + return ragged_tensor.RaggedTensor.from_uniform_row_length( + input, uniform_row_length=1, nrows=input.nrows(), validate=False) + else: + if ragged_tensor.is_ragged(input.values): + return input.with_values(expand_dims(input.values, axis - 1)) + else: + return input.with_values(array_ops.expand_dims(input.values, axis - 1)) + + +@dispatch.dispatch_for_api(array_ops.expand_dims) +def _ragged_expand_dims_v1( + input: ragged_tensor.Ragged, # pylint: disable=redefined-builtin + axis=None, + name=None, + dim=None): + if dim is not None: + axis = dim + return expand_dims(input=input, axis=axis, name=name) + + +# =============================================================================== +# RaggedTensor Size +# =============================================================================== + + +@dispatch.dispatch_for_api(array_ops.size_v2) +def size(input: ragged_tensor.Ragged, out_type=dtypes.int32, name=None): # pylint: disable=redefined-builtin + """Returns the size of a potentially ragged tensor. 
+ + The size of a ragged tensor is the size of its inner values. + + #### Example: + + >>> tf.size(tf.ragged.constant([[1, 2], [3]])).numpy().item() + 3 + + Args: + input: A potentially ragged `Tensor`. + out_type: The numeric output type for the operation. + name: A name for the operation (optional). + + Returns: + A Tensor of type `out_type`. + """ + if ragged_tensor.is_ragged(input): + return array_ops.size(input.flat_values, out_type=out_type, name=name) + else: + return array_ops.size(input, out_type=out_type, name=name) + + +@dispatch.dispatch_for_api(array_ops.size) +def _ragged_size_v1( + input: ragged_tensor.Ragged, # pylint: disable=redefined-builtin + name=None, + out_type=dtypes.int32): + return size(input=input, out_type=out_type, name=name) + + +# =============================================================================== +# ragged.rank +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.rank) +def rank(input: ragged_tensor.Ragged, name=None): # pylint: disable=redefined-builtin + """Returns the rank of a RaggedTensor. + + Returns a 0-D `int32` `Tensor` representing the rank of `input`. + + #### Example: + + >>> # shape of tensor 't' is [2, None, None] + >>> t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]]) + >>> tf.rank(t).numpy().item() + 3 + + Args: + input: A `RaggedTensor` + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int32`. 
+ """ + with ops.name_scope(name, 'RaggedRank', [input]) as name: + if not ragged_tensor.is_ragged(input): + return array_ops.rank(input, name) + + return input.ragged_rank + array_ops.rank(input.flat_values) + + +# =============================================================================== +# ragged.one_hot +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.one_hot) +def ragged_one_hot(indices: ragged_tensor.Ragged, + depth, + on_value=None, + off_value=None, + axis=None, + dtype=None, + name=None): + """Applies tf.one_hot along the values of a RaggedTensor.""" + # Get the adjusted axis value for the call to array_ops.one_hot. + # Note: the only negative `axis` value supported by array_ops.one_hot is -1. + if isinstance(axis, int) and axis >= 0: + if axis <= indices.ragged_rank: + raise ValueError('axis (%d) must be greater than indices.ragged_rank ' + '(%d).' % (axis, indices.ragged_rank)) + axis -= indices.ragged_rank + + with ops.name_scope(name, 'RaggedOneHot', + [indices, depth, on_value, off_value, axis]): + indices = ragged_tensor.convert_to_tensor_or_ragged_tensor( + indices, name='indices') + return indices.with_flat_values( + array_ops.one_hot(indices.flat_values, depth, on_value, off_value, axis, + dtype, name)) + + +# =============================================================================== +# ragged.stack_dynamic_partitions +# =============================================================================== +@tf_export('ragged.stack_dynamic_partitions') +@dispatch.add_dispatch_support +def stack_dynamic_partitions(data, partitions, num_partitions, name=None): + """Stacks dynamic partitions of a Tensor or RaggedTensor. + + Returns a RaggedTensor `output` with `num_partitions` rows, where the row + `output[i]` is formed by stacking all slices `data[j1...jN]` such that + `partitions[j1...jN] = i`. Slices of `data` are stacked in row-major + order. 
+ + If `num_partitions` is an `int` (not a `Tensor`), then this is equivalent to + `tf.ragged.stack(tf.dynamic_partition(data, partitions, num_partitions))`. + + #### Example: + + >>> data = ['a', 'b', 'c', 'd', 'e'] + >>> partitions = [ 3, 0, 2, 2, 3] + >>> num_partitions = 5 + >>> tf.ragged.stack_dynamic_partitions(data, partitions, num_partitions) + + + Args: + data: A `Tensor` or `RaggedTensor` containing the values to stack. + partitions: An `int32` or `int64` `Tensor` or `RaggedTensor` specifying the + partition that each slice of `data` should be added to. `partitions.shape` + must be a prefix of `data.shape`. Values must be greater than or equal to + zero, and less than `num_partitions`. `partitions` is not required to be + sorted. + num_partitions: An `int32` or `int64` scalar specifying the number of + partitions to output. This determines the number of rows in `output`. + name: A name prefix for the returned tensor (optional). + + Returns: + A `RaggedTensor` containing the stacked partitions. The returned tensor + has the same dtype as `data`, and its shape is + `[num_partitions, (D)] + data.shape[partitions.rank:]`, where `(D)` is a + ragged dimension whose length is the number of data slices stacked for + each `partition`. + """ + with ops.name_scope(name, 'SegmentStack', [data, partitions, num_partitions]): + # Convert inputs to tensors. 
+ data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data') + row_splits_dtype = ( + data.row_splits.dtype + if isinstance(data, ragged_tensor.RaggedTensor) else None) + partitions = ragged_tensor.convert_to_tensor_or_ragged_tensor( + partitions, name='partitions', preferred_dtype=row_splits_dtype) + num_partitions = ops.convert_to_tensor( + num_partitions, name='num_partitions', preferred_dtype=partitions.dtype) + if row_splits_dtype is not None: + partitions = math_ops.cast(partitions, row_splits_dtype) + num_partitions = math_ops.cast(num_partitions, partitions.dtype) + + # Sanity-checks for shapes. + partitions_rank = partitions.shape.ndims + if partitions_rank is None: + raise ValueError('partitions must have known rank.') + num_partitions.shape.assert_has_rank(0) + partitions.shape.assert_is_compatible_with(data.shape[:partitions_rank]) + + if partitions_rank == 0: + # If partitions is a scalar, then just create a RaggedTensor containing + # that single the complete `data` value in the specified row. + return ragged_tensor.RaggedTensor.from_value_rowids( + values=array_ops_stack.stack([data]), + value_rowids=array_ops_stack.stack([partitions]), + nrows=num_partitions, + validate=False) + + elif partitions_rank == 1: + # If partitions is a vector (the typical case): we can just use data and + # partitions as the `values` and `value_rowids` for `from_value_rowids`, + # as long as we sort them first. 
+ permutation = sort_ops.argsort(partitions, stable=True) + value_rowids = array_ops.gather(partitions, permutation) + values = array_ops.gather(data, permutation) + checks = [ + check_ops.assert_less( + value_rowids[-1:], num_partitions, + message='partitions must be less than num_partitions'), + check_ops.assert_non_negative( + partitions, message='partitions must be non-negative.') + ] + with ops.control_dependencies(checks): + return ragged_tensor.RaggedTensor.from_value_rowids( + values, value_rowids, nrows=num_partitions, validate=False) + + else: + # Handle higher-dimensional partitions via recursion. + if not isinstance(data, ragged_tensor.RaggedTensor): + data = ragged_tensor.RaggedTensor.from_tensor( + data, row_splits_dtype=partitions.dtype, ragged_rank=1) + if not isinstance(partitions, ragged_tensor.RaggedTensor): + partitions = ragged_tensor.RaggedTensor.from_tensor( + partitions, + row_splits_dtype=partitions.dtype, + ragged_rank=max(data.ragged_rank, partitions_rank - 1)) + check = check_ops.assert_equal( + data.row_splits, + partitions.row_splits, + message='data and partitions have incompatible ragged shapes') + with ops.control_dependencies([check]): + return stack_dynamic_partitions(data.values, partitions.values, + num_partitions) + + +# =============================================================================== +# Reverse +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.reverse) +def reverse(tensor: ragged_tensor.Ragged, axis, name=None): + """Reverses a RaggedTensor along the specified axes. + + #### Example: + + >>> data = tf.ragged.constant([ + ... [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10], [11, 12]]]) + >>> tf.reverse(data, axis=[0, 2]) + + + Args: + tensor: A 'RaggedTensor' to reverse. + axis: A list or tuple of 'int' or a constant 1D 'tf.Tensor'. The indices of + the axes to reverse. + name: A name prefix for the returned tensor (optional). 
+ + Returns: + A 'RaggedTensor'. + """ + type_error_msg = ('`axis` must be a list of int or a constant tensor' + 'when reversing axes in a ragged tensor') + + with ops.name_scope(name, 'Reverse', [tensor, axis]): + if isinstance(axis, tensor_lib.Tensor): + axis = tensor_util.constant_value(axis) + if axis is None: + raise TypeError(type_error_msg) + elif not (isinstance(axis, (list, tuple)) and + all(isinstance(dim, int) for dim in axis)): + raise TypeError(type_error_msg) + + tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor( + tensor, name='tensor') + + # Allow usage of negative values to specify innermost axes. + axis = [ + array_ops.get_positive_axis(dim, tensor.shape.rank, 'axis[%d]' % i, + 'rank(tensor)') + for i, dim in enumerate(axis) + ] + + # We only need to slice up to the max axis. If the axis list + # is empty, it should be 0. + slices = [slice(None)] * (max(axis) + 1 if axis else 0) + + for dim in axis: + slices[dim] = slice(None, None, -1) + + return tensor[tuple(slices)] + + +# =============================================================================== +# Cross +# =============================================================================== + + +@tf_export('ragged.cross') +@dispatch.add_dispatch_support +def cross(inputs, name=None): + """Generates feature cross from a list of tensors. + + The input tensors must have `rank=2`, and must all have the same number of + rows. The result is a `RaggedTensor` with the same number of rows as the + inputs, where `result[row]` contains a list of all combinations of values + formed by taking a single value from each input's corresponding row + (`inputs[i][row]`). Values are combined by joining their strings with '_X_'. + E.g.: + + >>> tf.ragged.cross([tf.ragged.constant([['a'], ['b', 'c']]), + ... tf.ragged.constant([['d'], ['e']]), + ... tf.ragged.constant([['f'], ['g']])]) + + + Args: + inputs: A list of `RaggedTensor` or `Tensor` or `SparseTensor`. + name: Optional name for the op. 
+ + Returns: + A 2D `RaggedTensor` of type `string`. + """ + return _cross_internal(inputs=inputs, hashed_output=False, name=name) + + +@tf_export('ragged.cross_hashed') +@dispatch.add_dispatch_support +def cross_hashed(inputs, num_buckets=0, hash_key=None, name=None): + """Generates hashed feature cross from a list of tensors. + + The input tensors must have `rank=2`, and must all have the same number of + rows. The result is a `RaggedTensor` with the same number of rows as the + inputs, where `result[row]` contains a list of all combinations of values + formed by taking a single value from each input's corresponding row + (`inputs[i][row]`). Values are combined by hashing together their + fingerprints. E.g.: + + >>> tf.ragged.cross_hashed([tf.ragged.constant([['a'], ['b', 'c']]), + ... tf.ragged.constant([['d'], ['e']]), + ... tf.ragged.constant([['f'], ['g']])], + ... num_buckets=100) + + + Args: + inputs: A list of `RaggedTensor` or `Tensor` or `SparseTensor`. + num_buckets: A non-negative `int` that used to bucket the hashed values. If + `num_buckets != 0`, then `output = hashed_value % num_buckets`. + hash_key: Integer hash_key that will be used by the `FingerprintCat64` + function. If not given, a default key is used. + name: Optional name for the op. + + Returns: + A 2D `RaggedTensor` of type `int64`. 
+ """ + return _cross_internal( + inputs=inputs, + hashed_output=True, + num_buckets=num_buckets, + hash_key=hash_key, + name=name) + + +_DEFAULT_CROSS_HASH_KEY = 0xDECAFCAFFE + + +def _cross_internal(inputs, + hashed_output=False, + num_buckets=0, + hash_key=None, + name=None): + """Generates feature cross from a list of ragged and dense tensors.""" + if not isinstance(inputs, (tuple, list)): + raise TypeError('Inputs must be a list') + + if hash_key is None: + hash_key = _DEFAULT_CROSS_HASH_KEY + + ragged_inputs = [] + sparse_inputs = [] + dense_inputs = [] + input_order = [] + with ops.name_scope(name, 'RaggedCross', inputs): + for i, t in enumerate(inputs): + if sparse_tensor.is_sparse(t): + t = sparse_tensor.SparseTensor.from_value(t) + else: + t = ragged_tensor.convert_to_tensor_or_ragged_tensor(t) + if t.dtype.is_integer: + t = math_ops.cast(t, dtypes.int64) + elif t.dtype != dtypes.string: + raise ValueError('Unexpected dtype for inputs[%d]: %s' % (i, t.dtype)) + if isinstance(t, ragged_tensor.RaggedTensor): + if t.ragged_rank != 1: + raise ValueError('tf.ragged.cross only supports inputs with rank=2') + ragged_inputs.append(t) + input_order.append('R') + elif isinstance(t, sparse_tensor.SparseTensor): + sparse_inputs.append(t) + input_order.append('S') + else: + dense_inputs.append(t) + input_order.append('D') + + out_values_type = dtypes.int64 if hashed_output else dtypes.string + if ragged_inputs and all( + t.row_splits.dtype == dtypes.int32 for t in ragged_inputs): + out_row_splits_type = dtypes.int32 + else: + out_row_splits_type = dtypes.int64 + + # Convert hash_key from uint64 -> int64, since we need to pass it via + # an int64 attr. 
+ if hash_key > 2**63: + hash_key -= 2**64 + + values_out, splits_out = gen_ragged_array_ops.ragged_cross( + ragged_values=[rt.values for rt in ragged_inputs], + ragged_row_splits=[rt.row_splits for rt in ragged_inputs], + sparse_indices=[st.indices for st in sparse_inputs], + sparse_values=[st.values for st in sparse_inputs], + sparse_shape=[st.dense_shape for st in sparse_inputs], + dense_inputs=dense_inputs, + input_order=''.join(input_order), + hashed_output=hashed_output, + num_buckets=num_buckets, + hash_key=hash_key, + out_values_type=out_values_type.as_datatype_enum, + out_row_splits_type=out_row_splits_type.as_datatype_enum, + name=name) + + return ragged_tensor.RaggedTensor.from_row_splits( + values_out, splits_out, validate=False) + + +def fill_empty_rows(ragged_input, default_value, name=None): + """Fills empty rows in the input `RaggedTensor` with rank 2 with a default + + value. + + This op adds entries with the specified `default_value` for any row in the + input that does not already have a value. + + The op also returns an indicator vector such that + + empty_row_indicator[i] = True iff row i was an empty row. + + Args: + ragged_input: A `RaggedTensor` with rank 2. + default_value: The value to fill for empty rows, with the same type as + `ragged_input.` + name: A name prefix for the returned tensors (optional) + + Returns: + ragged_ordered_output: A `RaggedTensor`with all empty rows filled in with + `default_value`. + empty_row_indicator: A bool vector indicating whether each input row was + empty. + + Raises: + TypeError: If `ragged_input` is not a `RaggedTensor`. 
+ """ + with ops.name_scope(name, 'RaggedFillEmptyRows', [ragged_input]): + if not isinstance(ragged_input, ragged_tensor.RaggedTensor): + raise TypeError( + 'ragged_input must be RaggedTensor, got' + f' {type(ragged_input)}' + ) + default_value = ops.convert_to_tensor( + default_value, dtype=ragged_input.dtype + ) + ( + output_value_rowids, + output_values, + empty_row_indicator, + unused_reverse_index_map, + ) = gen_ragged_array_ops.ragged_fill_empty_rows( + value_rowids=ragged_input.value_rowids(), + values=ragged_input.values, + nrows=ragged_input.nrows(), + default_value=default_value, + ) + return ( + ragged_tensor.RaggedTensor.from_value_rowids( + values=output_values, + value_rowids=output_value_rowids, + validate=False, + ), + empty_row_indicator, + ) + + +@ops.RegisterGradient('RaggedFillEmptyRows') +def _ragged_fill_empty_rows_grad( + op, + unused_grad_output_indices, + output_grad_values, + unused_grad_empty_row_indicator, + unused_grad_reverse_index_map, +): + """Gradients for RaggedFillEmptyRows.""" + reverse_index_map = op.outputs[3] + + d_values, d_default_value = gen_ragged_array_ops.ragged_fill_empty_rows_grad( + reverse_index_map=reverse_index_map, grad_values=output_grad_values + ) + + # d_value_rowids, d_values, d_nrows, d_default_value. 
+ return [None, d_values, None, d_default_value] + + +# =============================================================================== +# dynamic_partition +# =============================================================================== +@dispatch.dispatch_for_api(data_flow_ops.dynamic_partition) +def dynamic_partition(data: ragged_tensor.RaggedOrDense, + partitions: ragged_tensor.RaggedOrDense, + num_partitions, + name=None): + """RaggedTensor dispatch override for tf.dynamic_partition.""" + if not isinstance(num_partitions, int) or num_partitions < 0: + raise TypeError('num_partitions must be a non-negative integer') + result = stack_dynamic_partitions(data, partitions, num_partitions, name) + return [result[i] for i in range(num_partitions)] + + +# =============================================================================== +# split +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.split) +def split(value: ragged_tensor.Ragged, + num_or_size_splits, + axis=0, + num=None, + name=None): + """Splits a RaggedTensor `value` into a list of sub RaggedTensors. + + If `num_or_size_splits` is an `int`, then it splits `value` along the + dimension `axis` into `num_or_size_splits` smaller RaggedTensors. This + requires that `value.shape[axis]` is divisible by `num_or_size_splits`. + + If `num_or_size_splits` is a 1-D Tensor (or list), then `value` is split into + `len(num_or_size_splits)` elements. The shape of the `i`-th element has the + same size as the `value` except along dimension `axis` where the size is + `num_or_size_splits[i]`. + + Splits along a ragged dimension is not allowed. + + For example: + + >>> rt = tf.RaggedTensor.from_row_lengths( + ... 
np.arange(6 * 3).reshape(6, 3), row_lengths=[1, 2, 2, 1]) + >>> rt.shape + TensorShape([4, None, 3]) + >>> + >>> rt1, rt2 = tf.split(rt, 2) # uniform splits + >>> rt1.shape + TensorShape([2, None, 3]) + >>> rt2.shape + TensorShape([2, None, 3]) + >>> + >>> rt3, rt4, rt5 = tf.split(rt, [1, 2, 1]) # ragged splits + >>> rt3.shape + TensorShape([1, None, 3]) + >>> rt4.shape + TensorShape([2, None, 3]) + >>> rt5.shape + TensorShape([1, None, 3]) + >>> + >>> rt6, rt7 = tf.split(rt, [1, 2], axis=2) # splits along axis 2 + >>> rt6.shape + TensorShape([4, None, 1]) + >>> rt7.shape + TensorShape([4, None, 2]) + + Args: + value: The `RaggedTensor` to split. + num_or_size_splits: Either an `int` indicating the number of splits + along `axis` or a 1-D integer `Tensor` or Python list containing the sizes + of each output tensor along `axis`. If a Python int, then it must evenly + divide `value.shape[axis]`; otherwise the sum of sizes along the split + axis must match that of the `value`. + axis: An `int` or scalar `int32` `Tensor`. The dimension along which + to split. Must be in the range `[-rank(value), rank(value))`. Defaults to + 0. + num: An `int` used to specify the number of outputs when + `num_or_size_splits` is a 1-D list or `Tensor` and its length is + statically unknown, e.g., specifying `tf.TensorSepc(None)` with + the `input_signature` argument of `tf.function` (optional). + name: A name for the operation (optional). + + Returns: + if `num_or_size_splits` is an `int` returns a list of `num_or_size_splits` + `RaggedTensor` objects; if `num_or_size_splits` is a 1-D Tensor returns + `num_or_size_splits.get_shape[0]` `RaggedTensor` objects resulting from + splitting `value`. + + Raises: + ValueError: If the dimension `axis` of `value` is a ragged dimension. + ValueError: If `num` is unspecified and cannot be inferred. + ValueError: If `num` is specified but doesn't match the length of + `num_or_size_splits`. 
+ ValueError: If `num_or_size_splits` is an `int` and less than 1. + TypeError: If `num_or_size_splits` is not an `int` or 1-D + list or 1-D `Tensor`. + InvalidArgumentError: If the `axis` of `value` cannot be exactly splitted + by `num_or_size_splits`. + InvalidArgumentError: If `num_or_size_splits` is contains negative integers. + InvalidArgumentError: If `num_or_size_splits`'s static shape is unknown and + its dynamic shape is inconsistent `num`. + InvalidArgumentError: If `num_or_size_splits`'s static rank is unknown and + `axis` is a negative integer. + """ + with ops.name_scope(name, 'RaggedSplit'): + value = ragged_tensor.convert_to_tensor_or_ragged_tensor( + value, name='value') + if isinstance(num_or_size_splits, int) and num_or_size_splits == 1: + return [value] + + # static assert + check_ops.assert_integer_v2( + num_or_size_splits, + message=('`num_or_size_splits` must be an `int` or 1-D list or ' + '`Tensor` of integers.')) + value_shape = dynamic_ragged_shape.DynamicRaggedShape.from_tensor(value) + axis = array_ops.get_positive_axis(axis, value_shape.rank) + try: + dim_size = value_shape[axis] + except ValueError: + raise ValueError('Cannot split a ragged dimension. Got `value` with ' + f'shape {value_shape} and `axis` {axis}.') + if isinstance(num_or_size_splits, int): + # Uniform split + num_splits = num_or_size_splits + if num_splits < 1: + raise ValueError('`num_or_size_splits` must be >=1 if it is an `int`.' + f'Received {num_or_size_splits}.') + split_length = math_ops.floordiv(dim_size, num_splits) + split_lengths = array_ops.repeat(split_length, num_splits) + else: + # Ragged split + num_splits = None + split_lengths = ops.convert_to_tensor(num_or_size_splits) + if split_lengths.shape.ndims is not None: + if split_lengths.shape.ndims != 1: + raise TypeError('`num_or_size_splits` must be an `int` or 1-D list ' + f'or `Tensor`. 
Received {num_or_size_splits}.') + num_splits = tensor_shape.dimension_value(split_lengths.shape[0]) + + if num_splits is None: + if num is None: + raise ValueError('`num` must be specified as an `int` when the ' + 'size of `num_or_size_split` is statically ' + f'unknown. Received `num`: {num} and ' + f'`num_or_size_split`: {num_or_size_splits}.') + num_splits = num + else: + if num is not None and num != num_splits: + raise ValueError('`num` does not match the size of ' + f'`num_or_size_split`. Received `num`: {num} and ' + f'size of `num_or_size_split`: {num_splits}.') + + splits = array_ops.concat([[0], math_ops.cumsum(split_lengths)], axis=0) + checks = [] + checks.append( + check_ops.assert_non_negative_v2( + num_or_size_splits, + message='`num_or_size_splits` must be non-negative.')) + checks.append( + check_ops.assert_equal_v2( + num_splits, + array_ops.shape(split_lengths)[0], + message='`num` is inconsistent with `num_or_size_split.shape[0]`.')) + checks.append( + check_ops.assert_equal_v2( + math_ops.cast(dim_size, splits.dtype), + splits[-1], + message=('Cannot exactly split the `axis` dimension of `value` ' + 'with the given `num_or_size_split`.'))) + splits = control_flow_ops.with_dependencies(checks, splits) + splited_rts = [] + slices = [slice(None)] * (axis + 1) + for i in range(num_splits): + slices[-1] = slice(splits[i], splits[i + 1]) + splited_rts.append(value[tuple(slices)]) + return splited_rts + + +# =============================================================================== +# RaggedTensor shape operations +# =============================================================================== + + +@dispatch.dispatch_for_api(array_ops.reshape) +def ragged_reshape( + tensor: ragged_tensor.RaggedOrDense, + shape: dynamic_ragged_shape.DenseOrRaggedShape +) -> Union[ragged_tensor.RaggedTensor, tensor_lib.Tensor]: + """Reshapes a tensor or ragged tensor.""" + tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor( + tensor, name='tensor') + if 
isinstance(tensor, ragged_tensor.RaggedTensor): + tensor = tensor.values + + if isinstance(shape, dynamic_ragged_shape.DynamicRaggedShape): + flat_values = array_ops.reshape(tensor, shape.inner_shape) + return ragged_tensor.RaggedTensor._from_nested_row_partitions( # pylint: disable=protected-access + flat_values, + shape.row_partitions, + validate=False) + else: + shape = ops.convert_to_tensor(shape, name='shape') + return array_ops.reshape(tensor, shape) + + +@dispatch.dispatch_for_api(array_ops.broadcast_to) +def broadcast_to( + input: ragged_tensor.RaggedOrDense, # pylint: disable=redefined-builtin + shape: dynamic_ragged_shape.DynamicRaggedShape +) -> Union[ragged_tensor.RaggedTensor, tensor_lib.Tensor]: + """Broadcasts a potentially ragged tensor to a ragged shape. + + Tiles `input` as necessary to match the given shape. + + Behavior is undefined if `input` is not broadcast-compatible with `shape`. + + Args: + input: The potentially ragged tensor to broadcast. + shape: A `DynamicRaggedShape` + + Returns: + A potentially ragged tensor whose values are taken from + `input`, and whose shape matches `shape`. + """ + return dynamic_ragged_shape.broadcast_to(input, shape) + + +# Note: default value for out_type needs to be int32, to match the +# default for tf.shape's out_type parameter. +@dispatch.dispatch_for_api(array_ops.shape) +def ragged_shape( + input: ragged_tensor.Ragged, # pylint: disable=redefined-builtin + name: Optional[str] = None, + out_type=dtypes.int32) -> dynamic_ragged_shape.DynamicRaggedShape: + """Returns the shape of a RaggedTensor. + + Args: + input: A `RaggedTensor` + name: A name for the operation (optional). + out_type: dtype used to encode the shape. 
+ + Returns: + A `tf.experimental.DynamicRaggedShape` + """ + with ops.name_scope(name, 'RaggedShape', [input]): + return dynamic_ragged_shape.DynamicRaggedShape.from_tensor(input, out_type) + + +@dispatch.dispatch_for_api(array_ops.broadcast_dynamic_shape) +def broadcast_dynamic_shape( + shape_x: dynamic_ragged_shape.DenseOrRaggedShape, + shape_y: dynamic_ragged_shape.DenseOrRaggedShape +) -> dynamic_ragged_shape.DynamicRaggedShape: + """Returns the shape formed by broadcasting two shapes to be compatible. + + 1. If shape_x and shape_y both have row_partitions, then fail if their dtypes + don't match. + 2. If neither has row_partitions and they have different dtypes, + go with int64. + 3. If one has row_partitions, go with that dtype. + + Args: + shape_x: A `DynamicRaggedShape` + shape_y: A `DynamicRaggedShape` + + Returns: + A `DynamicRaggedShape`. + Raises: + ValueError: If `shape_x` and `shape_y` are not broadcast-compatible. + """ + if not isinstance(shape_x, dynamic_ragged_shape.DynamicRaggedShape): + shape_x = dynamic_ragged_shape.DynamicRaggedShape([], shape_x) + if not isinstance(shape_y, dynamic_ragged_shape.DynamicRaggedShape): + shape_y = dynamic_ragged_shape.DynamicRaggedShape([], shape_y) + return dynamic_ragged_shape.broadcast_dynamic_shape(shape_x, shape_y) + + +@dispatch.dispatch_for_api(array_ops.ones) +def ones( + shape: dynamic_ragged_shape.DynamicRaggedShape, + dtype=dtypes.float32, + name=None, + layout=None, +) -> ragged_tensor.RaggedOrDense: + """Returns ones shaped like x.""" + if layout is not None and not layout.is_fully_replicated(): + raise ValueError( + f'RaggedTensor only allows replicated layout. 
got {layout}' + ) + flat_values = array_ops.ones( + shape.inner_shape, dtype=dtype, name=name, layout=layout + ) + return shape._add_row_partitions(flat_values) # pylint: disable=protected-access + + +@dispatch.dispatch_for_api(array_ops.zeros) +def zeros( + shape: dynamic_ragged_shape.DynamicRaggedShape, + dtype=dtypes.float32, + name=None, + layout=None, +) -> ragged_tensor.RaggedOrDense: + """Returns zeros shaped like x.""" + if layout is not None and not layout.is_fully_replicated(): + raise ValueError( + f'RaggedTensor only allows replicated layout. got {layout}' + ) + flat_values = array_ops.zeros( + shape.inner_shape, dtype=dtype, name=name, layout=layout + ) + return shape._add_row_partitions(flat_values) # pylint: disable=protected-access + + +@dispatch.dispatch_for_api(array_ops.fill) +def fill( + dims: dynamic_ragged_shape.DynamicRaggedShape, + value: core_types.TensorLike, + name: Optional[str] = None, + layout=None, +) -> ragged_tensor.RaggedOrDense: + """Creates a tensor with shape `dims` and fills it with `value`.""" + if layout is not None and not layout.is_fully_replicated(): + raise ValueError( + f'RaggedTensor only allows replicated layout. 
got {layout}' + ) + flat_values = array_ops.fill( + dims.inner_shape, value, name=name, layout=layout + ) + return dims._add_row_partitions(flat_values) # pylint: disable=protected-access + + +# =============================================================================== +# bitcast +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.bitcast) +def bitcast( + input: ragged_tensor.RaggedOrDense, # pylint: disable=redefined-builtin + type, # pylint: disable=redefined-builtin + name=None) -> ragged_tensor.RaggedOrDense: + """RaggedTensor dispatch override for tf.bitcast.""" + type = dtypes.as_dtype(type) + with ops.name_scope(name, 'Bitcast', [input]): + input = ragged_tensor.convert_to_tensor_or_ragged_tensor( + input, name='input') + if (input.dtype.size < type.size and input.flat_values.shape.rank < 2): + raise ValueError('`input.flat_values` is required to have rank >= 2 when ' + 'input.dtype.size < type.size. Actual rank: ' + f'{input.flat_values.shape.rank}') + return input.with_flat_values(array_ops.bitcast(input.flat_values, type)) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_autograph.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_autograph.py new file mode 100644 index 0000000000000000000000000000000000000000..9e366d274069129adfe9b6b52a2eba522f4be237 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_autograph.py @@ -0,0 +1,73 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Autograph-specific overrides for ragged_tensor.""" +from tensorflow.python.autograph.operators import control_flow +from tensorflow.python.ops import cond as tf_cond +from tensorflow.python.ops.ragged import ragged_tensor + + +def _tf_ragged_for_stmt( + iter_, extra_test, body, get_state, set_state, symbol_names, opts +): + """Overload of for_stmt that iterates over TF ragged tensors.""" + init_vars = get_state() + control_flow.verify_loop_init_vars(init_vars, symbol_names) + + # TODO(mdan): Move this into len()? Requires eager support. + if iter_.shape and iter_.shape[0] is not None: + n = iter_.shape[0] + else: + n = iter_.row_lengths()[0] + + iterate_index = 0 + + def aug_get_state(): + return (iterate_index,) + get_state() + + def aug_set_state(aug_loop_vars): + nonlocal iterate_index + # TODO(b/171479293): Drop the lint override. + iterate_index, *loop_vars = aug_loop_vars # pylint:disable=unused-variable + # The iteration index is not "output" by the for loop. If the iteration + # index is used outside the loop, it will appear + # in the loop vars separately. 
+ set_state(loop_vars) + + def aug_body(): + nonlocal iterate_index + body(iter_[iterate_index]) + iterate_index += 1 + + def aug_test(): + main_test = iterate_index < n + if extra_test is not None: + return tf_cond.cond(main_test, extra_test, lambda: False) + return main_test + + control_flow._add_max_iterations_hint(opts, n) # pylint: disable=protected-access + + control_flow._tf_while_stmt( # pylint: disable=protected-access + aug_test, + aug_body, + aug_get_state, + aug_set_state, + ('',) + symbol_names, + opts, + ) + + +control_flow.for_loop_registry.register( + ragged_tensor.RaggedTensor, _tf_ragged_for_stmt +) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_batch_gather_with_default_op.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_batch_gather_with_default_op.py new file mode 100644 index 0000000000000000000000000000000000000000..2c8cfdc583f6801550ba501f4316a052855a0173 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_batch_gather_with_default_op.py @@ -0,0 +1,179 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Array operations for RaggedTensors.""" + + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_array_ops +from tensorflow.python.ops.ragged import ragged_dispatch # pylint: disable=unused-import +from tensorflow.python.ops.ragged import ragged_operators # pylint: disable=unused-import +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged import ragged_tensor_shape +from tensorflow.python.ops.ragged import ragged_where_op + + +#=============================================================================== +# ragged.batch_gather_with_default +#=============================================================================== +def batch_gather_with_default(params, + indices, + default_value='', + name=None): + """Same as `batch_gather` but inserts `default_value` for invalid indices. + + This operation is similar to `batch_gather` except that it will substitute + the value for invalid indices with `default_value` as the contents. + See `batch_gather` for more details. + + + Args: + params: A potentially ragged tensor with shape `[B1...BN, P1...PM]` (`N>=0`, + `M>0`). + indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`). + default_value: A value to be inserted in places where `indices` are out of + bounds. Must be the same dtype as params and either a scalar or rank 1. + name: A name for the operation (optional). + + Returns: + A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`. + `result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`. 
+ + #### Example: + + >>> params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']]) + >>> indices = tf.ragged.constant([[1, 2, -1], [], [], [0, 10]]) + >>> batch_gather_with_default(params, indices, 'FOO') + + + """ + with ops.name_scope(name, 'RaggedBatchGatherWithDefault'): + params = ragged_tensor.convert_to_tensor_or_ragged_tensor( + params, name='params', + ) + indices = ragged_tensor.convert_to_tensor_or_ragged_tensor( + indices, name='indices', + ) + default_value = ragged_tensor.convert_to_tensor_or_ragged_tensor( + default_value, name='default_value', + ) + row_splits_dtype, (params, indices, default_value) = ( + ragged_tensor.match_row_splits_dtypes(params, indices, default_value, + return_dtype=True)) + # TODO(hterry): lift this restriction and support default_values of + # of rank > 1 + if default_value.shape.ndims not in (0, 1): + raise ValueError('"default_value" must be a scalar or vector') + upper_bounds = None + if indices.shape.ndims is None: + raise ValueError('Indices must have a known rank.') + if params.shape.ndims is None: + raise ValueError('Params must have a known rank.') + + num_batch_dimensions = indices.shape.ndims - 1 + pad = None + # The logic for this works as follows: + # - create a padded params, where: + # padded_params[b1...bn, 0] = default_value + # padded_params[b1...bn, i] = params[b1...bn, i-1] (i>0) + # - create an `upper_bounds` Tensor that contains the number of elements + # in each innermost rank. Broadcast `upper_bounds` to be the same shape + # as `indices`. + # - check to see which index in `indices` are out of bounds and substitute + # it with the index containing `default_value` (the first). + # - call batch_gather with the indices adjusted. 
+ with ops.control_dependencies([ + check_ops.assert_greater_equal(array_ops.rank(params), + array_ops.rank(indices))]): + if ragged_tensor.is_ragged(params): + row_lengths = ragged_array_ops.expand_dims( + params.row_lengths(axis=num_batch_dimensions), + axis=-1) + upper_bounds = math_ops.cast(row_lengths, indices.dtype) + + pad_shape = _get_pad_shape(params, indices, row_splits_dtype) + + pad = ragged_tensor_shape.broadcast_to( + default_value, pad_shape) + else: + params_shape = array_ops.shape(params) + pad_shape = array_ops.concat([ + params_shape[:num_batch_dimensions], + [1], + params_shape[num_batch_dimensions + 1:params.shape.ndims] + ], 0) + upper_bounds = params_shape[num_batch_dimensions] + pad = array_ops.broadcast_to(default_value, pad_shape) + + # Add `default_value` as the first value in the innermost (ragged) rank. + pad = math_ops.cast(pad, params.dtype) + padded_params = array_ops.concat( + [pad, params], axis=num_batch_dimensions) + + # Adjust the indices by substituting out-of-bound indices to the + # default-value index (which is the first element) + shifted_indices = indices + 1 + is_out_of_bounds = (indices < 0) | (indices > upper_bounds) + adjusted_indices = ragged_where_op.where( + is_out_of_bounds, + x=array_ops.zeros_like(indices), y=shifted_indices, + ) + return array_ops.batch_gather( + params=padded_params, indices=adjusted_indices, name=name) + + +def _get_pad_shape(params, indices, row_splits_dtype): + """Gets the RaggedTensorDynamicShape for the pad tensor.""" + num_batch_dimensions = indices.shape.ndims - 1 + params_shape = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor( + params, dim_size_dtype=row_splits_dtype) + + # We want to create a pad tensor that can be concatenated with the params. + if params.shape.ndims == indices.shape.ndims: + # When params and indices are the same rank, the shape of the pad tensor is + # almost identical to params, except the last dimension which has size = 1. 
+ if params_shape.num_inner_dimensions == 0: + pad_dims = params_shape.partitioned_dim_sizes[:-1] + ( + array_ops.ones_like(params_shape.partitioned_dim_sizes[-1]),) + return ragged_tensor_shape.RaggedTensorDynamicShape( + pad_dims, []) + else: + return ragged_tensor_shape.RaggedTensorDynamicShape( + params_shape.partitioned_dim_sizes, + array_ops.concat([params_shape.inner_dim_sizes[:-1], [1]], axis=0)) + else: + # When the rank of indices < params, the pad has the same dimension as + # params up to the 'num_batch_dimensions' rank. Every dimension after that + # has size 1. + pad_dims = None + if num_batch_dimensions == 0: + pad_dims = (constant_op.constant(1, dtype=row_splits_dtype),) + ( + constant_op.constant([1], dtype=row_splits_dtype),) * ( + params_shape.num_partitioned_dimensions - + num_batch_dimensions - 1) + else: + batch_dimensions = params_shape.partitioned_dim_sizes[ + :num_batch_dimensions] + gather_dimension = params_shape.partitioned_dim_sizes[ + num_batch_dimensions] + pad_dims = batch_dimensions + ( + array_ops.ones_like(gather_dimension),) * ( + params_shape.num_partitioned_dimensions - num_batch_dimensions) + + return ragged_tensor_shape.RaggedTensorDynamicShape( + pad_dims, params_shape.inner_dim_sizes) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_bincount_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_bincount_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..7cae73ba66db8c3cabd2a52b4533a98173e1c9eb --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_bincount_ops.py @@ -0,0 +1,405 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""bincount ops for RaggedTensors.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import bincount_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import gen_count_ops +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import sparse_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import dispatch + + +@dispatch.dispatch_for_api(bincount_ops.bincount) +def bincount(arr: ragged_tensor.RaggedTensor, + weights=None, + minlength=None, + maxlength=None, + dtype=dtypes.int32, + name=None, + axis=None, + binary_output=False): + """Counts the number of occurrences of each value in an integer array. + + If `minlength` and `maxlength` are not given, returns a vector with length + `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise. + + >>> data = tf.ragged.constant([[1, 1], [2, 3, 2, 4, 4, 5]]) + >>> tf.math.bincount(data) + + + Vector length = Maximum element in vector `values` is 5. Adding 1, which is 6 + will be the vector length. + + Each bin value in the output indicates number of occurrences of the particular + index. Here, index 1 in output has a value 2. This indicates value 1 occurs + two times in `values`. 
+ + **Bin-counting with weights** + + >>> data = tf.ragged.constant([[1, 1], [2, 3, 2, 4, 4, 5]]) + >>> weights = tf.ragged.constant([[1, 5], [0, 1, 0, 5, 4, 5]]) + >>> tf.math.bincount(data, weights=weights) + + + When `weights` is specified, bins will be incremented by the corresponding + weight instead of 1. Here, index 1 in output has a value 6. This is the + summation of `weights` corresponding to the value in `arr` (i.e. for index + 1, the first two values `arr` are 1 so the first two weights, 1 and 5, are + summed). + + There is an equivalence between bin-counting with weights and + `unsorted_segment_sum` where `data` is the weights and `segment_ids` are the + values. + + >>> data = tf.ragged.constant([[1, 1], [2, 3, 2, 4, 4, 5]]) + >>> weights = tf.ragged.constant([[1, 5], [0, 1, 0, 5, 4, 5]]) + >>> tf.math.unsorted_segment_sum(weights, data, num_segments=6).numpy() + array([0, 6, 0, 1, 9, 5], dtype=int32) + + On GPU, `bincount` with weights is only supported when XLA is enabled + (typically when a function decorated with `@tf.function(jit_compile=True)`). + `unsorted_segment_sum` can be used as a workaround for the non-XLA case on + GPU. + + **Bin-counting matrix rows independently** + + This example uses `axis=-1` with a 2 dimensional input and returns a + `Tensor` with bincounting where axis 0 is **not** flattened, i.e. an + independent bincount for each matrix row. + + >>> data = tf.ragged.constant([[1, 2], [3, 0, 0, 0, 1, 2]], dtype=np.int32) + >>> tf.math.bincount(data, axis=-1) + + + **Bin-counting with binary_output** + + This example gives binary output instead of counting the occurrence. + + >>> data = tf.ragged.constant([[1, 2], [3, 0, 0, 0, 1, 2]], dtype=np.int32) + >>> tf.math.bincount(data, axis=-1, binary_output=True) + + + Args: + arr: A RaggedTensor whose values should be counted. + These tensors must have a rank of 2 if `axis=-1`. + weights: If non-None, must be a RaggedTensor with the same row splits as + `arr`. 
For each value in `arr`, the bin will be incremented by the + corresponding weight instead of 1. If non-None, `binary_output` must be + False. + minlength: If given, ensures the output has length at least `minlength`, + padding with zeros at the end if necessary. + maxlength: If given, skips values in `arr` that are equal or greater than + `maxlength`, ensuring that the output has length at most `maxlength`. + dtype: If `weights` is None, determines the type of the output bins. + name: A name scope for the associated operations (optional). + axis: The axis to slice over. Axes at and below `axis` will be flattened + before bin counting. Currently, only `0`, and `-1` are supported. If None, + all axes will be flattened (identical to passing `0`). + binary_output: If True, this op will output 1 instead of the number of times + a token appears (equivalent to one_hot + reduce_any instead of one_hot + + reduce_add). Defaults to False. + + Returns: + A vector with the same dtype as `weights` or the given `dtype` containing + the bincount values. + + Raises: + `InvalidArgumentError` if negative values are provided as an input. + + """ + name = "bincount" if name is None else name + with ops.name_scope(name): + arr = ragged_tensor.convert_to_tensor_or_ragged_tensor(arr, name="arr") + if weights is not None: + if not isinstance(weights, sparse_tensor.SparseTensor): + weights = ragged_tensor.convert_to_tensor_or_ragged_tensor( + weights, name="weights") + + if weights is not None and binary_output: + raise ValueError("Arguments `binary_output` and `weights` are mutually " + "exclusive. Please specify only one.") + + if not arr.dtype.is_integer: + arr = math_ops.cast(arr, dtypes.int32) + if axis is None: + axis = 0 + + if axis not in [0, -1]: + raise ValueError(f"Unsupported value for argument axis={axis}. 
Only 0 and" + " -1 are currently supported.") + + array_is_nonempty = array_ops.size(arr) > 0 + output_size = math_ops.cast(array_is_nonempty, arr.dtype) * ( + math_ops.reduce_max(arr) + 1) + if minlength is not None: + minlength = ops.convert_to_tensor( + minlength, name="minlength", dtype=arr.dtype) + output_size = gen_math_ops.maximum(minlength, output_size) + if maxlength is not None: + maxlength = ops.convert_to_tensor( + maxlength, name="maxlength", dtype=arr.dtype) + output_size = gen_math_ops.minimum(maxlength, output_size) + + if axis == 0: + # Flatten RaggedTensors with multiple ragged dimensions which use a + # nested RaggedTensor for the values tensor. + while isinstance(arr, ragged_tensor.RaggedTensor): + if weights is not None: + weights = validate_ragged_weights(arr, weights, dtype) + arr = arr.values + + if isinstance(arr, ragged_tensor.RaggedTensor): + weights = validate_ragged_weights(arr, weights, dtype) + return gen_math_ops.ragged_bincount( + splits=arr.row_splits, + values=arr.values, + size=output_size, + weights=weights, + binary_output=binary_output) + else: + weights = bincount_ops.validate_dense_weights(arr, weights, dtype) + return gen_math_ops.dense_bincount( + input=arr, + size=output_size, + weights=weights, + binary_output=binary_output) + + +@dispatch.dispatch_for_api(sparse_ops.sparse_bincount) +def sparse_bincount(values: ragged_tensor.RaggedTensor, + weights=None, + axis=0, + minlength=None, + maxlength=None, + binary_output=False, + name=None): + """Count the number of times an integer value appears in a tensor. + + This op takes an N-dimensional `Tensor`, `RaggedTensor`, or `SparseTensor`, + and returns an N-dimensional int64 SparseTensor where element + `[i0...i[axis], j]` contains the number of times the value `j` appears in + slice `[i0...i[axis], :]` of the input tensor. Currently, only N=0 and + N=-1 are supported. + + Args: + values: A RaggedTensor whose values should be + counted. 
These tensors must have a rank of 2 if `axis=-1`. + weights: If non-None, must be a RaggedTensor with the same row splits as + `values`. For each value in `value`, the bin will be incremented by the + corresponding weight instead of 1. + axis: The axis to slice over. Axes at and below `axis` will be flattened + before bin counting. Currently, only `0`, and `-1` are supported. If None, + all axes will be flattened (identical to passing `0`). + minlength: If given, ensures the output has length at least `minlength`, + padding with zeros at the end if necessary. + maxlength: If given, skips values in `values` that are equal or greater than + `maxlength`, ensuring that the output has length at most `maxlength`. + binary_output: If True, this op will output 1 instead of the number of times + a token appears (equivalent to one_hot + reduce_any instead of one_hot + + reduce_add). Defaults to False. + name: A name for this op. + + Returns: + A SparseTensor with `output.shape = values.shape[:axis] + [N]`, where `N` is + * `maxlength` (if set); + * `minlength` (if set, and `minlength > reduce_max(values)`); + * `0` (if `values` is empty); + * `reduce_max(values) + 1` otherwise. + + Raises: + `InvalidArgumentError` if negative values are provided as an input. + + Examples: + + **Bin-counting every item in individual batches** + + This example takes an input (which could be a Tensor, RaggedTensor, or + SparseTensor) and returns a SparseTensor where the value of (i,j) is the + number of times value j appears in batch i. + + >>> data = tf.ragged.constant( + ... 
[[10, 20], [30, 20, 11, 101, 11, 10001]], dtype=np.int64) + >>> tf.sparse.bincount(data, axis=-1) + SparseTensor(indices=tf.Tensor( + [[ 0 10] + [ 0 20] + [ 1 11] + [ 1 20] + [ 1 30] + [ 1 101] + [ 1 10001]], shape=(7, 2), dtype=int64), + values=tf.Tensor([1 1 2 1 1 1 1], shape=(7,), dtype=int64), + dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) + + **Bin-counting with defined output shape** + + This example takes an input (which could be a Tensor, RaggedTensor, or + SparseTensor) and returns a SparseTensor where the value of (i,j) is the + number of times value j appears in batch i. However, all values of j + above 'maxlength' are ignored. The dense_shape of the output sparse tensor + is set to 'minlength'. Note that, while the input is identical to the + example above, the value '10001' in batch item 2 is dropped, and the + dense shape is [2, 500] instead of [2,10002] or [2, 102]. + + >>> minlength = maxlength = 500 + >>> data = tf.ragged.constant( + ... [[10, 20], [30, 20, 11, 101, 11, 10001]], dtype=np.int64) + >>> tf.sparse.bincount( + ... data, axis=-1, minlength=minlength, maxlength=maxlength) + SparseTensor(indices=tf.Tensor( + [[ 0 10] + [ 0 20] + [ 1 11] + [ 1 20] + [ 1 30] + [ 1 101]], shape=(6, 2), dtype=int64), + values=tf.Tensor([1 1 2 1 1 1], shape=(6,), dtype=int64), + dense_shape=tf.Tensor([ 2 500], shape=(2,), dtype=int64)) + + **Binary bin-counting** + + This example takes an input (which could be a Tensor, RaggedTensor, or + SparseTensor) and returns a SparseTensor where (i,j) is 1 if the value j + appears in batch i at least once and is 0 otherwise. Note that, even though + some values (like 20 in batch 1 and 11 in batch 2) appear more than once, + the 'values' tensor is all 1s. + + >>> data = tf.ragged.constant( + ... 
[[10, 20], [30, 20, 11, 101, 11, 10001]], dtype=np.int64) + >>> tf.sparse.bincount(data, binary_output=True, axis=-1) + SparseTensor(indices=tf.Tensor( + [[ 0 10] + [ 0 20] + [ 1 11] + [ 1 20] + [ 1 30] + [ 1 101] + [ 1 10001]], shape=(7, 2), dtype=int64), + values=tf.Tensor([1 1 1 1 1 1 1], shape=(7,), dtype=int64), + dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) + + **Weighted bin-counting** + + This example takes two inputs - a values tensor and a weights tensor. These + tensors must be identically shaped, and have the same row splits or indices + in the case of RaggedTensors or SparseTensors. When performing a weighted + count, the op will output a SparseTensor where the value of (i, j) is the + sum of the values in the weight tensor's batch i in the locations where + the values tensor has the value j. In this case, the output dtype is the + same as the dtype of the weights tensor. + + >>> data = tf.ragged.constant( + ... [[10, 20], [30, 20, 11, 101, 11, 10001]], dtype=np.int64) + >>> weights = tf.ragged.constant( + ... [[2, 0.25], [15, 0.5, 2, 17, 3, 0.9]]) + >>> tf.sparse.bincount(data, weights=weights, axis=-1) + SparseTensor(indices=tf.Tensor( + [[ 0 10] + [ 0 20] + [ 1 11] + [ 1 20] + [ 1 30] + [ 1 101] + [ 1 10001]], shape=(7, 2), dtype=int64), + values=tf.Tensor([ 2. 0.25 5. 0.5 15. 17. 0.9 ], shape=(7,), dtype=float32), + dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) + + """ + with ops.name_scope(name, "count", [values, weights]): + values = ragged_tensor.convert_to_tensor_or_ragged_tensor( + values, name="values") + if weights is not None: + if not isinstance(weights, sparse_tensor.SparseTensor): + weights = ragged_tensor.convert_to_tensor_or_ragged_tensor( + weights, name="weights") + + if weights is not None and binary_output: + raise ValueError("Arguments `binary_output` and `weights` are mutually " + "exclusive. 
def validate_ragged_weights(values, weights, dtype=None):
  """Checks `weights` against `values`, or builds an empty weight tensor.

  Args:
    values: A `RaggedTensor` whose row splits the weights must match.
    weights: A `RaggedTensor` of per-value weights, or `None`.
    dtype: Optional dtype for the empty tensor returned when `weights` is
      `None`; falls back to `values.values.dtype` when not given.

  Returns:
    The flat values of `weights` (guarded by a runtime row-splits equality
    assertion when the splits tensors are distinct objects), or an empty
    constant when `weights` is `None`.

  Raises:
    ValueError: If `weights` is not a `RaggedTensor`.
  """
  if weights is None:
    empty_dtype = dtype if dtype else values.values.dtype
    return array_ops.constant([], dtype=empty_dtype)

  if not isinstance(weights, ragged_tensor.RaggedTensor):
    raise ValueError(
        "`weights` must be a RaggedTensor if `values` is a RaggedTensor. "
        f"Received argument weights={weights} of type: "
        f"{type(weights).__name__}.")

  # Fast path: the two ragged tensors share the very same splits tensor, so
  # no runtime check is needed.
  if weights.row_splits is values.row_splits:
    return weights.values

  # The splits are distinct tensor objects; assert (at graph run time) that
  # they hold equal values before handing back the weight values.
  splits_check = check_ops.assert_equal(
      weights.row_splits,
      values.row_splits,
      message="'weights' and 'values' must have the same row splits.")
  with ops.control_dependencies([splits_check]):
    return array_ops.identity(weights.values)
@dispatch.dispatch_for_api(array_ops.concat)
def concat(values: typing.List[ragged_tensor.RaggedOrDense], axis, name=None):
  """Concatenates potentially ragged tensors along one dimension.

  Given a list of tensors with the same rank `K` (`K >= axis`), returns a
  rank-`K` `RaggedTensor` `result` such that `result[i0...iaxis]` is the
  concatenation of `[rt[i0...iaxis] for rt in values]`.

  Args:
    values: A list of potentially ragged tensors.  May not be empty.  All
      `values` must have the same rank and the same dtype; but unlike
      `tf.concat`, they can have arbitrary shapes.
    axis: A python integer, indicating the dimension along which to
      concatenate.  (Note: Unlike `tf.concat`, the `axis` parameter must be
      statically known.)  Negative values are supported only if the rank of
      at least one `values` value is statically known.
    name: A name prefix for the returned tensor (optional).

  Returns:
    A `RaggedTensor` with rank `K`.
    `result.ragged_rank=max(axis, max(rt.ragged_rank for rt in values]))`.

  Raises:
    ValueError: If `values` is empty, if `axis` is out of bounds or if
      the input tensors have different ranks.
  """
  # A lone tensor is treated as a single-element list.
  if isinstance(values, (list, tuple)):
    inputs = list(values)
  else:
    inputs = [values]
  with ops.name_scope(name, 'RaggedConcat', inputs):
    return _ragged_stack_concat_helper(inputs, axis, stack_values=False)
def _ragged_stack_concat_helper(rt_inputs, axis, stack_values):
  """Helper function to concatenate or stack ragged tensors.

  Shared implementation behind `tf.concat` and `tf.ragged.stack` dispatch for
  ragged inputs: validates and normalizes the inputs, takes fast paths where
  the result is expressible with dense ops, and otherwise dispatches to the
  axis-specific helpers.

  Args:
    rt_inputs: A list of RaggedTensors or Tensors to combine.
    axis: The axis along which to concatenate or stack.
    stack_values: A boolean -- if true, then stack values; otherwise,
      concatenate them.

  Returns:
    A RaggedTensor.
  Raises:
    ValueError: If rt_inputs is empty, or if axis is out of range.
  """
  # Validate parameters.
  if not rt_inputs:
    raise ValueError('rt_inputs may not be empty.')

  # Convert input tensors.
  rt_inputs = [
      ragged_tensor.convert_to_tensor_or_ragged_tensor(
          rt_input, name='rt_input') for rt_input in rt_inputs
  ]
  # All inputs must agree on a row-splits dtype before they can be combined.
  row_splits_dtype, rt_inputs = ragged_tensor.match_row_splits_dtypes(
      *rt_inputs, return_dtype=True)
  rt_inputs = list(rt_inputs)

  # Special case: if there's only one input, then return it as-is.
  # (Stacking a single input still adds a dimension, so it cannot shortcut.)
  if len(rt_inputs) == 1 and not stack_values:
    return rt_inputs[0]

  # Check the rank (number of dimensions) of the input tensors.
  ndims = None
  for rt in rt_inputs:
    if ndims is None:
      ndims = rt.shape.ndims
    else:
      rt.shape.assert_has_rank(ndims)

  # Stacking produces one extra output dimension; `axis` is normalized
  # against the *output* rank so negative values resolve correctly.
  out_ndims = ndims if (ndims is None or not stack_values) else ndims + 1
  axis = array_ops.get_positive_axis(axis, out_ndims)

  # Fast path: stacking rank-1 inputs along axis 0 is just "rows = inputs".
  if stack_values and ndims == 1 and axis == 0:
    return ragged_tensor.RaggedTensor.from_row_lengths(
        values=array_ops.concat(rt_inputs, axis=0),
        row_lengths=array_ops.concat([array_ops.shape(r) for r in rt_inputs],
                                     axis=0))

  # If all the inputs are Tensors, and we're combining the final dimension,
  # then we can delegate to the tf.stack/tf.concat operation, and return a
  # Tensor.
  if all(not ragged_tensor.is_ragged(rt) for rt in rt_inputs):
    if ndims is not None and (axis == out_ndims - 1 or axis == ndims - 1):
      if stack_values:
        return array_ops_stack.stack(rt_inputs, axis)
      else:
        return array_ops.concat(rt_inputs, axis)

  # Convert any Tensor inputs to RaggedTensors.  This makes it
  # possible to concatenate Tensors and RaggedTensors together.
  for i in range(len(rt_inputs)):
    if not ragged_tensor.is_ragged(rt_inputs[i]):
      rt_inputs[i] = ragged_tensor.RaggedTensor.from_tensor(
          rt_inputs[i], ragged_rank=1, row_splits_dtype=row_splits_dtype)

  # Convert the input tensors to all have the same ragged_rank.
  ragged_rank = max(max(rt.ragged_rank for rt in rt_inputs), 1)
  rt_inputs = [_increase_ragged_rank_to(rt, ragged_rank, row_splits_dtype)
               for rt in rt_inputs]

  if axis == 0:
    return _ragged_stack_concat_axis_0(rt_inputs, stack_values)
  elif axis == 1:
    return _ragged_stack_concat_axis_1(rt_inputs, stack_values)
  else:  # axis > 1: recurse.
    # Peel off the outermost ragged dimension (asserting all inputs share the
    # same outer row splits) and combine the inner values one axis lower.
    values = [rt.values for rt in rt_inputs]
    splits = [[rt_input.row_splits] for rt_input in rt_inputs]
    with ops.control_dependencies(ragged_util.assert_splits_match(splits)):
      return ragged_tensor.RaggedTensor.from_row_splits(
          _ragged_stack_concat_helper(values, axis - 1, stack_values),
          splits[0][0], validate=False)
+ values = [rt.values for rt in rt_inputs] + splits = [[rt_input.row_splits] for rt_input in rt_inputs] + with ops.control_dependencies(ragged_util.assert_splits_match(splits)): + return ragged_tensor.RaggedTensor.from_row_splits( + _ragged_stack_concat_helper(values, axis - 1, stack_values), + splits[0][0], validate=False) + + +def _ragged_stack_concat_axis_0(rt_inputs, stack_values): + """Helper function to concatenate or stack ragged tensors along axis 0. + + Args: + rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank. + stack_values: Boolean. If true, then stack values; otherwise, concatenate + them. + + Returns: + A RaggedTensor. + """ + # Concatenate the inner values together. + flat_values = [rt.flat_values for rt in rt_inputs] + concatenated_flat_values = array_ops.concat(flat_values, axis=0) + + # Concatenate the splits together for each ragged dimension (adjusting + # split offsets as necessary). + nested_splits = [rt.nested_row_splits for rt in rt_inputs] + ragged_rank = rt_inputs[0].ragged_rank + concatenated_nested_splits = [ + _concat_ragged_splits([ns[dim] + for ns in nested_splits]) + for dim in range(ragged_rank) + ] + + # If we are performing a stack operation, then add another splits. + if stack_values: + stack_lengths = array_ops_stack.stack([rt.nrows() for rt in rt_inputs]) + stack_splits = ragged_util.lengths_to_splits(stack_lengths) + concatenated_nested_splits.insert(0, stack_splits) + + return ragged_tensor.RaggedTensor.from_nested_row_splits( + concatenated_flat_values, concatenated_nested_splits, validate=False) + + +def _ragged_stack_concat_axis_1(rt_inputs, stack_values): + """Helper function to concatenate or stack ragged tensors along axis 1. + + Args: + rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank. + stack_values: Boolean. If true, then stack values; otherwise, concatenate + them. + + Returns: + A RaggedTensor. 
+ """ + num_inputs = len(rt_inputs) + + nrows_checks = [] + rt_nrows = rt_inputs[0].nrows() + for index, rt in enumerate(rt_inputs[1:]): + nrows_checks.append( + check_ops.assert_equal( + rt_nrows, + rt.nrows(), + message=( + f'Input tensors at index 0 (=x) and {index+1} (=y) have' + ' incompatible shapes.' + ), + ) + ) + + with ops.control_dependencies(nrows_checks): + # Concatenate the inputs together to put them in a single ragged tensor. + concatenated_rt = _ragged_stack_concat_axis_0(rt_inputs, stack_values=False) + + # Use ragged.gather to permute the rows of concatenated_rt. In particular, + # permuted_rt = [rt_inputs[0][0], ..., rt_inputs[N][0], + # rt_inputs[0][1], ..., rt_inputs[N][1], + # ..., + # rt_inputs[0][M], ..., rt_input[N][M]] + # where `N=num_inputs-1` and `M=rt_nrows-1`. + row_indices = math_ops.range(rt_nrows * num_inputs) + row_index_matrix = array_ops.reshape(row_indices, [num_inputs, -1]) + transposed_row_index_matrix = array_ops.transpose(row_index_matrix) + row_permutation = array_ops.reshape(transposed_row_index_matrix, [-1]) + permuted_rt = ragged_gather_ops.gather(concatenated_rt, row_permutation) + + if stack_values: + # Add a new splits tensor to group together the values. + stack_splits = math_ops.range(0, rt_nrows * num_inputs + 1, num_inputs) + _copy_row_shape(rt_inputs, stack_splits) + return ragged_tensor.RaggedTensor.from_row_splits( + permuted_rt, stack_splits, validate=False) + else: + # Merge together adjacent rows by dropping the row-split indices that + # separate them. 
+ concat_splits = permuted_rt.row_splits[::num_inputs] + _copy_row_shape(rt_inputs, concat_splits) + return ragged_tensor.RaggedTensor.from_row_splits( + permuted_rt.values, concat_splits, validate=False) + + +def _copy_row_shape(rt_inputs, splits): + """Sets splits.shape to [rt[shape[0]+1] for each rt in rt_inputs.""" + for rt in rt_inputs: + if rt.shape[0] is not None: + splits.set_shape(tensor_shape.TensorShape(rt.shape[0] + 1)) + + +def _increase_ragged_rank_to(rt_input, ragged_rank, row_splits_dtype): + """Adds ragged dimensions to `rt_input` so it has the desired ragged rank.""" + if ragged_rank > 0: + if not ragged_tensor.is_ragged(rt_input): + rt_input = ragged_tensor.RaggedTensor.from_tensor( + rt_input, row_splits_dtype=row_splits_dtype) + if rt_input.ragged_rank < ragged_rank: + rt_input = rt_input.with_values( + _increase_ragged_rank_to(rt_input.values, ragged_rank - 1, + row_splits_dtype)) + return rt_input + + +def _concat_ragged_splits(splits_list): + """Concatenates a list of RaggedTensor splits to form a single splits.""" + pieces = [splits_list[0]] + splits_offset = splits_list[0][-1] + for splits in splits_list[1:]: + pieces.append(splits[1:] + splits_offset) + splits_offset += splits[-1] + return array_ops.concat(pieces, axis=0) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_config.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_config.py new file mode 100644 index 0000000000000000000000000000000000000000..cf19c5a62012f771122732ff8f7fb350ff5659f4 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_config.py @@ -0,0 +1,29 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Configuration parameters for RaggedTensors.""" + + +def auto_cast_partition_dtype(): + """Whether incompatible row-partitioning dtypes should be auto-converted. + + If true, then operations that combine RaggedTensors but have different + row-partitioning tensor dtypes will be automatically cast to a + compatible dtype (`tf.int64`). If false, then such operations will result + in an error. + + Returns: + `bool` + """ + return False diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_dispatch.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..176e18be3d7f520a2e648c7ad3b0d5c2a43fca9c --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_dispatch.py @@ -0,0 +1,160 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# TODO(martinz): This is deprecated. Delete.
def ragged_binary_elementwise_op(op, x, y):
  """Binary elementwise api handler for RaggedTensors.

  Args:
    op: The binary elementwise callable to apply to the (flat) values.
    x: First operand; a `RaggedTensor`, `Tensor`, or value convertible to one.
    y: Second operand; a `RaggedTensor`, `Tensor`, or value convertible to one.

  Returns:
    The result of `op` applied to the broadcast flat values, re-wrapped with
    the ragged structure of `x` (or of `y` when `x` is dense) -- or a bare
    `bool` when `op` short-circuits (tensor_equals).
  """
  x_is_ragged = ragged_tensor.is_ragged(x)
  y_is_ragged = ragged_tensor.is_ragged(y)

  # Convert args to tensors.
  # The ragged operand's dtype is preferred so that python scalars / lists
  # passed as the other operand adopt a compatible dtype.
  x = ragged_tensor.convert_to_tensor_or_ragged_tensor(
      x, preferred_dtype=(y.dtype if y_is_ragged else None))
  y = ragged_tensor.convert_to_tensor_or_ragged_tensor(
      y, preferred_dtype=x.dtype)

  if x_is_ragged and y_is_ragged:
    x, y = ragged_tensor.match_row_splits_dtypes(x, y)

  # Perform broadcasting, when appropriate
  if ((x_is_ragged and y_is_ragged) or
      (x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or
      (y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)):
    # If both x and y are ragged, they must have the same row_splits_dtype now.
    if x_is_ragged:
      dim_size_dtype = x.row_splits.dtype
    else:
      dim_size_dtype = y.row_splits.dtype

    # Compute both dynamic shapes, broadcast them to a common shape, and
    # expand each operand to that shape (inner dense dimensions are left to
    # the elementwise op's own broadcasting).
    shape_x = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(
        x, dim_size_dtype=dim_size_dtype)
    shape_y = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(
        y, dim_size_dtype=dim_size_dtype)
    bcast_shape = ragged_tensor_shape.broadcast_dynamic_shape(shape_x, shape_y)
    x = ragged_tensor_shape.broadcast_to(
        x, bcast_shape, broadcast_inner_dimensions=False)
    y = ragged_tensor_shape.broadcast_to(
        y, bcast_shape, broadcast_inner_dimensions=False)

  # Apply the op to the flat values; the ragged structure (if any) is
  # reattached afterwards.
  x_values = x.flat_values if ragged_tensor.is_ragged(x) else x
  y_values = y.flat_values if ragged_tensor.is_ragged(y) else y
  mapped_values = op(x_values, y_values)
  if isinstance(mapped_values, bool):
    return mapped_values  # Special case for tensor_equals.
  if ragged_tensor.is_ragged(x):
    return x.with_flat_values(mapped_values)
  else:
    return y.with_flat_values(mapped_values)
def ragged_op_list(tf_version=2):
  """Returns a string listing operations that have dispatchers registered.

  Builds a markdown bullet list of every API with a RaggedTensor dispatcher
  registered (plus `tf.print`), with ragged-accepting arguments marked in
  bold, filtered to the requested TF API version.

  Args:
    tf_version: Which TensorFlow API version to list ops for (1 or 2).

  Returns:
    A markdown-formatted string (ends with a newline).
  """
  lines = []
  api_signatures = dispatch.type_based_dispatch_signatures_for(
      ragged_tensor.RaggedTensor)
  for api, signatures in api_signatures.items():
    # Skip ops outside the requested API version before doing any per-op
    # argspec work.
    if not _op_is_in_tf_version(api, tf_version):
      continue
    arg_names = tf_inspect.getargspec(api).args
    ragged_args = set()
    for signature in signatures:
      for arg in signature:
        # Signatures may reference args by position or by name; normalize
        # everything to positional indices.
        ragged_args.add(arg if isinstance(arg, int) else arg_names.index(arg))
    lines.append(_ragged_op_signature(api, ragged_args))

  # tf.print supports RaggedTensors through its varargs.
  lines.append(
      _ragged_op_signature(logging_ops.print_v2, [], ragged_varargs=True))
  # Bug fix: the original appended the literal letter 'n' (a mangled '\n'),
  # which glued a stray "n" onto the last listed op.
  return ('\n\n### Additional ops that support `RaggedTensor`\n\n'
          'Arguments that accept `RaggedTensor`s are marked in **bold**.\n\n' +
          '\n'.join(sorted(lines)) + '\n')
+# ============================================================================== +"""Embedding operations.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import embedding_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.ops import variables +from tensorflow.python.ops.ragged import ragged_array_ops +from tensorflow.python.ops.ragged import ragged_functional_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import dispatch + + +@dispatch.dispatch_for_api(embedding_ops.embedding_lookup) +def embedding_lookup( + params, + ids: ragged_tensor.Ragged, + partition_strategy="mod", + name=None, + validate_indices=True, # pylint: disable=unused-argument + max_norm=None, +): + """Look up the ragged ids in a list of embedding tensors. + + Args: + params: A tensor representing the complete embedding tensor having the shape + [e1, ...eM] + ragged_ids: A 'RaggedTensor' with type 'int32' or 'int64' containing the ids + to be looked up in 'params' of shape [r0, ..rN]. Values must be in the + range '[0, params.shape[0]]'. + partition_strategy: A string specifying the partitioning strategy. + max_norm: If not `None`, each embedding is clipped if its l2-norm is larger + than this value. + name: A name for the operation (optional) + + Returns: + A ragged tensor of shape [r0, r1, ...rN, e1, ...eM]. + + Raises: + ValueError: When params is empty or the type of the ids is not int32 or + int64. 
+ """ + if params is None: + raise ValueError("params must be specified.") + if isinstance(params, (list, tuple)) and not params: + raise ValueError("params should not be empty.") + if ids.dtype != dtypes.int32 and ids.dtype != dtypes.int64: + raise ValueError( + "The values contained by the inputs have type " + f"{str(ids.dtype)}" + " and cannot be processed. All values" + " should be indices, either of type `int32` or `int64`." + ) + + with ops.name_scope(name, "embedding_lookup_ragged") as name: + looked_up_ragged = ragged_functional_ops.map_flat_values( + embedding_ops.embedding_lookup, + params=params, + ids=ids, + partition_strategy=partition_strategy, + max_norm=max_norm, + ) + + return looked_up_ragged + + +@dispatch.dispatch_for_api(embedding_ops.embedding_lookup_sparse) +def embedding_lookup_sparse( + params, + sp_ids: ragged_tensor.Ragged, + sp_weights, + partition_strategy="mod", + name=None, + combiner=None, + max_norm=None, + allow_fast_lookup=False, +): + """Looks up embeddings for the given ids and weights from a list of tensors. + + This op assumes that there is at least one id for each row in the dense tensor + represented by sp_ids (i.e. there are no rows with empty features), and that + all the indices of sp_ids are in canonical row-major order. + + `sp_ids` and `sp_weights` (if not None) are `RaggedTensor`s with rank of 2. + Embeddings are always aggregated along the last dimension. + + It also assumes that all id values lie in the range [0, p0), where p0 + is the sum of the size of params along dimension 0. + + Args: + params: A single tensor representing the complete embedding tensor, or a + list tensors all of same shape except for the first dimension, + representing sharded embedding tensors. Alternatively, a + `PartitionedVariable`, created by partitioning along dimension 0. Each + element must be appropriately sized for the given `partition_strategy`. + sp_ids: `RaggedTensor` with rank 2. 
The rank is not verified for performance + reasons. + sparse_weights: `RaggedTensor` of same type and shape as `sparse_ids`, + containing float / double weights corresponding to `sparse_ids`, or `None` + if all weights are assumed to be 1.0. + partition_strategy: A string specifying the partitioning strategy, relevant + if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default + is `"mod"`. See `tf.nn.embedding_lookup` for more details. + name: Optional name for the op. + combiner: A string specifying the reduction op. Currently "mean", "sqrtn" + and "sum" are supported. "sum" computes the weighted sum of the embedding + results for each row. "mean" is the weighted sum divided by the total + weight. "sqrtn" is the weighted sum divided by the square root of the sum + of the squares of the weights. Defaults to `mean`. + max_norm: If not `None`, each embedding is clipped if its l2-norm is larger + than this value, before combining. + allow_fast_lookup: An optional boolean specifying whether to allow + simplified embedding lookups when `params` is a single tensor and + `max_norm` is `None`. Setting this flag to `True` during training can + cause the use of dense gradients with increased memory footprint. + + Returns: + A dense tensor representing the combined embeddings for the + sparse ids. For each row in the dense tensor represented by `sp_ids`, the op + looks up the embeddings for all ids in that row, multiplies them by the + corresponding weight, and combines these embeddings as specified. + + In other words, if + + `shape(combined params) = [p0, p1, ..., pm]` + + and + + `shape(sp_ids) = shape(sp_weights) = [d0, d1]` + + then + + `shape(output) = [d0, p1, ..., pm]`. 
+ + For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are + + ```python + [0, 0]: id 1, weight 2.0 + [0, 1]: id 3, weight 0.5 + [1, 0]: id 0, weight 1.0 + [2, 3]: id 1, weight 3.0 + ``` + + with `combiner`="mean", then the output will be a 3x20 matrix where + + ```python + output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) + output[1, :] = (params[0, :] * 1.0) / 1.0 + output[2, :] = (params[1, :] * 3.0) / 3.0 + ``` + + Raises: + TypeError: If `sp_weights` is neither `None` nor of the same type as + `sp_ids`. + ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}. + """ + rt_ids = sp_ids + rt_weights = sp_weights + if combiner is None: + combiner = "mean" + if combiner not in ("mean", "sqrtn", "sum"): + raise ValueError( + f"combiner must be one of 'mean', 'sqrtn' or 'sum', got {combiner}" + ) + if isinstance(params, variables.PartitionedVariable): + params = list(params) # Iterate to get the underlying Variables. + if not isinstance(params, list): + params = [params] + ignore_weights = rt_weights is None + if not ignore_weights: + if not isinstance(rt_weights, ragged_tensor.RaggedTensor): + raise TypeError( + f"sp_ids must be of the same type as sp_weights, " + f"received {{type(sp_ids).__name__!r}} for sp_ids and " + f"{{type(sp_weights).__name__!r}} for sp_weights." 
+ ) + rt_ids.values.get_shape().assert_is_compatible_with( + rt_weights.values.get_shape() + ) + rt_ids.get_shape().assert_is_compatible_with(rt_weights.get_shape()) + + with ops.name_scope( + name, "embedding_lookup_sparse", params + [rt_ids] + ) as name: + segment_ids = rt_ids.value_rowids() + ids = rt_ids.flat_values + + return embedding_ops.embedding_lookup_sparse_impl( + params, + segment_ids, + sp_weights, + ids, + combiner, + ignore_weights, + max_norm, + allow_fast_lookup, + partition_strategy, + name, + ) + + +@dispatch.dispatch_for_api(embedding_ops.safe_embedding_lookup_sparse) +def safe_embedding_lookup_sparse( + embedding_weights, + sparse_ids: ragged_tensor.Ragged, + sparse_weights=None, + combiner="mean", + default_id=None, + name=None, + partition_strategy="div", + max_norm=None, + allow_fast_lookup=False, +): + """Lookup embedding results, accounting for invalid IDs and empty features. + + The partitioned embedding in `embedding_weights` must all be the same shape + except for the first dimension. The first dimension is allowed to vary as the + vocabulary size is not necessarily a multiple of `P`. `embedding_weights` + may be a `PartitionedVariable` as returned by using + `tf.compat.v1.get_variable()` with a + partitioner. + + Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs + with non-positive weight. For an entry with no features, the embedding vector + for `default_id` is returned, or the 0-vector if `default_id` is not supplied. + + The ids and weights may be multi-dimensional `SparseTensor`s or + `RaggedTensor`s with rank of 2. For `SpareTensor`s with left-aligned non-zero + entries which can be described as `RaggedTensor`s, use of `RaggedTensor`s can + yield higher performance. Embeddings are always aggregated along the last + dimension. 
+
+  Args:
+    embedding_weights: A single tensor representing the complete embedding
+      tensor, or a list of tensors all of same shape except for the first
+      dimension, representing sharded embedding tensors. Alternatively, a
+      `PartitionedVariable`, created by partitioning along dimension 0. Each
+      element must be appropriately sized for the given `partition_strategy`.
+    sparse_ids: `RaggedTensor` with rank 2. The rank is not verified for
+      performance reasons.
+    sparse_weights: `RaggedTensor` of same type and shape as `sparse_ids`,
+      containing float weights corresponding to `sparse_ids`, or `None` if all
+      weights are assumed to be 1.0.
+    combiner: A string specifying how to combine embedding results for each
+      entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the
+      default.
+    default_id: The id to use for an entry with no features.
+    name: A name for this operation (optional).
+    partition_strategy: A string specifying the partitioning strategy. Currently
+      `"div"` and `"mod"` are supported. Default is `"div"`.
+    max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
+      combining.
+    allow_fast_lookup: An optional boolean specifying whether to allow
+      simplified embedding lookups when `params` is a single tensor and
+      `max_norm` is `None`. Setting this flag to `True` during training can
+      cause the use of dense gradients with increased memory footprint.
+
+  Returns:
+    A dense tensor representing the combined embeddings for the
+    sparse ids. For each row in the dense tensor represented by `sparse_ids`,
+    the op looks up the embeddings for all ids in that row, multiplies them by
+    the corresponding weight, and combines these embeddings as specified.
+
+    In other words, if
+
+    `shape(combined embedding_weights) = [p0, p1, ..., pm]`
+
+    and
+
+    `shape(sparse_ids) = shape(sparse_weights) = [d0, d1, ..., dn]`
+
+    then
+
+    `shape(output) = [d0, d1, ... dn-1, p1, ..., pm]`.
+ + For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are + + ```python + [0, 0]: id 1, weight 2.0 + [0, 1]: id 3, weight 0.5 + [1, 0]: id -1, weight 1.0 + [2, 3]: id 1, weight 3.0 + ``` + + `default_id` is 0. + + with `combiner`="mean", then the output will be a 3x20 matrix where + + ```python + output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) + output[1, :] = (params[0, :] * 1.0) / 1.0 + output[2, :] = (params[1, :] * 3.0) / 3.0 + ``` + + Raises: + ValueError: if `embedding_weights` is empty. + """ + ragged_ids = sparse_ids + ragged_weights = sparse_weights + if embedding_weights is None: + raise ValueError(f"Missing embedding_weights {embedding_weights}.") + if isinstance(embedding_weights, variables.PartitionedVariable): + embedding_weights = list(embedding_weights) # get underlying Variables. + if not isinstance(embedding_weights, list): + embedding_weights = [embedding_weights] + if len(embedding_weights) < 1: + raise ValueError(f"Missing embedding_weights {embedding_weights}.") + + dtype = ragged_weights.dtype if ragged_weights is not None else None + embedding_weights = [ + w + if ( + resource_variable_ops.is_resource_variable(w) + and dtype in (None, w.dtype) + ) + else ops.convert_to_tensor(w, dtype=dtype) + for w in embedding_weights + ] + + with ops.name_scope( + name, "embedding_lookup", embedding_weights + [ragged_ids, ragged_weights] + ) as scope: + # Prune invalid ids and weights. 
+ ragged_ids, ragged_weights = _prune_invalid_ids_ragged( + ragged_ids, ragged_weights + ) + if combiner != "sum": + ragged_ids, ragged_weights = _prune_invalid_weights_ragged( + ragged_ids, ragged_weights + ) + ragged_ids, is_row_empty = ragged_array_ops.fill_empty_rows( + ragged_ids, default_id or 0 + ) + if ragged_weights is not None: + ragged_weights, _ = ragged_array_ops.fill_empty_rows(ragged_weights, 1.0) + + result = embedding_lookup_sparse( + embedding_weights, + ragged_ids, + ragged_weights, + combiner=combiner, + partition_strategy=partition_strategy, + name=None if default_id is None else scope, + max_norm=max_norm, + allow_fast_lookup=allow_fast_lookup, + ) + + if default_id is None: + # Broadcast is_row_empty to the same shape as embedding_lookup_result, + # for use in Select. + is_row_empty = array_ops.tile( + array_ops.reshape(is_row_empty, [-1, 1]), + array_ops_stack.stack([1, array_ops.shape(result)[1]]), + ) + + result = array_ops.where( + is_row_empty, array_ops.zeros_like(result), result, name=scope + ) + + return result + + +def _prune_invalid_ids_ragged(ids, weights): + """Prune invalid IDs (< 0) from the input ids and weights.""" + is_id_valid = math_ops.greater_equal(ids.values, 0) + nrows = ids.nrows() + # TODO(philipphack): Consider calling ragged_array_ops.boolean_mask once the + # resulting performance is comparable to array_ops.boolean_mask. Currently, + # ragged_array_ops.boolean_mask constructs the returned RaggedTensor by + # calling its from_row_splits method which does not set value_row_ids and + # requires it to be computed on demand. 
+ pruned_values = array_ops.boolean_mask_v2(ids.values, is_id_valid) + pruned_value_rowids = array_ops.boolean_mask_v2( + ids.value_rowids(), is_id_valid + ) + ids = ragged_tensor.RaggedTensor.from_value_rowids( + pruned_values, pruned_value_rowids, nrows=nrows, validate=False + ) + if weights is not None: + pruned_weights_values = array_ops.boolean_mask_v2( + weights.values, is_id_valid + ) + weights = ragged_tensor.RaggedTensor.from_value_rowids( + pruned_weights_values, pruned_value_rowids, nrows=nrows, validate=False + ) + + return ids, weights + + +def _prune_invalid_weights_ragged(ids, weights): + """Prune invalid weights (< 0) from the input ids and weights.""" + if weights is not None: + is_weights_valid = math_ops.greater(weights.values, 0) + nrows = ids.nrows() + # TODO(philipphack): Consider calling ragged_array_ops.boolean_mask once the + # resulting performance is comparable to array_ops.boolean_mask. Currently, + # ragged_array_ops.boolean_mask constructs the returned RaggedTensor by + # calling its from_row_splits method which does not set value_row_ids and + # requires it to be computed on demand. 
+ pruned_values = array_ops.boolean_mask_v2(ids.values, is_weights_valid) + pruned_value_rowids = array_ops.boolean_mask_v2( + ids.value_rowids(), is_weights_valid + ) + ids = ragged_tensor.RaggedTensor.from_value_rowids( + pruned_values, pruned_value_rowids, nrows=nrows, validate=False + ) + + pruned_weights_values = array_ops.boolean_mask_v2( + weights.values, is_weights_valid + ) + weights = ragged_tensor.RaggedTensor.from_value_rowids( + pruned_weights_values, pruned_value_rowids, nrows=nrows, validate=False + ) + + return ids, weights diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_factory_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_factory_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..55505df533d447559fe3a6facb211fc75d5d9673 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_factory_ops.py @@ -0,0 +1,385 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Operations for constructing RaggedTensors.""" + +from typing import Union + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged import ragged_tensor_value +from tensorflow.python.util import dispatch +from tensorflow.python.util.numpy_compat import np_reshape +from tensorflow.python.util.tf_export import tf_export + + +#=============================================================================== +# Op to construct a constant RaggedTensor from a nested Python list. +#=============================================================================== +@tf_export("ragged.constant") +@dispatch.add_dispatch_support +def constant( + pylist, + dtype=None, + ragged_rank=None, + inner_shape=None, + name=None, + row_splits_dtype=dtypes.int64, +) -> Union[ragged_tensor.RaggedTensor, ops._EagerTensorBase, ops.Operation]: + """Constructs a constant RaggedTensor from a nested Python list. + + Example: + + >>> tf.ragged.constant([[1, 2], [3], [4, 5, 6]]) + + + All scalar values in `pylist` must have the same nesting depth `K`, and the + returned `RaggedTensor` will have rank `K`. If `pylist` contains no scalar + values, then `K` is one greater than the maximum depth of empty lists in + `pylist`. All scalar values in `pylist` must be compatible with `dtype`. + + Args: + pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that + is not a `list`, `tuple` or `np.ndarray` must be a scalar value + compatible with `dtype`. + dtype: The type of elements for the returned `RaggedTensor`. If not + specified, then a default is chosen based on the scalar values in + `pylist`. 
+ ragged_rank: An integer specifying the ragged rank of the returned + `RaggedTensor`. Must be nonnegative and less than `K`. Defaults to + `max(0, K - 1)` if `inner_shape` is not specified. Defaults to + `max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified. + inner_shape: A tuple of integers specifying the shape for individual inner + values in the returned `RaggedTensor`. Defaults to `()` if `ragged_rank` + is not specified. If `ragged_rank` is specified, then a default is chosen + based on the contents of `pylist`. + name: A name prefix for the returned tensor (optional). + row_splits_dtype: data type for the constructed `RaggedTensor`'s row_splits. + One of `tf.int32` or `tf.int64`. + + Returns: + A potentially ragged tensor with rank `K` and the specified `ragged_rank`, + containing the values from `pylist`. + + Raises: + ValueError: If the scalar values in `pylist` have inconsistent nesting + depth; or if ragged_rank or inner_shape are incompatible with `pylist`. + """ + def ragged_factory(values, row_splits): + row_splits = constant_op.constant(row_splits, dtype=row_splits_dtype) + return ragged_tensor.RaggedTensor.from_row_splits(values, row_splits, + validate=False) + + with ops.name_scope(name, "RaggedConstant"): + return _constant_value(ragged_factory, constant_op.constant, pylist, dtype, + ragged_rank, inner_shape) + + +@tf_export(v1=["ragged.constant_value"]) +@dispatch.add_dispatch_support +def constant_value( + pylist, + dtype=None, + ragged_rank=None, + inner_shape=None, + row_splits_dtype="int64", +) -> Union[ragged_tensor_value.RaggedTensorValue, np.ndarray]: + """Constructs a RaggedTensorValue from a nested Python list. + + Warning: This function returns a `RaggedTensorValue`, not a `RaggedTensor`. + If you wish to construct a constant `RaggedTensor`, use + [`ragged.constant(...)`](constant.md) instead. 
+ + Example: + + >>> tf.compat.v1.ragged.constant_value([[1, 2], [3], [4, 5, 6]]) + tf.RaggedTensorValue(values=array([1, 2, 3, 4, 5, 6]), + row_splits=array([0, 2, 3, 6])) + + All scalar values in `pylist` must have the same nesting depth `K`, and the + returned `RaggedTensorValue` will have rank `K`. If `pylist` contains no + scalar values, then `K` is one greater than the maximum depth of empty lists + in `pylist`. All scalar values in `pylist` must be compatible with `dtype`. + + Args: + pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that + is not a `list` or `tuple` must be a scalar value compatible with `dtype`. + dtype: `numpy.dtype`. The type of elements for the returned `RaggedTensor`. + If not specified, then a default is chosen based on the scalar values in + `pylist`. + ragged_rank: An integer specifying the ragged rank of the returned + `RaggedTensorValue`. Must be nonnegative and less than `K`. Defaults to + `max(0, K - 1)` if `inner_shape` is not specified. Defaults to `max(0, K + - 1 - len(inner_shape))` if `inner_shape` is specified. + inner_shape: A tuple of integers specifying the shape for individual inner + values in the returned `RaggedTensorValue`. Defaults to `()` if + `ragged_rank` is not specified. If `ragged_rank` is specified, then a + default is chosen based on the contents of `pylist`. + row_splits_dtype: data type for the constructed `RaggedTensorValue`'s + row_splits. One of `numpy.int32` or `numpy.int64`. + + Returns: + A `tf.RaggedTensorValue` or `numpy.array` with rank `K` and the specified + `ragged_rank`, containing the values from `pylist`. + + Raises: + ValueError: If the scalar values in `pylist` have inconsistent nesting + depth; or if ragged_rank or inner_shape are incompatible with `pylist`. 
+ """ + if dtype is not None and isinstance(dtype, dtypes.DType): + dtype = dtype.as_numpy_dtype + row_splits_dtype = dtypes.as_dtype(row_splits_dtype).as_numpy_dtype + def _ragged_factory(values, row_splits): + row_splits = np.array(row_splits, dtype=row_splits_dtype) + return ragged_tensor_value.RaggedTensorValue(values, row_splits) + + def _inner_factory(pylist, dtype, shape, name=None): # pylint: disable=unused-argument + if dtype is object or dtype is None: + return np_reshape(np.array(pylist, dtype=dtype), shape) + else: + return np_reshape(np.array(pylist).astype(dtype), shape) + + return _constant_value( + _ragged_factory, _inner_factory, pylist, dtype, ragged_rank, inner_shape + ) + + +def _constant_value( + ragged_factory, inner_factory, pylist, dtype, ragged_rank, inner_shape +): + """Constructs a constant RaggedTensor or RaggedTensorValue. + + Args: + ragged_factory: A factory function with the signature: + `ragged_factory(values, row_splits)` + inner_factory: A factory function with the signature: `inner_factory(pylist, + dtype, shape, name)` + pylist: A nested `list`, `tuple` or `np.ndarray`. + dtype: Data type for returned value. + ragged_rank: Ragged rank for returned value. + inner_shape: Inner value shape for returned value. + + Returns: + A value returned by `ragged_factory` or `inner_factory`. + + Raises: + ValueError: If the scalar values in `pylist` have inconsistent nesting + depth; or if ragged_rank or inner_shape are incompatible with `pylist`. + """ + if ragged_tensor.is_ragged(pylist): + raise TypeError("pylist may not be a RaggedTensor or RaggedTensorValue.") + # np.ndim builds an array, so we short-circuit lists and tuples. 
+ if not isinstance(pylist, (list, tuple)) and np.ndim(pylist) == 0: + # Scalar value + if ragged_rank is not None and ragged_rank != 0: + raise ValueError("Invalid pylist=%r: incompatible with ragged_rank=%d" % + (pylist, ragged_rank)) + if inner_shape is not None and inner_shape: + raise ValueError( + "Invalid pylist=%r: incompatible with dim(inner_shape)=%d" % + (pylist, len(inner_shape))) + return inner_factory(pylist, dtype, ()) + + if ragged_rank is not None and ragged_rank < 0: + raise ValueError( + "Invalid ragged_rank=%r: must be nonnegative" % ragged_rank) + + # Find the depth of scalar values in `pylist`. + scalar_depth, max_depth = _find_scalar_and_max_depth(pylist) + if scalar_depth is not None: + if max_depth > scalar_depth: + raise ValueError("Invalid pylist=%r: empty list nesting is greater " + "than scalar value nesting" % pylist) + if ragged_rank is not None and max_depth < ragged_rank: + raise ValueError(f"Invalid pylist={pylist}, max depth smaller than " + f"ragged_rank={ragged_rank}") + + # If both inner_shape and ragged_rank were specified, then check that + # they are compatible with pylist. + if inner_shape is not None and ragged_rank is not None: + expected_depth = ragged_rank + len(inner_shape) + 1 + if ((scalar_depth is not None and expected_depth != scalar_depth) or + (scalar_depth is None and expected_depth < max_depth)): + raise ValueError( + "Invalid pylist=%r: incompatible with ragged_rank=%d " + "and dim(inner_shape)=%d" % (pylist, ragged_rank, len(inner_shape))) + + # Check if the result is a `Tensor`. + if (ragged_rank == 0 or + (ragged_rank is None and + ((max_depth < 2) or + (inner_shape is not None and max_depth - len(inner_shape) < 2)))): + return inner_factory(pylist, dtype, inner_shape) + + # Compute default value for inner_shape. + if inner_shape is None: + if ragged_rank is None: + inner_shape = () + else: + inner_shape = _default_inner_shape_for_pylist(pylist, ragged_rank) + + # Compute default value for ragged_rank. 
+ if ragged_rank is None: + if scalar_depth is None: + ragged_rank = max(1, max_depth - 1) + else: + ragged_rank = max(1, scalar_depth - 1 - len(inner_shape)) + + # Build the splits for each ragged rank, and concatenate the inner values + # into a single list. + nested_splits = [] + values = pylist + for dim in range(ragged_rank): + nested_splits.append([0]) + concatenated_values = [] + for row in values: + nested_splits[dim].append(nested_splits[dim][-1] + len(row)) + concatenated_values.extend(row) + values = concatenated_values + + values = inner_factory( + values, dtype=dtype, shape=(len(values),) + inner_shape, name="values") + for row_splits in reversed(nested_splits): + values = ragged_factory(values, row_splits) + return values + + +def _find_scalar_and_max_depth(pylist): + """Finds nesting depth of scalar values in pylist. + + Args: + pylist: A nested python `list` or `tuple`. + + Returns: + A tuple `(scalar_depth, max_depth)`. `scalar_depth` is the nesting + depth of scalar values in `pylist`, or `None` if `pylist` contains no + scalars. `max_depth` is the maximum depth of `pylist` (including + empty lists). + + Raises: + ValueError: If pylist has inconsistent nesting depths for scalars. + """ + # Check if pylist is not scalar. np.ndim builds an array, so we + # short-circuit lists and tuples. 
+ if isinstance(pylist, (list, tuple)) or np.ndim(pylist) != 0: + scalar_depth = None + max_depth = 1 + for child in pylist: + child_scalar_depth, child_max_depth = _find_scalar_and_max_depth(child) + if child_scalar_depth is not None: + if scalar_depth is not None and scalar_depth != child_scalar_depth + 1: + raise ValueError("all scalar values must have the same nesting depth") + scalar_depth = child_scalar_depth + 1 + max_depth = max(max_depth, child_max_depth + 1) + return (scalar_depth, max_depth) + return (0, 0) + + +def _default_inner_shape_for_pylist(pylist, ragged_rank): + """Computes a default inner shape for the given python list.""" + + def get_inner_shape(item): + """Returns the inner shape for a python list `item`.""" + if not isinstance(item, (list, tuple)) and np.ndim(item) == 0: + return () + # Note that we need this check here in case `item` is not a Python list but + # fakes as being one (pylist). For a scenario of this, see test added in + # https://github.com/tensorflow/tensorflow/pull/48945 + elif len(item) > 0: # pylint: disable=g-explicit-length-test + return (len(item),) + get_inner_shape(item[0]) + return (0,) + + def check_inner_shape(item, shape): + """Checks that `item` has a consistent shape matching `shape`.""" + is_nested = isinstance(item, (list, tuple)) or np.ndim(item) != 0 + if is_nested != bool(shape): + raise ValueError("inner values have inconsistent shape") + if is_nested: + if shape[0] != len(item): + raise ValueError("inner values have inconsistent shape") + for child in item: + check_inner_shape(child, shape[1:]) + + # Collapse the ragged layers to get the list of inner values. 
+ flat_values = pylist + for dim in range(ragged_rank): + if not all( + isinstance(v, (list, tuple)) or np.ndim(v) != 0 for v in flat_values): + raise ValueError("pylist has scalar values depth %d, but ragged_rank=%d " + "requires scalar value depth greater than %d" % + (dim + 1, ragged_rank, ragged_rank)) + flat_values = sum((list(v) for v in flat_values), []) + + # Compute the inner shape looking only at the leftmost elements; and then + # use check_inner_shape to verify that other elements have the same shape. + inner_shape = get_inner_shape(flat_values) + check_inner_shape(flat_values, inner_shape) + return inner_shape[1:] + + +@tf_export(v1=["ragged.placeholder"]) +@dispatch.add_dispatch_support +def placeholder(dtype, ragged_rank, value_shape=None, name=None): + """Creates a placeholder for a `tf.RaggedTensor` that will always be fed. + + **Important**: This ragged tensor will produce an error if evaluated. + Its value must be fed using the `feed_dict` optional argument to + `Session.run()`, `Tensor.eval()`, or `Operation.run()`. + + + Args: + dtype: The data type for the `RaggedTensor`. + ragged_rank: The ragged rank for the `RaggedTensor` + value_shape: The shape for individual flat values in the `RaggedTensor`. + name: A name for the operation (optional). + + Returns: + A `RaggedTensor` that may be used as a handle for feeding a value, but + not evaluated directly. + + Raises: + RuntimeError: if eager execution is enabled + + @compatibility(TF2) + This API is not compatible with eager execution and `tf.function`. To migrate + to TF2, rewrite the code to be compatible with eager execution. Check the + [migration + guide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls) + on replacing `Session.run` calls. In TF2, you can just pass tensors directly + into ops and layers. 
If you want to explicitly set up your inputs, also see + [Keras functional API](https://www.tensorflow.org/guide/keras/functional) on + how to use `tf.keras.Input` to replace `tf.compat.v1.ragged.placeholder`. + `tf.function` arguments also do the job of `tf.compat.v1.ragged.placeholder`. + For more details please read [Better + performance with tf.function](https://www.tensorflow.org/guide/function). + @end_compatibility + """ + if ragged_rank == 0: + return array_ops.placeholder(dtype, value_shape, name) + + with ops.name_scope(name, "RaggedPlaceholder", []): + flat_shape = tensor_shape.TensorShape([None]).concatenate(value_shape) + result = array_ops.placeholder(dtype, flat_shape, "flat_values") + for i in reversed(range(ragged_rank)): + row_splits = array_ops.placeholder(dtypes.int64, [None], + "row_splits_%d" % i) + result = ragged_tensor.RaggedTensor.from_row_splits(result, row_splits, + validate=False) + return result diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_functional_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_functional_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..d011ce570397b32f7330ff68e96d2b0a7ef5a22d --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_functional_ops.py @@ -0,0 +1,200 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Support for ragged tensors.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops.ragged import ragged_config +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("ragged.map_flat_values") +@dispatch.add_dispatch_support +def map_flat_values(op, *args, **kwargs): + """Applies `op` to the `flat_values` of one or more RaggedTensors. + + Replaces any `RaggedTensor` in `args` or `kwargs` with its `flat_values` + tensor (which collapses all ragged dimensions), and then calls `op`. Returns + a `RaggedTensor` that is constructed from the input `RaggedTensor`s' + `nested_row_splits` and the value returned by the `op`. + + If the input arguments contain multiple `RaggedTensor`s, then they must have + identical `nested_row_splits`. + + This operation is generally used to apply elementwise operations to each value + in a `RaggedTensor`. + + Warning: `tf.ragged.map_flat_values` does *not* apply `op` to each row of a + ragged tensor. This difference is important for non-elementwise operations, + such as `tf.reduce_sum`. If you wish to apply a non-elementwise operation to + each row of a ragged tensor, use `tf.map_fn` instead. (You may need to + specify an `output_signature` when using `tf.map_fn` with ragged tensors.) 
+ + Examples: + + >>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]]) + >>> tf.ragged.map_flat_values(tf.ones_like, rt) + + >>> tf.ragged.map_flat_values(tf.multiply, rt, rt) + + >>> tf.ragged.map_flat_values(tf.add, rt, 5) + + + Example with a non-elementwise operation (note that `map_flat_values` and + `map_fn` return different results): + + >>> rt = tf.ragged.constant([[1.0, 3.0], [], [3.0, 6.0, 3.0]]) + >>> def normalized(x): + ... return x / tf.reduce_sum(x) + >>> tf.ragged.map_flat_values(normalized, rt) + + >>> tf.map_fn(normalized, rt) + + + Args: + op: The operation that should be applied to the RaggedTensor `flat_values`. + `op` is typically an element-wise operation (such as math_ops.add), but + any operation that preserves the size of the outermost dimension can be + used. I.e., `shape[0]` of the value returned by `op` must match + `shape[0]` of the `RaggedTensor`s' `flat_values` tensors. + *args: Arguments for `op`. + **kwargs: Keyword arguments for `op`. + + Returns: + A `RaggedTensor` whose `ragged_rank` matches the `ragged_rank` of all + input `RaggedTensor`s. + Raises: + ValueError: If args contains no `RaggedTensors`, or if the `nested_splits` + of the input `RaggedTensor`s are not identical. + """ + # Replace RaggedTensors with their values; and collect the partitions tensors + # from each RaggedTensor. + partition_lists = [] + flat_values_nrows = [] + inner_args = _replace_ragged_with_flat_values(args, partition_lists, + flat_values_nrows) + inner_kwargs = _replace_ragged_with_flat_values(kwargs, partition_lists, + flat_values_nrows) + if not partition_lists: + return op(*args, **kwargs) + + # If we can statically determine that the inputs are incompatible, then raise + # an error. (We can't guarantee full compatibility statically, so we need to + # perform some runtime checks too; but this allows us to fail sooner in some + # cases.) 
+ if flat_values_nrows: + flat_values_nrows = set(flat_values_nrows) + if len(flat_values_nrows) != 1: + raise ValueError("Input RaggedTensors' flat_values must all have the " + "same outer-dimension size. Got sizes: %s" % + flat_values_nrows) + flat_values_nrows = flat_values_nrows.pop() # Get the single element + else: + flat_values_nrows = None + + partition_dtypes = set(p[0].dtype for p in partition_lists) + if len(partition_dtypes) > 1: + if not ragged_config.auto_cast_partition_dtype(): + raise ValueError("Input RaggedTensors have mismatched row partition " + "dtypes; use RaggedTensor.with_row_splits_dtype() to " + "convert them to compatible dtypes.") + + partition_lists = [ + [p.with_dtype(dtypes.int64) + for p in partition_list] # pylint: disable=g-complex-comprehension + for partition_list in partition_lists + ] + + # Delegate to `op` + op_output = op(*inner_args, **inner_kwargs) + # Check that the result has the expected shape (if known). + if flat_values_nrows is not None: + if not op_output.shape[:1].is_compatible_with([flat_values_nrows]): + raise ValueError( + "tf.ragged.map_flat_values requires that the output of `op` have " + "the same outer-dimension size as flat_values of any ragged " + "inputs. (output shape: %s; expected outer dimension size: %s)" % + (op_output.shape, flat_values_nrows)) + # Compose the result from the transformed values and the partitions. + return ragged_tensor.RaggedTensor._from_nested_row_partitions( # pylint: disable=protected-access + op_output, + _merge_partition_lists(partition_lists), + validate=False) + + +def _replace_ragged_with_flat_values(value, partition_lists, flat_values_nrows): + """Replace RaggedTensors with their flat_values, and record their partitions. + + Returns a copy of `value`, with any nested `RaggedTensor`s replaced by their + `flat_values` tensor. Looks inside lists, tuples, and dicts. + + Appends each `RaggedTensor`'s `RowPartition`s to `partition_lists`. 
+ + Args: + value: The value that should be transformed by replacing `RaggedTensors`. + partition_lists: An output parameter used to record the row partitions + for any `RaggedTensors` that were replaced. + flat_values_nrows: An output parameter used to record the outer dimension + size for each replacement `flat_values` (when known). Contains a list of + int. + + Returns: + A copy of `value` with nested `RaggedTensors` replaced by their `values`. + """ + # Base case + if ragged_tensor.is_ragged(value): + value = ragged_tensor.convert_to_tensor_or_ragged_tensor(value) + partition_lists.append(value._nested_row_partitions) # pylint: disable=protected-access + nrows = tensor_shape.dimension_at_index(value.flat_values.shape, 0).value + if nrows is not None: + flat_values_nrows.append(nrows) + return value.flat_values + + # Recursion cases + def recurse(v): + return _replace_ragged_with_flat_values(v, partition_lists, + flat_values_nrows) + + if isinstance(value, list): + return [recurse(v) for v in value] + elif isinstance(value, tuple): + return tuple(recurse(v) for v in value) + elif isinstance(value, dict): + return dict((k, recurse(v)) for (k, v) in value.items()) + else: + return value + + +def _merge_partition_lists(partition_lists): + """Merges the given list of lists of RowPartitions. + + Args: + partition_lists: A list of lists of RowPartition. + + Returns: + A list of RowPartitions, where `result[i]` is formed by merging + `partition_lists[j][i]` for all `j`, using + `RowPartition._merge_precomputed_encodings`. 
+ """ + dst = list(partition_lists[0]) + for src in partition_lists[1:]: + if len(src) != len(dst): + raise ValueError("All ragged inputs must have the same ragged_rank.") + for i in range(len(dst)): + # pylint: disable=protected-access + dst[i] = dst[i]._merge_precomputed_encodings(src[i]) + return dst diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_gather_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_gather_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..95c99ef8599029851d3c461083917ba45a718134 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_gather_ops.py @@ -0,0 +1,538 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ===============================================================================
# ragged_gather
# ===============================================================================
@dispatch.dispatch_for_api(array_ops.gather_v2)
def gather(params: ragged_tensor.RaggedOrDense,
           indices: ragged_tensor.RaggedOrDense,
           validate_indices=None,
           axis=None,
           batch_dims=0,
           name=None):
  """Gathers ragged slices from `params` axis `0` according to `indices`.

  See `tf.gather` for full documentation.  (This version has the same API
  as `tf.gather`, but supports ragged `params` and `indices`.)

  Examples:

  >>> params = tf.constant(['a', 'b', 'c', 'd', 'e'])
  >>> indices = tf.constant([3, 1, 2, 1, 0])
  >>> ragged_params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
  >>> ragged_indices = tf.ragged.constant([[3, 1, 2], [1], [], [0]])

  >>> tf.gather(params, ragged_indices)
  <tf.RaggedTensor [[b'd', b'b', b'c'], [b'b'], [], [b'a']]>

  >>> tf.gather(ragged_params, indices)
  <tf.RaggedTensor [[b'e'], [b'd'], [], [b'd'], [b'a', b'b', b'c']]>

  >>> tf.gather(ragged_params, ragged_indices)
  <tf.RaggedTensor [[[b'e'], [b'd'], []], [[b'd']], [], [[b'a', b'b', b'c']]]>

  Args:
    params: The potentially ragged tensor from which to gather values. Must be
      at least rank 1.
    indices: The potentially ragged tensor indicating which values to gather.
      Must have dtype `int32` or `int64`.  Values must be in the range `[0,
      params.shape[0]]`.
    validate_indices: Ignored.
    axis: The axis in `params` to gather `indices` from.
    batch_dims: The number of batch dimensions.
    name: A name for the operation (optional).

  Returns:
    A `RaggedTensor`, where `output.dtype=params.dtype` and
    `output.shape=indices.shape + params.shape[1:]` and
    `output.ragged_rank=indices.shape.ndims + params.ragged_rank`.

  Raises:
    ValueError: If indices.shape.ndims is not known statically.
  """
  del validate_indices  # Accepted for tf.gather API compatibility; unused.

  with ops.name_scope(name, 'RaggedGather', [params, indices]):
    # Normalize both inputs to Tensor/RaggedTensor with matching row-splits
    # dtypes before any shape-based validation.
    params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        params, name='params')
    indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        indices, name='indices')
    params, indices = ragged_tensor.match_row_splits_dtypes(params, indices)

    # Canonicalize a negative batch_dims.  NOTE(review): when
    # batch_dims == rank(indices) it is deliberately left untouched
    # (get_positive_axis would reject it), and the range check below
    # explicitly permits equality — presumably intentional; confirm.
    if batch_dims != indices.shape.rank:
      batch_dims = array_ops.get_positive_axis(
          batch_dims,
          indices.shape.rank,
          axis_name='batch_dims',
          ndims_name='rank(indices)')
    if params.shape.rank is not None and batch_dims >= params.shape.rank:
      raise ValueError('batch_dims must be less than rank(params)')
    # tf.gather convention: when axis is unspecified it defaults to
    # batch_dims.
    if axis is None:
      axis = batch_dims
    axis = array_ops.get_positive_axis(
        axis, params.shape.rank, ndims_name='rank(params)')
    if axis < batch_dims:
      raise ValueError('axis must be greater than or equal to batch_dims')
    if indices.shape.rank is not None:
      if not 0 <= batch_dims <= indices.shape.rank:
        raise ValueError(
            'batch_dims=%s must be between 0 and rank(indices)=%s' %
            (batch_dims, indices.shape.rank))

    # All arguments are normalized; dispatch to the recursive helper.
    return _gather(params, indices, axis, batch_dims)
+ indices: The indices of values to gather. + axis: The axis in `params` to gather `indices` from. + batch_dims: The number of batch dimensions. + + Returns: + A potentially ragged tensor. + """ + params_is_ragged = ragged_tensor.is_ragged(params) + indices_is_ragged = ragged_tensor.is_ragged(indices) + + if not (params_is_ragged or indices_is_ragged): + return array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims) + + if batch_dims > 0: + return _batch_gather(params, indices, axis, batch_dims) + + if axis > 0: + return _axis_gather(params, indices, axis) + + if indices_is_ragged: + return indices.with_values(_gather(params, indices.values, 0, 0)) + + if indices.shape.ndims is None: + raise ValueError('rank(indices) must be known statically') + + out_ragged_rank = indices.shape.ndims + len(params.nested_row_splits) - 1 + result = gen_ragged_array_ops.ragged_gather( + indices=indices, + params_dense_values=params.flat_values, + params_nested_splits=params.nested_row_splits, + OUTPUT_RAGGED_RANK=out_ragged_rank) + + result = ragged_tensor.RaggedTensor.from_nested_row_splits( + result.output_dense_values, result.output_nested_splits, validate=False) + + # Inject uniform_row_lengths into the result RaggedTensors for dimensions + # corresponding to dense outer dimensions of `indices`. + # TODO(edloper): Change this to construct the result using RowPartition + # objects instead, so we don't need to modify private variables. + if indices.shape.ndims > 1: + target = result + indices_shape = array_ops.shape(indices, out_type=params.row_splits.dtype) + shape_cumprod = math_ops.cumprod(indices_shape) + for dim in range(indices.shape.ndims - 1): + # pylint: disable=protected-access + target._cached_nrows = shape_cumprod[dim] + target._uniform_row_length = indices_shape[dim + 1] + target = target.values + + return result + + +def _batch_gather(params, indices, axis, batch_dims): + """Helper that implements the body for ragged gather() when batch_dims>0. 
def _batch_gather(params, indices, axis, batch_dims):
  """Helper that implements the body for ragged gather() when batch_dims>0.

  Args:
    params: The tensor from which to gather values.
    indices: The indices of values to gather.
    axis: The axis in `params` to gather `indices` from.
    batch_dims: The number of batch dimensions.

  Returns:
    A potentially ragged tensor.
  """
  # Perform static checks that `params` and `indices` have compatible batch
  # dimensions.  Note: we do not perform *runtime* checks that `params` and
  # `indices` actually have the same row-splits (because we wish to avoid the
  # runtime cost of those checks).  If `params` and `indices` are
  # incompatible, the resulting `RaggedTensor` may be nonsensical.
  if not params.shape[:batch_dims].is_compatible_with(
      indices.shape[:batch_dims]):
    raise ValueError('batch shape from indices %s does not match params '
                     'shape %s' % (indices.shape[:batch_dims], params.shape))

  if batch_dims > 1:
    # Convert params & indices to ragged tensors, so the two outermost batch
    # dimensions can be flattened together below.
    if not isinstance(params, ragged_tensor.RaggedTensor):
      if indices.uniform_row_length is None:
        raise ValueError(
            'batch shape from indices does not match params shape: ragged '
            'indices dimension corresponds to uniform params dimension')
      params = ragged_tensor.RaggedTensor.from_tensor(
          params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype)
    if not isinstance(indices, ragged_tensor.RaggedTensor):
      if params.uniform_row_length is None:
        raise ValueError(
            'batch shape from indices does not match params shape: ragged '
            'params dimension corresponds to uniform indices dimension')
      indices = ragged_tensor.RaggedTensor.from_tensor(
          indices, ragged_rank=1, row_splits_dtype=params.row_splits.dtype)
    # Flatten the two outer batch dimensions into a single batch dimension,
    # and recurse.
    return params.with_values(
        _gather(params.values, indices.values, axis - 1, batch_dims - 1))

  if axis > 1:
    # Convert an axis dimension into a batch dimension, by adding a dimension
    # to `indices`, and tiling it to match `params`.  E.g., if `params`
    # had shape `[B, P1, P2]`, and `indices` had shape `[B, I1, I2]`, then we
    # tile `indices` to have shape `[B, P1, I1, I2]`.  That way, we can treat
    # the `P1` dimension as a batch dimension.
    if not isinstance(indices, ragged_tensor.RaggedTensor):
      adjusted_indices = params.with_values(
          array_ops.repeat(indices, params.row_lengths(), 0))
    else:
      if not isinstance(params, ragged_tensor.RaggedTensor):
        params = ragged_tensor.RaggedTensor.from_tensor(
            params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype)
      # Gather whole rows of `indices`, once per value in each params row,
      # so `indices` ends up tiled against the P1 dimension of `params`.
      adjusted_indices = _gather(
          indices,
          params.with_values(
              array_ops.repeat(
                  math_ops.range(params.nrows()), params.row_lengths())), 0, 0)
    return _batch_gather(params, adjusted_indices, axis, batch_dims + 1)

  if indices.shape.rank is None:
    raise ValueError('rank(indices) must be known statically')

  assert batch_dims == 1
  # If params.shape=[B, P1...PN] and indices.shape=[B, I1...IM], then:
  #
  #     output[b,        i1...im,         p2...pn] =
  #     params[b, indices[b, i1...im], p2...pn]
  #
  # We construct `output` by flattening `params`, adjusting the `indices` to
  # point into that flattened list, and recursively calling `gather`.
  flat_params = _flatten_dims_0_and_1(params)
  adjustments = _row_starts(params, indices.dtype)  # offset for each batch
  # increase adjustments's rank so it broadcasts w/ the outer dim of indices
  adjustments = _increase_rank_to(adjustments, indices.shape.ndims)
  adjusted_indices = indices + adjustments
  return _gather(flat_params, adjusted_indices, axis - 1, 0)
+ """ + if axis > 1: + if not isinstance(params, ragged_tensor.RaggedTensor): + params = ragged_tensor.RaggedTensor.from_tensor( + params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype) + # Recurse, using the flattened params (but do not flatten indices). + return params.with_values(_gather(params.values, indices, axis - 1, 0)) + + if indices.shape.rank is None: + raise ValueError('rank(indices) must be known statically') + + # Note: there is no checking of indices. If there is some index + # out of bounds, the results may be nonsensical. + + assert axis == 1 + # If params.shape=[P1...PN] and indices.shape=[I1...IM], then: + # + # output[p1, i1...im, p3...pn] = + # params[p1, indices[i1...im], p3...pn] + # + # We construct `output` by flattening `params`, adjusting the `indices` to + # have one additional dimension, and to point into that flattened list, and + # recursively calling `gather`. + flat_params = _flatten_dims_0_and_1(params) + adjustments = _row_starts(params, indices.dtype) # offset for each batch + adjustments = _increase_rank_to(adjustments, indices.shape.ndims + 1) + adjusted_indices = indices + adjustments + return _gather(flat_params, adjusted_indices, axis - 1, 0) + + +def _flatten_dims_0_and_1(t): + """Returns a copy of `t` with the outer two dimensions merged.""" + if isinstance(t, ragged_tensor.RaggedTensor): + return t.values + else: + t_shape = array_ops.shape(t) + return array_ops.reshape(t, array_ops.concat([[-1], t_shape[2:]], axis=0)) + + +def _row_starts(t, dtype): + """Returns the start indices for the rows in `t`.""" + if isinstance(t, ragged_tensor.RaggedTensor): + return math_ops.cast(t.row_starts(), dtype) + else: + t_shape = array_ops.shape(t, out_type=dtype) + return math_ops.range(t_shape[0]) * t_shape[1] + + +def _increase_rank_to(t, rank): + """Adds *trailing* size-1 dimensions to `t` until it has the given rank.""" + if isinstance(t, ragged_tensor.RaggedTensor): + return t.with_values(_increase_rank_to(t, rank - 
1)) + else: + old_dims = array_ops.shape(t) + new_dims = array_ops.ones([rank - array_ops.rank(t)], old_dims.dtype) + new_shape = array_ops.concat([old_dims, new_dims], axis=0) + return array_ops.reshape(t, new_shape) + + +@dispatch.dispatch_for_api(array_ops.gather) +def _ragged_gather_v1(params: ragged_tensor.RaggedOrDense, + indices: ragged_tensor.RaggedOrDense, + validate_indices=None, + name=None, + axis=0, + batch_dims=0): + return gather(params, indices, validate_indices, axis, batch_dims, name) + + +# =============================================================================== +# ragged.gather_nd +# =============================================================================== +@dispatch.dispatch_for_api(array_ops.gather_nd_v2) +def gather_nd( + params: ragged_tensor.RaggedOrDense, + indices: ragged_tensor.RaggedOrDense, + batch_dims=0, + name=None, + bad_indices_policy='', +): + """Gather slices from `params` using `n`-dimensional indices. + + This operation is similar to `gather`, but it uses the innermost dimension + of `indices` to define a slice into `params`. In particular, if: + + * `indices` has shape `[A1...AN, I]` + * `params` has shape `[B1...BM]` + + Then: + + * `result` has shape `[A1...AN, B_{I+1}...BM]`. + * `result[a1...aN] = params[indices[a1...aN, :]]` + + Args: + params: A potentially ragged tensor with shape `[A1...AN, I]`. + indices: A potentially ragged tensor with shape `[B1...BM]`. + batch_dims: Must be zero. + name: A name for the operation (optional). + bad_indices_policy: A string. If `""` or `"DEFAULT"`, the default behavior + is used (error on CPU and ignore on GPU). If `"IGNORE"`, the bad indices + are ignored and 0 is stored in the + + Returns: + A potentially ragged tensor with shape `[A1...AN, B_{I+1}...BM]`. + + #### Examples: + + >>> params = tf.ragged.constant( + ... [ [ ['000', '001'], ['010' ] ], + ... [ ['100' ], ['110', '111', '112'], ['120'] ], + ... 
# ===============================================================================
# ragged.gather_nd
# ===============================================================================
@dispatch.dispatch_for_api(array_ops.gather_nd_v2)
def gather_nd(
    params: ragged_tensor.RaggedOrDense,
    indices: ragged_tensor.RaggedOrDense,
    batch_dims=0,
    name=None,
    bad_indices_policy='',
):
  """Gather slices from `params` using `n`-dimensional indices.

  This operation is similar to `gather`, but it uses the innermost dimension
  of `indices` to define a slice into `params`.  In particular, if:

  * `indices` has shape `[A1...AN, I]`
  * `params` has shape `[B1...BM]`

  Then:

  * `result` has shape `[A1...AN, B_{I+1}...BM]`.
  * `result[a1...aN] = params[indices[a1...aN, :]]`

  Args:
    params: A potentially ragged tensor with shape `[B1...BM]`.
    indices: A potentially ragged tensor with shape `[A1...AN, I]`.
    batch_dims: Must be zero.
    name: A name for the operation (optional).
    bad_indices_policy: A string. If `""` or `"DEFAULT"`, the default behavior
      is used (error on CPU and ignore on GPU). If `"IGNORE"`, the bad indices
      are ignored and 0 is stored in the corresponding output value.

  Returns:
    A potentially ragged tensor with shape `[A1...AN, B_{I+1}...BM]`.

  #### Examples:

  >>> params = tf.ragged.constant(
  ...     [ [ ['000', '001'], ['010'              ]          ],
  ...       [ ['100'       ], ['110', '111', '112'], ['120'] ],
  ...       [ [             ], ['210'              ]          ] ])

  >>> # Gather 2D slices from a 3D tensor
  >>> tf.gather_nd(params, [[2], [0]])
  <tf.RaggedTensor [[[], [b'210']], [[b'000', b'001'], [b'010']]]>

  >>> # Gather 1D slices from a 3D tensor
  >>> tf.gather_nd(params, [[2, 1], [0, 0]])
  <tf.RaggedTensor [[b'210'], [b'000', b'001']]>

  >>> # Gather scalars from a 3D tensor
  >>> tf.gather_nd(params, [[0, 0, 1], [1, 1, 2]]).numpy()
  array([b'001', b'112'], dtype=object)
  """
  if not isinstance(batch_dims, int) or batch_dims != 0:
    raise ValueError('batch_dims != 0 is not supported for ragged gather yet.')
  # Fully dense case: defer to the regular dense gather_nd.
  if not (ragged_tensor.is_ragged(params) or ragged_tensor.is_ragged(indices)):
    return array_ops.gather_nd(
        params, indices, name=name, bad_indices_policy=bad_indices_policy
    )

  if bad_indices_policy not in ('', 'DEFAULT'):
    raise ValueError(
        'non-default bad_indices_policy not supported for ragged gather'
    )

  with ops.name_scope(name, 'RaggedGatherNd', [params, indices]):

    params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        params, name='params')
    indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        indices, name='indices')
    params, indices = ragged_tensor.match_row_splits_dtypes(params, indices)
    indices_shape = indices.shape
    indices_ndims = indices_shape.ndims
    if indices_ndims is None:
      raise ValueError('indices.rank be statically known.')
    if indices_ndims == 0:
      raise ValueError('indices.rank must be at least 1.')
    if (ragged_tensor.is_ragged(indices) and
        indices_ndims == indices.ragged_rank + 1):
      raise ValueError('The innermost dimension of indices may not be ragged')

    # `index_size` is the "n" in "gather_nd" -- i.e., the number of dimensions
    # that each index slices into.
    index_size = tensor_shape.dimension_value(indices_shape[-1])
    if index_size is None:
      raise ValueError('indices.shape[-1] must be statically known.')

    # If `indices` has more than 2 dimensions, then recurse.  If `indices` is
    # dense, then we convert it to ragged before recursing, and then convert
    # the result back to `dense` if appropriate.
    if indices_ndims > 2:
      indices_is_dense = not ragged_tensor.is_ragged(indices)
      if indices_is_dense:
        indices = ragged_tensor.RaggedTensor.from_tensor(
            indices, ragged_rank=indices_ndims - 2,
            row_splits_dtype=params.row_splits.dtype)
      result = indices.with_flat_values(gather_nd(params, indices.flat_values))
      if (indices_is_dense and ragged_tensor.is_ragged(result) and
          result.ragged_rank == indices_ndims - 2):
        result = ragged_tensor.RaggedTensor.to_tensor(result)
      return result

    # indices_ndims <= 2, and the innermost dimension of indices may not be
    # ragged, so `indices` must not be ragged.
    assert not ragged_tensor.is_ragged(indices)
    assert ragged_tensor.is_ragged(params)

    # Handle corner case: An empty index tuple selects the entire `params`
    # value.  So if `index_size` is zero, then tile `params`.
    if index_size == 0:
      params_ndims = params.ragged_rank + array_ops.rank(params.flat_values)
      for dim in range(indices_ndims - 1):
        params = ragged_array_ops.expand_dims(params, axis=0)
      multiples = array_ops.concat([
          array_ops.shape(indices)[:-1],
          array_ops.ones([params_ndims], dtypes.int32)
      ],
                                   axis=0)
      return ragged_array_ops.tile(params, multiples)

    # When index_size=1, we can just flatten the index tuples and use gather.
    elif index_size == 1:
      flattened_index_tuples = array_ops.reshape(indices, [-1])
      return gather(params, flattened_index_tuples)

    # Otherwise, params is a RaggedTensor, and indices is a 1D or 2D Tensor.
    # Flatten both the index tuples and the params, such that the flattened
    # index tuples point to the correct values in the flattened params; and
    # then use ragged.gather on the flattened index tuples & params.
    else:
      indices = math_ops.cast(indices, params.row_splits.dtype)

      # Flatten the outermost 2 dimensions of the index tuples & params.
      # row_splits[i] is the start offset of row i in params.values, so this
      # turns a (row, col) pair into a flat position in params.values.
      flattened_index_tuples = array_ops.gather(params.row_splits,
                                                indices[..., 0])
      flattened_index_tuples += indices[..., 1]
      flattened_params = params.values

      # Flatten any remaining dimensions.
      for dim in range(2, index_size):
        if not ragged_tensor.is_ragged(flattened_params):
          # Remaining params dims are dense: hand the rest of the index
          # tuple to the dense gather_nd kernel.
          flattened_index_tuples = array_ops.expand_dims(
              flattened_index_tuples, axis=1)
          flattened_index_tuples = array_ops.concat(
              [flattened_index_tuples, indices[..., dim:]], axis=1)
          return array_ops.gather_nd(flattened_params, flattened_index_tuples)

        flattened_index_tuples = array_ops.gather(
            flattened_params.row_starts(), flattened_index_tuples)
        flattened_index_tuples += indices[..., dim]
        flattened_params = flattened_params.values

      # Gather using the flattened index tuples and params.
      return gather(flattened_params, flattened_index_tuples)


@dispatch.dispatch_for_api(array_ops.gather_nd)
def _ragged_gather_nd_v1(
    params: ragged_tensor.RaggedOrDense,
    indices: ragged_tensor.RaggedOrDense,
    name=None,
    batch_dims=0,
    bad_indices_policy='',
):
  """Ragged dispatcher for the v1 `tf.gather_nd` API (different arg order)."""
  return gather_nd(
      params, indices, batch_dims, name, bad_indices_policy=bad_indices_policy
  )
+ combined_splits = param_nested_splits[0] + for row_splits in param_nested_splits[1:]: + combined_splits = array_ops.gather(row_splits, combined_splits) + + # The outer dimensions of `indices` correspond 1:1 with the outer dimensions + # of `ragged_grad` that are encoded by `grad_nested_splits`. Thus, the + # flattened `indices` correspond 1:1 with `grad_inner_values`. + flat_indices = array_ops.reshape(indices, [-1]) + + # Build an IndexedSlices where the values are taken from `flat_grad`. + grad_indices = ragged_math_ops.range( + array_ops.gather(combined_splits, flat_indices), + array_ops.gather(combined_splits[1:], flat_indices)).values + + param_inner_values_grad = indexed_slices.IndexedSlices( + values=grad_inner_values, indices=grad_indices, + dense_shape=array_ops.shape(param_inner_values)) + return [None for _ in param_nested_splits] + [param_inner_values_grad, None] diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_image_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_image_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f139b97f55eb52955cd9807094c3dc85b7cb51e0 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_image_ops.py @@ -0,0 +1,98 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Image operations for RaggedTensors.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_spec +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import image_ops +from tensorflow.python.ops import map_fn +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import dispatch + + +@dispatch.dispatch_for_api(image_ops.resize_images_v2) +def resize_images_v2(images: ragged_tensor.RaggedTensor, + size, + method=image_ops.ResizeMethod.BILINEAR, + preserve_aspect_ratio=False, + antialias=False, + name=None): + """RaggedTensor dispatcher for tf.image.resize (tf-v2).""" + with ops.name_scope(name, "RaggedResizeImages", [images, size]): + return _resize_images( + image_ops.resize_images_v2, + images, + size, + method=method, + preserve_aspect_ratio=preserve_aspect_ratio, + antialias=antialias) + + +@dispatch.dispatch_for_api(image_ops.resize_images) +def resize_images_v1(images: ragged_tensor.RaggedTensor, + size, + method=image_ops.ResizeMethodV1.BILINEAR, + align_corners=False, + preserve_aspect_ratio=False, + name=None): + """RaggedTensor dispatcher for tf.image.resize (tf-v1).""" + with ops.name_scope(name, "RaggedResizeImages", [images, size]): + return _resize_images( + image_ops.resize_images, + images, + size, + method=method, + preserve_aspect_ratio=preserve_aspect_ratio, + align_corners=align_corners) + + +def _resize_images(resize_op, images, size, **kwargs): + """RaggedTensor dispatcher for tf.image.resize.""" + if images.shape.rank != 4: + raise ValueError( + "tf.image.resize: images.shape.rank must be 4 if images is ragged.") + + # Determine the 
output shape (excluding the batch dimension). + static_batch_size = tensor_shape.dimension_value(images.shape[0]) + size = ops.convert_to_tensor(size, dtypes.int32, "size") + size_as_shape = tensor_util.constant_value_as_shape(size).with_rank(2) + out_shape = size_as_shape + images.shape[-1:] + out_spec = tensor_spec.TensorSpec(out_shape, dtypes.float32) + + def resize_one(image): + if isinstance(image, ragged_tensor.RaggedTensor): + image = image.to_tensor() + return resize_op(image, size, **kwargs) + + def resize_with_map(): + return map_fn.map_fn_v2(resize_one, images, fn_output_signature=out_spec) + + def empty_result(): + channels = array_ops.shape(images.flat_values)[-1:] + return array_ops.zeros(array_ops.concat([[0], size, channels], axis=0)) + + if static_batch_size == 0: + return empty_result() + elif static_batch_size is not None: + return resize_with_map() + else: + empty_batch = math_ops.equal(images.nrows(), 0) + return cond.cond(empty_batch, empty_result, resize_with_map) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_math_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_math_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..fac49983845728e307f94a8a53be8d367e6212ab --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_math_ops.py @@ -0,0 +1,1261 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# ===============================================================================
# ragged.range
# ===============================================================================
# pylint: disable=redefined-builtin
@tf_export('ragged.range')
@dispatch.add_dispatch_support
def range(starts,
          limits=None,
          deltas=1,
          dtype=None,
          name=None,
          row_splits_dtype=dtypes.int64):
  """Returns a `RaggedTensor` containing the specified sequences of numbers.

  Each row of the returned `RaggedTensor` contains a single sequence:

  ```python
  ragged.range(starts, limits, deltas)[i] ==
      tf.range(starts[i], limits[i], deltas[i])
  ```

  If `start[i] >= limits[i] and deltas[i] > 0`, then `output[i]` will be an
  empty list.  Similarly, if `start[i] <= limits[i] and deltas[i] < 0`, then
  `output[i]` will be an empty list.  This behavior is consistent with the
  Python `range` function, but differs from the `tf.range` op, which returns
  an error for these cases.

  Examples:

  >>> tf.ragged.range([3, 5, 2]).to_list()
  [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]]
  >>> tf.ragged.range([0, 5, 8], [3, 3, 12]).to_list()
  [[0, 1, 2], [], [8, 9, 10, 11]]
  >>> tf.ragged.range([0, 5, 8], [3, 3, 12], 2).to_list()
  [[0, 2], [], [8, 10]]

  The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
  The vector inputs must all have the same size.  Scalar inputs are broadcast
  to match the size of the vector inputs.

  Args:
    starts: Vector or scalar `Tensor`.  Specifies the first entry for each range
      if `limits` is not `None`; otherwise, specifies the range limits, and the
      first entries default to `0`.
    limits: Vector or scalar `Tensor`.  Specifies the exclusive upper limits for
      each range.
    deltas: Vector or scalar `Tensor`.  Specifies the increment for each range.
      Defaults to `1`.
    dtype: The type of the elements of the resulting tensor.  If not specified,
      then a value is chosen based on the other args.
    name: A name for the operation.
    row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
      tensor.  One of `tf.int32` or `tf.int64`.

  Returns:
    A `RaggedTensor` of type `dtype` with `ragged_rank=1`.
  """
  row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
  # Single-argument form: range(limits) means starts=0.
  if limits is None:
    starts, limits = 0, starts

  with ops.name_scope(name, 'RaggedRange', [starts, limits, deltas]) as name:
    starts = ops.convert_to_tensor(starts, dtype=dtype, name='starts')
    limits = ops.convert_to_tensor(limits, dtype=dtype, name='limits')
    deltas = ops.convert_to_tensor(deltas, dtype=dtype, name='deltas')

    # infer dtype if not explicitly provided
    if dtype is None:
      starts, limits, deltas = _infer_matching_dtype(
          [starts, limits, deltas],
          [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])

    result = gen_ragged_math_ops.ragged_range(
        starts, limits, deltas, Tsplits=row_splits_dtype, name=name)
    return ragged_tensor.RaggedTensor.from_row_splits(
        result.rt_dense_values, result.rt_nested_splits, validate=False)


def _infer_matching_dtype(tensors, dtype_hierarchy):
  """Infers a matching dtype for tensors, and casts them to that dtype."""
  assert all(t.dtype in dtype_hierarchy for t in tensors)
  # The "widest" dtype present (latest in the hierarchy list) wins.
  inferred_dtype = max([t.dtype for t in tensors], key=dtype_hierarchy.index)
  return [math_ops.cast(t, inferred_dtype) for t in tensors]


ops.no_gradient('RaggedRange')

# ===============================================================================
# ragged_segment_<AGGREGATE>
# ===============================================================================

# Docstring template used for the ragged_segment_<AGGREGATE> ops.
_RAGGED_SEGMENT_DOCSTRING = """\
Computes the %(combination)s along segments of a RaggedTensor.

  Returns a RaggedTensor `output` with `num_segments` rows, where the row
  `output[i]` is formed by taking the %(combination)s of all rows of `data`
  whose corresponding `segment_id` is `i`.

  The length of the row `output[i]` will be the maximum of the lengths of
  all rows of `data` whose corresponding `segment_id` is `i`.  If no `data`
  rows correspond to a given segment ID, then the output row for that segment
  ID will be empty.

  Args:
    data: A `RaggedTensor` containing the values to combine.
    segment_ids: A `Tensor` or `RaggedTensor`.  Must have type `int64` or
      `int32`.  `segment_ids.shape` must be a prefix of `data.shape`.
      Must be greater than or equal to zero, and less than `num_segments`.
      `segment_ids` is not required to be sorted.
    num_segments: An `int32` or `int64` scalar specifying the number of
      distinct segment ids.
    name: A name prefix for the returned tensor (optional).
  Returns:
    A `RaggedTensor` containing the %(combined)s values.  The returned tensor
    has the same dtype as `data`, and its shape is
    `[num_segments] + data.shape[segment_ids.rank:]`.
  Raises:
    ValueError: If `segment_ids.shape` is not a prefix of `data.shape`.
"""
def _ragged_segment_aggregate(unsorted_segment_op,
                              data,
                              segment_ids,
                              num_segments,
                              separator=None,
                              name=None):
  """Aggregates along segments of a RaggedTensor using `unsorted_segment_op`.

  Returns a RaggedTensor `output` with `num_segments` rows, where the row
  `output[i]` is formed by combining all rows of `data` whose corresponding
  `segment_id` is `i`.  The values in each row are combined using
  `unsorted_segment_op`.

  The length of the row `output[i]` will be the maximum of the lengths of
  all rows of `data` whose corresponding `segment_id` is `i`.  If no `data`
  rows correspond to a given segment ID, then the output row for that segment
  ID will be empty.

  Args:
    unsorted_segment_op: The tensorflow `op` that should be used to combine
      values in each row.  Must have the same signature and basic behavior as
      `unsorted_segment_sum`, `unsorted_segment_max`, etc.
    data: A `RaggedTensor` containing the values to be combined.
    segment_ids: A `Tensor` or `RaggedTensor`.  Must have type `int64` or
      `int32`.  `segment_ids.shape` must be a prefix of `data.shape`.
      `segment_ids` is not required to be sorted.
    num_segments: An `int32` or `int64` scalar.
    separator: An optional string.  Defaults to None.  The separator to use
      when joining.  Only used for string types.
    name: A name prefix for the returned tensor (optional).

  Returns:
    A `RaggedTensor` containing the aggregated values.  The returned tensor
    has the same dtype as `data`, and its shape is
    `[num_segments] + data.shape[segment_ids.rank:]`.
  Raises:
    ValueError: If segment_ids.shape is not a prefix of data.shape.
  """
  # Base case: both inputs are dense — defer to the dense segment op.
  if not (ragged_tensor.is_ragged(data) or
          ragged_tensor.is_ragged(segment_ids)):
    if separator is not None:
      # It uses unsorted_segment_join.
      return unsorted_segment_op(data, segment_ids, num_segments, separator,
                                 name)
    else:
      return unsorted_segment_op(data, segment_ids, num_segments, name)

  with ops.name_scope(name, 'RaggedSegment',
                      [data, segment_ids, num_segments]) as name:
    data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
    segment_ids = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        segment_ids, name='segment_ids')
    data, segment_ids = ragged_tensor.match_row_splits_dtypes(data, segment_ids)
    if segment_ids.dtype not in (dtypes.int32, dtypes.int64):
      raise ValueError('segment_ids must have dtype int32 or int64.')

    if ragged_tensor.is_ragged(segment_ids):
      if not ragged_tensor.is_ragged(data):
        raise ValueError('segment_ids.shape must be a prefix of data.shape, '
                         'but segment_ids is ragged and data is not.')
      # Ragged segment_ids: peel one ragged level off both inputs (after
      # verifying their partitions agree) and recurse.
      check_splits = check_ops.assert_equal(
          segment_ids.row_splits,
          data.row_splits,
          message='segment_ids.shape must be a prefix of data.shape')
      with ops.control_dependencies([check_splits]):
        return _ragged_segment_aggregate(unsorted_segment_op, data.values,
                                         segment_ids.values, num_segments,
                                         separator)

    # Find the length of each row in data.  (shape=[data_nrows])
    data_row_lengths = data.row_splits[1:] - data.row_splits[:-1]

    # Find the length that each output row will have.  The length of the row
    # corresponding to segment `id` is `max(data_row_lengths[i])` where
    # `segment_ids[i]=id`.  (shape=[output_nrows])
    # The outer maximum(…, 0) maps the sentinel emitted by
    # unsorted_segment_max for empty segments to a row length of 0.
    output_row_lengths = math_ops.maximum(
        math_ops.unsorted_segment_max(data_row_lengths, segment_ids,
                                      num_segments), 0)

    # Build the splits tensor for the output RaggedTensor.
    output_splits = array_ops.concat([
        array_ops.zeros([1], output_row_lengths.dtype),
        math_ops.cumsum(output_row_lengths)
    ],
                                     axis=0)

    # For each row in `data`, find the start & limit position where that row's
    # values will be aggregated in output.values.
    data_row_to_out_row_start = array_ops.gather(output_splits, segment_ids)
    data_row_to_out_row_limit = data_row_to_out_row_start + data_row_lengths

    # For each value in `data.values`, find the position where it will
    # aggregated in `output.values`.
    # Get the target output values index for each data values index.
    # NOTE: `range` here is this module's ragged range (the builtin is
    # shadowed; see the pylint disable at the top of the file).
    data_val_to_out_val_index = range(data_row_to_out_row_start,
                                      data_row_to_out_row_limit).values

    # Recursively aggregate the values.
    output_values = _ragged_segment_aggregate(unsorted_segment_op, data.values,
                                              data_val_to_out_val_index,
                                              output_splits[-1], separator)
    return ragged_tensor.RaggedTensor.from_row_splits(
        output_values, output_splits, validate=False)


@dispatch.dispatch_for_api(math_ops.unsorted_segment_sum)
def segment_sum(data: ragged_tensor.RaggedOrDense,
                segment_ids: ragged_tensor.RaggedOrDense,
                num_segments,
                name=None):
  # For docs, see: _RAGGED_SEGMENT_DOCSTRING
  return _ragged_segment_aggregate(
      math_ops.unsorted_segment_sum,
      data=data,
      segment_ids=segment_ids,
      num_segments=num_segments,
      name=(name or 'RaggedSegmentSum'))


@dispatch.dispatch_for_api(math_ops.unsorted_segment_prod)
def segment_prod(data: ragged_tensor.RaggedOrDense,
                 segment_ids: ragged_tensor.RaggedOrDense,
                 num_segments,
                 name=None):
  # For docs, see: _RAGGED_SEGMENT_DOCSTRING
  return _ragged_segment_aggregate(
      math_ops.unsorted_segment_prod,
      data=data,
      segment_ids=segment_ids,
      num_segments=num_segments,
      name=(name or 'RaggedSegmentProd'))
@dispatch.dispatch_for_api(math_ops.unsorted_segment_min)
def segment_min(data: ragged_tensor.RaggedOrDense,
                segment_ids: ragged_tensor.RaggedOrDense,
                num_segments,
                name=None):
  # For docs, see: _RAGGED_SEGMENT_DOCSTRING
  return _ragged_segment_aggregate(
      math_ops.unsorted_segment_min,
      data=data,
      segment_ids=segment_ids,
      num_segments=num_segments,
      name=(name or 'RaggedSegmentMin'))


@dispatch.dispatch_for_api(math_ops.unsorted_segment_max)
def segment_max(data: ragged_tensor.RaggedOrDense,
                segment_ids: ragged_tensor.RaggedOrDense,
                num_segments,
                name=None):
  # For docs, see: _RAGGED_SEGMENT_DOCSTRING
  return _ragged_segment_aggregate(
      math_ops.unsorted_segment_max,
      data=data,
      segment_ids=segment_ids,
      num_segments=num_segments,
      name=(name or 'RaggedSegmentMax'))


@dispatch.dispatch_for_api(math_ops.unsorted_segment_mean)
def segment_mean(data: ragged_tensor.RaggedOrDense,
                 segment_ids: ragged_tensor.RaggedOrDense,
                 num_segments,
                 name=None):
  """For docs, see: _RAGGED_SEGMENT_DOCSTRING."""
  with ops.name_scope(name, 'RaggedSegmentMean',
                      [data, segment_ids, num_segments]):
    # mean = sum(data) / count, where count is computed by summing a
    # ones-tensor with the same ragged structure as `data`.
    total = segment_sum(data, segment_ids, num_segments)
    ones = ragged_tensor.RaggedTensor.from_nested_row_splits(
        array_ops.ones_like(data.flat_values),
        data.nested_row_splits,
        validate=False)
    count = segment_sum(ones, segment_ids, num_segments)
    if ragged_tensor.is_ragged(total):
      return total.with_flat_values(total.flat_values / count.flat_values)
    else:
      return total / count


@dispatch.dispatch_for_api(math_ops.unsorted_segment_sqrt_n)
def segment_sqrt_n(data: ragged_tensor.RaggedOrDense,
                   segment_ids: ragged_tensor.RaggedOrDense,
                   num_segments,
                   name=None):
  """For docs, see: _RAGGED_SEGMENT_DOCSTRING."""
  with ops.name_scope(name, 'RaggedSegmentSqrtN',
                      [data, segment_ids, num_segments]):
    # sum(data) / sqrt(count), with count computed as in segment_mean.
    total = segment_sum(data, segment_ids, num_segments)
    ones = ragged_tensor.RaggedTensor.from_nested_row_splits(
        array_ops.ones_like(data.flat_values),
        data.nested_row_splits,
        validate=False)
    count = segment_sum(ones, segment_ids, num_segments)
    if ragged_tensor.is_ragged(total):
      return total.with_flat_values(total.flat_values /
                                    math_ops.sqrt(count.flat_values))
    else:
      return total / math_ops.sqrt(count)


def _set_ragged_segment_docstring(func, combination, combined):
  """Fills in `func.__doc__` from the shared segment-op docstring template."""
  func.__doc__ = _RAGGED_SEGMENT_DOCSTRING % dict(
      combination=combination, combined=combined)


_set_ragged_segment_docstring(segment_sum, 'sum', 'summed')
_set_ragged_segment_docstring(segment_prod, 'product', 'multiplied')
_set_ragged_segment_docstring(segment_min, 'minimum', 'minimized')
_set_ragged_segment_docstring(segment_max, 'maximum', 'maximized')
_set_ragged_segment_docstring(segment_mean, 'mean', 'averaged')
_set_ragged_segment_docstring(segment_sqrt_n, 'sum divided by sqrt(N)',
                              'summed')

#===============================================================================
# ragged_reduce_<AGGREGATE>
#===============================================================================

# Docstring template used for ragged_reduce_<AGGREGATE> ops.
_RAGGED_REDUCE_DOCSTRING = """\
Computes the %(combination)s of elements across dimensions of a `RaggedTensor`.

  Reduces `input_tensor` along the dimensions given in `axis` by taking the
  %(combination)s of values.  If a reduced dimension has no elements for
  some index, then the value for that index will be %(default)s.

  The rank of the tensor is reduced by `1` for each entry in `axis`.  If
  `axis` is not specified, then all dimensions are reduced, and a scalar
  value is returned.
  Args:
    input_tensor: A `RaggedTensor` containing the values to be %(combined)s.
    axis: The dimensions to reduce.  May be `None` (to reduce all axes), an
      `int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce
      a given set of axes), or a `Tensor` with a constant value.  Must be in
      the range `[0, input_tensor.rank]`.
    name: A name prefix for the returned tensor (optional).
  Returns:
    A `RaggedTensor` containing the %(combined)s values.  The returned tensor
    has the same dtype as `data`, and its shape is given by removing the
    dimensions specified in `axis` from `input_tensor.shape`.  The `ragged_rank`
    of the returned tensor is given by subtracting any ragged dimensions
    specified in `axis` from `input_tensor.ragged_rank`.
  Raises:
    ValueError: If `axis` contains a `Tensor` whose value is not constant.
  ####Example:
    %(example)s
"""
_RAGGED_REDUCE_SUM_EXAMPLE = """
    >>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
    >>> tf.reduce_sum(rt, axis=0).numpy()  # = [3+1+9+2, 1+5+6, 4]
    array([15, 12, 4], dtype=int32)
    >>> tf.reduce_sum(rt, axis=1).numpy()  # = [3+1+4, 1+5, 9, 2+6]
    array([8, 6, 9, 8], dtype=int32)
"""
_RAGGED_REDUCE_PROD_EXAMPLE = """
    >>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
    >>> tf.reduce_prod(rt, axis=0).numpy()  # = [3*1*9*2, 1*5*6, 4]
    array([54, 30, 4], dtype=int32)
    >>> tf.reduce_prod(rt, axis=1).numpy()  # = [3*1*4, 1*5, 9, 2*6]
    array([12, 5, 9, 12], dtype=int32)
"""
_RAGGED_REDUCE_MIN_EXAMPLE = """
    >>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
    >>> tf.reduce_min(rt, axis=0).numpy()
    array([1, 1, 4], dtype=int32)
    >>> tf.reduce_min(rt, axis=1).numpy()
    array([1, 1, 9, 2], dtype=int32)
"""
_RAGGED_REDUCE_MAX_EXAMPLE = """
    >>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
    >>> tf.reduce_max(rt, axis=0).numpy()
    array([9, 6, 4], dtype=int32)
    >>> tf.reduce_max(rt, axis=1).numpy()
    array([4, 5, 9, 6], dtype=int32)
"""
_RAGGED_REDUCE_MEAN_EXAMPLE = """
    >>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
    >>> tf.reduce_mean(rt, axis=0).numpy()
    array([3.75, 4.  , 4.  ])
    >>> tf.reduce_mean(rt, axis=1).numpy()
    array([2.66666667, 3.        , 9.        , 4.        ])
"""
_RAGGED_REDUCE_VARIANCE_EXAMPLE = """
    >>> rt = tf.ragged.constant([[1, 1, 4], [2, 1], [3], [4, 1]],
    ...                         dtype=tf.float64)
    >>> tf.math.reduce_variance(rt, axis=0).numpy()
    array([1.25, 0., 0.])
    >>> tf.math.reduce_variance(rt, axis=1).numpy()
    array([2., 0.25, 0., 2.25])
"""
_RAGGED_REDUCE_STD_EXAMPLE = """
    >>> rt = tf.ragged.constant([[1, 0], [2, 1], [3], [4, 1]],
    ...                         dtype=tf.float64)
    >>> tf.math.reduce_std(rt, axis=0).numpy()
    array([1.11803399, 0.47140452])
    >>> tf.math.reduce_std(rt, axis=1).numpy()
    array([0.5, 0.5, 0., 1.5])
"""
_RAGGED_REDUCE_ALL_EXAMPLE = """
    >>> rt = tf.ragged.constant([[True, True], [True, True, False, True], [False, True]])
    >>> tf.reduce_all(rt, axis=0).numpy()
    array([False,  True, False,  True])
    >>> tf.reduce_all(rt, axis=1).numpy()
    array([ True, False, False])
"""
_RAGGED_REDUCE_ANY_EXAMPLE = """
    >>> rt = tf.ragged.constant([[True, True], [True, True, False, True], [False, True]])
    >>> tf.reduce_any(rt, axis=0).numpy()
    array([ True,  True, False,  True])
    >>> tf.reduce_any(rt, axis=1).numpy()
    array([ True,  True,  True])
"""


def ragged_reduce_aggregate(reduce_op,
                            unsorted_segment_op,
                            rt_input,
                            axis,
                            keepdims,
                            separator=None,
                            name=None):
  """Aggregates across axes of a RaggedTensor using the given `Tensor` ops.

  Reduces `rt_input` along the dimensions given in `axis`.  The rank of the
  tensor is reduced by 1 for each entry in `axis`.  If `axis` is not specified,
  then all dimensions are reduced, and a scalar value is returned.

  This op assumes that `reduce_op` and `unsorted_segment_op` are associative;
  if not, then reducing multiple axes will return incorrect results.  (In
  particular, reducing multiple axes is currently implemented by reducing the
  axes one at a time.)

  Args:
    reduce_op: The tensorflow `op` that should be used to reduce values in
      uniform dimensions.  Must have the same signature and basic behavior as
      `reduce_sum`, `reduce_max`, etc.
    unsorted_segment_op: The tensorflow `op` that should be used to combine
      values in ragged dimensions.  Must have the same signature and basic
      behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc.
    rt_input: A `Tensor` or `RaggedTensor` containing the values to be reduced.
    axis: The axis or axes to reduce.  May be `None` (to reduce all axes), an
      `int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce
      a given set of axes), or a `Tensor` with a constant value.  Must be in
      the range `[0, rt_input.rank)`.
    keepdims: If true, retains reduced dimensions with length 1.
    separator: An optional string.  Defaults to None.  The separator to use
      when joining.  The separator must not be set for non-string data types.
      (i.e. if separator is not None then it uses string ops)
    name: A name prefix for the returned tensor (optional).

  Returns:
    A `RaggedTensor` containing the reduced values.  The returned tensor
    has the same dtype as `data`, and its shape is given by removing the
    dimensions specified in `axis` from `rt_input.shape`.  The `ragged_rank`
    of the returned tensor is given by subtracting any ragged dimensions
    specified in `axis` from `rt_input.ragged_rank`.
  Raises:
    ValueError: If `axis` contains a `Tensor` whose value is not constant.
  """
  # When separator is not None, We infer that dtype is string and
  # reduce_join will be called.
  if separator is None:
    maybe_separator = {}
  else:
    maybe_separator = {'separator': separator}

  if not ragged_tensor.is_ragged(rt_input):
    return reduce_op(
        rt_input, axis, keepdims=keepdims, name=name, **maybe_separator)

  if isinstance(axis, tensor.Tensor):
    axis = tensor_util.constant_value(axis)
    if axis is None:
      raise ValueError('axis must be known at graph construction time.')
    if isinstance(axis, np.ndarray):
      axis = axis.tolist()

  # When reducing all axes, just ignore splits & reduce the inner values.
  if axis is None:
    result = reduce_op(rt_input.flat_values, None, keepdims=keepdims,
                       name=name, **maybe_separator)
    if keepdims:
      # Expand the result to the input number of dimensions.
      for _ in rt_input.shape[1:]:
        result = array_ops.expand_dims(result, axis=0)
    return result

  with ops.name_scope(name, 'RaggedReduce', [rt_input, axis]):
    if isinstance(axis, (tuple, list)):
      if not axis:
        return rt_input
      elif len(axis) == 1:
        axis = axis[0]
      else:
        # When reducing multiple axes, as we reduce one at a time (see below),
        # the negative axis has to be converted to positive at the first run
        # as the sort with negative axis will have different orders.
        # See GitHub issue 27497.
        axis = [
            array_ops.get_positive_axis(a, rt_input.shape.ndims, 'axis[%s]' % i,
                                        'rank(input_tensor)')
            for i, a in enumerate(axis)
        ]
        # When reducing multiple axes, just reduce one at a time.  This is less
        # efficient, and only works for associative ops.  (In particular, it
        # does not work for reduce_mean.)  However, reducing multiple axes at
        # once will probably require a nontrivial c++ op.
        axis = sorted(axis)
        inner_reduced = ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
                                                rt_input, axis[-1], keepdims,
                                                separator)
        return ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
                                       inner_reduced, axis[:-1], keepdims,
                                       separator)

    rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        rt_input, name='rt_input')

    axis = array_ops.get_positive_axis(
        axis, rt_input.shape.ndims, ndims_name='rank(input_tensor)')

    if axis == 0:
      # out[i_1, i_2, ..., i_N] = sum_{j} rt_input[j, i_1, i_2, ..., i_N]
      row_lengths = rt_input.row_splits[1:] - rt_input.row_splits[:-1]
      num_segments = math_ops.maximum(math_ops.reduce_max(row_lengths), 0)
      # NOTE: `range` is this module's ragged range, not the builtin.
      segment_ids = range(row_lengths).values
      result = _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,
                                         segment_ids, num_segments, separator)
      if keepdims:
        result = array_ops.expand_dims(result, axis=0)
      return result
    elif axis == 1:
      # out[i_0, i_1, i_2, ..., i_N] = sum_{j} rt_input[i_0, j, i_2, ..., i_N]
      num_segments = array_ops.shape(rt_input.row_splits)[0] - 1
      segment_ids = segment_id_ops.row_splits_to_segment_ids(
          rt_input.row_splits)
      result = _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,
                                         segment_ids, num_segments, separator)
      if keepdims:
        result = array_ops.expand_dims(result, axis=1)
      return result
    else:
      # out[i_0, ..., i_[axis-1], i_axis+1], ..., i_N] =
      #     sum_{j} rt_input [i_0, ..., i_[axis-1], j, i_axis+1], ..., i_N]
      return rt_input.with_values(
          ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
                                  rt_input.values, axis - 1, keepdims,
                                  separator))


@dispatch.dispatch_for_api(math_ops.reduce_sum)
def reduce_sum(input_tensor: ragged_tensor.Ragged,
               axis=None,
               keepdims=None,
               name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""

  return ragged_reduce_aggregate(
      reduce_op=math_ops.reduce_sum,
      unsorted_segment_op=math_ops.unsorted_segment_sum,
      rt_input=input_tensor,
      axis=axis,
      keepdims=keepdims,
      name=(name or 'RaggedReduceSum'))


@dispatch.dispatch_for_api(math_ops.reduce_prod)
def reduce_prod(input_tensor: ragged_tensor.Ragged,
                axis=None,
                keepdims=None,
                name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  return ragged_reduce_aggregate(
      reduce_op=math_ops.reduce_prod,
      unsorted_segment_op=math_ops.unsorted_segment_prod,
      rt_input=input_tensor,
      axis=axis,
      keepdims=keepdims,
      name=(name or 'RaggedReduceProd'))


@dispatch.dispatch_for_api(math_ops.reduce_min)
def reduce_min(input_tensor: ragged_tensor.Ragged,
               axis=None,
               keepdims=None,
               name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  return ragged_reduce_aggregate(
      reduce_op=math_ops.reduce_min,
      unsorted_segment_op=math_ops.unsorted_segment_min,
      rt_input=input_tensor,
      axis=axis,
      keepdims=keepdims,
      name=(name or 'RaggedReduceMin'))


@dispatch.dispatch_for_api(math_ops.reduce_max)
def reduce_max(input_tensor: ragged_tensor.Ragged,
               axis=None,
               keepdims=None,
               name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  return ragged_reduce_aggregate(
      reduce_op=math_ops.reduce_max,
      unsorted_segment_op=math_ops.unsorted_segment_max,
      rt_input=input_tensor,
      axis=axis,
      keepdims=keepdims,
      name=(name or 'RaggedReduceMax'))


@dispatch.dispatch_for_api(math_ops.reduce_mean)
def reduce_mean(input_tensor: ragged_tensor.Ragged,
                axis=None,
                keepdims=None,
                name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  with ops.name_scope(name, 'RaggedReduceMean', [input_tensor, axis]):
    total = reduce_sum(input_tensor, axis, keepdims)
    if ragged_tensor.is_ragged(input_tensor):
      # Count elements per reduction group by summing a same-structure ones
      # tensor, so ragged row lengths are respected.
      ones = ragged_tensor.RaggedTensor.from_nested_row_splits(
          array_ops.ones_like(input_tensor.flat_values),
          input_tensor.nested_row_splits,
          validate=False)
    else:
      ones = array_ops.ones_like(input_tensor)
    count = reduce_sum(ones, axis, keepdims)
    if ragged_tensor.is_ragged(total):
      return ragged_tensor.RaggedTensor.from_nested_row_splits(
          total.flat_values / count.flat_values,
          total.nested_row_splits,
          validate=False)
    else:
      return total / count


@dispatch.dispatch_for_api(math_ops.reduce_variance)
def reduce_variance(input_tensor: ragged_tensor.Ragged,
                    axis=None,
                    keepdims=False,
                    name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  with ops.name_scope(name, 'RaggedReduceVariance', [input_tensor, axis]):
    input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        input_tensor, name='input_tensor')
    if input_tensor.dtype.is_complex:
      raise ValueError(
          'reduce_variance is not supported for RaggedTensors with complex'
          ' dtypes.'
      )
    square_of_input = math_ops.square(input_tensor)
    mean_of_square = reduce_mean(square_of_input, axis=axis, keepdims=keepdims)
    mean = reduce_mean(input_tensor, axis=axis, keepdims=keepdims)
    square_of_mean = math_ops.square(mean)
    # Note: the above method of computing variance is not numerically stable,
    # and can result in negative variances.  Here we clip to >= 0.
    return math_ops.maximum(mean_of_square - square_of_mean, 0)


@dispatch.dispatch_for_api(math_ops.reduce_std)
def reduce_std(input_tensor: ragged_tensor.Ragged,
               axis=None,
               keepdims=False,
               name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  with ops.name_scope(name, 'RaggedReduceStd', [input_tensor, axis]):
    variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)
    return math_ops.sqrt(variance)


def _cast(input_tensor, dtype):
  # Casts the flat values of a potentially-ragged tensor, preserving structure.
  return ragged_functional_ops.map_flat_values(math_ops.cast, input_tensor,
                                               dtype)


@dispatch.dispatch_for_api(math_ops.reduce_all)
def reduce_all(input_tensor: ragged_tensor.Ragged,
               axis=None,
               keepdims=None,
               name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  with ops.name_scope(name, 'RaggedReduceAll', [input_tensor, axis]):
    # all(bools) == (product of bools-as-ints != 0)
    return _cast(
        reduce_prod(_cast(input_tensor, dtypes.int32), axis, keepdims),
        dtypes.bool)


@dispatch.dispatch_for_api(math_ops.reduce_any)
def reduce_any(input_tensor: ragged_tensor.Ragged,
               axis=None,
               keepdims=None,
               name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  with ops.name_scope(name, 'RaggedReduceAny', [input_tensor, axis]):
    # any(bools) == (sum of bools-as-ints != 0)
    return _cast(
        reduce_sum(_cast(input_tensor, dtypes.int32), axis, keepdims),
        dtypes.bool)


def _set_ragged_reduce_docstring(func, combination, combined, default, example):
  """Fills in `func.__doc__` from the shared reduce-op docstring template."""
  func.__doc__ = _RAGGED_REDUCE_DOCSTRING % dict(
      combination=combination,
      combined=combined,
      default=default,
      example=example)


_set_ragged_reduce_docstring(reduce_sum, 'sum', 'summed', '0',
                             _RAGGED_REDUCE_SUM_EXAMPLE)
_set_ragged_reduce_docstring(reduce_prod, 'product', 'multiplied', '1',
                             _RAGGED_REDUCE_PROD_EXAMPLE)
_set_ragged_reduce_docstring(reduce_min, 'minimum', 'minimized',
                             '`input_tensor.dtype.min`',
                             _RAGGED_REDUCE_MIN_EXAMPLE)
_set_ragged_reduce_docstring(reduce_max, 'maximum', 'maximized',
                             '`input_tensor.dtype.max`',
                             _RAGGED_REDUCE_MAX_EXAMPLE)
+_set_ragged_reduce_docstring(reduce_mean, 'mean', 'averaged', 'NaN', + _RAGGED_REDUCE_MEAN_EXAMPLE) +_set_ragged_reduce_docstring(reduce_variance, 'variance', 'averaged', 'NaN', + _RAGGED_REDUCE_VARIANCE_EXAMPLE) +_set_ragged_reduce_docstring(reduce_std, 'std', 'averaged', 'NaN', + _RAGGED_REDUCE_STD_EXAMPLE) +_set_ragged_reduce_docstring(reduce_all, 'logical and', 'and-ed', 'True', + _RAGGED_REDUCE_ALL_EXAMPLE) +_set_ragged_reduce_docstring(reduce_any, 'logical or', 'or-ed', 'False', + _RAGGED_REDUCE_ANY_EXAMPLE) + + +# =============================================================================== +# ragged.matmul +# =============================================================================== +@dispatch.dispatch_for_api(math_ops.matmul) +def matmul( + a: ragged_tensor.RaggedOrDense, + b: ragged_tensor.RaggedOrDense, + transpose_a=False, + transpose_b=False, + adjoint_a=False, + adjoint_b=False, + a_is_sparse=False, + b_is_sparse=False, + output_type=None, + grad_a=False, + grad_b=False, + name=None, +): + """Multiplies matrix `a` by matrix `b`. + + If all transpose or adjoint attributes are `False` then: + + ``` + output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j]), for all indices i, j. + ``` + + The inputs `a` and `b` must have `rank >= 2`, where the outermost `rank - 2` + dimensions are batch dimensions. The inputs must have the same dtype. See + `tf.matmul` for more information. + + Args: + a: `tf.Tensor` or `RaggedTensor` with `rank > 1`. + b: `tf.Tensor` or `RaggedTensor` with same type and rank as `a`. + transpose_a: If `True`, `a` is transposed before multiplication. + transpose_b: If `True`, `b` is transposed before multiplication. + adjoint_a: If `True`, `a` is conjugated & transposed before multiplication. + adjoint_b: If `True`, `b` is conjugated & transposed before multiplication. + a_is_sparse: If `True`, optimize assuming `a` is mostly zero. + b_is_sparse: If `True`, optimize assuming `b` is mostly zero. 
+ output_type: The output datatype (optional). + grad_a: Unused. + grad_b: Unused. + name: Name for the operation (optional). + + Returns: + A `Tensor` or `RaggedTensor` with the same rank and shape as `a`, where + each inner-most matrix is the product of the corresponding matrices in `a` + and `b`. + """ + del grad_a + del grad_b + if transpose_a and adjoint_a: + raise ValueError('Only one of transpose_a and adjoint_a can be True.') + if transpose_b and adjoint_b: + raise ValueError('Only one of transpose_b and adjoint_b can be True.') + + kwargs = dict( + transpose_a=transpose_a, + transpose_b=transpose_b, + adjoint_a=adjoint_a, + adjoint_b=adjoint_b, + a_is_sparse=a_is_sparse, + b_is_sparse=b_is_sparse, + output_type=output_type) + + with ops.name_scope(name, 'RaggedMatMul', [a, b]) as name: + a = ragged_tensor.convert_to_tensor_or_ragged_tensor(a, name='a') + b = ragged_tensor.convert_to_tensor_or_ragged_tensor(b, name='b') + + a_is_ragged = isinstance(a, ragged_tensor.RaggedTensor) + b_is_ragged = isinstance(b, ragged_tensor.RaggedTensor) + if not (a_is_ragged or b_is_ragged): + return math_ops.matmul(a, b, **kwargs) + + if a.dtype != b.dtype: + raise ValueError('`a` and `b` must have the same dtype.') + + # TODO(edloper): Support broadcasting inputs. (Broadcast support is not + # documented by https://www.tensorflow.org/api_docs/python/tf/linalg/matmul, + # but it is supported by the op.) + + # Find the rank of the input tensors. + if a.shape.rank is None: + if b.shape.rank is None: + raise ValueError('matmul requires at least one input to have known ' + 'rank if either input is ragged.') + rank = b.shape.rank + else: + if b.shape.rank is not None and a.shape.rank != b.shape.rank: + raise ValueError('`a` and `b` must have the same rank.') + rank = a.shape.rank + + # At least one of `a` and `b` is ragged; and ragged tensors always have + # rank>=2. + if rank < 2: + # This can happen if e.g. 
`a` is a 1D dense tensor and `b` is a + # ragged tensor with unknown rank. Since ragged tensors always have + # `rank>=2`, this implies that `a` and `b` have different ranks. + raise ValueError('`a` and `b` must have the same rank.') + + # Rank>3: We have multiple batch dimensions. Merge them into a single + # batch dimension, recursively call `matmul`, and then restore the original + # batch dimension (using a.row_splits). + if rank > 3: + shape_err = 'Batch dimensions of `a` and `b` do not have the same size.' + if not a_is_ragged: + a = ragged_tensor.RaggedTensor.from_tensor(a, ragged_rank=1) + if not b_is_ragged: + b = ragged_tensor.RaggedTensor.from_tensor(b, ragged_rank=1) + with ops.control_dependencies([ + check_ops.assert_equal(a.row_splits, b.row_splits, message=shape_err) + ]): + flat_result = matmul(a.values, b.values, **kwargs) + return a.with_values(flat_result) + + if rank == 2: + return _matmul_2d(a, b, **kwargs) + + assert rank == 3 # I.e., we have a single batch dimension. + + a_ragged_rank = a.ragged_rank if a_is_ragged else 0 + if a_ragged_rank == 1 and not (b_is_ragged or transpose_a or adjoint_a): + # If `a.shape=[B, (I), J]` and `b.shape=[B, J, K], then we can compute + # the result with a single dense `matmul`. + return _matmul_3d_with_batch_dim_folding(a, b, **kwargs) + else: + # Otherwie, fall back on using `map_fn`. + return _matmul_3d_with_map_fn(a, b, **kwargs) + + +def _matmul_2d(a, b, **kwargs): + """Multiplies potentially ragged 2D tensors. + + Args: + a: A 2D Tensor or RaggedTensor with `shape=[I, J]` + b: A 2D Tensor or RaggedTensor with `shape=[J, K]` + **kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a). + + Returns: + A 2D Tensor with `shape=[I, K]`. + """ + # multiplying `a` and `b` is only well-defined if `a` and `b` are + # actually uniform (and just happened to be stored as ragged tensors). + # Check that they're uniform, convert them to tf.Tensor. 
+ ragged_err = ('The matrices in `a` and `b` may not be ' + 'ragged in their innermost dimension.') + checks = [] + if isinstance(a, ragged_tensor.RaggedTensor): + original_size = array_ops.size(a.flat_values) + a = a.to_tensor() + checks.append( + check_ops.assert_equal( + original_size, array_ops.size(a), message=ragged_err)) + if isinstance(b, ragged_tensor.RaggedTensor): + original_size = array_ops.size(b.flat_values) + b = b.to_tensor() + checks.append( + check_ops.assert_equal( + original_size, array_ops.size(b), message=ragged_err)) + with ops.control_dependencies(checks): + return math_ops.matmul(a, b, **kwargs) + + +def _matmul_3d_with_map_fn(a, b, **kwargs): + """Multiplies batches of 2D matrices using map_fn. + + `output[n, i, k]` = sum_j (a[n, i, j] * b[n, j, k])` (for all `n`, `i`, `k`). + + Requires that `a[n, i].nrows()` == `b[n].nrows()` (for all `n` and `i`). + + Args: + a: A 3D Tensor or RaggedTensor with `shape=[B, I, J]`, where dimensions `I` + and `J` may be ragged. + b: A 3D Tensor or RaggedTensor with `shape=[B, J, K]`, where dimensions `J` + and `K` may be ragged. + **kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a). + + Returns: + A 3D RaggedTensor with `shape=[B, (I), (K)]`. + """ + # Determine the ragged rank of the result. In the normal case, we have: + # [B, I, J] * [B, J, K] -> [B, I, K] + # Or if we're using transpose_b, then we have: + # [B, I, J] * [B, K, J] -> [B, I, K] + # In either case, output_ragged_rank=2 iff the K dimension is ragged. + if (isinstance(b, ragged_tensor.RaggedTensor) and + (b.ragged_rank == 2 or kwargs.get('transpose_b') or + kwargs.get('adjoint_b'))): + output_ragged_rank = 2 + else: + output_ragged_rank = 1 + + def single_batch_matmul(x): + out = _matmul_2d(x[0], x[1], **kwargs) + if output_ragged_rank == 2: + out = ragged_tensor.RaggedTensor.from_tensor(out) + return out + + fn_out_shape = None # Figure out proper shape. 
+ row_splits_dtype = ( + a.row_splits.dtype + if isinstance(a, ragged_tensor.RaggedTensor) else b.row_splits.dtype) + output_type = kwargs['output_type'] + if output_type is None: + output_type = a.dtype + spec = ragged_tensor.RaggedTensorSpec( + shape=fn_out_shape, + dtype=output_type, + ragged_rank=output_ragged_rank - 1, + row_splits_dtype=row_splits_dtype) + result = map_fn.map_fn( + single_batch_matmul, elems=(a, b), fn_output_signature=spec) + + # map_fn loses shape information; restore it, where possible. + # pylint: disable=protected-access + if kwargs.get('transpose_a') or kwargs.get('adjoint_a'): + result._set_shape(a.shape[:-2] + a.shape[-1:] + [None]) + else: + result._set_shape(a.shape[:-2] + a.shape[-2:-1] + [None]) + if kwargs.get('transpose_b') or kwargs.get('adjoint_b'): + result._set_shape(b.shape[:-2] + [None] + b.shape[-2:-1]) + else: + result._set_shape(b.shape[:-2] + [None] + b.shape[-1:]) + + return result + + +def _matmul_3d_with_batch_dim_folding(a, b, **kwargs): + """Multiply batches of 2D matrices where only `a.shape[1]` is ragged. + + Args: + a: A RaggedTensor with `shape=[B, (I), J]`. (ragged_rank must be 1.) + b: A Tensor with `shape=[B, J, K]` + **kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a). + transpose_a and adjoint_a must not be true. + + Returns: + A RaggedTensor with `shape=[B, (I), K]. 
+ """ + # reshaped_a.shape = [sum(i_1, i_2, ..., i_B), 1, J] + reshaped_a = array_ops.expand_dims(a.values, 1) + # reshaped_b.shape = [sum(i_1, i_2, ..., i_B), J, K] + reshaped_b = array_ops.repeat(b, a.row_lengths(), axis=0) + # flat_result.shape = [sum(i_1, i_2, ..., i_B), 1, K] + flat_result = math_ops.matmul(reshaped_a, reshaped_b, **kwargs) + # result.shape = [B, (I), K] + return a.with_values(array_ops.squeeze(flat_result, axis=1)) + + +# =============================================================================== +# ragged.softmax +# =============================================================================== +@dispatch.dispatch_for_api(nn_ops.softmax_v2) +def softmax(logits: ragged_tensor.Ragged, axis=None, name=None): + """Computes softmax activations. + + Used for multi-class predictions. The sum of all outputs generated by softmax + is 1. + + This function performs the equivalent of + + softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis) + + Example usage: + + >>> softmax = tf.nn.softmax([-1, 0., 1.]) + >>> softmax + + >>> sum(softmax) + + + Args: + logits: A non-empty `Tensor`. Must be one of the following types: `half`, + `float32`, `float64`. + axis: The dimension softmax would be performed on. The default is -1 which + indicates the last dimension. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type and shape as `logits`. + + Raises: + InvalidArgumentError: if `logits` is empty or `axis` is beyond the last + dimension of `logits`. 
+ """ + if axis is None: + axis = -1 + + with ops.name_scope(name, 'RaggedSoftmax', [logits]) as name: + max_input = reduce_max(logits, axis=axis, keepdims=True) + logits_exp = math_ops.exp(math_ops.subtract(logits, max_input)) + denominator = reduce_sum(logits_exp, axis=axis, keepdims=True) + return math_ops.divide(logits_exp, denominator) + + +# =============================================================================== +# ragged.add_n +# =============================================================================== +@dispatch.dispatch_for_api(math_ops.add_n) +def add_n(inputs: typing.List[ragged_tensor.RaggedOrDense], name=None): + """RaggedTensor implementation for tf.math.add_n.""" + if len(inputs) < 0: + raise ValueError('tf.add_n: expected at least one input.') + with ops.name_scope(name, 'RaggedAddN', inputs): + return ragged_functional_ops.map_flat_values(math_ops.add_n, inputs) + + +# =============================================================================== +# Ragged version of nn_ops.dropout +# =============================================================================== +@dispatch.dispatch_for_api(nn_ops.dropout) +def dropout_v1(x: ragged_tensor.Ragged, + keep_prob=None, + noise_shape=None, + seed=None, + name=None, + rate=None): + """Ragged dispatch target for tf.nn.dropout.""" + if noise_shape is not None: + raise ValueError('noise_shape is not supported yet for RaggedTensor x') + with ops.name_scope(name, 'RaggedNNDropout', [x, rate]): + x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x') + return x.with_flat_values( + nn_ops.dropout( + x.flat_values, keep_prob=keep_prob, seed=seed, rate=rate)) + + +@dispatch.dispatch_for_api(nn_ops.dropout_v2) +def dropout_v2(x: ragged_tensor.Ragged, + rate, + noise_shape=None, + seed=None, + name=None): + """Ragged dispatch target for tf.nn.dropout.""" + if noise_shape is not None: + raise ValueError('noise_shape is not supported yet for RaggedTensor x') + with ops.name_scope(name, 
                      'RaggedNNDropout', [x, rate]):
    x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')
    # Dropout preserves the ragged row structure, so it is applied to the
    # flat values only.
    return x.with_flat_values(
        nn_ops.dropout_v2(x.flat_values, rate=rate, seed=seed))


@dispatch.dispatch_for_api(nn_ops.stateless_dropout)
def stateless_dropout(x: ragged_tensor.Ragged,
                      rate,
                      seed,
                      rng_alg=None,
                      noise_shape=None,
                      name=None):
  """Ragged dispatch target for tf.nn.experimental.stateless_dropout.

  Same contract as `dropout_v2` above, but deterministic for a fixed `seed`.
  `noise_shape` is rejected because a custom noise shape cannot be aligned
  with ragged row partitions.
  """
  if noise_shape is not None:
    raise ValueError('noise_shape is not supported yet for RaggedTensor x')
  with ops.name_scope(name, 'RaggedNNStatelessDropout', [x, rate]):
    x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')
    return x.with_flat_values(
        nn_ops.stateless_dropout(
            x.flat_values, rate=rate, seed=seed, rng_alg=rng_alg))


#===============================================================================
# Ragged version of Tensor.__eq__ and Tensor.__ne__
#===============================================================================
@dispatch.dispatch_for_api(math_ops.tensor_equals)
def tensor_equals(self: ragged_tensor.RaggedOrDense,
                  other: ragged_tensor.RaggedOrDense):
  """Ragged version of the operation invoked by `Tensor.__eq__`."""
  if other is None:
    return False
  elif _use_legacy_mode_for_tensor_equality(self):
    # Legacy (graph-mode) semantics: `==` is identity, not elementwise.
    return self is other
  else:
    try:
      return math_ops.equal(self, other)
    except (errors.InvalidArgumentError, ValueError):
      return False  # values are not broadcast-compatible.


@dispatch.dispatch_for_api(math_ops.tensor_not_equals)
def tensor_not_equals(self: ragged_tensor.RaggedOrDense,
                      other: ragged_tensor.RaggedOrDense):
  """Ragged version of the operation invoked by `Tensor.__ne__`."""
  if other is None:
    return False
  elif _use_legacy_mode_for_tensor_equality(self):
    # Legacy (graph-mode) semantics: `!=` is identity-based, not elementwise.
    return self is not other
  else:
    try:
      return math_ops.not_equal(self, other)
    except (errors.InvalidArgumentError, ValueError):
      return True  # values are not broadcast-compatible.


def _use_legacy_mode_for_tensor_equality(self):
  # True when eager elementwise `==`/`!=` semantics do NOT apply (TF1-style
  # graph mode), in which case `==` falls back to identity comparison.
  g = getattr(self, 'graph', None)
  return not (tensor.Tensor._USE_EQUALITY and  # pylint: disable=protected-access
              ops.executing_eagerly_outside_functions() and
              (g is None or g.building_function))


def _cumsum_flat_values_at_ragged_rank(last_rp, flat_values, exclusive=False,
                                       reverse=False):
  """Calculate flat_values for math_ops.cumsum when axis==ragged_rank.

  Args:
    last_rp: The innermost row partition of the ragged tensor being summed.
    flat_values: The tensor's flat (innermost) values.
    exclusive: If True, each output excludes its own input element.
    reverse: If True, accumulate from the end of each row toward the start.

  Returns:
    A tensor like `flat_values` holding the per-row cumulative sums.
  """
  # Inclusive cumsum == exclusive cumsum + the element itself, so reduce the
  # inclusive case to the exclusive one.
  if not exclusive:
    partial = _cumsum_flat_values_at_ragged_rank(
        last_rp, flat_values, exclusive=True, reverse=reverse)
    return partial + flat_values

  if reverse:
    # A global (whole-flat_values) cumsum is corrected per row by subtracting
    # the running total at each row's boundary element.  For reverse order,
    # that boundary is each value's last sibling ("youngest") in its row.
    youngest_sibling = array_ops.gather(
        params=last_rp.row_splits(), indices=last_rp.value_rowids() + 1) - 1
    new_flat_values = math_ops.cumsum(flat_values, exclusive=True, reverse=True)
    initial_values = array_ops.gather(params=new_flat_values,
                                      indices=youngest_sibling)

    return new_flat_values - initial_values
  else:
    # Forward order: the boundary is each value's first sibling ("eldest").
    eldest_sibling = array_ops.gather(
        params=last_rp.row_splits(), indices=last_rp.value_rowids())
    new_flat_values = math_ops.cumsum(flat_values, exclusive=True)
    initial_values = array_ops.gather(params=new_flat_values,
                                      indices=eldest_sibling)
    return new_flat_values - initial_values


@dispatch.dispatch_for_api(math_ops.cumsum)
def ragged_cumsum(x: ragged_tensor.Ragged,
                  axis: int = 0,
                  exclusive: bool = False,
                  reverse: bool = False,
                  name: typing.Optional[str] = None):
  """Calculate math_ops.cumsum for a RaggedTensor.

  Given a ragged tensor `x`, the `result` is a ragged tensor with the same
  shape. One can calculate the value of `result[i_1...i_k]` as follows:
  ```
  dense_result=tf.math.cumsum(rt.to_tensor(), axis=axis, exclusive=exclusive,
                              reverse=reverse)
  result[i_1...i_k]=dense_result[i_1...i_k]
  ```

  Args:
    x: the original ragged tensor to sum.
+ axis: the axis along which to sum, can range -rank<=axis x.ragged_rank: + new_axis = axis - x.ragged_rank + cumsum_bound = functools.partial( + math_ops.cumsum, axis=new_axis, exclusive=exclusive, reverse=reverse) + return ragged_functional_ops.map_flat_values(cumsum_bound, x) + else: + dense_version = x.to_tensor() + result = math_ops.cumsum( + dense_version, axis, exclusive=exclusive, reverse=reverse, name=name) + return ragged_tensor.RaggedTensor.from_tensor( + result, lengths=x.nested_row_lengths()) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_operators.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_operators.py new file mode 100644 index 0000000000000000000000000000000000000000..479442c3f4055a53045247a5a4ce9ef977a84b10 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_operators.py @@ -0,0 +1,342 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Operator overloads for `RaggedTensor`.""" + +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_getitem +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import tf_decorator + + +# ============================================================================= +# Equality Docstring +# ============================================================================= +def ragged_eq(self, other): # pylint: disable=g-doc-args + """Returns result of elementwise `==` or False if not broadcast-compatible. + + Compares two ragged tensors elemewise for equality if they are + broadcast-compatible; or returns False if they are not + [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + + Note that this behavior differs from `tf.math.equal`, which raises an + exception if the two ragged tensors are not broadcast-compatible. + + For example: + + >>> rt1 = tf.ragged.constant([[1, 2], [3]]) + >>> rt1 == rt1 + + + >>> rt2 = tf.ragged.constant([[1, 2], [4]]) + >>> rt1 == rt2 + + + >>> rt3 = tf.ragged.constant([[1, 2], [3, 4]]) + >>> # rt1 and rt3 are not broadcast-compatible. + >>> rt1 == rt3 + False + + >>> # You can also compare a `tf.RaggedTensor` to a `tf.Tensor`. + >>> t = tf.constant([[1, 2], [3, 4]]) + >>> rt1 == t + False + >>> t == rt1 + False + >>> rt4 = tf.ragged.constant([[1, 2], [3, 4]]) + >>> rt4 == t + + >>> t == rt4 + + + Args: + other: The right-hand side of the `==` operator. + + Returns: + The ragged tensor result of the elementwise `==` operation, or `False` if + the arguments are not broadcast-compatible. 
+ """ + return math_ops.tensor_equals(self, other) + + +# ============================================================================= +# Ordering Docstring +# ============================================================================= +def ragged_ge(self, other): # pylint: disable=g-doc-args + """Elementwise `>=` comparison of two convertible-to-ragged-tensor values. + + Computes the elemewise `>=` comparison of two values that are convertible to + ragged tenors, with [broadcasting] + (http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) support. + Raises an exception if two values are not broadcast-compatible. + + For example: + + >>> rt1 = tf.ragged.constant([[1, 2], [3]]) + >>> rt1 >= rt1 + + + >>> rt2 = tf.ragged.constant([[2, 1], [3]]) + >>> rt1 >= rt2 + + + >>> rt3 = tf.ragged.constant([[1, 2], [3, 4]]) + >>> # rt1 and rt3 are not broadcast-compatible. + >>> rt1 >= rt3 + Traceback (most recent call last): + ... + InvalidArgumentError: ... + + >>> # You can also compare a `tf.RaggedTensor` to a `tf.Tensor`. + >>> rt4 = tf.ragged.constant([[1, 2],[3, 4]]) + >>> t1 = tf.constant([[2, 1], [4, 3]]) + >>> rt4 >= t1 + + >>> t1 >= rt4 + + + >>> # Compares a `tf.RaggedTensor` to a `tf.Tensor` with broadcasting. + >>> t2 = tf.constant([[2]]) + >>> rt4 >= t2 + + >>> t2 >= rt4 + + + Args: + other: The right-hand side of the `>=` operator. + + Returns: + A `tf.RaggedTensor` of dtype `tf.bool` with the shape that `self` and + `other` broadcast to. + + Raises: + InvalidArgumentError: If `self` and `other` are not broadcast-compatible. 
+ """ + return math_ops.greater_equal(self, other) + + +# ============================================================================= +# Logical Docstring +# ============================================================================= + + +# ============================================================================= +# Arithmetic Docstring +# ============================================================================= +def ragged_abs(self, name=None): # pylint: disable=g-doc-args + r"""Computes the absolute value of a ragged tensor. + + Given a ragged tensor of integer or floating-point values, this operation + returns a ragged tensor of the same type, where each element contains the + absolute value of the corresponding element in the input. + + Given a ragged tensor `x` of complex numbers, this operation returns a tensor + of type `float32` or `float64` that is the absolute value of each element in + `x`. For a complex number \\(a + bj\\), its absolute value is computed as + \\(\sqrt{a^2 + b^2}\\). + + For example: + + >>> # real number + >>> x = tf.ragged.constant([[-2.2, 3.2], [-4.2]]) + >>> tf.abs(x) + + + >>> # complex number + >>> x = tf.ragged.constant([[-2.2 + 4.7j], [-3.2 + 5.7j], [-4.2 + 6.7j]]) + >>> tf.abs(x) + + + Args: + name: A name for the operation (optional). + + Returns: + A `RaggedTensor` of the same size and type as `x`, with absolute values. + Note, for `complex64` or `complex128` input, the returned `RaggedTensor` + will be of type `float32` or `float64`, respectively. + """ + return math_ops.abs(self, name=name) + + +# =========================================================================== +def ragged_and(self, y, name=None): # pylint: disable=g-doc-args + r"""Returns the truth value of elementwise `x & y`. + + Logical AND function. + + Requires that `x` and `y` have the same shape or have + [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + shapes. 
For example, `y` can be: + + - A single Python boolean, where the result will be calculated by applying + logical AND with the single element to each element in `x`. + - A `tf.Tensor` object of dtype `tf.bool` of the same shape or + [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + shape. In this case, the result will be the element-wise logical AND of + `x` and `y`. + - A `tf.RaggedTensor` object of dtype `tf.bool` of the same shape or + [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + shape. In this case, the result will be the element-wise logical AND of + `x` and `y`. + + For example: + + >>> # `y` is a Python boolean + >>> x = tf.ragged.constant([[True, False], [True]]) + >>> y = True + >>> x & y + + >>> tf.math.logical_and(x, y) # Equivalent of x & y + + >>> y & x + + >>> tf.math.reduce_all(x & y) # Reduce to a scalar bool Tensor. + + + >>> # `y` is a tf.Tensor of the same shape. + >>> x = tf.ragged.constant([[True, False], [True, False]]) + >>> y = tf.constant([[True, False], [False, True]]) + >>> x & y + + + >>> # `y` is a tf.Tensor of a broadcast-compatible shape. + >>> x = tf.ragged.constant([[True, False], [True]]) + >>> y = tf.constant([[True], [False]]) + >>> x & y + + + >>> # `y` is a `tf.RaggedTensor` of the same shape. + >>> x = tf.ragged.constant([[True, False], [True]]) + >>> y = tf.ragged.constant([[False, True], [True]]) + >>> x & y + + + >>> # `y` is a `tf.RaggedTensor` of a broadcast-compatible shape. + >>> x = tf.ragged.constant([[[True, True, False]], [[]], [[True, False]]]) + >>> y = tf.ragged.constant([[[True]], [[True]], [[False]]], ragged_rank=1) + >>> x & y + + + Args: + y: A Python boolean or a `tf.Tensor` or `tf.RaggedTensor` of dtype + `tf.bool`. + name: A name for the operation (optional). + + Returns: + A `tf.RaggedTensor` of dtype `tf.bool` with the shape that `x` and `y` + broadcast to. 
+ """ + return math_ops.logical_and(self, y, name) + + +# Helper Methods. +def _right(operator): + """Right-handed version of an operator: swap args x and y.""" + return tf_decorator.make_decorator(operator, lambda y, x: operator(x, y)) + + +def ragged_hash(self): + """The operation invoked by the `RaggedTensor.__hash__` operator.""" + g = getattr(self.row_splits, "graph", None) + # pylint: disable=protected-access + if ( + tensor.Tensor._USE_EQUALITY + and ops.executing_eagerly_outside_functions() + and (g is None or g.building_function) + ): + raise TypeError("RaggedTensor is unhashable.") + else: + return id(self) + + +# Indexing +ragged_tensor.RaggedTensor.__getitem__ = ragged_getitem.ragged_tensor_getitem + +# Equality +ragged_tensor.RaggedTensor.__eq__ = ragged_eq +ragged_tensor.RaggedTensor.__ne__ = math_ops.tensor_not_equals +ragged_tensor.RaggedTensor.__hash__ = ragged_hash + +# Ordering operators +ragged_tensor.RaggedTensor.__ge__ = ragged_ge +ragged_tensor.RaggedTensor.__gt__ = math_ops.greater +ragged_tensor.RaggedTensor.__le__ = math_ops.less_equal +ragged_tensor.RaggedTensor.__lt__ = math_ops.less + +# Logical operators +ragged_tensor.RaggedTensor.__and__ = ragged_and +ragged_tensor.RaggedTensor.__rand__ = _right(ragged_and) + +ragged_tensor.RaggedTensor.__invert__ = math_ops.logical_not +ragged_tensor.RaggedTensor.__ror__ = _right(math_ops.logical_or) +ragged_tensor.RaggedTensor.__or__ = math_ops.logical_or +ragged_tensor.RaggedTensor.__xor__ = math_ops.logical_xor +ragged_tensor.RaggedTensor.__rxor__ = _right(math_ops.logical_xor) + +# Arithmetic operators +ragged_tensor.RaggedTensor.__abs__ = ragged_abs +ragged_tensor.RaggedTensor.__add__ = math_ops.add +ragged_tensor.RaggedTensor.__radd__ = _right(math_ops.add) +ragged_tensor.RaggedTensor.__div__ = math_ops.div +ragged_tensor.RaggedTensor.__rdiv__ = _right(math_ops.div) +ragged_tensor.RaggedTensor.__floordiv__ = math_ops.floordiv +ragged_tensor.RaggedTensor.__rfloordiv__ = 
_right(math_ops.floordiv) +ragged_tensor.RaggedTensor.__mod__ = math_ops.floormod +ragged_tensor.RaggedTensor.__rmod__ = _right(math_ops.floormod) +ragged_tensor.RaggedTensor.__mul__ = math_ops.multiply +ragged_tensor.RaggedTensor.__rmul__ = _right(math_ops.multiply) +ragged_tensor.RaggedTensor.__neg__ = math_ops.negative +ragged_tensor.RaggedTensor.__pow__ = math_ops.pow +ragged_tensor.RaggedTensor.__rpow__ = _right(math_ops.pow) +ragged_tensor.RaggedTensor.__sub__ = math_ops.subtract +ragged_tensor.RaggedTensor.__rsub__ = _right(math_ops.subtract) +ragged_tensor.RaggedTensor.__truediv__ = math_ops.truediv +ragged_tensor.RaggedTensor.__rtruediv__ = _right(math_ops.truediv) + + +def ragged_bool(self): # pylint: disable=g-doc-args + """Raises TypeError when a RaggedTensor is used as a Python bool. + + To prevent RaggedTensor from being used as a bool, this function always raise + TypeError when being called. + + For example: + + >>> x = tf.ragged.constant([[1, 2], [3]]) + >>> result = True if x else False # Evaluate x as a bool value. + Traceback (most recent call last): + ... + TypeError: RaggedTensor may not be used as a boolean. + + >>> x = tf.ragged.constant([[1]]) + >>> r = (x == 1) # tf.RaggedTensor [[True]] + >>> if r: # Evaluate r as a bool value. + ... pass + Traceback (most recent call last): + ... + TypeError: RaggedTensor may not be used as a boolean. + """ + raise TypeError("RaggedTensor may not be used as a boolean.") + + +ragged_tensor.RaggedTensor.__bool__ = ragged_bool # Python3 bool conversion. +ragged_tensor.RaggedTensor.__nonzero__ = ragged_bool # Python2 bool conversion. 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0809d152bac3230f0bac37a0333cafde7130416e --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_ops.py @@ -0,0 +1,51 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Import all modules in the `ragged` package that define exported symbols. + +Additional, import ragged_dispatch (which has the side-effect of registering +dispatch handlers for many standard TF ops) and ragged_operators (which has the +side-effect of overriding RaggedTensor operators, such as RaggedTensor.__add__). + +We don't import these modules from ragged/__init__.py, since we want to avoid +circular dependencies. 
+""" + + +# pylint: disable=unused-import +from tensorflow.python.ops.ragged import ragged_array_ops +from tensorflow.python.ops.ragged import ragged_autograph +from tensorflow.python.ops.ragged import ragged_batch_gather_ops +from tensorflow.python.ops.ragged import ragged_batch_gather_with_default_op +from tensorflow.python.ops.ragged import ragged_bincount_ops +from tensorflow.python.ops.ragged import ragged_check_ops +from tensorflow.python.ops.ragged import ragged_concat_ops +from tensorflow.python.ops.ragged import ragged_conversion_ops +from tensorflow.python.ops.ragged import ragged_dispatch +from tensorflow.python.ops.ragged import ragged_embedding_ops +from tensorflow.python.ops.ragged import ragged_factory_ops +from tensorflow.python.ops.ragged import ragged_functional_ops +from tensorflow.python.ops.ragged import ragged_gather_ops +from tensorflow.python.ops.ragged import ragged_getitem +from tensorflow.python.ops.ragged import ragged_image_ops +from tensorflow.python.ops.ragged import ragged_map_ops +from tensorflow.python.ops.ragged import ragged_math_ops +from tensorflow.python.ops.ragged import ragged_operators +from tensorflow.python.ops.ragged import ragged_squeeze_op +from tensorflow.python.ops.ragged import ragged_string_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged import ragged_tensor_shape +from tensorflow.python.ops.ragged import ragged_tensor_value +from tensorflow.python.ops.ragged import ragged_where_op +from tensorflow.python.ops.ragged import segment_id_ops diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_shape.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_shape.py new file mode 100644 index 0000000000000000000000000000000000000000..8fb0c56e8ed6447fff091fc7ff2a1dc81d14c01c --- /dev/null +++ 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_tensor_shape.py @@ -0,0 +1,628 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Shapes & broadcasting for RaggedTensors.""" + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import ragged_array_ops +from tensorflow.python.ops.ragged import ragged_config +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged import ragged_util + + +class RaggedTensorDynamicShape: + """A collection of tensors encoding the shape of a potentially ragged tensor. + + Each `RaggedTensorDynamicShape` consists of an ordered list of dimension + sizes. There are two dimension types: + + * "Uniform dimensions" are dimensions where all slices have the same + length. `RaggedTensorDynamicShape` records the size of each uniform + dimension using a single scalar integer. 
+ + * "Ragged dimensions" are dimensions whose slices may have different + lengths. `RaggedTensorDynamicShape` records the size of each ragged + dimension using an integer vector containing the slice lengths for all + the slices across that dimension. + + Furthermore, there are two ways a dimension might be encoded: + + * "Partitioned dimensions" are dimensions that are encoded using a + `RaggedTensor`'s `nested_row_splits`. The outermostmost partitioned + dimension must be uniform, and the innermost partitioned dimension must + be ragged. + + * "Inner dimensions" are dimensions that are encoded using a + `RaggedTensor`'s `flat_values`. Inner dimensions are always uniform. + + The sizes of partitioned dimensions are recorded using `partitioned_dim_sizes` + and `inner_dim_sizes`: + + * `partitioned_dim_sizes` is a list of tensors (one for each partitioned + dimension). + + * For uniform dimensions, the tensor is an integer scalar specifying the + size of all slices across that dimension. + * For ragged dimensions, the tensor is an integer vector specifying the + size of each slice across that dimension. + + * `inner_dim_sizes` is a single integer vector, where each element + specifies the size of a single inner dimension. + + Examples: + + Tensor | Ragged | Partitioned Dim Sizes | Inner Dim + : Rank : : Sizes + ------------------------------ | ------ | ---------------------- | ---------- + `[[1, 2, 3], [4, 5, 6]]` | 0 | | `2, 3` + `[[1, 2], [], [3, 4, 5]]` | 1 | `3, (2, 0, 3)` | + `[[[1, 2], [3, 4]], [[5, 6]]]` | 1 | `2, (2, 1)` | 2 + `[[[1, 2], [3]], [[4, 5]]]` | 2 | `2, (2, 1), (2, 1, 2)` | + """ + + def __init__(self, partitioned_dim_sizes, inner_dim_sizes, + dim_size_dtype=None): + """Creates a RaggedTensorDynamicShape. + + Args: + partitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for + each partitioned dimension. 
If dimension `d` is uniform, then + `partitioned_dim_sizes[d]` must be an integer scalar, specifying the + size of all slices across dimension `d`. If dimension `d` is ragged, + then `partitioned_dim_sizes[d]` must be an integer vector, specifying + the size of each slice across dimension `d`. + inner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the + number of inner dimensions. `inner_dim_sizes[n]` is the size of all + slices across the `n`th inner dimension (which is the + `(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor. + dim_size_dtype: dtype for dimension sizes. If not specified, then it + is chosen based on the dtypes of `partitioned_dim_sizes` and + `inner_dim_sizes`. + """ + assert isinstance(partitioned_dim_sizes, (list, tuple)) + + with ops.name_scope(None, 'RaggedTensorDynamicShape', + (partitioned_dim_sizes, inner_dim_sizes)): + partitioned_dim_sizes = tuple( + ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i) + for (i, size) in enumerate(partitioned_dim_sizes)) + inner_dim_sizes = ops.convert_to_tensor( + inner_dim_sizes, name='inner_dim_sizes') + + # Validate shapes. + if partitioned_dim_sizes: + for axis, dimension_size in enumerate(partitioned_dim_sizes): + if dimension_size.shape.ndims is None: + raise ValueError( + 'rank of partitioned_dim_sizes[%d] is unknown' % axis) + dimension_size.shape.with_rank_at_most(1) + if partitioned_dim_sizes[0].shape.ndims == 1: + raise ValueError('outermost partitioned dimension must be uniform') + if partitioned_dim_sizes[-1].shape.ndims == 0: + raise ValueError('innermost partitioned dimension must be ragged') + inner_dim_sizes.shape.assert_has_rank(1) + + # Convert dimension size tensors to a single dtype. 
+ if dim_size_dtype is None: + dim_size_dtypes = set( + p.dtype for p in partitioned_dim_sizes if p.shape.ndims == 1) + if not dim_size_dtypes: + dim_size_dtype = dtypes.int64 + elif len(dim_size_dtypes) == 1: + dim_size_dtype = dim_size_dtypes.pop() + else: + if not ragged_config.auto_cast_partition_dtype(): + raise ValueError('partitioned_dim_sizes must have matching dtypes') + dim_size_dtype = dtypes.int64 + partitioned_dim_sizes = tuple(math_ops.cast(p, dim_size_dtype) + for p in partitioned_dim_sizes) + inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype) + + self._partitioned_dim_sizes = partitioned_dim_sizes + self._inner_dim_sizes = inner_dim_sizes + + def __repr__(self): + return ('RaggedTensorDynamicShape' + '(partitioned_dim_sizes=%r, inner_dim_sizes=%r)' % + (self._partitioned_dim_sizes, self._inner_dim_sizes)) + + @staticmethod + def from_dim_sizes(dim_sizes): + """Constructs a ragged shape from a list of dimension sizes. + + This list contains a single tensor for each dimension, where the tensor + is a scalar if the dimension is uniform, or a vector if the dimension is + ragged. + + Args: + dim_sizes: List of int32 or int64 scalars or vectors. + + Returns: + A RaggedTensorDynamicShape. + """ + with ops.name_scope(None, 'RaggedTensorDynamicShapeFromDimensionSizes', + [dim_sizes]): + dim_sizes = tuple( + ops.convert_to_tensor(size, preferred_dtype=dtypes.int64, + name='dim_sizes') for size in dim_sizes) + # Split the dimensions into partitioned & inner dimensions. 
      # A vector dim size marks a ragged dimension; everything up to and
      # including the last ragged dimension is "partitioned", the rest is
      # "inner" (uniform).
      inner_split = 0
      for dim, dim_size in enumerate(dim_sizes):
        if dim_size.shape.ndims == 1:
          inner_split = dim + 1
        elif dim_size.shape.ndims != 0:
          raise ValueError('Each dim_size must be a scalar or a vector')
      return RaggedTensorDynamicShape(dim_sizes[:inner_split],
                                      dim_sizes[inner_split:])

  @classmethod
  def from_tensor(cls, rt_input, dim_size_dtype=None):
    """Constructs a ragged shape for a potentially ragged tensor."""
    with ops.name_scope(None, 'RaggedTensorDynamicShapeFromTensor', [rt_input]):
      rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)
      if not ragged_tensor.is_ragged(rt_input):
        # Dense tensor: no partitioned dimensions; all dims are inner.
        return cls([], array_ops.shape(rt_input), dim_size_dtype=dim_size_dtype)
      else:
        # Ragged tensor: outermost (uniform) nrows, then one row-lengths
        # vector per ragged dimension; the flat_values' trailing dims are
        # the inner (uniform) dims.
        partitioned_dim_sizes = (
            (rt_input.nrows(),) + rt_input.nested_row_lengths())
        return RaggedTensorDynamicShape(
            partitioned_dim_sizes,
            array_ops.shape(rt_input.flat_values)[1:],
            dim_size_dtype=dim_size_dtype)

  def dimension_size(self, axis):
    """Returns the size of slices across the specified dimension."""
    if not isinstance(axis, int):
      raise TypeError('axis must be an integer')
    partitioned_ndims = len(self._partitioned_dim_sizes)
    if axis < partitioned_ndims:
      return self._partitioned_dim_sizes[axis]
    else:
      return self._inner_dim_sizes[axis - partitioned_ndims]

  def is_ragged(self, axis):
    """Returns true if the indicated dimension is ragged."""
    if not isinstance(axis, int):
      raise TypeError('axis must be an integer')
    rank = self.rank
    if axis < 0:
      raise ValueError('Negative axis values are not supported')
    elif rank is not None and axis >= rank:
      raise ValueError('Expected axis=%s < rank=%s' % (axis, rank))
    else:
      # Axis 0 (the outermost partitioned dim) is always uniform; a
      # partitioned dim is ragged iff its size is stored as a vector.
      return (axis > 0 and axis < len(self._partitioned_dim_sizes) and
              self._partitioned_dim_sizes[axis].shape.ndims == 1)

  @property
  def rank(self):
    """The number of dimensions in this shape, or None if unknown."""
    inner_ndims = tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])
    if
  @property
  def partitioned_dim_sizes(self):
    """The partitioned dimension sizes for this shape.

    Returns:
      A `list` of 0-D or 1-D integer `Tensor`.
    """
    return self._partitioned_dim_sizes

  @property
  def inner_dim_sizes(self):
    """The inner dimension sizes for this shape.

    Returns:
      A 1-D integer `Tensor`.
    """
    return self._inner_dim_sizes

  @property
  def num_partitioned_dimensions(self):
    """The number of partitioned dimensions in this shape."""
    return len(self._partitioned_dim_sizes)

  @property
  def num_inner_dimensions(self):
    """The number of inner dimensions, or `None` if not statically known."""
    return tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])

  @property
  def dim_size_dtype(self):
    """DType used by this shape for dimension sizes."""
    return self._inner_dim_sizes.dtype

  def broadcast_to_rank(self, rank):
    """Adds leading size-1 dimensions to broadcast `self` to the given rank.

    E.g., if `shape1` is `[3, (D2), 4]`, then `shape1.broadcast_to_rank(5)`
    is `[1, 1, 3, (D2), 4]`.

    Args:
      rank: The rank for the returned shape.

    Returns:
      A RaggedTensorDynamicShape with `rank` dimensions, whose inner dimensions
      have the same size as `self` and whose outer dimensions have size `1`.

    Raises:
      ValueError: If `self.rank` is unknown or greater than `rank`.
    """
    if self.rank is None:
      raise ValueError('Unable to broadcast: self.rank is unknown')
    dims_to_add = rank - self.rank
    if dims_to_add < 0:
      raise ValueError('Unable to broadcast: rank=%d must be greater than '
                       'self.rank=%d.' % (rank, self.rank))
    elif dims_to_add == 0:
      # Already at the requested rank; nothing to do.
      return self
    elif self._partitioned_dim_sizes:
      # Prepend size-1 entries to the partitioned dims (python ints are
      # converted to scalar tensors by the constructor).
      partitioned_dims = (1,) * dims_to_add + self._partitioned_dim_sizes
      return RaggedTensorDynamicShape(partitioned_dims, self.inner_dim_sizes,
                                      self.dim_size_dtype)
    else:
      # No partitioned dims: prepend 1s to the inner-dims vector instead.
      inner_dims = array_ops.concat(
          [array_ops.ones([dims_to_add], self.dim_size_dtype),
           self.inner_dim_sizes],
          axis=0)
      return RaggedTensorDynamicShape([], inner_dims, self.dim_size_dtype)
+ if self.is_ragged(axis): + if lengths_is_scalar: + condition = math_ops.equal(lengths, 1) + else: + condition = math_ops.reduce_all( + math_ops.equal(lengths, self.dimension_size(axis))) + else: + axis_dim_size = self.dimension_size(axis) + if lengths_is_scalar: + condition = ( + math_ops.equal(lengths, 1) | math_ops.equal(axis_dim_size, 1) + | math_ops.equal(axis_dim_size, lengths)) + else: + condition = math_ops.equal(axis_dim_size, 1) + broadcast_err = [ + 'Unable to broadcast: dimension size mismatch in dimension', axis, + 'lengths=', lengths, 'dim_size=', + self.dimension_size(axis) + ] + broadcast_check = control_flow_assert.Assert( + condition, data=broadcast_err, summarize=10) + + with ops.control_dependencies([broadcast_check]): + # Partitioned dimensions: + if axis < self.num_partitioned_dimensions: + if self.is_ragged(axis): + # Use an identity op to make sure the check actually gets run. + return RaggedTensorDynamicShape( + self._partitioned_dim_sizes, + array_ops.identity(self.inner_dim_sizes), self.dim_size_dtype) + else: + return self._broadcast_uniform_partitioned_dimension(axis, lengths) + + # Inner dimensions: + else: + if lengths_is_scalar: + return self._broadcast_inner_dimension_to_uniform(axis, lengths) + else: + if axis == 0: + raise ValueError('Unable to broadcast: ' + 'outermost dimension must be uniform.') + return self._broadcast_inner_dimension_to_ragged(axis, lengths) + + def num_slices_in_dimension(self, axis): + """Returns the total number of slices across the indicated dimension.""" + if axis < 0: + return constant_op.constant(1, dtype=self.dim_size_dtype) + elif self.is_ragged(axis): + return math_ops.reduce_sum(self._partitioned_dim_sizes[axis]) + else: + return self.dimension_size(axis) * self.num_slices_in_dimension(axis - 1) + + def _broadcast_uniform_partitioned_dimension(self, axis, lengths): + """Broadcasts the partitioned dimension `axis` to match `lengths`.""" + axis_dim_size = self.dimension_size(axis) + 
partitioned_sizes = list(self._partitioned_dim_sizes[:axis]) + + if lengths.shape.ndims == 0: + lengths = array_ops.where( + math_ops.equal(axis_dim_size, 1), lengths, axis_dim_size) + repeats = array_ops.where(math_ops.equal(axis_dim_size, 1), lengths, 1) + splits = array_ops_stack.stack([0, self.num_slices_in_dimension(axis)]) + else: + splits = math_ops.range( + array_ops.size(lengths, out_type=self.dim_size_dtype) + 1) + repeats = lengths + + partitioned_sizes.append(lengths) + + for dim_size in self._partitioned_dim_sizes[axis + 1:]: + if dim_size.shape.ndims == 0: + partitioned_sizes.append(dim_size) + splits *= dim_size + else: + partitioned_sizes.append( + ragged_util.repeat_ranges(dim_size, splits, repeats)) + splits = array_ops.gather( + ragged_util.lengths_to_splits(dim_size), splits) + inner_sizes = self._inner_dim_sizes + return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes, + self.dim_size_dtype) + + def _broadcast_inner_dimension_to_uniform(self, axis, length): + """Broadcasts the inner dimension `axis` to match `lengths`.""" + dim_size = self.dimension_size(axis) + axis_in_inner_dims = axis - self.num_partitioned_dimensions + partitioned_sizes = self._partitioned_dim_sizes + inner_sizes = array_ops.concat([ + self._inner_dim_sizes[:axis_in_inner_dims], + [array_ops.where(math_ops.equal(dim_size, 1), length, dim_size)], + self._inner_dim_sizes[axis_in_inner_dims + 1:] + ], + axis=0) + return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes, + self.dim_size_dtype) + + def _broadcast_inner_dimension_to_ragged(self, axis, lengths): + axis_in_inner_dims = axis - self.num_partitioned_dimensions + partitioned_sizes = ( + self._partitioned_dim_sizes + tuple([ + self._inner_dim_sizes[i] for i in range(axis_in_inner_dims) + ]) + (lengths,)) + inner_sizes = self._inner_dim_sizes[axis_in_inner_dims + 1:] + return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes) + + def with_dim_size_dtype(self, dtype): + if dtype not in (dtypes.int32, 
def broadcast_dynamic_shape(shape_x, shape_y):
  """Returns the shape formed by broadcasting two shapes to be compatible.

  Args:
    shape_x: A `RaggedTensorDynamicShape`
    shape_y: A `RaggedTensorDynamicShape`

  Returns:
    A `RaggedTensorDynamicShape`.
  Raises:
    ValueError: If `shape_x` and `shape_y` are not broadcast-compatible.
  """
  if not isinstance(shape_x, RaggedTensorDynamicShape):
    raise TypeError('shape_x must be a RaggedTensorDynamicShape')
  if not isinstance(shape_y, RaggedTensorDynamicShape):
    raise TypeError('shape_y must be a RaggedTensorDynamicShape')
  if shape_x.rank is None or shape_y.rank is None:
    raise ValueError('Unable to broadcast: unknown rank')

  # Pad the lower-rank shape with leading size-1 dims so both ranks match.
  target_rank = max(shape_x.rank, shape_y.rank)
  shape_x = shape_x.broadcast_to_rank(target_rank)
  shape_y = shape_y.broadcast_to_rank(target_rank)

  # Broadcast one dimension at a time, outermost first.  Each shape is
  # broadcast against the other's (possibly just-updated) dimension size,
  # so after both steps the two shapes agree on that dimension.
  for axis in range(target_rank):
    shape_x = shape_x.broadcast_dimension(axis, shape_y.dimension_size(axis))
    shape_y = shape_y.broadcast_dimension(axis, shape_x.dimension_size(axis))

  return shape_x
def _broadcast_to_uniform_shape(rt_input, shape, broadcast_inner_dimensions):
  """Broadcasts rt_input to the uniform (non-ragged) shape `shape`."""
  if isinstance(rt_input, ragged_tensor.RaggedTensor):
    # A ragged input cannot be broadcast to a fully uniform shape.
    raise ValueError('Incompatible with shape: ragged rank mismatch')
  if broadcast_inner_dimensions:
    return array_ops.broadcast_to(rt_input, shape.inner_dim_sizes)
  else:
    return rt_input


def _broadcast_to_ragged_shape(rt_input, dst_shape, broadcast_inner_dimensions):
  """Broadcasts rt_input to the ragged shape `dst_shape`."""
  # Check that rt_input and dst_shape have the same row_splits dtype.
  if (isinstance(rt_input, ragged_tensor.RaggedTensor) and
      rt_input.row_splits.dtype != dst_shape.dim_size_dtype):
    if not ragged_config.auto_cast_partition_dtype():
      raise ValueError('rt_input and dst_shape have different row_split '
                       'dtypes; use RaggedTensor.with_row_splits_dtype() or '
                       'RaggedTensorDynamicShape.with_dim_size_dtype() to '
                       'convert to a compatible dtype.')
    # Auto-casting enabled: promote both sides to int64.
    rt_input = rt_input.with_row_splits_dtype(dtypes.int64)
    dst_shape = dst_shape.with_dim_size_dtype(dtypes.int64)

  # dst_shape's rank and ragged_rank must be greater than or equal to rt_input's
  if rt_input.shape.ndims is None or dst_shape.rank is None:
    raise ValueError('Unable to broadcast: unknown rank')
  if rt_input.shape.ndims > dst_shape.rank:
    raise ValueError('Incompatible with shape: rank mismatch')
  if (isinstance(rt_input, ragged_tensor.RaggedTensor) and
      rt_input.ragged_rank >= dst_shape.num_partitioned_dimensions):
    raise ValueError('Incompatible with shape: ragged rank mismatch')

  src_shape = RaggedTensorDynamicShape.from_tensor(rt_input)
  src_shape = src_shape.broadcast_to_rank(dst_shape.rank)

  # Add dimensions to rt_input so its rank and ragged_rank matches dst_shape.
  if dst_shape.rank > rt_input.shape.ndims:
    if rt_input.shape.ndims < dst_shape.num_inner_dimensions + 1:
      # Flatten to [-1] + inner dims before wrapping in outer dims below.
      rt_input = array_ops.reshape(
          rt_input, array_ops.concat([[-1], dst_shape.inner_dim_sizes], axis=0))
    for _ in range(dst_shape.rank - rt_input.shape.ndims):
      if ragged_tensor.is_ragged(rt_input):
        nrows = rt_input.nrows()
      else:
        nrows = array_ops.shape(rt_input,
                                out_type=dst_shape.dim_size_dtype)[0]
      # Wrap everything in a single row to add one outer dimension.
      rt_input = ragged_tensor.RaggedTensor.from_row_lengths(rt_input, [nrows],
                                                             validate=False)

  # Add ragged dimensions to match dst_shape.
  if ragged_tensor.is_ragged(rt_input):
    inner_rank_diff = (
        rt_input.flat_values.shape.ndims - 1 - dst_shape.num_inner_dimensions)
    if inner_rank_diff > 0:
      # Convert surplus dense dims of flat_values into ragged dims.
      rt_input = rt_input.with_flat_values(
          ragged_tensor.RaggedTensor.from_tensor(
              rt_input.flat_values, ragged_rank=inner_rank_diff,
              row_splits_dtype=dst_shape.dim_size_dtype))
  else:
    rt_input = ragged_tensor.RaggedTensor.from_tensor(
        rt_input, ragged_rank=dst_shape.num_partitioned_dimensions - 1,
        row_splits_dtype=dst_shape.dim_size_dtype)

  # Do broadcasting for any dimensions that will remain uniform.  We can do
  # these all at once, since they're independent of one another.
  multiples = [1] * dst_shape.rank
  for axis in range(dst_shape.num_partitioned_dimensions):
    if not src_shape.is_ragged(axis) and not dst_shape.is_ragged(axis):
      src_size = src_shape.dimension_size(axis)
      dst_size = dst_shape.dimension_size(axis)
      # Tile only when the source size is (statically) 1 or unknown and the
      # destination size is not statically 1.
      if ((tensor_util.constant_value(src_size) in (1, None)) and
          (tensor_util.constant_value(dst_size) != 1)):
        multiples[axis] = array_ops.where(
            math_ops.equal(src_size, 1), dst_size, 1)
  if not all(isinstance(v, int) and v == 1 for v in multiples):
    multiples = array_ops_stack.stack(multiples, axis=0)
    rt_input = ragged_array_ops.tile(rt_input, multiples)

  if broadcast_inner_dimensions:
    # Broadcast the dense flat_values against dst_shape's inner dims
    # (with a leading 1 standing in for the flattened outer dims).
    new_shape = array_ops.broadcast_dynamic_shape(
        array_ops.shape(
            rt_input.flat_values, out_type=dst_shape.dim_size_dtype),
        array_ops.concat([[1], dst_shape.inner_dim_sizes], axis=0))
    rt_input = rt_input.with_flat_values(
        array_ops.broadcast_to(rt_input.flat_values, new_shape))

  # Do broadcasting for dimensions that become ragged.  We must do these from
  # outermost to innermost.
  for axis in range(dst_shape.num_partitioned_dimensions):
    if not src_shape.is_ragged(axis) and dst_shape.is_ragged(axis):
      dst_size = dst_shape.dimension_size(axis)
      rt_input = _ragged_tile_axis(rt_input, axis, dst_size,
                                   dst_shape.dim_size_dtype)

  return rt_input


def _ragged_tile_axis(rt_input, axis, repeats, row_splits_dtype):
  """Tile a dimension of a RaggedTensor to match a ragged shape."""
  assert axis > 0  # Outermost dimension may not be ragged.

  if not ragged_tensor.is_ragged(rt_input):
    rt_input = ragged_tensor.RaggedTensor.from_tensor(
        rt_input, ragged_rank=1, row_splits_dtype=row_splits_dtype)

  if axis > 1:
    # Recurse into the values until `axis` is the row dimension.
    return rt_input.with_values(
        _ragged_tile_axis(rt_input.values, axis - 1, repeats,
                          row_splits_dtype))
  else:
    src_row_splits = rt_input.nested_row_splits
    src_row_lengths = rt_input.nested_row_lengths()
    splits = src_row_splits[0]

    # Repeat each row `repeats` times, threading the repeat counts down
    # through every nested ragged level.
    dst_row_lengths = [repeats]
    for i in range(1, len(src_row_lengths)):
      dst_row_lengths.append(
          ragged_util.repeat_ranges(src_row_lengths[i], splits, repeats))
      splits = array_ops.gather(src_row_splits[i], splits)
    dst_values = ragged_util.repeat_ranges(rt_input.flat_values, splits,
                                           repeats)
    return ragged_tensor.RaggedTensor.from_nested_row_lengths(
        dst_values, dst_row_lengths, validate=False)
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""It lists ops of RaggedTensor for the interest of test.""" + +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import gen_bitwise_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn_impl +from tensorflow.python.ops import nn_ops +from tensorflow.python.ops import parsing_ops +from tensorflow.python.ops import special_math_ops +from tensorflow.python.ops import string_ops + + +# Constants listing various op types to test. Each operation +# should be included in at least one list below, or tested separately if +# necessary (e.g., because it expects additional arguments). 
# Elementwise ops taking a single float tensor argument.
UNARY_FLOAT_OPS = [
    math_ops.abs,
    math_ops.acos,
    math_ops.acosh,
    math_ops.angle,
    math_ops.asin,
    math_ops.asinh,
    math_ops.atan,
    math_ops.atanh,
    math_ops.ceil,
    math_ops.conj,
    math_ops.cos,
    math_ops.cosh,
    math_ops.digamma,
    math_ops.erf,
    math_ops.erfc,
    math_ops.erfcinv,
    math_ops.erfinv,
    math_ops.exp,
    math_ops.expm1,
    math_ops.floor,
    math_ops.imag,
    math_ops.is_finite,
    math_ops.is_inf,
    math_ops.is_nan,
    math_ops.lgamma,
    math_ops.log,
    math_ops.log1p,
    math_ops.log_sigmoid,
    math_ops.ndtri,
    math_ops.negative,
    math_ops.real,
    math_ops.reciprocal,
    math_ops.reciprocal_no_nan,
    math_ops.rint,
    math_ops.round,
    math_ops.rsqrt,
    math_ops.sign,
    math_ops.sigmoid,
    math_ops.sin,
    math_ops.sinh,
    math_ops.softplus,
    math_ops.sqrt,
    math_ops.square,
    math_ops.tan,
    math_ops.tanh,
    # Neural-net activation functions.
    nn_ops.elu,
    nn_ops.gelu,
    nn_ops.leaky_relu,
    nn_ops.log_softmax,
    nn_ops.relu,
    nn_ops.relu6,
    nn_ops.selu,
    nn_ops.softsign,
    nn_impl.swish,
    # Shape-preserving constructors.
    array_ops.ones_like,
    array_ops.ones_like_v2,
    array_ops.zeros_like,
    array_ops.zeros_like_v2,
    # Special mathematical functions.
    special_math_ops.bessel_i0,
    special_math_ops.bessel_i0e,
    special_math_ops.bessel_i1,
    special_math_ops.bessel_j0,
    special_math_ops.bessel_j1,
    special_math_ops.bessel_i1e,
    special_math_ops.bessel_k0,
    special_math_ops.bessel_k0e,
    special_math_ops.bessel_k1,
    special_math_ops.bessel_k1e,
    special_math_ops.bessel_y0,
    special_math_ops.bessel_y1,
    special_math_ops.dawsn,
    special_math_ops.expint,
    special_math_ops.fresnel_cos,
    special_math_ops.fresnel_sin,
    special_math_ops.spence,
    string_ops.as_string,
]
# Elementwise ops taking a single bool tensor argument.
UNARY_BOOL_OPS = [
    math_ops.logical_not,
]
# Elementwise ops taking a single string tensor argument.
UNARY_STRING_OPS = [
    string_ops.decode_base64,
    string_ops.encode_base64,
    string_ops.string_strip,
    string_ops.string_lower,
    string_ops.string_upper,
    string_ops.string_length,
    string_ops.string_length_v2,
    parsing_ops.decode_compressed,
]
# Elementwise ops taking two float tensor arguments.
BINARY_FLOAT_OPS = [
    math_ops.add,
    math_ops.atan2,
    math_ops.complex,
    math_ops.div,
    math_ops.div_no_nan,
    math_ops.divide,
    math_ops.equal,
    math_ops.floor_div,
    math_ops.floordiv,
    math_ops.floormod,
    math_ops.greater,
    math_ops.greater_equal,
    math_ops.less,
    math_ops.less_equal,
    math_ops.maximum,
    math_ops.minimum,
    math_ops.multiply,
    math_ops.multiply_no_nan,
    math_ops.not_equal,
    math_ops.pow,
    math_ops.realdiv,
    math_ops.squared_difference,
    math_ops.subtract,
    math_ops.truediv,
    math_ops.xdivy,
    math_ops.xlog1py,
    math_ops.xlogy,
    math_ops.zeta,
]
# Elementwise ops taking two bool tensor arguments.
BINARY_BOOL_OPS = [
    math_ops.logical_and,
    math_ops.logical_or,
    math_ops.logical_xor,
]
# Elementwise ops taking a single integer tensor argument.
UNARY_INT_OPS = [
    gen_bitwise_ops.invert,
    string_ops.unicode_script,
]
# Elementwise ops taking two integer tensor arguments.
BINARY_INT_OPS = [
    gen_bitwise_ops.bitwise_and,
    gen_bitwise_ops.bitwise_or,
    gen_bitwise_ops.bitwise_xor,
    gen_bitwise_ops.left_shift,
    gen_bitwise_ops.right_shift,
    math_ops.truncatediv,
    math_ops.truncatemod,
]
# Assertion ops comparing two tensor arguments.
BINARY_ASSERT_OPS = [
    check_ops.assert_equal,
    check_ops.assert_equal_v2,
    check_ops.assert_near,
    check_ops.assert_near_v2,
    check_ops.assert_none_equal,
    check_ops.assert_none_equal_v2,
    check_ops.assert_greater,
    check_ops.assert_greater_v2,
    check_ops.assert_greater_equal,
    check_ops.assert_greater_equal_v2,
    check_ops.assert_less,
    check_ops.assert_less_v2,
    check_ops.assert_less_equal,
    check_ops.assert_less_equal_v2,
]
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Value for RaggedTensor.""" + +import numpy as np + +from tensorflow.python.ops.ragged.row_partition import RowPartition +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export(v1=["ragged.RaggedTensorValue"]) +@dispatch.register_dispatchable_type +class RaggedTensorValue: + """Represents the value of a `RaggedTensor`. + + Warning: `RaggedTensorValue` should only be used in graph mode; in + eager mode, the `tf.RaggedTensor` class contains its value directly. + + See `tf.RaggedTensor` for a description of ragged tensors. + """ + + def __init__(self, values, row_splits): + """Creates a `RaggedTensorValue`. + + Args: + values: A numpy array of any type and shape; or a RaggedTensorValue. + row_splits: A 1-D int32 or int64 numpy array. 
+ """ + if not (isinstance(row_splits, (np.ndarray, np.generic)) and + row_splits.dtype in (np.int64, np.int32) and row_splits.ndim == 1): + raise TypeError("row_splits must be a 1D int32 or int64 numpy array") + if not isinstance(values, (np.ndarray, np.generic, RaggedTensorValue)): + raise TypeError("values must be a numpy array or a RaggedTensorValue") + if (isinstance(values, RaggedTensorValue) and + row_splits.dtype != values.row_splits.dtype): + raise ValueError("row_splits and values.row_splits must have " + "the same dtype") + self._values = values + self._row_splits = row_splits + + row_splits = property( + lambda self: self._row_splits, + doc="""The split indices for the ragged tensor value.""") + values = property( + lambda self: self._values, + doc="""The concatenated values for all rows in this tensor.""") + dtype = property( + lambda self: self._values.dtype, + doc="""The numpy dtype of values in this tensor.""") + + @property + def flat_values(self): + """The innermost `values` array for this ragged tensor value.""" + rt_values = self.values + while isinstance(rt_values, RaggedTensorValue): + rt_values = rt_values.values + return rt_values + + @property + def nested_row_splits(self): + """The row_splits for all ragged dimensions in this ragged tensor value.""" + rt_nested_splits = [self.row_splits] + rt_values = self.values + while isinstance(rt_values, RaggedTensorValue): + rt_nested_splits.append(rt_values.row_splits) + rt_values = rt_values.values + return tuple(rt_nested_splits) + + @property + def ragged_rank(self): + """The number of ragged dimensions in this ragged tensor value.""" + values_is_ragged = isinstance(self._values, RaggedTensorValue) + return self._values.ragged_rank + 1 if values_is_ragged else 1 + + @property + def shape(self): + """A tuple indicating the shape of this RaggedTensorValue.""" + return (self._row_splits.shape[0] - 1,) + (None,) + self._values.shape[1:] + + @property + def _nested_row_partitions(self): + """The 
row_partitions representing this shape.""" + return [RowPartition.from_row_splits(rs) for rs in self.nested_row_splits] + + def __str__(self): + return "" % self.to_list() + + def __repr__(self): + return "tf.RaggedTensorValue(values=%r, row_splits=%r)" % (self._values, + self._row_splits) + + def to_list(self): + """Returns this ragged tensor value as a nested Python list.""" + if isinstance(self._values, RaggedTensorValue): + values_as_list = self._values.to_list() + else: + values_as_list = self._values.tolist() + return [ + values_as_list[self._row_splits[i]:self._row_splits[i + 1]] + for i in range(len(self._row_splits) - 1) + ] diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_where_op.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_where_op.py new file mode 100644 index 0000000000000000000000000000000000000000..f638d51ea0b3a42d48bfd7d3a348632811853fd5 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/ragged_where_op.py @@ -0,0 +1,259 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
@dispatch.dispatch_for_api(array_ops.where_v2)
def where_v2(condition: ragged_tensor.RaggedOrDense,
             x: typing.Optional[ragged_tensor.RaggedOrDense] = None,
             y: typing.Optional[ragged_tensor.RaggedOrDense] = None,
             name=None):
  """Return the elements where `condition` is `True`.

  : If both `x` and `y` are None: Retrieve indices of true elements.

    Returns the coordinates of true elements of `condition`. The coordinates
    are returned in a 2-D tensor with shape
    `[num_true_values, dim_size(condition)]`, where `result[i]` is the
    coordinates of the `i`th true value (in row-major order).

  : If both `x` and `y` are non-`None`: Multiplex between `x` and `y`.

    Choose an output shape from the shapes of `condition`, `x`, and `y` that
    all three shapes are broadcastable to; and then use the broadcasted
    `condition` tensor as a mask that chooses whether the corresponding element
    in the output should be taken from `x` (if `condition` is true) or `y` (if
    `condition` is false).

  NOTE(review): the doctest outputs below were stripped by an earlier
  processing step (angle-bracketed reprs were removed); restored from the
  upstream TensorFlow source -- confirm against the installed version.

  >>> # Example: retrieve indices of true elements
  >>> tf.where(tf.ragged.constant([[True, False], [True]]))
  <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
  array([[0, 0],
         [1, 0]])>

  >>> # Example: multiplex between `x` and `y`
  >>> tf.where(tf.ragged.constant([[True, False], [True, False, True]]),
  ...          tf.ragged.constant([['A', 'B'], ['C', 'D', 'E']]),
  ...          tf.ragged.constant([['a', 'b'], ['c', 'd', 'e']]))
  <tf.RaggedTensor [[b'A', b'b'], [b'C', b'd', b'E']]>

  Args:
    condition: A potentially ragged tensor of type `bool`
    x: A potentially ragged tensor (optional).
    y: A potentially ragged tensor (optional). Must be specified if `x` is
      specified. Must have the same rank and type as `x`.
    name: A name of the operation (optional).

  Returns:
    : If both `x` and `y` are `None`:
      A `Tensor` with shape `(num_true, rank(condition))`.
    : Otherwise:
      A potentially ragged tensor with the same type as `x` and `y`, and whose
      shape is broadcast-compatible with `x`, `y`, and `condition`.

  Raises:
    ValueError: When exactly one of `x` or `y` is non-`None`; or when
      `condition`, `x`, and `y` have incompatible shapes.
  """
  # `x` and `y` select between the two distinct behaviors documented above,
  # so they must be supplied (or omitted) together.
  if (x is None) != (y is None):
    raise ValueError('x and y must be either both None or both non-None')

  with ops.name_scope('RaggedWhere', name, [condition, x, y]):
    condition = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        condition, name='condition')
    if x is None:
      # Coordinate mode: return indices of true elements.
      return _coordinate_where(condition)
    else:
      # Multiplex mode: select elementwise between x and y.
      x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')
      y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, name='y')
      condition, x, y = ragged_tensor.match_row_splits_dtypes(condition, x, y)
      return _elementwise_where_v2(condition, x, y)
+ + : If both `x` and `y` are non-`None`: + Returns a tensor formed by selecting values from `x` where condition is + true, and from `y` when condition is false. In particular: + + : If `condition`, `x`, and `y` all have the same shape: + + * `result[i1...iN] = x[i1...iN]` if `condition[i1...iN]` is true. + * `result[i1...iN] = y[i1...iN]` if `condition[i1...iN]` is false. + + : Otherwise: + + * `condition` must be a vector. + * `x` and `y` must have the same number of dimensions. + * The outermost dimensions of `condition`, `x`, and `y` must all have the + same size. + * `result[i] = x[i]` if `condition[i]` is true. + * `result[i] = y[i]` if `condition[i]` is false. + + Args: + condition: A potentially ragged tensor of type `bool` + x: A potentially ragged tensor (optional). + y: A potentially ragged tensor (optional). Must be specified if `x` is + specified. Must have the same rank and type as `x`. + name: A name of the operation (optional) + + Returns: + : If both `x` and `y` are `None`: + A `Tensor` with shape `(num_true, dim_size(condition))`. + : Otherwise: + A potentially ragged tensor with the same type, rank, and outermost + dimension size as `x` and `y`. + `result.ragged_rank = max(x.ragged_rank, y.ragged_rank)`. + + Raises: + ValueError: When exactly one of `x` or `y` is non-`None`; or when + `condition`, `x`, and `y` have incompatible shapes. + + #### Examples: + + >>> # Coordinates where condition is true. + >>> condition = tf.ragged.constant([[True, False, True], [False, True]]) + >>> print(where(condition)) + tf.Tensor( [[0 0] [0 2] [1 1]], shape=(3, 2), dtype=int64) + + >>> # Elementwise selection between x and y, based on condition. + >>> condition = tf.ragged.constant([[True, False, True], [False, True]]) + >>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']]) + >>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']]) + >>> print(where(condition, x, y)) + + + >>> # Row selection between x and y, based on condition. 
+ >>> condition = [True, False] + >>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']]) + >>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']]) + >>> print(where(condition, x, y)) + + """ + if (x is None) != (y is None): + raise ValueError('x and y must be either both None or both non-None') + with ops.name_scope('RaggedWhere', name, [condition, x, y]): + condition = ragged_tensor.convert_to_tensor_or_ragged_tensor( + condition, name='condition') + if x is None: + return _coordinate_where(condition) + else: + x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x') + y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, name='y') + condition, x, y = ragged_tensor.match_row_splits_dtypes(condition, x, y) + return _elementwise_where(condition, x, y) + + +def _elementwise_where(condition, x, y): + """Ragged version of tf.where(condition, x, y).""" + condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor) + x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor) + y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor) + + if not (condition_is_ragged or x_is_ragged or y_is_ragged): + return array_ops.where(condition, x, y) + + elif condition_is_ragged and x_is_ragged and y_is_ragged: + return ragged_functional_ops.map_flat_values(array_ops.where, condition, x, + y) + elif not condition_is_ragged: + # Concatenate x and y, and then use `gather` to assemble the selected rows. 
def _elementwise_where_v2(condition, x, y):
  """Ragged version of tf.where_v2(condition, x, y)."""
  # Broadcast x, y, and condition to have the same shape.  This can be
  # skipped when all three shapes are fully known and identical.
  if not (condition.shape.is_fully_defined() and x.shape.is_fully_defined() and
          y.shape.is_fully_defined() and x.shape == y.shape and
          condition.shape == x.shape):
    shape_c = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(
        condition)
    shape_x = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(x)
    shape_y = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(y)
    shape = ragged_tensor_shape.broadcast_dynamic_shape(
        shape_c, ragged_tensor_shape.broadcast_dynamic_shape(shape_x, shape_y))
    condition = ragged_tensor_shape.broadcast_to(condition, shape)
    x = ragged_tensor_shape.broadcast_to(x, shape)
    y = ragged_tensor_shape.broadcast_to(y, shape)

  condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)
  x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)
  y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)
  if not (condition_is_ragged or x_is_ragged or y_is_ragged):
    # All dense: defer to the standard op.
    return array_ops.where_v2(condition, x, y)

  # After broadcasting, the ragged inputs have matching row partitions, so
  # the selection can be applied directly to the flat values.
  return ragged_functional_ops.map_flat_values(array_ops.where_v2, condition, x,
                                               y)


def _coordinate_where(condition):
  """Ragged version of tf.where(condition)."""
  if not isinstance(condition, ragged_tensor.RaggedTensor):
    return array_ops.where(condition)

  # The coordinate for each `true` value in condition.values.
  selected_coords = _coordinate_where(condition.values)

  # Convert the first index in each coordinate to a row index and column index.
  condition = condition.with_row_splits_dtype(selected_coords.dtype)
  first_index = selected_coords[:, 0]
  selected_rows = array_ops.gather(condition.value_rowids(), first_index)
  selected_row_starts = array_ops.gather(condition.row_splits, selected_rows)
  # Column index is the offset within the selected row.
  selected_cols = first_index - selected_row_starts

  # Assemble the row & column index with the indices for inner dimensions.
  return array_ops.concat([
      array_ops.expand_dims(selected_rows, 1),
      array_ops.expand_dims(selected_cols, 1), selected_coords[:, 1:]
  ],
                          axis=1)


def _nrows(rt_input, out_type):
  """Returns the outermost dimension size of `rt_input` as a scalar Tensor."""
  if isinstance(rt_input, ragged_tensor.RaggedTensor):
    return rt_input.nrows(out_type=out_type)
  else:
    return array_ops.shape(rt_input, out_type=out_type)[0]
+# ============================================================================== +"""A class used to partition a sequence into contiguous subsequences ("rows"). +""" + + +# TODO(edloper): Make into a ExtensionType (if possible) + + +import numpy as np + +from tensorflow.core.protobuf import struct_pb2 +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.framework import type_spec +from tensorflow.python.framework import type_spec_registry +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import gen_ragged_math_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import segment_id_ops +from tensorflow.python.saved_model import nested_structure_coder +from tensorflow.python.util.tf_export import tf_export + +# =============================================================================== +# RowPartition +# =============================================================================== +# TODO(edloper): Consider removing row_starts and row_limits factory methods +# and accessors from RowPartition. In particular, these two encodings are +# "second-class citizens": we never cache them, and if you do construct a +# RowPartition from them then it may be more expensive than you might expect +# (because we append a value to the beginning/end to transform them into +# splits). If we do remove them from RowPartition, then we would still keep +# the from_row_starts and from_row_limits factory methods in RaggedTensor. 


@tf_export("experimental.RowPartition")
class RowPartition(composite_tensor.CompositeTensor):
  """Partitioning of a sequence of values into contiguous subsequences ("rows").

  A `RowPartition` describes how a sequence with `nvals` items should be
  divided into `nrows` contiguous subsequences ("rows"). For example, a
  `RowPartition` could be used to partition the vector `[1, 2, 3, 4, 5]` into
  subsequences `[[1, 2], [3], [], [4, 5]]`. Note that `RowPartition` stores
  information about how values are partitioned, but does not include the
  partitioned values themselves. `tf.RaggedTensor` is used to pair a `values`
  tensor with one or more `RowPartition`s, providing a complete encoding for a
  ragged tensor (i.e. a tensor with variable-length dimensions).

  `RowPartition`s may be defined using several different schemes:

    * `row_lengths`: an integer vector with shape `[nrows]`, which specifies
      the length of each row.

    * `row_splits`: an integer vector with shape `[nrows+1]`, specifying the
      "split points" between each row.

    * `row_starts`: an integer vector with shape `[nrows]`, which specifies
      the start offset for each row. Equivalent to `row_splits[:-1]`.

    * `row_limits`: an integer vector with shape `[nrows]`, which specifies
      the stop offset for each row. Equivalent to `row_splits[1:]`.

    * `value_rowids` is an integer vector with shape `[nvals]`, corresponding
      one-to-one with sequence values, which specifies the row that each value
      belongs to. If the partition has empty trailing rows, then `nrows`
      must also be specified.

    * `uniform_row_length` is an integer scalar, specifying the length of every
      row. This scheme may only be used if all rows have the same length.

  For example, the following `RowPartition`s all represent the partitioning of
  8 values into 5 sublists as follows: `[[*, *, *, *], [], [*, *, *], [*], []]`.

  >>> p1 = RowPartition.from_row_lengths([4, 0, 3, 1, 0])
  >>> p2 = RowPartition.from_row_splits([0, 4, 4, 7, 8, 8])
  >>> p3 = RowPartition.from_row_starts([0, 4, 4, 7, 8], nvals=8)
  >>> p4 = RowPartition.from_row_limits([4, 4, 7, 8, 8])
  >>> p5 = RowPartition.from_value_rowids([0, 0, 0, 0, 2, 2, 2, 3], nrows=5)

  For more information about each scheme, see the documentation for its
  factory method. For additional examples, see the documentation on
  `tf.RaggedTensor`.

  ### Precomputed Encodings

  `RowPartition` always stores at least one encoding of the partitioning, but
  it can be configured to cache additional encodings as well. This can
  avoid unnecessary recomputation in eager mode. (In graph mode, optimizations
  such as common subexpression elimination will typically prevent these
  unnecessary recomputations.) To check which encodings are precomputed, use
  `RowPartition.has_precomputed_<encoding>`. To cache an additional
  encoding, use `RowPartition.with_precomputed_<encoding>`.
  """

  # =============================================================================
  # Constructor (private)
  # =============================================================================
  def __init__(self,
               row_splits,
               row_lengths=None,
               value_rowids=None,
               nrows=None,
               uniform_row_length=None,
               nvals=None,
               internal=False):
    """Creates a `RowPartition` from the specified encoding tensor(s).

    This constructor is private -- please use one of the following ops to
    build `RowPartition`s:

      * `RowPartition.from_row_lengths`
      * `RowPartition.from_value_rowids`
      * `RowPartition.from_row_splits`
      * `RowPartition.from_row_starts`
      * `RowPartition.from_row_limits`
      * `RowPartition.from_uniform_row_length`

    If row_splits has a constant value, then all other arguments should
    have a constant value.

    Args:
      row_splits: A 1-D integer tensor with shape `[nrows+1]`.
      row_lengths: A 1-D integer tensor with shape `[nrows]`
      value_rowids: A 1-D integer tensor with shape `[nvals]`.
      nrows: A scalar integer tensor.
      uniform_row_length: A scalar tensor.
      nvals: A scalar tensor.
      internal: Private key value, required to ensure that this private
        constructor is *only* called from the factory methods.

    Raises:
      TypeError: If a row partitioning tensor has an inappropriate dtype.
      TypeError: If exactly one row partitioning argument was not specified.
      ValueError: If a row partitioning tensor has an inappropriate shape.
      ValueError: If multiple partitioning arguments are specified.
      ValueError: If nrows is specified but value_rowids is not None.
    """
    if internal is not _row_partition_factory_key:
      raise ValueError("RowPartition constructor is private; please use one "
                       "of the factory methods instead (e.g., "
                       "RowPartition.from_row_lengths())")

    # Validate the arguments.
    if not isinstance(row_splits, tensor_lib.Tensor):
      raise TypeError("Row-partitioning argument must be a Tensor, got %r" %
                      row_splits)
    if row_splits.dtype not in (dtypes.int32, dtypes.int64):
      raise ValueError("Row-partitioning argument must be int32 or int64")

    # Validate shapes & dtypes.
    row_splits.shape.assert_has_rank(1)
    row_splits.set_shape([None])
    self._row_splits = row_splits

    # Store any cached tensors. These are used to avoid unnecessary
    # round-trip conversions when a RowPartition is constructed from
    # lengths or rowids, and we later want those lengths/rowids back.
    for tensor in [row_lengths, value_rowids, nrows, uniform_row_length, nvals]:
      if tensor is not None:
        if not isinstance(tensor, tensor_lib.Tensor):
          raise TypeError("Cached value must be a Tensor or None.")
        elif tensor.dtype != row_splits.dtype:
          raise ValueError(f"Inconsistent dtype for encoding tensors: "
                           f"{tensor} vs {row_splits}")
    self._row_lengths = row_lengths
    self._value_rowids = value_rowids
    self._nrows = nrows
    self._uniform_row_length = uniform_row_length
    self._nvals = nvals

  # =============================================================================
  # Factory Methods
  # =============================================================================

  @classmethod
  def from_value_rowids(cls,
                        value_rowids,
                        nrows=None,
                        validate=True,
                        dtype=None,
                        dtype_hint=None):
    """Creates a `RowPartition` with rows partitioned by `value_rowids`.

    This `RowPartition` divides a sequence `values` into rows by specifying
    which row each value should be added to:

    ```python
    partitioned_rows = [[] for _ in nrows]
    for (value, rowid) in zip(values, value_rowids):
      partitioned_rows[rowid].append(value)
    ```

    Args:
      value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds
        one-to-one with `values`, and specifies each value's row index. Must be
        nonnegative, and must be sorted in ascending order.
      nrows: An integer scalar specifying the number of rows. This should be
        specified if the `RowPartition` may contain empty trailing rows. Must
        be greater than `value_rowids[-1]` (or greater than or equal to zero if
        `value_rowids` is empty). Defaults to `value_rowids[-1] + 1` (or zero if
        `value_rowids` is empty).
      validate: If true, then use assertions to check that the arguments form a
        valid `RowPartition`.
      dtype: Optional dtype for the RowPartition. If missing, the type
        is inferred from the type of `value_rowids`, dtype_hint, or tf.int64.
      dtype_hint: Optional dtype for the RowPartition, used when dtype
        is None. In some cases, a caller may not have a dtype in mind when
        converting to a tensor, so dtype_hint can be used as a soft preference.
        If the conversion to `dtype_hint` is not possible, this argument has no
        effect.

    Returns:
      A `RowPartition`.

    Raises:
      ValueError: If `nrows` is incompatible with `value_rowids`.

    #### Example:

    >>> print(RowPartition.from_value_rowids(
    ...     value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
    ...     nrows=4))
    tf.RowPartition(row_splits=[0 4 4 7 8])
    """
    # Local import bincount_ops to avoid import-cycle since bincount_ops
    # imports ragged_tensor.
    from tensorflow.python.ops import bincount_ops  # pylint: disable=g-import-not-at-top
    if not isinstance(validate, bool):
      raise TypeError("validate must have type bool")
    with ops.name_scope(None, "RowPartitionFromValueRowIds",
                        [value_rowids, nrows]):
      value_rowids = cls._convert_row_partition(
          value_rowids, "value_rowids", dtype_hint=dtype_hint, dtype=dtype)
      if nrows is None:
        const_rowids = tensor_util.constant_value(value_rowids)
        if const_rowids is None:
          nrows = array_ops.concat([value_rowids[-1:], [-1]], axis=0)[0] + 1
          const_nrows = None
        else:
          const_nrows = const_rowids[-1] + 1 if const_rowids.size > 0 else 0
          nrows = ops.convert_to_tensor(
              const_nrows, value_rowids.dtype, name="nrows")
      else:
        nrows = ops.convert_to_tensor(nrows, value_rowids.dtype, "nrows")
        const_nrows = tensor_util.constant_value(nrows)
        if const_nrows is not None:
          if const_nrows < 0:
            raise ValueError("Expected nrows >= 0; got %d" % const_nrows)
          const_rowids = tensor_util.constant_value(value_rowids)
          if const_rowids is not None and const_rowids.size > 0:
            if not const_nrows >= const_rowids[-1] + 1:
              raise ValueError(
                  "Expected nrows >= value_rowids[-1] + 1; got nrows=%d, "
                  "value_rowids[-1]=%d" % (const_nrows, const_rowids[-1]))

      value_rowids.shape.assert_has_rank(1)
nrows.shape.assert_has_rank(0) + + if validate: + msg = ("Arguments to from_value_rowids do not form a valid " + "RowPartition") + checks = [ + check_ops.assert_rank(value_rowids, 1, message=msg), + check_ops.assert_rank(nrows, 0, message=msg), + check_ops.assert_non_negative(value_rowids[:1], message=msg), + _assert_monotonic_increasing(value_rowids, message=msg), + check_ops.assert_less(value_rowids[-1:], nrows, message=msg), + ] + value_rowids = control_flow_ops.with_dependencies(checks, value_rowids) + + # Convert value_rowids & nrows to row_splits. + # Note: we don't use segment_ids_to_row_splits() here because we want + # to save the intermediate value `row_lengths`, so we can cache it. + # TODO(b/116708836) Upgrade bincount to accept int64 so we can skip the + # cast. + value_rowids_int32 = math_ops.cast(value_rowids, dtypes.int32) + nrows_int32 = math_ops.cast(nrows, dtypes.int32) + row_lengths = bincount_ops.bincount( + value_rowids_int32, + minlength=nrows_int32, + maxlength=nrows_int32, + dtype=value_rowids.dtype) + row_splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0) + if const_nrows is not None: + row_lengths.set_shape([const_nrows]) + row_splits.set_shape([const_nrows + 1]) + + return cls( + row_splits=row_splits, + row_lengths=row_lengths, + value_rowids=value_rowids, + nrows=nrows, + internal=_row_partition_factory_key) + + @classmethod + def from_row_splits(cls, + row_splits, + validate=True, + dtype=None, + dtype_hint=None): + """Creates a `RowPartition` with rows partitioned by `row_splits`. + + This `RowPartition` divides a sequence `values` into rows by indicating + where each row begins and ends: + + ```python + partitioned_rows = [] + for i in range(len(row_splits) - 1): + row_start = row_splits[i] + row_end = row_splits[i + 1] + partitioned_rows.append(values[row_start:row_end]) + ``` + + Args: + row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be + empty, and must be sorted in ascending order. 
`row_splits[0]` must be + zero. + validate: If true, then use assertions to check that the arguments form a + valid `RowPartition`. + dtype: Optional dtype for the RowPartition. If missing, the type + is inferred from the type of `row_splits`, dtype_hint, or tf.int64. + dtype_hint: Optional dtype for the RowPartition, used when dtype + is None. In some cases, a caller may not have a dtype in mind when + converting to a tensor, so dtype_hint can be used as a soft preference. + If the conversion to `dtype_hint` is not possible, this argument has no + effect. + + Returns: + A `RowPartition`. + + Raises: + ValueError: If `row_splits` is an empty list. + """ + if not isinstance(validate, bool): + raise TypeError("validate must have type bool") + if isinstance(row_splits, (list, tuple)) and not row_splits: + raise ValueError("row_splits tensor may not be empty.") + if isinstance(row_splits, tensor_lib.TensorSpec): + return cls(row_splits=row_splits, internal=_row_partition_factory_key) + + with ops.name_scope(None, "RowPartitionFromRowSplits", [row_splits]): + row_splits = cls._convert_row_partition( + row_splits, "row_splits", dtype_hint=dtype_hint, dtype=dtype) + row_splits.shape.assert_has_rank(1) + + if validate: + msg = "Arguments to from_row_splits do not form a valid RaggedTensor:" + checks = [ + check_ops.assert_rank(row_splits, 1, message=(msg + "rank")), + _assert_zero(row_splits[0], message=(msg + "zero")), + _assert_monotonic_increasing( + row_splits, message=(msg + "monotonic")), + ] + row_splits = control_flow_ops.with_dependencies(checks, row_splits) + + return cls(row_splits=row_splits, internal=_row_partition_factory_key) + + @classmethod + def from_row_lengths(cls, + row_lengths, + validate=True, + dtype=None, + dtype_hint=None): + """Creates a `RowPartition` with rows partitioned by `row_lengths`. 
+ + This `RowPartition` divides a sequence `values` into rows by indicating + the length of each row: + + ```python + partitioned_rows = [[values.pop(0) for _ in range(length)] + for length in row_lengths] + ``` + + Args: + row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be + nonnegative. + validate: If true, then use assertions to check that the arguments form a + valid `RowPartition`. + + dtype: Optional dtype for the RowPartition. If missing, the type + is inferred from the type of `row_lengths`, dtype_hint, or tf.int64. + dtype_hint: Optional dtype for the RowPartition, used when dtype + is None. In some cases, a caller may not have a dtype in mind when + converting to a tensor, so dtype_hint can be used as a soft preference. + If the conversion to `dtype_hint` is not possible, this argument has no + effect. + + Returns: + A `RowPartition`. + """ + if not isinstance(validate, bool): + raise TypeError("validate must have type bool") + with ops.name_scope(None, "RowPartitionFromRowLengths", [row_lengths]): + row_lengths = cls._convert_row_partition( + row_lengths, "row_lengths", dtype_hint=dtype_hint, dtype=dtype) + row_lengths.shape.assert_has_rank(1) + + if validate: + msg = "Arguments to from_row_lengths do not form a valid RowPartition" + checks = [ + check_ops.assert_rank(row_lengths, 1, message=msg), + check_ops.assert_non_negative(row_lengths, message=msg), + ] + row_lengths = control_flow_ops.with_dependencies(checks, row_lengths) + + row_limits = math_ops.cumsum(row_lengths) + row_splits = array_ops.concat([[0], row_limits], axis=0) + return cls( + row_splits=row_splits, + row_lengths=row_lengths, + internal=_row_partition_factory_key) + + @classmethod + def from_row_starts(cls, + row_starts, + nvals, + validate=True, + dtype=None, + dtype_hint=None): + """Creates a `RowPartition` with rows partitioned by `row_starts`. + + Equivalent to: `from_row_splits(concat([row_starts, nvals], axis=0))`. 
+ + Args: + row_starts: A 1-D integer tensor with shape `[nrows]`. Must be + nonnegative and sorted in ascending order. If `nrows>0`, then + `row_starts[0]` must be zero. + nvals: A scalar tensor indicating the number of values. + validate: If true, then use assertions to check that the arguments form a + valid `RowPartition`. + dtype: Optional dtype for the RowPartition. If missing, the type + is inferred from the type of `row_starts`, dtype_hint, or tf.int64. + dtype_hint: Optional dtype for the RowPartition, used when dtype + is None. In some cases, a caller may not have a dtype in mind when + converting to a tensor, so dtype_hint can be used as a soft preference. + If the conversion to `dtype_hint` is not possible, this argument has no + effect. + + Returns: + A `RowPartition`. + """ + if not isinstance(validate, bool): + raise TypeError("validate must have type bool") + with ops.name_scope(None, "RowPartitionFromRowStarts", [row_starts]): + row_starts = cls._convert_row_partition( + row_starts, "row_starts", dtype_hint=dtype_hint, dtype=dtype) + row_starts.shape.assert_has_rank(1) + # TODO(martinz): nvals and row_starts could be inconsistent at call time, + # even though they eventually end up the same type. 
+ nvals = math_ops.cast(nvals, row_starts.dtype) + if validate: + msg = "Arguments to from_row_starts do not form a valid RaggedTensor" + checks = [ + check_ops.assert_rank(row_starts, 1, message=msg), + _assert_zero(row_starts[:1], message=msg), + _assert_monotonic_increasing(row_starts, message=msg), + check_ops.assert_less_equal(row_starts[-1:], nvals, message=msg), + ] + row_starts = control_flow_ops.with_dependencies(checks, row_starts) + + row_splits = array_ops.concat([row_starts, [nvals]], axis=0) + return cls(row_splits=row_splits, nvals=nvals, + internal=_row_partition_factory_key) + + @classmethod + def from_row_limits(cls, + row_limits, + validate=True, + dtype=None, + dtype_hint=None): + """Creates a `RowPartition` with rows partitioned by `row_limits`. + + Equivalent to: `from_row_splits(values, concat([0, row_limits], axis=0))`. + + Args: + row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in + ascending order. + validate: If true, then use assertions to check that the arguments form a + valid `RowPartition`. + dtype: Optional dtype for the RowPartition. If missing, the type + is inferred from the type of `row_limits`, dtype_hint, or tf.int64. + dtype_hint: Optional dtype for the RowPartition, used when dtype + is None. In some cases, a caller may not have a dtype in mind when + converting to a tensor, so dtype_hint can be used as a soft preference. + If the conversion to `dtype_hint` is not possible, this argument has no + effect. + + Returns: + A `RowPartition`. 
+ """ + if not isinstance(validate, bool): + raise TypeError("validate must have type bool") + with ops.name_scope(None, "RowPartitionFromRowLimits", [row_limits]): + row_limits = cls._convert_row_partition( + row_limits, "row_limits", dtype_hint=dtype_hint, dtype=dtype) + row_limits.shape.assert_has_rank(1) + + if validate: + msg = "Arguments to from_row_limits do not form a valid RaggedTensor" + checks = [ + check_ops.assert_rank(row_limits, 1, message=msg), + check_ops.assert_non_negative(row_limits[:1], message=msg), + _assert_monotonic_increasing(row_limits, message=msg), + ] + row_limits = control_flow_ops.with_dependencies(checks, row_limits) + + zero = array_ops.zeros([1], row_limits.dtype) + row_splits = array_ops.concat([zero, row_limits], axis=0) + return cls(row_splits=row_splits, internal=_row_partition_factory_key) + + @classmethod + def from_uniform_row_length(cls, + uniform_row_length, + nvals=None, + nrows=None, + validate=True, + dtype=None, + dtype_hint=None): + """Creates a `RowPartition` with rows partitioned by `uniform_row_length`. + + This `RowPartition` divides a sequence `values` into rows that all have + the same length: + + ```python + partitioned_rows = [[values.pop(0) for _ in range(uniform_row_length)] + for _ in range(nrows)] + ``` + + Note that either or both of nvals and nrows must be specified. + + Args: + uniform_row_length: A scalar integer tensor. Must be nonnegative. The + size of the outer axis of `values` must be evenly divisible by + `uniform_row_length`. + nvals: a non-negative scalar integer tensor for the number of values. + Must be specified if nrows is not specified. If not specified, + defaults to uniform_row_length*nrows + nrows: The number of rows in the constructed RowPartition. If not + specified, then it defaults to `nvals/uniform_row_length` (or `0` if + `uniform_row_length==0`). `nrows` only needs to be specified if + `uniform_row_length` might be zero. `uniform_row_length*nrows` must be + `nvals`. 
+ validate: If true, then use assertions to check that the arguments form a + valid `RowPartition`. + dtype: Optional dtype for the RowPartition. If missing, the type + is inferred from the type of `uniform_row_length`, dtype_hint, + or tf.int64. + dtype_hint: Optional dtype for the RowPartition, used when dtype + is None. In some cases, a caller may not have a dtype in mind when + converting to a tensor, so dtype_hint can be used as a soft preference. + If the conversion to `dtype_hint` is not possible, this argument has no + effect. + + Returns: + A `RowPartition`. + """ + if not isinstance(validate, bool): + raise TypeError("validate must have type bool") + if nrows is None and nvals is None: + raise ValueError("Either (or both) of nvals and nrows must be specified") + with ops.name_scope(None, "RowPartitionFromUniformRowLength", + [uniform_row_length, nrows]): + [uniform_row_length, nvals, nrows + ] = _convert_all_to_tensors([(uniform_row_length, "uniform_row_length"), + (nvals, "nvals"), (nrows, "nrows")], + dtype=dtype, + dtype_hint=dtype_hint) + + uniform_row_length.shape.assert_has_rank(0) + + # Find nrows. + const_row_length = tensor_util.constant_value(uniform_row_length) + if nrows is None: + if const_row_length is None: + # Avoid division by zero if uniform_row_length==0 (and nvals==0). 
+ rowlen_or_1 = math_ops.maximum( + uniform_row_length, + constant_op.constant(1, uniform_row_length.dtype)) + nrows = nvals // rowlen_or_1 + elif const_row_length == 0: + nrows = constant_op.constant(0, dtype=uniform_row_length.dtype) + else: + nrows = nvals // const_row_length + const_nrows = None if nrows is None else tensor_util.constant_value(nrows) + const_nvals = None if nvals is None else tensor_util.constant_value(nvals) + const_uniform_row_length = tensor_util.constant_value(uniform_row_length) + + checks = [] + + if const_nvals is None and const_nrows is not None and const_uniform_row_length is not None: + const_nvals = const_nrows * const_uniform_row_length + if nvals is not None and validate: + checks.append(check_ops.assert_equal(nvals, const_nvals)) + nvals = constant_op.constant(const_nvals, uniform_row_length.dtype) + + if nvals is None: + nvals = nrows * uniform_row_length + + # Find row_splits. + if const_nrows is not None and const_row_length is not None: + row_splits = [v * const_row_length for v in range(const_nrows + 1)] + row_splits = constant_op.constant(row_splits, uniform_row_length.dtype) + else: + row_splits = math_ops.range( + nrows + 1, dtype=uniform_row_length.dtype) * uniform_row_length + + if validate: + + if (const_nrows is None or const_row_length is None or + const_nvals is None): + checks.append( + check_ops.assert_equal( + nrows * uniform_row_length, nvals, + ("uniform_row_length", uniform_row_length, "times nrows", + nrows, "must equal nvals", nvals))) + else: + if const_nrows * const_row_length != const_nvals: + raise ValueError( + "uniform_row_length=%d times nrows=%d must equal nvals=%d" % + (const_row_length, const_nrows, const_nvals)) + + if uniform_row_length.shape.rank is None: + checks.append( + check_ops.assert_rank( + uniform_row_length, + 0, + message="uniform_row_length must be a scalar.")) + + const_row_length = tensor_util.constant_value(uniform_row_length) + if const_row_length is None: + checks.append( + 
check_ops.assert_greater_equal( + uniform_row_length, + constant_op.constant(0, uniform_row_length.dtype), + message="uniform_row_length must be >= 0.")) + else: + if const_row_length < 0: + raise ValueError("uniform_row_length must be >= 0.") + + row_splits = control_flow_ops.with_dependencies(checks, row_splits) + + return cls( + row_splits=row_splits, + uniform_row_length=uniform_row_length, + nrows=nrows, + nvals=nvals, + internal=_row_partition_factory_key) + + @classmethod + def _convert_row_partition(cls, partition, name, dtype=None, dtype_hint=None): + """Converts `partition` to Tensors. + + Args: + partition: A row-partitioning tensor for the `RowPartition` being + constructed. I.e., one of: row_splits, row_lengths, row_starts, + row_limits, value_rowids, uniform_row_length. + name: The name of the row-partitioning tensor. + dtype: Optional dtype for the RowPartition. If missing, the type + is inferred from the type of `uniform_row_length`, dtype_hint, + or tf.int64. + dtype_hint: Optional dtype for the RowPartition, used when dtype + is None. In some cases, a caller may not have a dtype in mind when + converting to a tensor, so dtype_hint can be used as a soft preference. + If the conversion to `dtype_hint` is not possible, this argument has no + effect. + + Returns: + A tensor equivalent to partition. + + Raises: + ValueError: if dtype is not int32 or int64. + """ + if dtype_hint is None: + dtype_hint = dtypes.int64 + if (isinstance(partition, np.ndarray) and + partition.dtype == np.int32 and dtype is None): + partition = ops.convert_to_tensor(partition, name=name) + else: + partition = tensor_conversion.convert_to_tensor_v2( + partition, dtype_hint=dtype_hint, dtype=dtype, name=name + ) + if partition.dtype not in (dtypes.int32, dtypes.int64): + raise ValueError("%s must have dtype int32 or int64" % name) + + return partition + + def _with_dependencies(self, dependencies): + """Returns a new RowPartition equal to self with control dependencies. 

    Specifically, self._row_splits is gated by the given control dependencies.
    Used to add sanity checks to the constructors.

    Args:
      dependencies: a list of tensors to use as dependencies.

    Returns:
      A new RowPartition object.
    """
    new_row_splits = control_flow_ops.with_dependencies(dependencies,
                                                        self._row_splits)
    return RowPartition(
        row_splits=new_row_splits,
        row_lengths=self._row_lengths,
        value_rowids=self._value_rowids,
        nrows=self._nrows,
        uniform_row_length=self._uniform_row_length,
        internal=_row_partition_factory_key)

  # =============================================================================
  # Accessors
  # =============================================================================

  @property
  def dtype(self):
    """The `DType` used to encode the row partition (either int32 or int64)."""
    return self._row_splits.dtype

  def row_splits(self):
    """Returns the row-split indices for this row partition.

    `row_splits` specifies where the values for each row begin and end.
    In particular, the values for row `i` are stored in the slice
    `values[row_splits[i]:row_splits[i+1]]`.

    Returns:
      A 1-D integer `Tensor` with shape `[self.nrows+1]`.
      The returned tensor is non-empty, and is sorted in ascending order.
      `self.row_splits()[0] == 0`.
      `self.row_splits()[-1] == self.nvals()`.
    """
    return self._row_splits

  def value_rowids(self):
    """Returns the row indices for this row partition.

    `value_rowids` specifies the row index for each value. In particular,
    `value_rowids[i]` is the row index for `values[i]`.

    Returns:
      A 1-D integer `Tensor` with shape `[self.nvals()]`.
      The returned tensor is nonnegative, and is sorted in ascending order.
    """
    if self._value_rowids is not None:
      return self._value_rowids
    return segment_id_ops.row_splits_to_segment_ids(self._row_splits)

  def nvals(self):
    """Returns the number of values partitioned by this `RowPartition`.

    If the sequence partitioned by this `RowPartition` is a tensor, then
    `nvals` is the size of that tensor's outermost dimension -- i.e.,
    `nvals == values.shape[0]`.

    Returns:
      scalar integer Tensor
    """
    # TODO(martinz): Uncomment these lines.
    # if self._nvals is not None:
    #   return self._nvals
    return self._row_splits[-1]

  def nrows(self):
    """Returns the number of rows created by this `RowPartition`.

    Returns:
      scalar integer Tensor
    """
    if self._nrows is not None:
      return self._nrows
    nsplits = tensor_shape.dimension_at_index(self._row_splits.shape, 0)
    if nsplits.value is None:
      return array_ops.shape(self._row_splits, out_type=self.dtype)[0] - 1
    else:
      return constant_op.constant(nsplits.value - 1, dtype=self.dtype)

  def uniform_row_length(self):
    """Returns the length of each row in this partition, if rows are uniform.

    If all rows in this `RowPartition` have the same length, then this returns
    that length as a scalar integer `Tensor`. Otherwise, it returns `None`.

    Returns:
      scalar Tensor with `type=self.dtype`, or `None`.
    """
    return self._uniform_row_length

  def row_starts(self):
    """Returns the start indices for rows in this row partition.

    These indices specify where the values for each row begin.
    `partition.row_starts()` is equal to `partition.row_splits()[:-1]`.

    Returns:
      A 1-D integer Tensor with shape `[self.nrows()]`.
      The returned tensor is nonnegative, and is sorted in ascending order.
      `self.row_starts()[0] == 0`.
      `self.row_starts()[-1] <= self.nvals()`.
    """
    return self._row_splits[:-1]

  def row_limits(self):
    """Returns the limit indices for rows in this row partition.

    These indices specify where the values for each row end.
    `partition.row_limits()` is equal to `partition.row_splits()[1:]`.

    Returns:
      A 1-D integer Tensor with shape `[self.nrows]`.
      The returned tensor is nonnegative, and is sorted in ascending order.
      `self.row_limits()[-1] == self.nvals()`.
    """
    return self._row_splits[1:]

  def row_lengths(self):
    """Returns the lengths of rows in this `RowPartition`.

    Returns:
      A 1-D integer Tensor with shape `[self.nrows]`.
      The returned tensor is nonnegative.
      `tf.reduce_sum(self.row_lengths) == self.nvals()`.
    """
    if self._row_lengths is not None:
      return self._row_lengths
    splits = self._row_splits
    return splits[1:] - splits[:-1]

  @property
  def static_nrows(self):
    """The number of rows in this partition, if statically known.

    ```python
    self.row_lengths().shape == [self.static_nrows]
    self.row_starts().shape == [self.static_nrows]
    self.row_limits().shape == [self.static_nrows]
    self.row_splits().shape == [self.static_nrows + 1]
    ```

    Returns:
      The number of rows in this partition as an `int` (if statically known);
      or `None` (otherwise).
    """
    if self._row_splits is not None:
      nrows_plus_one = tensor_shape.dimension_value(self._row_splits.shape[0])
      if nrows_plus_one is not None:
        return nrows_plus_one - 1
    if self._row_lengths is not None:
      nrows = tensor_shape.dimension_value(self._row_lengths.shape[0])
      if nrows is not None:
        return nrows
    if self._nrows is not None:
      return tensor_util.constant_value(self._nrows)
    return None

  @property
  def static_nvals(self):
    """The number of values in this partition, if statically known.

    ```python
    self.value_rowids().shape == [self.static_nvals]
    ```

    Returns:
      The number of values in this partition as an `int` (if statically known);
      or `None` (otherwise).
    """
    if self._nvals is not None:
      nvals = tensor_util.constant_value(self._nvals)
      if nvals is not None:
        return nvals
    if self._value_rowids is not None:
      nvals = tensor_shape.dimension_at_index(self._value_rowids.shape, 0)
      if nvals.value is not None:
        return nvals.value
    return None

  @property
  def static_uniform_row_length(self):
    """The number of values in each row of this partition, if statically known.
+ + Returns: + The number of values in each row of this partition as an `int` (if + statically known); or `None` (otherwise). + """ + if self._uniform_row_length is not None: + return tensor_util.constant_value(self._uniform_row_length) + return None + + def offsets_in_rows(self): + """Return the offset of each value. + + RowPartition takes an array x and converts it into sublists. + offsets[i] is the index of x[i] in its sublist. + Given a shape, such as: + [*,*,*],[*,*],[],[*,*] + This returns: + 0,1,2,0,1,0,1 + + Returns: + an offset for every value. + """ + return gen_ragged_math_ops.ragged_range( + starts=constant_op.constant(0, self.dtype), + limits=self.row_lengths(), + deltas=constant_op.constant(1, self.dtype)).rt_dense_values + + def is_uniform(self): + """Returns true if the partition is known to be uniform statically. + + This is based upon the existence of self._uniform_row_length. For example: + RowPartition.from_row_lengths([3,3,3]).is_uniform()==false + RowPartition.from_uniform_row_length(5, nvals=20).is_uniform()==true + RowPartition.from_row_lengths([2,0,2]).is_uniform()==false + + Returns: + Whether a RowPartition is known to be uniform statically. + """ + return self._uniform_row_length is not None + + def _static_check(self): + """Checks if the object is internally consistent. + + Raises: + ValueError if inconsistent. 
+ """ + my_dtype = self.dtype + if self._uniform_row_length is not None: + if self._uniform_row_length.dtype != my_dtype: + raise ValueError("_uniform_row_length.dtype=" + + str(self._uniform_row_length.dtype) + ", not " + + str(my_dtype)) + + if self._row_lengths is not None and self._row_lengths.dtype != my_dtype: + raise ValueError("_row_lengths.dtype=" + str(self._row_lengths.dtype) + + ", not " + str(my_dtype)) + + if self._value_rowids is not None and self._value_rowids.dtype != my_dtype: + raise ValueError("_value_rowids.dtype=" + str(self._value_rowids.dtype) + + ", not " + str(my_dtype)) + + if self._nrows is not None and self._nrows.dtype != my_dtype: + raise ValueError("_nrows.dtype=" + str(self._nrows.dtype) + ", not " + + str(my_dtype)) + + # ============================================================================= + # Transformation + # ============================================================================= + + def with_dtype(self, dtype): + """Returns a copy of this RowPartition with the given encoding dtype. + + Args: + dtype: The dtype for encoding tensors, such as `row_splits` and `nrows`. + One of `tf.int32` or `tf.int64`. + + Returns: + A copy of this RowPartition, with the encoding tensors cast to the given + type. 
+ """ + dtype = dtypes.as_dtype(dtype) + if dtype not in (dtypes.int32, dtypes.int64): + raise ValueError("dtype must be int32 or int64") + if self.dtype == dtype: + return self + + return RowPartition( + row_splits=_cast_if_not_none(self._row_splits, dtype), + row_lengths=_cast_if_not_none(self._row_lengths, dtype), + value_rowids=_cast_if_not_none(self._value_rowids, dtype), + nrows=_cast_if_not_none(self._nrows, dtype), + uniform_row_length=_cast_if_not_none(self._uniform_row_length, dtype), + internal=_row_partition_factory_key) + + # ============================================================================= + # String Encoding + # ============================================================================= + + def __repr__(self): + if self._uniform_row_length is not None: + return (f"tf.RowPartition(nrows={self._nrows}, " + f"uniform_row_length={self._uniform_row_length})") + else: + return f"tf.RowPartition(row_splits={self._row_splits})" + + # ============================================================================= + # Precomputed Encodings + # ============================================================================= + + def _has_precomputed_row_splits(self): + """Returns true if `row_splits` has already been computed. + + If true, then `self.row_splits()` will return its value without calling + any TensorFlow ops. + """ + return self._row_splits is not None + + def _has_precomputed_row_lengths(self): + """Returns true if `row_lengths` has already been computed. + + If true, then `self.row_lengths()` will return its value without calling + any TensorFlow ops. + """ + return self._row_lengths is not None + + def _has_precomputed_value_rowids(self): + """Returns true if `value_rowids` has already been computed. + + If true, then `self.value_rowids()` will return its value without calling + any TensorFlow ops. + """ + return self._value_rowids is not None + + def _has_precomputed_nrows(self): + """Returns true if `nrows` has already been computed. 
+ + If true, then `self.nrows()` will return its value without calling + any TensorFlow ops. + """ + return self._nrows is not None + + def _has_precomputed_nvals(self): + """Returns true if `nvals` has already been computed. + + If true, then `self.nvals()` will return its value without calling + any TensorFlow ops. + """ + return self._nvals is not None + + def _with_precomputed_row_splits(self): + """Returns a copy of `self` with `row_splits` precomputed.""" + return RowPartition( + row_splits=self.row_splits(), + row_lengths=self._row_lengths, + value_rowids=self._value_rowids, + nrows=self._nrows, + uniform_row_length=self._uniform_row_length, + nvals=self._nvals, + internal=_row_partition_factory_key) + + def _with_precomputed_row_lengths(self): + """Returns a copy of `self` with `row_lengths` precomputed.""" + return RowPartition( + row_splits=self._row_splits, + row_lengths=self.row_lengths(), + value_rowids=self._value_rowids, + nrows=self._nrows, + nvals=self._nvals, + uniform_row_length=self._uniform_row_length, + internal=_row_partition_factory_key) + + def _with_precomputed_value_rowids(self): + """Returns a copy of `self` with `value_rowids` precomputed.""" + return RowPartition( + row_splits=self._row_splits, + row_lengths=self._row_lengths, + value_rowids=self.value_rowids(), + nrows=self._nrows, + nvals=self._nvals, + uniform_row_length=self._uniform_row_length, + internal=_row_partition_factory_key) + + def _with_precomputed_nrows(self): + """Returns a copy of `self` with `nrows` precomputed.""" + return RowPartition( + row_splits=self._row_splits, + row_lengths=self._row_lengths, + value_rowids=self._value_rowids, + nrows=self.nrows(), + nvals=self._nvals, + uniform_row_length=self._uniform_row_length, + internal=_row_partition_factory_key) + + def _with_precomputed_nvals(self): + """Returns a copy of `self` with `row_splits` precomputed.""" + return RowPartition( + row_splits=self.row_splits(), + row_lengths=self._row_lengths, + 
value_rowids=self._value_rowids, + nrows=self._nrows, + nvals=self.nvals(), + uniform_row_length=self._uniform_row_length, + internal=_row_partition_factory_key) + + def _merge_with_spec(self, b): + """Merge with a TypeSpec to create a new RowPartition.""" + a_spec = self._type_spec + if not a_spec.is_compatible_with(b): + # TODO(martinz): Should a dynamic check be used here? + raise ValueError("RowPartition and RowPartitionSpec are not compatible") + nrows = constant_op.constant( + b.nrows, self.dtype) if b.nrows is not None else self._nrows + nvals = constant_op.constant( + b.nvals, self.dtype) if b.nvals is not None else self._nvals + uniform_row_length = constant_op.constant( + b.uniform_row_length, self.dtype + ) if b.uniform_row_length is not None else self._uniform_row_length + return RowPartition( + row_splits=self._row_splits, + row_lengths=self._row_lengths, + value_rowids=self._value_rowids, + nvals=nvals, + uniform_row_length=uniform_row_length, + nrows=nrows, + internal=_row_partition_factory_key) + + def _merge_precomputed_encodings(self, other, validate=True): + """Returns a RowPartition that merges encodings from `self` and `other`. + + Requires that `self` and `other` describe the same partition. + + Args: + other: A `RowPartition` that encodes the same partition as `self`. + validate: If true, then add runtime checks to verify that `self` and + `other` encode the same row partition. + + Returns: + A `RowPartition`. + """ + # pylint: disable=protected-access + if (self is other or # Fast path if row partitions are equal. + (self._row_splits is other._row_splits and + self._row_lengths is other._row_lengths and + self._value_rowids is other._value_rowids and + self._nrows is other._nrows and + self._nvals is other._nvals and + self._uniform_row_length is other._uniform_row_length)): + return self + + # Merge the component tensors. We only need to validate one encoding. + # We merge less-expensive encodings first (to avoid expensive validation). 
+ nrows, nrows_validated = _merge_tensors(self._nrows, other._nrows, "nrows", + validate) + nvals, _ = _merge_tensors(self._nvals, other._nvals, "nvals", validate) + uniform_row_length, uniform_row_length_validated = _merge_tensors( + self._uniform_row_length, other._uniform_row_length, + "uniform_row_length", validate) + if uniform_row_length_validated and nrows_validated: + validate = False # Validation complete. + row_splits, row_splits_validated = _merge_tensors(self._row_splits, + other._row_splits, + "row_splits", validate) + if row_splits_validated: + validate = False # Validation complete. + row_lengths, row_lengths_validated = _merge_tensors(self._row_lengths, + other._row_lengths, + "row_lengths", validate) + if row_lengths_validated: + validate = False # Validation complete. + value_rowids, value_rowids_validated = _merge_tensors( + self._value_rowids, other._value_rowids, "value_rowids", validate) + if value_rowids_validated and nrows_validated: + validate = False # Validation complete. + # TODO(edloper): If we make the row_splits encoding optional, then there + # will be cases where we need to do validation at this point -- e.g. if + # self has only row_splits and other has only value_rowids. But for + # now, we are guaranteed to have done validation by this point. + + # Avoid creating new RowPartition objects if we don't need to. 
+ if (row_splits is self._row_splits and row_lengths is self._row_lengths and + value_rowids is self._value_rowids and nrows is self._nrows and + uniform_row_length is self._uniform_row_length): + return self + if (row_splits is other._row_splits and + row_lengths is other._row_lengths and + value_rowids is other._value_rowids and nrows is other._nrows and + uniform_row_length is other._uniform_row_length): + return other + + return RowPartition( + row_splits=row_splits, + row_lengths=row_lengths, + value_rowids=value_rowids, + nrows=nrows, + uniform_row_length=uniform_row_length, + nvals=nvals, + internal=_row_partition_factory_key) + + # ============================================================================= + # Composite Tensor + # ============================================================================= + + @property + def _type_spec(self): + return RowPartitionSpec.from_value(self) + + +# =============================================================================== +# RowPartitionSpec +# =============================================================================== +# TODO(edloper): Consider refactoring RowPartitionSpec to allow any combination +# of precomputed row-partition encodings (rather than always using row_splits). + + +@type_spec_registry.register("tf.RowPartitionSpec") +class RowPartitionSpec(type_spec.TypeSpec): + """Type specification for a `tf.RowPartition`.""" + + __slots__ = ["_nrows", "_nvals", "_uniform_row_length", "_dtype"] + + value_type = property(lambda self: RowPartition) + + def __init__(self, + nrows=None, + nvals=None, + uniform_row_length=None, + dtype=dtypes.int64): + """Constructs a new RowPartitionSpec. + + Args: + nrows: The number of rows in the RowPartition, or `None` if unspecified. + nvals: The number of values partitioned by the RowPartition, or `None` if + unspecified. + uniform_row_length: The number of values in each row for this + RowPartition, or `None` if rows are ragged or row length is unspecified. 
+      dtype: The data type used to encode the partition. One of `tf.int64` or
+        `tf.int32`.
+    """
+    # Wrap dimension sizes in 1D TensorShapes so the default implementations
+    # of TypeSpec methods such as `is_compatible_with` will work.
+    nrows = tensor_shape.TensorShape([nrows])
+    nvals = tensor_shape.TensorShape([nvals])
+    if not isinstance(uniform_row_length, tensor_shape.TensorShape):
+      uniform_row_length = tensor_shape.TensorShape([uniform_row_length])
+    else:
+      uniform_row_length = uniform_row_length.with_rank(1)
+
+    self._nrows = nrows
+    self._nvals = nvals
+    self._uniform_row_length = uniform_row_length
+    self._dtype = dtypes.as_dtype(dtype)
+    if self._dtype not in (dtypes.int32, dtypes.int64):
+      raise ValueError("dtype must be tf.int32 or tf.int64")
+
+    # Check dimension consistency, & infer dimensions when possible.
+    nrows = tensor_shape.dimension_value(nrows[0])
+    nvals = tensor_shape.dimension_value(nvals[0])
+    ncols = tensor_shape.dimension_value(uniform_row_length[0])
+    if nrows == 0:  # no rows -> no values.
+      if nvals is None:
+        self._nvals = tensor_shape.TensorShape([0])
+      elif nvals != 0:
+        raise ValueError("nvals=%s is not compatible with nrows=%s" %
+                         (nvals, nrows))
+    if ncols == 0:  # there are no values in each row -> no values.
+ if nvals is None: + self._nvals = tensor_shape.TensorShape([0]) + elif nvals != 0: + raise ValueError("nvals=%s is not compatible with uniform_row_length" + "=%s" % (nvals, uniform_row_length)) + if ncols is not None and nvals is not None: + if ncols != 0 and nvals % ncols != 0: + raise ValueError("nvals=%s is not compatible with uniform_row_length" + "=%s (doesn't divide evenly)" % (nvals, ncols)) + if nrows is not None and nvals != ncols * nrows: + raise ValueError("nvals=%s is not compatible with nrows=%s and " + "uniform_row_length=%s" % (nvals, nrows, ncols)) + if nrows is None and ncols != 0: + self._nrows = tensor_shape.TensorShape([nvals // ncols]) + if ncols is not None and nrows is not None and nvals is None: + self._nvals = tensor_shape.TensorShape([ncols * nrows]) + + def is_compatible_with(self, other): + if not super(RowPartitionSpec, self).is_compatible_with(other): + return False + nrows = self._nrows.merge_with(other.nrows) + nvals = self._nvals.merge_with(other.nvals) + ncols = self._uniform_row_length.merge_with(other.uniform_row_length) + return self._dimensions_compatible(nrows, nvals, ncols) + + def _serialize(self): + return (self._nrows, self._nvals, self._uniform_row_length, self._dtype) + + @classmethod + def _deserialize(cls, serialization): + # Remove TensorShape wrappers from serialization. 
+ (nrows, nvals, uniform_row_length, dtype) = serialization + nrows = tensor_shape.dimension_value(nrows[0]) + nvals = tensor_shape.dimension_value(nvals[0]) + return cls(nrows, nvals, uniform_row_length, dtype) + + @property + def nrows(self): + return tensor_shape.dimension_value(self._nrows[0]) + + @property + def nvals(self): + return tensor_shape.dimension_value(self._nvals[0]) + + @property + def uniform_row_length(self): + return tensor_shape.dimension_value(self._uniform_row_length[0]) + + @property + def dtype(self): + return self._dtype + + @property + def _component_specs(self): + row_splits_shape = tensor_shape.TensorShape( + [tensor_shape.dimension_at_index(self._nrows, 0) + 1]) + return tensor_lib.TensorSpec(row_splits_shape, self._dtype) + + def _to_components(self, value): + return value.row_splits() + + def _from_components(self, tensor): + return RowPartition.from_row_splits(tensor, validate=False) + + @classmethod + def from_value(cls, value): + if not isinstance(value, RowPartition): + raise TypeError("Expected `value` to be a `RowPartition`") + return cls(value.static_nrows, value.static_nvals, + value.static_uniform_row_length, value.dtype) + + def __repr__(self): + return ("RowPartitionSpec(nrows=%s, nvals=%s, uniform_row_length=%s, " + "dtype=%r)" % (self.nrows, self.nvals, self.uniform_row_length, + self.dtype)) + + @staticmethod + def _dimensions_compatible(nrows, nvals, uniform_row_length): + """Returns true if the given dimensions are compatible.""" + nrows = tensor_shape.dimension_value(nrows[0]) + nvals = tensor_shape.dimension_value(nvals[0]) + ncols = tensor_shape.dimension_value(uniform_row_length[0]) + if nrows == 0 and nvals not in (0, None): + return False # can't have values if we have no rows. + if ncols == 0 and nvals not in (0, None): + return False # can't have values if we have no values in each row. + if ncols is not None and nvals is not None: + if ncols != 0 and nvals % ncols != 0: + return False # rows aren't uniform. 
+ if nrows is not None and nvals != ncols * nrows: + return False # inconsistent number of values. + return True + + def _merge_with(self, other): + """Merge two RowPartitionSpecs.""" + nrows = self._nrows.merge_with(other.nrows) + nvals = self._nvals.merge_with(other.nvals) + ncols = self._uniform_row_length.merge_with(other.uniform_row_length) + + if not RowPartitionSpec._dimensions_compatible(nrows, nvals, ncols): + raise ValueError("Merging incompatible RowPartitionSpecs") + + # NOTE: if the dtypes are unequal, behavior is unspecified. + if self.dtype != other.dtype: + raise ValueError("Merging RowPartitionSpecs with incompatible dtypes") + + return RowPartitionSpec(nrows=nrows[0], + nvals=nvals[0], + uniform_row_length=ncols[0], + dtype=self.dtype) + + def with_dtype(self, dtype): + nrows = tensor_shape.dimension_value(self._nrows[0]) + nvals = tensor_shape.dimension_value(self._nvals[0]) + return RowPartitionSpec(nrows, nvals, self._uniform_row_length, dtype) + + def __deepcopy__(self, memo): + del memo + dtype = self.dtype + nrows = tensor_shape.dimension_value(self._nrows[0]) + nvals = tensor_shape.dimension_value(self._nvals[0]) + uniform_row_length = (None if self._uniform_row_length is None else + tensor_shape.dimension_value( + self._uniform_row_length[0])) + return RowPartitionSpec(nrows, nvals, uniform_row_length, dtype) + + +nested_structure_coder.register_codec( + nested_structure_coder.BuiltInTypeSpecCodec( + RowPartitionSpec, struct_pb2.TypeSpecProto.ROW_PARTITION_SPEC + ) +) + + +# =============================================================================== +# Helper Functions +# =============================================================================== + + +def _assert_monotonic_increasing(tensor, message=None): + return check_ops.assert_non_negative( + tensor[1:] - tensor[:-1], message=message) + + +def _assert_zero(tensor, message=None): + return check_ops.assert_equal( + tensor, constant_op.constant(0, dtype=tensor.dtype), 
message=message) + + +def _cast_if_not_none(tensor, dtype): + return None if tensor is None else math_ops.cast(tensor, dtype) + + +def _merge_tensors(t1, t2, name, validate): + """Merge two optional Tensors with equal values into a single Tensor. + + Args: + t1: tf.Tensor or None + t2: tf.Tensor or None + name: A name for the tensors (for error messages) + validate: If true, then check that `t1` is compatible with `t2` (if both are + non-None). + + Returns: + A pair `(merged_value, validated)`: + * `merged_value` is `t1` if it is not None; or `t2` otherwise. + * `validated` is true if we validated that t1 and t2 are equal (either + by adding a check, or because t1 is t2). + """ + if t1 is None: + return t2, False + elif t2 is None: + return t1, False + elif t1 is t2: + return t1, True + else: + err_msg = ("RowPartition._merge_precomputed_encodings: partitions " + "have incompatible %s" % name) + if not t1.shape.is_compatible_with(t2.shape): + raise ValueError(err_msg) + if validate: + checks = [check_ops.assert_equal(t1, t2, message=err_msg)] + return control_flow_ops.with_dependencies(checks, t1), True + else: + return t1, False + +_row_partition_factory_key = object() # unique private object + + +def _get_dtype_or_none(value): + if isinstance(value, tensor_lib.Tensor): + return value.dtype + return None + + +def _get_target_dtype(values, dtype=None, dtype_hint=None): + """Gets the target dtype of a family of values.""" + if dtype is not None: + return dtype + + for value in values: + if isinstance(value, tensor_lib.Tensor): + return value.dtype + + for value in values: + if isinstance(value, np.ndarray): + return dtypes.as_dtype(value.dtype) + + if dtype_hint is not None: + return dtype_hint + + return dtypes.int64 + + +def _convert_all_to_tensors(values, dtype=None, dtype_hint=None): + """Convert a list of objects to tensors of the same dtype.""" + target_dtype = _get_target_dtype([x for (x, _) in values], dtype, dtype_hint) + + # If dtype is None, we use 
convert behavior. + # If dtype is not None, we use cast behavior. + convert_behavior = dtype is None + + if convert_behavior: + return [ + None if x is None else ops.convert_to_tensor( + x, dtype=target_dtype, name=name) for (x, name) in values + ] + else: + return [ + None if x is None else math_ops.cast(x, dtype=target_dtype, name=name) + for (x, name) in values + ] diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/segment_id_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/segment_id_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..52489678f54c42d6a560c5339494aa6b732568ed --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/ragged/segment_id_ops.py @@ -0,0 +1,134 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Ops for converting between row_splits and segment_ids."""
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import tensor_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops.ragged import ragged_util
+from tensorflow.python.util import dispatch
+from tensorflow.python.util.tf_export import tf_export
+
+
+# For background on "segments" and "segment ids", see:
+# https://www.tensorflow.org/api_docs/python/tf/math#Segmentation
+@tf_export("ragged.row_splits_to_segment_ids")
+@dispatch.add_dispatch_support
+def row_splits_to_segment_ids(splits, name=None, out_type=None):
+  """Generates the segmentation corresponding to a RaggedTensor `row_splits`.
+
+  Returns an integer vector `segment_ids`, where `segment_ids[i] == j` if
+  `splits[j] <= i < splits[j+1]`. Example:
+
+  >>> print(tf.ragged.row_splits_to_segment_ids([0, 3, 3, 5, 6, 9]))
+  tf.Tensor([0 0 0 2 2 3 4 4 4], shape=(9,), dtype=int64)
+
+  Args:
+    splits: A sorted 1-D integer Tensor. `splits[0]` must be zero.
+    name: A name prefix for the returned tensor (optional).
+    out_type: The dtype for the return value. Defaults to `splits.dtype`,
+      or `tf.int64` if `splits` does not have a dtype.
+
+  Returns:
+    A sorted 1-D integer Tensor, with `shape=[splits[-1]]`
+
+  Raises:
+    ValueError: If `splits` is invalid.
+  """
+  with ops.name_scope(name, "RaggedSplitsToSegmentIds", [splits]) as name:
+    splits = ops.convert_to_tensor(
+        splits, name="splits",
+        preferred_dtype=dtypes.int64)
+    if splits.dtype not in (dtypes.int32, dtypes.int64):
+      raise ValueError("splits must have dtype int32 or int64")
+    splits.shape.assert_has_rank(1)
+    # A valid row_splits vector always has at least one element (the leading
+    # zero), so a statically-known-empty vector is rejected outright.
+    if tensor_shape.dimension_value(splits.shape[0]) == 0:
+      raise ValueError("Invalid row_splits: []")
+    if out_type is None:
+      out_type = splits.dtype
+    else:
+      out_type = dtypes.as_dtype(out_type)
+    # row_lengths[j] = number of output entries whose segment id must be j.
+    row_lengths = splits[1:] - splits[:-1]
+    nrows = array_ops.shape(splits, out_type=out_type)[-1] - 1
+    indices = math_ops.range(nrows)
+    # Expand each row index i into row_lengths[i] consecutive copies.
+    return ragged_util.repeat(indices, repeats=row_lengths, axis=0)
+
+
+# For background on "segments" and "segment ids", see:
+# https://www.tensorflow.org/api_docs/python/tf/math#Segmentation
+@tf_export("ragged.segment_ids_to_row_splits")
+@dispatch.add_dispatch_support
+def segment_ids_to_row_splits(segment_ids, num_segments=None,
+                              out_type=None, name=None):
+  """Generates the RaggedTensor `row_splits` corresponding to a segmentation.
+
+  Returns an integer vector `splits`, where `splits[0] = 0` and
+  `splits[i] = splits[i-1] + count(segment_ids==i)`. Example:
+
+  >>> print(tf.ragged.segment_ids_to_row_splits([0, 0, 0, 2, 2, 3, 4, 4, 4]))
+  tf.Tensor([0 3 3 5 6 9], shape=(6,), dtype=int64)
+
+  Args:
+    segment_ids: A 1-D integer Tensor.
+    num_segments: A scalar integer indicating the number of segments. Defaults
+      to `max(segment_ids) + 1` (or zero if `segment_ids` is empty).
+    out_type: The dtype for the return value. Defaults to `segment_ids.dtype`,
+      or `tf.int64` if `segment_ids` does not have a dtype.
+    name: A name prefix for the returned tensor (optional).
+
+  Returns:
+    A sorted 1-D integer Tensor, with `shape=[num_segments + 1]`.
+  """
+  # Local import bincount_ops to avoid import-cycle.
+  from tensorflow.python.ops import bincount_ops  # pylint: disable=g-import-not-at-top
+  # Resolve the output dtype before entering the name scope: prefer the
+  # caller's explicit choice, then a tensor argument's dtype, then int64.
+  if out_type is None:
+    if isinstance(segment_ids, tensor.Tensor):
+      out_type = segment_ids.dtype
+    elif isinstance(num_segments, tensor.Tensor):
+      out_type = num_segments.dtype
+    else:
+      out_type = dtypes.int64
+  else:
+    out_type = dtypes.as_dtype(out_type)
+  with ops.name_scope(name, "SegmentIdsToRaggedSplits", [segment_ids]) as name:
+    # Note: we cast int64 tensors to int32, since bincount currently only
+    # supports int32 inputs.
+    segment_ids = ragged_util.convert_to_int_tensor(segment_ids, "segment_ids",
+                                                    dtype=dtypes.int32)
+    segment_ids.shape.assert_has_rank(1)
+    if num_segments is not None:
+      num_segments = ragged_util.convert_to_int_tensor(num_segments,
+                                                       "num_segments",
+                                                       dtype=dtypes.int32)
+      num_segments.shape.assert_has_rank(0)
+
+    # Passing num_segments as both minlength and maxlength pins the histogram
+    # to exactly num_segments bins (when num_segments is given).
+    row_lengths = bincount_ops.bincount(
+        segment_ids,
+        minlength=num_segments,
+        maxlength=num_segments,
+        dtype=out_type)
+    # Prepend the leading 0 required by the row_splits encoding; cumsum turns
+    # per-segment counts into split offsets.
+    splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)
+
+    # Update shape information, if possible.
+    if num_segments is not None:
+      const_num_segments = tensor_util.constant_value(num_segments)
+      if const_num_segments is not None:
+        splits.set_shape(tensor_shape.TensorShape([const_num_segments + 1]))
+
+    return splits