diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8365f2485eeb95eec0512770d1c747fcf367582d --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Core module for TensorFlow distribution objects and helpers.""" +from tensorflow.python.ops.distributions import distributions diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bijector_test_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bijector_test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03805a9cd6bf638a9f63675dd93bc6700c418e41 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/bijector_test_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/categorical.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/categorical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bd3db3cd5cbe148968639f3e2fb5b86526737f5 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/categorical.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..089bd4980168400a96d89eb4009cb0eee2631341 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet_multinomial.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet_multinomial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..511798c4f28439e097190b3f6cd465bd15760696 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/dirichlet_multinomial.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/distribution.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/distribution.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e889151bd1a67df5deb3d3818fb10f5910ef334e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/distribution.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/distributions.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/distributions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9725a3fe4f0c63754859fd828ea92dde76eb5185 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/distributions.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/exponential.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/exponential.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7898a46ad45ef73e711b82f838d54cfef4fb0ee Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/exponential.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/gamma.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/gamma.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01deb49f092677d7c4820e032a9e6d772f0258fc Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/gamma.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/identity_bijector.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/identity_bijector.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e701ae5e0a5a6f9c32e37b41666516a5b98a1f0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/identity_bijector.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/kullback_leibler.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/kullback_leibler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19dfa07705dde4c2a18fa0cb0b440b21ee3e1872 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/kullback_leibler.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/laplace.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/laplace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1e4ecbc12742635beeb8c91f2d07a70a8d3cc13 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/laplace.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/multinomial.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/multinomial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..783e100a683807b494c0365871341048d29b40d4 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/multinomial.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/normal.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/normal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..062c8418b380b080949bb0c3eaab59d148ffbc4e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/normal.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/special_math.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/special_math.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8328f1ea2684d0cf6b36c8053549d2dd84e8e853 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/special_math.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/student_t.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/student_t.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68514864b334bb0f1c3dd69f9e7b8a275aa6647f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/student_t.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/uniform.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/uniform.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0067d0c615b6c038eaa55f9cc0b33ee1d4c3502 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/uniform.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e47263773fcb3cd36c95bcb1de628d0f795cd59 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/__pycache__/util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bernoulli.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bernoulli.py new file mode 100644 index 0000000000000000000000000000000000000000..7e5d875c8cf285f25071edf175b4ecfe1d8f5baa --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bernoulli.py @@ -0,0 +1,183 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""The Bernoulli distribution class.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import kullback_leibler +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@tf_export(v1=["distributions.Bernoulli"]) +class Bernoulli(distribution.Distribution): + """Bernoulli distribution. + + The Bernoulli distribution with `probs` parameter, i.e., the probability of a + `1` outcome (vs a `0` outcome). + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + logits=None, + probs=None, + dtype=dtypes.int32, + validate_args=False, + allow_nan_stats=True, + name="Bernoulli"): + """Construct Bernoulli distributions. + + Args: + logits: An N-D `Tensor` representing the log-odds of a `1` event. Each + entry in the `Tensor` parametrizes an independent Bernoulli distribution + where the probability of an event is sigmoid(logits). Only one of + `logits` or `probs` should be passed in. + probs: An N-D `Tensor` representing the probability of a `1` + event. Each entry in the `Tensor` parameterizes an independent + Bernoulli distribution. Only one of `logits` or `probs` should be passed + in. + dtype: The type of the event samples. 
Default: `int32`. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, + statistics (e.g., mean, mode, variance) use the value "`NaN`" to + indicate the result is undefined. When `False`, an exception is raised + if one or more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + + Raises: + ValueError: If p and logits are passed, or if neither are passed. + """ + parameters = dict(locals()) + with ops.name_scope(name) as name: + self._logits, self._probs = distribution_util.get_logits_and_probs( + logits=logits, + probs=probs, + validate_args=validate_args, + name=name) + super(Bernoulli, self).__init__( + dtype=dtype, + reparameterization_type=distribution.NOT_REPARAMETERIZED, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + parameters=parameters, + graph_parents=[self._logits, self._probs], + name=name) + + @staticmethod + def _param_shapes(sample_shape): + return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)} + + @property + def logits(self): + """Log-odds of a `1` outcome (vs `0`).""" + return self._logits + + @property + def probs(self): + """Probability of a `1` outcome (vs `0`).""" + return self._probs + + def _batch_shape_tensor(self): + return array_ops.shape(self._logits) + + def _batch_shape(self): + return self._logits.get_shape() + + def _event_shape_tensor(self): + return array_ops.constant([], dtype=dtypes.int32) + + def _event_shape(self): + return tensor_shape.TensorShape([]) + + def _sample_n(self, n, seed=None): + new_shape = array_ops.concat([[n], self.batch_shape_tensor()], 0) + uniform = random_ops.random_uniform( + new_shape, seed=seed, dtype=self.probs.dtype) + sample = math_ops.less(uniform, self.probs) 
+ return math_ops.cast(sample, self.dtype) + + def _log_prob(self, event): + if self.validate_args: + event = distribution_util.embed_check_integer_casting_closed( + event, target_dtype=dtypes.bool) + + # TODO(jaana): The current sigmoid_cross_entropy_with_logits has + # inconsistent behavior for logits = inf/-inf. + event = math_ops.cast(event, self.logits.dtype) + logits = self.logits + # sigmoid_cross_entropy_with_logits doesn't broadcast shape, + # so we do this here. + + def _broadcast(logits, event): + return (array_ops.ones_like(event) * logits, + array_ops.ones_like(logits) * event) + + if not (event.get_shape().is_fully_defined() and + logits.get_shape().is_fully_defined() and + event.get_shape() == logits.get_shape()): + logits, event = _broadcast(logits, event) + return -nn.sigmoid_cross_entropy_with_logits(labels=event, logits=logits) + + def _entropy(self): + return (-self.logits * (math_ops.sigmoid(self.logits) - 1) + # pylint: disable=invalid-unary-operand-type + nn.softplus(-self.logits)) # pylint: disable=invalid-unary-operand-type + + def _mean(self): + return array_ops.identity(self.probs) + + def _variance(self): + return self._mean() * (1. - self.probs) + + def _mode(self): + """Returns `1` if `prob > 0.5` and `0` otherwise.""" + return math_ops.cast(self.probs > 0.5, self.dtype) + + +@kullback_leibler.RegisterKL(Bernoulli, Bernoulli) +def _kl_bernoulli_bernoulli(a, b, name=None): + """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli. + + Args: + a: instance of a Bernoulli distribution object. + b: instance of a Bernoulli distribution object. + name: (optional) Name to use for created operations. + default is "kl_bernoulli_bernoulli". 
+ + Returns: + Batchwise KL(a || b) + """ + with ops.name_scope(name, "kl_bernoulli_bernoulli", + values=[a.logits, b.logits]): + delta_probs0 = nn.softplus(-b.logits) - nn.softplus(-a.logits) + delta_probs1 = nn.softplus(b.logits) - nn.softplus(a.logits) + return (math_ops.sigmoid(a.logits) * delta_probs0 + + math_ops.sigmoid(-a.logits) * delta_probs1) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/beta.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/beta.py new file mode 100644 index 0000000000000000000000000000000000000000..ce89d662cb7792a6a81712f62e687e0a7fcba093 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/beta.py @@ -0,0 +1,407 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""The Beta distribution class.""" + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import kullback_leibler +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "Beta", + "BetaWithSoftplusConcentration", +] + + +_beta_sample_note = """Note: `x` must have dtype `self.dtype` and be in +`[0, 1].` It must have a shape compatible with `self.batch_shape()`.""" + + +@tf_export(v1=["distributions.Beta"]) +class Beta(distribution.Distribution): + """Beta distribution. + + The Beta distribution is defined over the `(0, 1)` interval using parameters + `concentration1` (aka "alpha") and `concentration0` (aka "beta"). + + #### Mathematical Details + + The probability density function (pdf) is, + + ```none + pdf(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z + Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta) + ``` + + where: + + * `concentration1 = alpha`, + * `concentration0 = beta`, + * `Z` is the normalization constant, and, + * `Gamma` is the [gamma function]( + https://en.wikipedia.org/wiki/Gamma_function). + + The concentration parameters represent mean total counts of a `1` or a `0`, + i.e., + + ```none + concentration1 = alpha = mean * total_concentration + concentration0 = beta = (1. 
- mean) * total_concentration + ``` + + where `mean` in `(0, 1)` and `total_concentration` is a positive real number + representing a mean `total_count = concentration1 + concentration0`. + + Distribution parameters are automatically broadcast in all functions; see + examples for details. + + Warning: The samples can be zero due to finite precision. + This happens more often when some of the concentrations are very small. + Make sure to round the samples to `np.finfo(dtype).tiny` before computing the + density. + + Samples of this distribution are reparameterized (pathwise differentiable). + The derivatives are computed using the approach described in + (Figurnov et al., 2018). + + #### Examples + + ```python + import tensorflow_probability as tfp + tfd = tfp.distributions + + # Create a batch of three Beta distributions. + alpha = [1, 2, 3] + beta = [1, 2, 3] + dist = tfd.Beta(alpha, beta) + + dist.sample([4, 5]) # Shape [4, 5, 3] + + # `x` has three batch entries, each with two samples. + x = [[.1, .4, .5], + [.2, .3, .5]] + # Calculate the probability of each pair of samples under the corresponding + # distribution in `dist`. + dist.prob(x) # Shape [2, 3] + ``` + + ```python + # Create batch_shape=[2, 3] via parameter broadcast: + alpha = [[1.], [2]] # Shape [2, 1] + beta = [3., 4, 5] # Shape [3] + dist = tfd.Beta(alpha, beta) + + # alpha broadcast as: [[1., 1, 1,], + # [2, 2, 2]] + # beta broadcast as: [[3., 4, 5], + # [3, 4, 5]] + # batch_Shape [2, 3] + dist.sample([4, 5]) # Shape [4, 5, 2, 3] + + x = [.2, .3, .5] + # x will be broadcast as [[.2, .3, .5], + # [.2, .3, .5]], + # thus matching batch_shape [2, 3]. + dist.prob(x) # Shape [2, 3] + ``` + + Compute the gradients of samples w.r.t. 
the parameters: + + ```python + alpha = tf.constant(1.0) + beta = tf.constant(2.0) + dist = tfd.Beta(alpha, beta) + samples = dist.sample(5) # Shape [5] + loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function + # Unbiased stochastic gradients of the loss function + grads = tf.gradients(loss, [alpha, beta]) + ``` + + References: + Implicit Reparameterization Gradients: + [Figurnov et al., 2018] + (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) + ([pdf] + (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf)) + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + concentration1=None, + concentration0=None, + validate_args=False, + allow_nan_stats=True, + name="Beta"): + """Initialize a batch of Beta distributions. + + Args: + concentration1: Positive floating-point `Tensor` indicating mean + number of successes; aka "alpha". Implies `self.dtype` and + `self.batch_shape`, i.e., + `concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`. + concentration0: Positive floating-point `Tensor` indicating mean + number of failures; aka "beta". Otherwise has same semantics as + `concentration1`. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. 
+ name: Python `str` name prefixed to Ops created by this class. + """ + parameters = dict(locals()) + with ops.name_scope(name, values=[concentration1, concentration0]) as name: + self._concentration1 = self._maybe_assert_valid_concentration( + ops.convert_to_tensor(concentration1, name="concentration1"), + validate_args) + self._concentration0 = self._maybe_assert_valid_concentration( + ops.convert_to_tensor(concentration0, name="concentration0"), + validate_args) + check_ops.assert_same_float_dtype([ + self._concentration1, self._concentration0]) + self._total_concentration = self._concentration1 + self._concentration0 + super(Beta, self).__init__( + dtype=self._total_concentration.dtype, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + reparameterization_type=distribution.FULLY_REPARAMETERIZED, + parameters=parameters, + graph_parents=[self._concentration1, + self._concentration0, + self._total_concentration], + name=name) + + @staticmethod + def _param_shapes(sample_shape): + return dict(zip( + ["concentration1", "concentration0"], + [ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2)) + + @property + def concentration1(self): + """Concentration parameter associated with a `1` outcome.""" + return self._concentration1 + + @property + def concentration0(self): + """Concentration parameter associated with a `0` outcome.""" + return self._concentration0 + + @property + def total_concentration(self): + """Sum of concentration parameters.""" + return self._total_concentration + + def _batch_shape_tensor(self): + return array_ops.shape(self.total_concentration) + + def _batch_shape(self): + return self.total_concentration.get_shape() + + def _event_shape_tensor(self): + return constant_op.constant([], dtype=dtypes.int32) + + def _event_shape(self): + return tensor_shape.TensorShape([]) + + def _sample_n(self, n, seed=None): + expanded_concentration1 = array_ops.ones_like( + self.total_concentration, dtype=self.dtype) * 
self.concentration1 + expanded_concentration0 = array_ops.ones_like( + self.total_concentration, dtype=self.dtype) * self.concentration0 + gamma1_sample = random_ops.random_gamma( + shape=[n], + alpha=expanded_concentration1, + dtype=self.dtype, + seed=seed) + gamma2_sample = random_ops.random_gamma( + shape=[n], + alpha=expanded_concentration0, + dtype=self.dtype, + seed=distribution_util.gen_new_seed(seed, "beta")) + beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample) + return beta_sample + + @distribution_util.AppendDocstring(_beta_sample_note) + def _log_prob(self, x): + return self._log_unnormalized_prob(x) - self._log_normalization() + + @distribution_util.AppendDocstring(_beta_sample_note) + def _prob(self, x): + return math_ops.exp(self._log_prob(x)) + + @distribution_util.AppendDocstring(_beta_sample_note) + def _log_cdf(self, x): + return math_ops.log(self._cdf(x)) + + @distribution_util.AppendDocstring(_beta_sample_note) + def _cdf(self, x): + return math_ops.betainc(self.concentration1, self.concentration0, x) + + def _log_unnormalized_prob(self, x): + x = self._maybe_assert_valid_sample(x) + return (math_ops.xlogy(self.concentration1 - 1., x) + + (self.concentration0 - 1.) * math_ops.log1p(-x)) # pylint: disable=invalid-unary-operand-type + + def _log_normalization(self): + return (math_ops.lgamma(self.concentration1) + + math_ops.lgamma(self.concentration0) + - math_ops.lgamma(self.total_concentration)) + + def _entropy(self): + return ( + self._log_normalization() + - (self.concentration1 - 1.) * math_ops.digamma(self.concentration1) + - (self.concentration0 - 1.) * math_ops.digamma(self.concentration0) + + ((self.total_concentration - 2.) * + math_ops.digamma(self.total_concentration))) + + def _mean(self): + return self._concentration1 / self._total_concentration + + def _variance(self): + return self._mean() * (1. - self._mean()) / (1. 
+ self.total_concentration) + + @distribution_util.AppendDocstring( + """Note: The mode is undefined when `concentration1 <= 1` or + `concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN` + is used for undefined modes. If `self.allow_nan_stats` is `False` an + exception is raised when one or more modes are undefined.""") + def _mode(self): + mode = (self.concentration1 - 1.) / (self.total_concentration - 2.) + if self.allow_nan_stats: + nan = array_ops.fill( + self.batch_shape_tensor(), + np.array(np.nan, dtype=self.dtype.as_numpy_dtype()), + name="nan") + is_defined = math_ops.logical_and(self.concentration1 > 1., + self.concentration0 > 1.) + return array_ops.where_v2(is_defined, mode, nan) + return control_flow_ops.with_dependencies([ + check_ops.assert_less( + array_ops.ones([], dtype=self.dtype), + self.concentration1, + message="Mode undefined for concentration1 <= 1."), + check_ops.assert_less( + array_ops.ones([], dtype=self.dtype), + self.concentration0, + message="Mode undefined for concentration0 <= 1.") + ], mode) + + def _maybe_assert_valid_concentration(self, concentration, validate_args): + """Checks the validity of a concentration parameter.""" + if not validate_args: + return concentration + return control_flow_ops.with_dependencies([ + check_ops.assert_positive( + concentration, + message="Concentration parameter must be positive."), + ], concentration) + + def _maybe_assert_valid_sample(self, x): + """Checks the validity of a sample.""" + if not self.validate_args: + return x + return control_flow_ops.with_dependencies([ + check_ops.assert_positive(x, message="sample must be positive"), + check_ops.assert_less( + x, + array_ops.ones([], self.dtype), + message="sample must be less than `1`."), + ], x) + + +class BetaWithSoftplusConcentration(Beta): + """Beta with softplus transform of `concentration1` and `concentration0`.""" + + @deprecation.deprecated( + "2019-01-01", + "Use `tfd.Beta(tf.nn.softplus(concentration1), " + 
"tf.nn.softplus(concentration2))` instead.", + warn_once=True) + def __init__(self, + concentration1, + concentration0, + validate_args=False, + allow_nan_stats=True, + name="BetaWithSoftplusConcentration"): + parameters = dict(locals()) + with ops.name_scope(name, values=[concentration1, + concentration0]) as name: + super(BetaWithSoftplusConcentration, self).__init__( + concentration1=nn.softplus(concentration1, + name="softplus_concentration1"), + concentration0=nn.softplus(concentration0, + name="softplus_concentration0"), + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + name=name) + self._parameters = parameters + + +@kullback_leibler.RegisterKL(Beta, Beta) +def _kl_beta_beta(d1, d2, name=None): + """Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta. + + Args: + d1: instance of a Beta distribution object. + d2: instance of a Beta distribution object. + name: (optional) Name to use for created operations. + default is "kl_beta_beta". + + Returns: + Batchwise KL(d1 || d2) + """ + def delta(fn, is_property=True): + fn1 = getattr(d1, fn) + fn2 = getattr(d2, fn) + return (fn2 - fn1) if is_property else (fn2() - fn1()) + with ops.name_scope(name, "kl_beta_beta", values=[ + d1.concentration1, + d1.concentration0, + d1.total_concentration, + d2.concentration1, + d2.concentration0, + d2.total_concentration, + ]): + return (delta("_log_normalization", is_property=False) + - math_ops.digamma(d1.concentration1) * delta("concentration1") + - math_ops.digamma(d1.concentration0) * delta("concentration0") + + (math_ops.digamma(d1.total_concentration) + * delta("total_concentration"))) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector.py new file mode 100644 index 0000000000000000000000000000000000000000..bdf3dde499db2678923bd9b0e85ed7299de295f8 --- /dev/null +++ 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector.py @@ -0,0 +1,21 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Bijector base.""" + +# go/tf-wildcard-import +# pylint: disable=wildcard-import,unused-import +from tensorflow.python.ops.distributions.bijector_impl import Bijector + +# pylint: enable=wildcard-import,unused-import diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_impl.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..99d7a5ab106578b3ea653fbbeec23c10e40ad292 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_impl.py @@ -0,0 +1,1113 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Bijector base.""" + +import abc +import collections +import contextlib +import re + +import numpy as np + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import object_identity + + +__all__ = [ + "Bijector", +] + + +class _Mapping(collections.namedtuple( + "_Mapping", ["x", "y", "ildj_map", "kwargs"])): + """Helper class to make it easier to manage caching in `Bijector`.""" + + def __new__(cls, x=None, y=None, ildj_map=None, kwargs=None): + """Custom __new__ so namedtuple items have defaults. + + Args: + x: `Tensor`. Forward. + y: `Tensor`. Inverse. + ildj_map: `Dictionary`. This is a mapping from event_ndims to a `Tensor` + representing the inverse log det jacobian. + kwargs: Python dictionary. Extra args supplied to + forward/inverse/etc functions. + + Returns: + mapping: New instance of _Mapping. 
+ """ + return super(_Mapping, cls).__new__(cls, x, y, ildj_map, kwargs) + + @property + def x_key(self): + """Returns key used for caching Y=g(X).""" + return ((object_identity.Reference(self.x),) + + self._deep_tuple(tuple(sorted(self.kwargs.items())))) + + @property + def y_key(self): + """Returns key used for caching X=g^{-1}(Y).""" + return ((object_identity.Reference(self.y),) + + self._deep_tuple(tuple(sorted(self.kwargs.items())))) + + def merge(self, x=None, y=None, ildj_map=None, kwargs=None, mapping=None): + """Returns new _Mapping with args merged with self. + + Args: + x: `Tensor`. Forward. + y: `Tensor`. Inverse. + ildj_map: `Dictionary`. This is a mapping from event_ndims to a `Tensor` + representing the inverse log det jacobian. + kwargs: Python dictionary. Extra args supplied to + forward/inverse/etc functions. + mapping: Instance of _Mapping to merge. Can only be specified if no other + arg is specified. + + Returns: + mapping: New instance of `_Mapping` which has inputs merged with self. + + Raises: + ValueError: if mapping and any other arg is not `None`. 
+ """ + if mapping is None: + mapping = _Mapping(x=x, y=y, ildj_map=ildj_map, kwargs=kwargs) + elif any(arg is not None for arg in [x, y, ildj_map, kwargs]): + raise ValueError("Cannot simultaneously specify mapping and individual " + "arguments.") + + return _Mapping( + x=self._merge(self.x, mapping.x), + y=self._merge(self.y, mapping.y), + ildj_map=self._merge_dicts(self.ildj_map, mapping.ildj_map), + kwargs=self._merge(self.kwargs, mapping.kwargs)) + + def _merge_dicts(self, old=None, new=None): + """Helper to merge two dictionaries.""" + old = {} if old is None else old + new = {} if new is None else new + for k, v in new.items(): + val = old.get(k, None) + if val is not None and val is not v: + raise ValueError("Found different value for existing key " + "(key:{} old_value:{} new_value:{}".format( + k, old[k], v)) + old[k] = v + return old + + def _merge(self, old, new): + """Helper to merge which handles merging one value.""" + if old is None: + return new + elif new is not None and old is not new: + raise ValueError("Incompatible values: %s != %s" % (old, new)) + return old + + def _deep_tuple(self, x): + """Converts lists of lists to tuples of tuples.""" + return (tuple(map(self._deep_tuple, x)) + if isinstance(x, (list, tuple)) else x) + + +class Bijector(metaclass=abc.ABCMeta): + r"""Interface for transformations of a `Distribution` sample. + + Bijectors can be used to represent any differentiable and injective + (one to one) function defined on an open subset of `R^n`. Some non-injective + transformations are also supported (see "Non Injective Transforms" below). + + #### Mathematical Details + + A `Bijector` implements a [smooth covering map]( + https://en.wikipedia.org/wiki/Local_diffeomorphism), i.e., a local + diffeomorphism such that every point in the target has a neighborhood evenly + covered by a map ([see also]( + https://en.wikipedia.org/wiki/Covering_space#Covering_of_a_manifold)). 
+ A `Bijector` is used by `TransformedDistribution` but can be generally used + for transforming a `Distribution` generated `Tensor`. A `Bijector` is + characterized by three operations: + + 1. Forward + + Useful for turning one random outcome into another random outcome from a + different distribution. + + 2. Inverse + + Useful for "reversing" a transformation to compute one probability in + terms of another. + + 3. `log_det_jacobian(x)` + + "The log of the absolute value of the determinant of the matrix of all + first-order partial derivatives of the inverse function." + + Useful for inverting a transformation to compute one probability in terms + of another. Geometrically, the Jacobian determinant is the volume of the + transformation and is used to scale the probability. + + We take the absolute value of the determinant before log to avoid NaN + values. Geometrically, a negative determinant corresponds to an + orientation-reversing transformation. It is ok for us to discard the sign + of the determinant because we only integrate everywhere-nonnegative + functions (probability densities) and the correct orientation is always the + one that produces a nonnegative integrand. + + By convention, transformations of random variables are named in terms of the + forward transformation. The forward transformation creates samples, the + inverse is useful for computing probabilities. + + #### Example Uses + + - Basic properties: + + ```python + x = ... # A tensor. + # Evaluate forward transformation. + fwd_x = my_bijector.forward(x) + x == my_bijector.inverse(fwd_x) + x != my_bijector.forward(fwd_x) # Not equal because x != g(g(x)). 
+ ``` + + - Computing a log-likelihood: + + ```python + def transformed_log_prob(bijector, log_prob, x): + return (bijector.inverse_log_det_jacobian(x, event_ndims=0) + + log_prob(bijector.inverse(x))) + ``` + + - Transforming a random outcome: + + ```python + def transformed_sample(bijector, x): + return bijector.forward(x) + ``` + + #### Example Bijectors + + - "Exponential" + + ```none + Y = g(X) = exp(X) + X ~ Normal(0, 1) # Univariate. + ``` + + Implies: + + ```none + g^{-1}(Y) = log(Y) + |Jacobian(g^{-1})(y)| = 1 / y + Y ~ LogNormal(0, 1), i.e., + prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y)) + = (1 / y) Normal(log(y); 0, 1) + ``` + + Here is an example of how one might implement the `Exp` bijector: + + ```python + class Exp(Bijector): + + def __init__(self, validate_args=False, name="exp"): + super(Exp, self).__init__( + validate_args=validate_args, + forward_min_event_ndims=0, + name=name) + + def _forward(self, x): + return math_ops.exp(x) + + def _inverse(self, y): + return math_ops.log(y) + + def _inverse_log_det_jacobian(self, y): + return -self._forward_log_det_jacobian(self._inverse(y)) + + def _forward_log_det_jacobian(self, x): + # Notice that we needn't do any reducing, even when`event_ndims > 0`. + # The base Bijector class will handle reducing for us; it knows how + # to do so because we called `super` `__init__` with + # `forward_min_event_ndims = 0`. + return x + ``` + + - "Affine" + + ```none + Y = g(X) = sqrtSigma * X + mu + X ~ MultivariateNormal(0, I_d) + ``` + + Implies: + + ```none + g^{-1}(Y) = inv(sqrtSigma) * (Y - mu) + |Jacobian(g^{-1})(y)| = det(inv(sqrtSigma)) + Y ~ MultivariateNormal(mu, sqrtSigma) , i.e., + prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y)) + = det(sqrtSigma)^(-d) * + MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d) + ``` + + #### Min_event_ndims and Naming + + Bijectors are named for the dimensionality of data they act on (i.e. without + broadcasting). 
We can think of bijectors having an intrinsic `min_event_ndims` + , which is the minimum number of dimensions for the bijector act on. For + instance, a Cholesky decomposition requires a matrix, and hence + `min_event_ndims=2`. + + Some examples: + + `AffineScalar: min_event_ndims=0` + `Affine: min_event_ndims=1` + `Cholesky: min_event_ndims=2` + `Exp: min_event_ndims=0` + `Sigmoid: min_event_ndims=0` + `SoftmaxCentered: min_event_ndims=1` + + Note the difference between `Affine` and `AffineScalar`. `AffineScalar` + operates on scalar events, whereas `Affine` operates on vector-valued events. + + More generally, there is a `forward_min_event_ndims` and an + `inverse_min_event_ndims`. In most cases, these will be the same. + However, for some shape changing bijectors, these will be different + (e.g. a bijector which pads an extra dimension at the end, might have + `forward_min_event_ndims=0` and `inverse_min_event_ndims=1`. + + + #### Jacobian Determinant + + The Jacobian determinant is a reduction over `event_ndims - min_event_ndims` + (`forward_min_event_ndims` for `forward_log_det_jacobian` and + `inverse_min_event_ndims` for `inverse_log_det_jacobian`). + To see this, consider the `Exp` `Bijector` applied to a `Tensor` which has + sample, batch, and event (S, B, E) shape semantics. Suppose the `Tensor`'s + partitioned-shape is `(S=[4], B=[2], E=[3, 3])`. The shape of the `Tensor` + returned by `forward` and `inverse` is unchanged, i.e., `[4, 2, 3, 3]`. + However the shape returned by `inverse_log_det_jacobian` is `[4, 2]` because + the Jacobian determinant is a reduction over the event dimensions. + + Another example is the `Affine` `Bijector`. Because `min_event_ndims = 1`, the + Jacobian determinant reduction is over `event_ndims - 1`. + + It is sometimes useful to implement the inverse Jacobian determinant as the + negative forward Jacobian determinant. 
For example, + + ```python + def _inverse_log_det_jacobian(self, y): + return -self._forward_log_det_jac(self._inverse(y)) # Note negation. + ``` + + The correctness of this approach can be seen from the following claim. + + - Claim: + + Assume `Y = g(X)` is a bijection whose derivative exists and is nonzero + for its domain, i.e., `dY/dX = d/dX g(X) != 0`. Then: + + ```none + (log o det o jacobian o g^{-1})(Y) = -(log o det o jacobian o g)(X) + ``` + + - Proof: + + From the bijective, nonzero differentiability of `g`, the + [inverse function theorem]( + https://en.wikipedia.org/wiki/Inverse_function_theorem) + implies `g^{-1}` is differentiable in the image of `g`. + Applying the chain rule to `y = g(x) = g(g^{-1}(y))` yields + `I = g'(g^{-1}(y))*g^{-1}'(y)`. + The same theorem also implies `g^{-1}'` is non-singular therefore: + `inv[ g'(g^{-1}(y)) ] = g^{-1}'(y)`. + The claim follows from [properties of determinant]( + https://en.wikipedia.org/wiki/Determinant#Multiplicativity_and_matrix_groups). + + Generally its preferable to directly implement the inverse Jacobian + determinant. This should have superior numerical stability and will often + share subgraphs with the `_inverse` implementation. + + #### Is_constant_jacobian + + Certain bijectors will have constant jacobian matrices. For instance, the + `Affine` bijector encodes multiplication by a matrix plus a shift, with + jacobian matrix, the same aforementioned matrix. + + `is_constant_jacobian` encodes the fact that the jacobian matrix is constant. + The semantics of this argument are the following: + + * Repeated calls to "log_det_jacobian" functions with the same + `event_ndims` (but not necessarily same input), will return the first + computed jacobian (because the matrix is constant, and hence is input + independent). + * `log_det_jacobian` implementations are merely broadcastable to the true + `log_det_jacobian` (because, again, the jacobian matrix is input + independent). 
Specifically, `log_det_jacobian` is implemented as the + log jacobian determinant for a single input. + + ```python + class Identity(Bijector): + + def __init__(self, validate_args=False, name="identity"): + super(Identity, self).__init__( + is_constant_jacobian=True, + validate_args=validate_args, + forward_min_event_ndims=0, + name=name) + + def _forward(self, x): + return x + + def _inverse(self, y): + return y + + def _inverse_log_det_jacobian(self, y): + return -self._forward_log_det_jacobian(self._inverse(y)) + + def _forward_log_det_jacobian(self, x): + # The full log jacobian determinant would be array_ops.zero_like(x). + # However, we circumvent materializing that, since the jacobian + # calculation is input independent, and we specify it for one input. + return constant_op.constant(0., x.dtype.base_dtype) + + ``` + + #### Subclass Requirements + + - Subclasses typically implement: + + - `_forward`, + - `_inverse`, + - `_inverse_log_det_jacobian`, + - `_forward_log_det_jacobian` (optional). + + The `_forward_log_det_jacobian` is called when the bijector is inverted via + the `Invert` bijector. If undefined, a slightly less efficiently + calculation, `-1 * _inverse_log_det_jacobian`, is used. + + If the bijector changes the shape of the input, you must also implement: + + - _forward_event_shape_tensor, + - _forward_event_shape (optional), + - _inverse_event_shape_tensor, + - _inverse_event_shape (optional). + + By default the event-shape is assumed unchanged from input. + + - If the `Bijector`'s use is limited to `TransformedDistribution` (or friends + like `QuantizedDistribution`) then depending on your use, you may not need + to implement all of `_forward` and `_inverse` functions. + + Examples: + + 1. Sampling (e.g., `sample`) only requires `_forward`. + 2. Probability functions (e.g., `prob`, `cdf`, `survival`) only require + `_inverse` (and related). + 3. 
Only calling probability functions on the output of `sample` means + `_inverse` can be implemented as a cache lookup. + + See "Example Uses" [above] which shows how these functions are used to + transform a distribution. (Note: `_forward` could theoretically be + implemented as a cache lookup but this would require controlling the + underlying sample generation mechanism.) + + #### Non Injective Transforms + + **WARNING** Handing of non-injective transforms is subject to change. + + Non injective maps `g` are supported, provided their domain `D` can be + partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that, + ignoring sets of measure zero, the restriction of `g` to each subset is a + differentiable bijection onto `g(D)`. In particular, this implies that for + `y in g(D)`, the set inverse, i.e. `g^{-1}(y) = {x in D : g(x) = y}`, always + contains exactly `k` distinct points. + + The property, `_is_injective` is set to `False` to indicate that the bijector + is not injective, yet satisfies the above condition. + + The usual bijector API is modified in the case `_is_injective is False` (see + method docstrings for specifics). Here we show by example the `AbsoluteValue` + bijector. In this case, the domain `D = (-inf, inf)`, can be partitioned + into `D1 = (-inf, 0)`, `D2 = {0}`, and `D3 = (0, inf)`. Let `gi` be the + restriction of `g` to `Di`, then both `g1` and `g3` are bijections onto + `(0, inf)`, with `g1^{-1}(y) = -y`, and `g3^{-1}(y) = y`. We will use + `g1` and `g3` to define bijector methods over `D1` and `D3`. `D2 = {0}` is + an oddball in that `g2` is one to one, and the derivative is not well defined. + Fortunately, when considering transformations of probability densities + (e.g. in `TransformedDistribution`), sets of measure zero have no effect in + theory, and only a small effect in 32 or 64 bit precision. 
For that reason, + we define `inverse(0)` and `inverse_log_det_jacobian(0)` both as `[0, 0]`, + which is convenient and results in a left-semicontinuous pdf. + + + ```python + abs = tfp.distributions.bijectors.AbsoluteValue() + + abs.forward(-1.) + ==> 1. + + abs.forward(1.) + ==> 1. + + abs.inverse(1.) + ==> (-1., 1.) + + # The |dX/dY| is constant, == 1. So Log|dX/dY| == 0. + abs.inverse_log_det_jacobian(1., event_ndims=0) + ==> (0., 0.) + + # Special case handling of 0. + abs.inverse(0.) + ==> (0., 0.) + + abs.inverse_log_det_jacobian(0., event_ndims=0) + ==> (0., 0.) + ``` + + """ + + @abc.abstractmethod + def __init__(self, + graph_parents=None, + is_constant_jacobian=False, + validate_args=False, + dtype=None, + forward_min_event_ndims=None, + inverse_min_event_ndims=None, + name=None): + """Constructs Bijector. + + A `Bijector` transforms random variables into new random variables. + + Examples: + + ```python + # Create the Y = g(X) = X transform. + identity = Identity() + + # Create the Y = g(X) = exp(X) transform. + exp = Exp() + ``` + + See `Bijector` subclass docstring for more details and specific examples. + + Args: + graph_parents: Python list of graph prerequisites of this `Bijector`. + is_constant_jacobian: Python `bool` indicating that the Jacobian matrix is + not a function of the input. + validate_args: Python `bool`, default `False`. Whether to validate input + with asserts. If `validate_args` is `False`, and the inputs are invalid, + correct behavior is not guaranteed. + dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not + enforced. + forward_min_event_ndims: Python `integer` indicating the minimum number of + dimensions `forward` operates on. + inverse_min_event_ndims: Python `integer` indicating the minimum number of + dimensions `inverse` operates on. Will be set to + `forward_min_event_ndims` by default, if no value is provided. + name: The name to give Ops created by the initializer. 
+ + Raises: + ValueError: If neither `forward_min_event_ndims` and + `inverse_min_event_ndims` are specified, or if either of them is + negative. + ValueError: If a member of `graph_parents` is not a `Tensor`. + """ + self._graph_parents = graph_parents or [] + + if forward_min_event_ndims is None and inverse_min_event_ndims is None: + raise ValueError("Must specify at least one of `forward_min_event_ndims` " + "and `inverse_min_event_ndims`.") + elif inverse_min_event_ndims is None: + inverse_min_event_ndims = forward_min_event_ndims + elif forward_min_event_ndims is None: + forward_min_event_ndims = inverse_min_event_ndims + + if not isinstance(forward_min_event_ndims, int): + raise TypeError("Expected forward_min_event_ndims to be of " + "type int, got {}".format( + type(forward_min_event_ndims).__name__)) + + if not isinstance(inverse_min_event_ndims, int): + raise TypeError("Expected inverse_min_event_ndims to be of " + "type int, got {}".format( + type(inverse_min_event_ndims).__name__)) + + if forward_min_event_ndims < 0: + raise ValueError("forward_min_event_ndims must be a non-negative " + "integer.") + if inverse_min_event_ndims < 0: + raise ValueError("inverse_min_event_ndims must be a non-negative " + "integer.") + + self._forward_min_event_ndims = forward_min_event_ndims + self._inverse_min_event_ndims = inverse_min_event_ndims + self._is_constant_jacobian = is_constant_jacobian + self._constant_ildj_map = {} + self._validate_args = validate_args + self._dtype = dtype + # These dicts can only be accessed using _Mapping.x_key or _Mapping.y_key + self._from_y = {} + self._from_x = {} + if name: + self._name = name + else: + # We want the default convention to be snake_case rather than CamelCase + # since `Chain` uses bijector.name as the kwargs dictionary key. 
+ def camel_to_snake(name): + s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() + self._name = camel_to_snake(type(self).__name__.lstrip("_")) + + for i, t in enumerate(self._graph_parents): + if t is None or not tensor_util.is_tf_type(t): + raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t)) + + @property + def graph_parents(self): + """Returns this `Bijector`'s graph_parents as a Python list.""" + return self._graph_parents + + @property + def forward_min_event_ndims(self): + """Returns the minimal number of dimensions bijector.forward operates on.""" + return self._forward_min_event_ndims + + @property + def inverse_min_event_ndims(self): + """Returns the minimal number of dimensions bijector.inverse operates on.""" + return self._inverse_min_event_ndims + + @property + def is_constant_jacobian(self): + """Returns true iff the Jacobian matrix is not a function of x. + + Note: Jacobian matrix is either constant for both forward and inverse or + neither. + + Returns: + is_constant_jacobian: Python `bool`. + """ + return self._is_constant_jacobian + + @property + def _is_injective(self): + """Returns true iff the forward map `g` is injective (one-to-one function). + + **WARNING** This hidden property and its behavior are subject to change. + + Note: Non-injective maps `g` are supported, provided their domain `D` can + be partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that, + ignoring sets of measure zero, the restriction of `g` to each subset is a + differentiable bijection onto `g(D)`. + + Returns: + is_injective: Python `bool`. 
+ """ + return True + + @property + def validate_args(self): + """Returns True if Tensor arguments will be validated.""" + return self._validate_args + + @property + def dtype(self): + """dtype of `Tensor`s transformable by this distribution.""" + return self._dtype + + @property + def name(self): + """Returns the string name of this `Bijector`.""" + return self._name + + def _forward_event_shape_tensor(self, input_shape): + """Subclass implementation for `forward_event_shape_tensor` function.""" + # By default, we assume event_shape is unchanged. + return input_shape + + def forward_event_shape_tensor(self, + input_shape, + name="forward_event_shape_tensor"): + """Shape of a single sample from a single batch as an `int32` 1D `Tensor`. + + Args: + input_shape: `Tensor`, `int32` vector indicating event-portion shape + passed into `forward` function. + name: name to give to the op + + Returns: + forward_event_shape_tensor: `Tensor`, `int32` vector indicating + event-portion shape after applying `forward`. + """ + with self._name_scope(name, [input_shape]): + input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32, + name="input_shape") + return self._forward_event_shape_tensor(input_shape) + + def _forward_event_shape(self, input_shape): + """Subclass implementation for `forward_event_shape` public function.""" + # By default, we assume event_shape is unchanged. + return input_shape + + def forward_event_shape(self, input_shape): + """Shape of a single sample from a single batch as a `TensorShape`. + + Same meaning as `forward_event_shape_tensor`. May be only partially defined. + + Args: + input_shape: `TensorShape` indicating event-portion shape passed into + `forward` function. + + Returns: + forward_event_shape_tensor: `TensorShape` indicating event-portion shape + after applying `forward`. Possibly unknown. 
+ """ + return self._forward_event_shape(tensor_shape.TensorShape(input_shape)) + + def _inverse_event_shape_tensor(self, output_shape): + """Subclass implementation for `inverse_event_shape_tensor` function.""" + # By default, we assume event_shape is unchanged. + return output_shape + + def inverse_event_shape_tensor(self, + output_shape, + name="inverse_event_shape_tensor"): + """Shape of a single sample from a single batch as an `int32` 1D `Tensor`. + + Args: + output_shape: `Tensor`, `int32` vector indicating event-portion shape + passed into `inverse` function. + name: name to give to the op + + Returns: + inverse_event_shape_tensor: `Tensor`, `int32` vector indicating + event-portion shape after applying `inverse`. + """ + with self._name_scope(name, [output_shape]): + output_shape = ops.convert_to_tensor(output_shape, dtype=dtypes.int32, + name="output_shape") + return self._inverse_event_shape_tensor(output_shape) + + def _inverse_event_shape(self, output_shape): + """Subclass implementation for `inverse_event_shape` public function.""" + # By default, we assume event_shape is unchanged. + return tensor_shape.TensorShape(output_shape) + + def inverse_event_shape(self, output_shape): + """Shape of a single sample from a single batch as a `TensorShape`. + + Same meaning as `inverse_event_shape_tensor`. May be only partially defined. + + Args: + output_shape: `TensorShape` indicating event-portion shape passed into + `inverse` function. + + Returns: + inverse_event_shape_tensor: `TensorShape` indicating event-portion shape + after applying `inverse`. Possibly unknown. 
+ """ + return self._inverse_event_shape(output_shape) + + def _forward(self, x): + """Subclass implementation for `forward` public function.""" + raise NotImplementedError("forward not implemented.") + + def _call_forward(self, x, name, **kwargs): + with self._name_scope(name, [x]): + x = ops.convert_to_tensor(x, name="x") + self._maybe_assert_dtype(x) + if not self._is_injective: # No caching for non-injective + return self._forward(x, **kwargs) + mapping = self._lookup(x=x, kwargs=kwargs) + if mapping.y is not None: + return mapping.y + mapping = mapping.merge(y=self._forward(x, **kwargs)) + self._cache(mapping) + return mapping.y + + def forward(self, x, name="forward"): + """Returns the forward `Bijector` evaluation, i.e., X = g(Y). + + Args: + x: `Tensor`. The input to the "forward" evaluation. + name: The name to give this op. + + Returns: + `Tensor`. + + Raises: + TypeError: if `self.dtype` is specified and `x.dtype` is not + `self.dtype`. + NotImplementedError: if `_forward` is not implemented. + """ + return self._call_forward(x, name) + + def _inverse(self, y): + """Subclass implementation for `inverse` public function.""" + raise NotImplementedError("inverse not implemented") + + def _call_inverse(self, y, name, **kwargs): + with self._name_scope(name, [y]): + y = ops.convert_to_tensor(y, name="y") + self._maybe_assert_dtype(y) + if not self._is_injective: # No caching for non-injective + return self._inverse(y, **kwargs) + mapping = self._lookup(y=y, kwargs=kwargs) + if mapping.x is not None: + return mapping.x + mapping = mapping.merge(x=self._inverse(y, **kwargs)) + self._cache(mapping) + return mapping.x + + def inverse(self, y, name="inverse"): + """Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y). + + Args: + y: `Tensor`. The input to the "inverse" evaluation. + name: The name to give this op. + + Returns: + `Tensor`, if this bijector is injective. 
+ If not injective, returns the k-tuple containing the unique + `k` points `(x1, ..., xk)` such that `g(xi) = y`. + + Raises: + TypeError: if `self.dtype` is specified and `y.dtype` is not + `self.dtype`. + NotImplementedError: if `_inverse` is not implemented. + """ + return self._call_inverse(y, name) + + def _inverse_log_det_jacobian(self, y): + """Subclass implementation of `inverse_log_det_jacobian` public function. + + In particular, this method differs from the public function, in that it + does not take `event_ndims`. Thus, this implements the minimal Jacobian + determinant calculation (i.e. over `inverse_min_event_ndims`). + + Args: + y: `Tensor`. The input to the "inverse_log_det_jacobian" evaluation. + Returns: + inverse_log_det_jacobian: `Tensor`, if this bijector is injective. + If not injective, returns the k-tuple containing jacobians for the + unique `k` points `(x1, ..., xk)` such that `g(xi) = y`. + """ + raise NotImplementedError("inverse_log_det_jacobian not implemented.") + + def _call_inverse_log_det_jacobian(self, y, event_ndims, name, **kwargs): + with self._name_scope(name, [y]): + if event_ndims in self._constant_ildj_map: + return self._constant_ildj_map[event_ndims] + y = ops.convert_to_tensor(y, name="y") + self._maybe_assert_dtype(y) + with ops.control_dependencies(self._check_valid_event_ndims( + min_event_ndims=self.inverse_min_event_ndims, + event_ndims=event_ndims)): + if not self._is_injective: # No caching for non-injective + try: + ildjs = self._inverse_log_det_jacobian(y, **kwargs) + return tuple(self._reduce_jacobian_det_over_event( + y, ildj, self.inverse_min_event_ndims, event_ndims) + for ildj in ildjs) + except NotImplementedError as original_exception: + try: + x = self._inverse(y, **kwargs) + fldjs = self._forward_log_det_jacobian(x, **kwargs) + return tuple(self._reduce_jacobian_det_over_event( + x, -fldj, self.forward_min_event_ndims, event_ndims) + for fldj in fldjs) + except NotImplementedError: + raise 
original_exception + + mapping = self._lookup(y=y, kwargs=kwargs) + if mapping.ildj_map is not None and event_ndims in mapping.ildj_map: + return mapping.ildj_map[event_ndims] + try: + x = None # Not needed; leave cache as is. + ildj = self._inverse_log_det_jacobian(y, **kwargs) + ildj = self._reduce_jacobian_det_over_event( + y, ildj, self.inverse_min_event_ndims, event_ndims) + except NotImplementedError as original_exception: + try: + x = (mapping.x if mapping.x is not None + else self._inverse(y, **kwargs)) + ildj = -self._forward_log_det_jacobian(x, **kwargs) + ildj = self._reduce_jacobian_det_over_event( + x, ildj, self.forward_min_event_ndims, event_ndims) + except NotImplementedError: + raise original_exception + + mapping = mapping.merge(x=x, ildj_map={event_ndims: ildj}) + self._cache(mapping) + if self.is_constant_jacobian: + self._constant_ildj_map[event_ndims] = ildj + return ildj + + def inverse_log_det_jacobian( + self, y, event_ndims, name="inverse_log_det_jacobian"): + """Returns the (log o det o Jacobian o inverse)(y). + + Mathematically, returns: `log(det(dX/dY))(Y)`. (Recall that: `X=g^{-1}(Y)`.) + + Note that `forward_log_det_jacobian` is the negative of this function, + evaluated at `g^{-1}(y)`. + + Args: + y: `Tensor`. The input to the "inverse" Jacobian determinant evaluation. + event_ndims: Number of dimensions in the probabilistic events being + transformed. Must be greater than or equal to + `self.inverse_min_event_ndims`. The result is summed over the final + dimensions to produce a scalar Jacobian determinant for each event, + i.e. it has shape `y.shape.ndims - event_ndims` dimensions. + name: The name to give this op. + + Returns: + `Tensor`, if this bijector is injective. + If not injective, returns the tuple of local log det + Jacobians, `log(det(Dg_i^{-1}(y)))`, where `g_i` is the restriction + of `g` to the `ith` partition `Di`. + + Raises: + TypeError: if `self.dtype` is specified and `y.dtype` is not + `self.dtype`. 
+ NotImplementedError: if `_inverse_log_det_jacobian` is not implemented. + """ + return self._call_inverse_log_det_jacobian(y, event_ndims, name) + + def _forward_log_det_jacobian(self, x): + """Subclass implementation of `forward_log_det_jacobian` public function. + + In particular, this method differs from the public function, in that it + does not take `event_ndims`. Thus, this implements the minimal Jacobian + determinant calculation (i.e. over `forward_min_event_ndims`). + + Args: + x: `Tensor`. The input to the "forward_log_det_jacobian" evaluation. + + Returns: + forward_log_det_jacobian: `Tensor`, if this bijector is injective. + If not injective, returns the k-tuple containing jacobians for the + unique `k` points `(x1, ..., xk)` such that `g(xi) = y`. + """ + + raise NotImplementedError( + "forward_log_det_jacobian not implemented.") + + def _call_forward_log_det_jacobian(self, x, event_ndims, name, **kwargs): + if not self._is_injective: + raise NotImplementedError( + "forward_log_det_jacobian cannot be implemented for non-injective " + "transforms.") + with self._name_scope(name, [x]): + with ops.control_dependencies(self._check_valid_event_ndims( + min_event_ndims=self.forward_min_event_ndims, + event_ndims=event_ndims)): + if event_ndims in self._constant_ildj_map: + # Need "-1. *" to avoid invalid-unary-operand-type linter warning. + return -1. * self._constant_ildj_map[event_ndims] + x = ops.convert_to_tensor(x, name="x") + self._maybe_assert_dtype(x) + if not self._is_injective: # No caching for non-injective + try: + fldjs = self._forward_log_det_jacobian(x, **kwargs) # No caching. 
+            return tuple(self._reduce_jacobian_det_over_event(
+                x, fldj, self.forward_min_event_ndims, event_ndims)
+                         for fldj in fldjs)
+          except NotImplementedError as original_exception:
+            try:
+              y = self._forward(x, **kwargs)
+              ildjs = self._inverse_log_det_jacobian(y, **kwargs)
+              return tuple(self._reduce_jacobian_det_over_event(
+                  y, -ildj, self.inverse_min_event_ndims, event_ndims)
+                           for ildj in ildjs)
+            except NotImplementedError:
+              raise original_exception
+        mapping = self._lookup(x=x, kwargs=kwargs)
+        if mapping.ildj_map is not None and event_ndims in mapping.ildj_map:
+          return -mapping.ildj_map[event_ndims]
+        try:
+          y = None  # Not needed; leave cache as is.
+          ildj = -self._forward_log_det_jacobian(x, **kwargs)
+          ildj = self._reduce_jacobian_det_over_event(
+              x, ildj, self.forward_min_event_ndims, event_ndims)
+        except NotImplementedError as original_exception:
+          try:
+            y = (mapping.y if mapping.y is not None
+                 else self._forward(x, **kwargs))
+            ildj = self._inverse_log_det_jacobian(y, **kwargs)
+            ildj = self._reduce_jacobian_det_over_event(
+                y, ildj, self.inverse_min_event_ndims, event_ndims)
+          except NotImplementedError:
+            raise original_exception
+        mapping = mapping.merge(y=y, ildj_map={event_ndims: ildj})
+        self._cache(mapping)
+        if self.is_constant_jacobian:
+          self._constant_ildj_map[event_ndims] = ildj
+        return -ildj
+
+  def forward_log_det_jacobian(
+      self, x, event_ndims, name="forward_log_det_jacobian"):
+    """Returns the forward_log_det_jacobian.
+
+    Args:
+      x: `Tensor`. The input to the "forward" Jacobian determinant evaluation.
+      event_ndims: Number of dimensions in the probabilistic events being
+        transformed. Must be greater than or equal to
+        `self.forward_min_event_ndims`. The result is summed over the final
+        dimensions to produce a scalar Jacobian determinant for each event,
+        i.e. it has shape `x.shape.ndims - event_ndims` dimensions.
+      name: The name to give this op.
+
+    Returns:
+      `Tensor`, if this bijector is injective.
+ If not injective this is not implemented. + + Raises: + TypeError: if `self.dtype` is specified and `y.dtype` is not + `self.dtype`. + NotImplementedError: if neither `_forward_log_det_jacobian` + nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented, or + this is a non-injective bijector. + """ + return self._call_forward_log_det_jacobian(x, event_ndims, name) + + @contextlib.contextmanager + def _name_scope(self, name=None, values=None): + """Helper function to standardize op scope.""" + with ops.name_scope(self.name): + with ops.name_scope( + name, values=(values or []) + self.graph_parents) as scope: + yield scope + + def _maybe_assert_dtype(self, x): + """Helper to check dtype when self.dtype is known.""" + if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype: + raise TypeError("Input had dtype %s but expected %s." % + (self.dtype, x.dtype)) + + def _cache(self, mapping): + """Helper which stores mapping info in forward/inverse dicts.""" + # Merging from lookup is an added check that we're not overwriting anything + # which is not None. + mapping = mapping.merge(mapping=self._lookup( + mapping.x, mapping.y, mapping.kwargs)) + if mapping.x is None and mapping.y is None: + raise ValueError("Caching expects at least one of (x,y) to be known, " + "i.e., not None.") + self._from_x[mapping.x_key] = mapping + self._from_y[mapping.y_key] = mapping + + def _lookup(self, x=None, y=None, kwargs=None): + """Helper which retrieves mapping info from forward/inverse dicts.""" + mapping = _Mapping(x=x, y=y, kwargs=kwargs) + # Since _cache requires both x,y to be set, we only need to do one cache + # lookup since the mapping is always in both or neither. 
+ if mapping.x is not None: + return self._from_x.get(mapping.x_key, mapping) + if mapping.y is not None: + return self._from_y.get(mapping.y_key, mapping) + return mapping + + def _reduce_jacobian_det_over_event( + self, y, ildj, min_event_ndims, event_ndims): + """Reduce jacobian over event_ndims - min_event_ndims.""" + # In this case, we need to tile the Jacobian over the event and reduce. + y_rank = array_ops.rank(y) + y_shape = array_ops.shape(y)[ + y_rank - event_ndims : y_rank - min_event_ndims] + + ones = array_ops.ones(y_shape, ildj.dtype) + reduced_ildj = math_ops.reduce_sum( + ones * ildj, + axis=self._get_event_reduce_dims(min_event_ndims, event_ndims)) + # The multiplication by ones can change the inferred static shape so we try + # to recover as much as possible. + event_ndims_ = self._maybe_get_static_event_ndims(event_ndims) + if (event_ndims_ is not None and + y.shape.ndims is not None and + ildj.shape.ndims is not None): + y_shape = y.shape[y.shape.ndims - event_ndims_ : + y.shape.ndims - min_event_ndims] + broadcast_shape = array_ops.broadcast_static_shape(ildj.shape, y_shape) + reduced_ildj.set_shape( + broadcast_shape[: broadcast_shape.ndims - ( + event_ndims_ - min_event_ndims)]) + + return reduced_ildj + + def _get_event_reduce_dims(self, min_event_ndims, event_ndims): + """Compute the reduction dimensions given event_ndims.""" + event_ndims_ = self._maybe_get_static_event_ndims(event_ndims) + + if event_ndims_ is not None: + return [-index for index in range(1, event_ndims_ - min_event_ndims + 1)] + else: + reduce_ndims = event_ndims - min_event_ndims + return math_ops.range(-reduce_ndims, 0) + + def _check_valid_event_ndims(self, min_event_ndims, event_ndims): + """Check whether event_ndims is at least min_event_ndims.""" + event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims") + event_ndims_ = tensor_util.constant_value(event_ndims) + assertions = [] + + if not event_ndims.dtype.is_integer: + raise ValueError("Expected 
integer dtype, got dtype {}".format(
+          event_ndims.dtype))
+
+    if event_ndims_ is not None:
+      if event_ndims.shape.ndims != 0:
+        raise ValueError("Expected scalar event_ndims, got shape {}".format(
+            event_ndims.shape))
+      if min_event_ndims > event_ndims_:
+        raise ValueError("event_ndims ({}) must be larger than "
+                         "min_event_ndims ({})".format(
+                             event_ndims_, min_event_ndims))
+    elif self.validate_args:
+      assertions += [
+          check_ops.assert_greater_equal(event_ndims, min_event_ndims)]
+
+    if event_ndims.shape.is_fully_defined():
+      if event_ndims.shape.ndims != 0:
+        raise ValueError("Expected scalar shape, got ndims {}".format(
+            event_ndims.shape.ndims))
+
+    elif self.validate_args:
+      assertions += [
+          check_ops.assert_rank(event_ndims, 0, message="Expected scalar.")]
+    return assertions
+
+  def _maybe_get_static_event_ndims(self, event_ndims):
+    """Helper which tries to return an integer static value."""
+    event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
+
+    if isinstance(event_ndims_, (np.generic, np.ndarray)):
+      if event_ndims_.dtype not in (np.int32, np.int64):
+        raise ValueError("Expected integer dtype, got dtype {}".format(
+            event_ndims_.dtype))
+
+      if isinstance(event_ndims_, np.ndarray) and len(event_ndims_.shape):
+        raise ValueError("Expected a scalar integer, got {}".format(
+            event_ndims_))
+      event_ndims_ = int(event_ndims_)
+
+    return event_ndims_
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_test_util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_test_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..48433c568e60205ab4bb4625986baa14ee98af1c
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/bijector_test_util.py
@@ -0,0 +1,221 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Bijector unit-test utilities.""" + +import numpy as np + +from tensorflow.python.framework import ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.distributions import uniform as uniform_lib + + +def assert_finite(array): + if not np.isfinite(array).all(): + raise AssertionError("array was not all finite. %s" % array[:15]) + + +def assert_strictly_increasing(array): + np.testing.assert_array_less(0., np.diff(array)) + + +def assert_strictly_decreasing(array): + np.testing.assert_array_less(np.diff(array), 0.) + + +def assert_strictly_monotonic(array): + if array[0] < array[-1]: + assert_strictly_increasing(array) + else: + assert_strictly_decreasing(array) + + +def assert_scalar_congruency(bijector, + lower_x, + upper_x, + n=int(10e3), + rtol=0.01, + sess=None): + """Assert `bijector`'s forward/inverse/inverse_log_det_jacobian are congruent. + + We draw samples `X ~ U(lower_x, upper_x)`, then feed these through the + `bijector` in order to check that: + + 1. the forward is strictly monotonic. + 2. the forward/inverse methods are inverses of each other. + 3. the jacobian is the correct change of measure. + + This can only be used for a Bijector mapping open subsets of the real line + to themselves. 
This is due to the fact that this test compares the `prob` + before/after transformation with the Lebesgue measure on the line. + + Args: + bijector: Instance of Bijector + lower_x: Python scalar. + upper_x: Python scalar. Must have `lower_x < upper_x`, and both must be in + the domain of the `bijector`. The `bijector` should probably not produce + huge variation in values in the interval `(lower_x, upper_x)`, or else + the variance based check of the Jacobian will require small `rtol` or + huge `n`. + n: Number of samples to draw for the checks. + rtol: Positive number. Used for the Jacobian check. + sess: `tf.compat.v1.Session`. Defaults to the default session. + + Raises: + AssertionError: If tests fail. + """ + # Checks and defaults. + if sess is None: + sess = ops.get_default_session() + + # Should be monotonic over this interval + ten_x_pts = np.linspace(lower_x, upper_x, num=10).astype(np.float32) + if bijector.dtype is not None: + ten_x_pts = ten_x_pts.astype(bijector.dtype.as_numpy_dtype) + forward_on_10_pts = bijector.forward(ten_x_pts) + + # Set the lower/upper limits in the range of the bijector. + lower_y, upper_y = sess.run( + [bijector.forward(lower_x), bijector.forward(upper_x)]) + if upper_y < lower_y: # If bijector.forward is a decreasing function. + lower_y, upper_y = upper_y, lower_y + + # Uniform samples from the domain, range. + uniform_x_samps = uniform_lib.Uniform( + low=lower_x, high=upper_x).sample(n, seed=0) + uniform_y_samps = uniform_lib.Uniform( + low=lower_y, high=upper_y).sample(n, seed=1) + + # These compositions should be the identity. + inverse_forward_x = bijector.inverse(bijector.forward(uniform_x_samps)) + forward_inverse_y = bijector.forward(bijector.inverse(uniform_y_samps)) + + # For a < b, and transformation y = y(x), + # (b - a) = \int_a^b dx = \int_{y(a)}^{y(b)} |dx/dy| dy + # "change_measure_dy_dx" below is a Monte Carlo approximation to the right + # hand side, which should then be close to the left, which is (b - a). 
+ # We assume event_ndims=0 because we assume scalar -> scalar. The log_det + # methods will handle whether they expect event_ndims > 0. + dy_dx = math_ops.exp(bijector.inverse_log_det_jacobian( + uniform_y_samps, event_ndims=0)) + # E[|dx/dy|] under Uniform[lower_y, upper_y] + # = \int_{y(a)}^{y(b)} |dx/dy| dP(u), where dP(u) is the uniform measure + expectation_of_dy_dx_under_uniform = math_ops.reduce_mean(dy_dx) + # dy = dP(u) * (upper_y - lower_y) + change_measure_dy_dx = ( + (upper_y - lower_y) * expectation_of_dy_dx_under_uniform) + + # We'll also check that dy_dx = 1 / dx_dy. + dx_dy = math_ops.exp( + bijector.forward_log_det_jacobian( + bijector.inverse(uniform_y_samps), event_ndims=0)) + + [ + forward_on_10_pts_v, + dy_dx_v, + dx_dy_v, + change_measure_dy_dx_v, + uniform_x_samps_v, + uniform_y_samps_v, + inverse_forward_x_v, + forward_inverse_y_v, + ] = sess.run([ + forward_on_10_pts, + dy_dx, + dx_dy, + change_measure_dy_dx, + uniform_x_samps, + uniform_y_samps, + inverse_forward_x, + forward_inverse_y, + ]) + + assert_strictly_monotonic(forward_on_10_pts_v) + # Composition of forward/inverse should be the identity. + np.testing.assert_allclose( + inverse_forward_x_v, uniform_x_samps_v, atol=1e-5, rtol=1e-3) + np.testing.assert_allclose( + forward_inverse_y_v, uniform_y_samps_v, atol=1e-5, rtol=1e-3) + # Change of measure should be correct. + np.testing.assert_allclose( + upper_x - lower_x, change_measure_dy_dx_v, atol=0, rtol=rtol) + # Inverse Jacobian should be equivalent to the reciprocal of the forward + # Jacobian. + np.testing.assert_allclose( + dy_dx_v, np.divide(1., dx_dy_v), atol=1e-5, rtol=1e-3) + + +def assert_bijective_and_finite( + bijector, x, y, event_ndims, atol=0, rtol=1e-5, sess=None): + """Assert that forward/inverse (along with jacobians) are inverses and finite. + + It is recommended to use x and y values that are very very close to the edge + of the Bijector's domain. + + Args: + bijector: A Bijector instance. 
+ x: np.array of values in the domain of bijector.forward. + y: np.array of values in the domain of bijector.inverse. + event_ndims: Integer describing the number of event dimensions this bijector + operates on. + atol: Absolute tolerance. + rtol: Relative tolerance. + sess: TensorFlow session. Defaults to the default session. + + Raises: + AssertionError: If tests fail. + """ + sess = sess or ops.get_default_session() + + # These are the incoming points, but people often create a crazy range of + # values for which these end up being bad, especially in 16bit. + assert_finite(x) + assert_finite(y) + + f_x = bijector.forward(x) + g_y = bijector.inverse(y) + + [ + x_from_x, + y_from_y, + ildj_f_x, + fldj_x, + ildj_y, + fldj_g_y, + f_x_v, + g_y_v, + ] = sess.run([ + bijector.inverse(f_x), + bijector.forward(g_y), + bijector.inverse_log_det_jacobian(f_x, event_ndims=event_ndims), + bijector.forward_log_det_jacobian(x, event_ndims=event_ndims), + bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims), + bijector.forward_log_det_jacobian(g_y, event_ndims=event_ndims), + f_x, + g_y, + ]) + + assert_finite(x_from_x) + assert_finite(y_from_y) + assert_finite(ildj_f_x) + assert_finite(fldj_x) + assert_finite(ildj_y) + assert_finite(fldj_g_y) + assert_finite(f_x_v) + assert_finite(g_y_v) + + np.testing.assert_allclose(x_from_x, x, atol=atol, rtol=rtol) + np.testing.assert_allclose(y_from_y, y, atol=atol, rtol=rtol) + np.testing.assert_allclose(-ildj_f_x, fldj_x, atol=atol, rtol=rtol) + np.testing.assert_allclose(-ildj_y, fldj_g_y, atol=atol, rtol=rtol) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/categorical.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..2054271c71ce81200674a005841d0fb5bc7dd789 --- /dev/null +++ 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/categorical.py @@ -0,0 +1,345 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The Categorical distribution class.""" + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import kullback_leibler +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +def _broadcast_cat_event_and_params(event, params, base_dtype): + """Broadcasts the event or distribution parameters.""" + if event.dtype.is_integer: + pass + elif event.dtype.is_floating: + # When `validate_args=True` we've already ensured int/float casting + # is closed. 
+ event = math_ops.cast(event, dtype=dtypes.int32) + else: + raise TypeError("`value` should have integer `dtype` or " + "`self.dtype` ({})".format(base_dtype)) + shape_known_statically = ( + params.shape.ndims is not None and + params.shape[:-1].is_fully_defined() and + event.shape.is_fully_defined()) + if not shape_known_statically or params.shape[:-1] != event.shape: + params *= array_ops.ones_like(event[..., array_ops.newaxis], + dtype=params.dtype) + params_shape = array_ops.shape(params)[:-1] + event *= array_ops.ones(params_shape, dtype=event.dtype) + if params.shape.ndims is not None: + event.set_shape(tensor_shape.TensorShape(params.shape[:-1])) + + return event, params + + +@tf_export(v1=["distributions.Categorical"]) +class Categorical(distribution.Distribution): + """Categorical distribution. + + The Categorical distribution is parameterized by either probabilities or + log-probabilities of a set of `K` classes. It is defined over the integers + `{0, 1, ..., K}`. + + The Categorical distribution is closely related to the `OneHotCategorical` and + `Multinomial` distributions. The Categorical distribution can be intuited as + generating samples according to `argmax{ OneHotCategorical(probs) }` itself + being identical to `argmax{ Multinomial(probs, total_count=1) }`. + + #### Mathematical Details + + The probability mass function (pmf) is, + + ```none + pmf(k; pi) = prod_j pi_j**[k == j] + ``` + + #### Pitfalls + + The number of classes, `K`, must not exceed: + - the largest integer representable by `self.dtype`, i.e., + `2**(mantissa_bits+1)` (IEEE 754), + - the maximum `Tensor` index, i.e., `2**31-1`. + + In other words, + + ```python + K <= min(2**31-1, { + tf.float16: 2**11, + tf.float32: 2**24, + tf.float64: 2**53 }[param.dtype]) + ``` + + Note: This condition is validated only when `self.validate_args = True`. + + #### Examples + + Creates a 3-class distribution with the 2nd class being most likely. 
+ + ```python + dist = Categorical(probs=[0.1, 0.5, 0.4]) + n = 1e4 + empirical_prob = tf.cast( + tf.histogram_fixed_width( + dist.sample(int(n)), + [0., 2], + nbins=3), + dtype=tf.float32) / n + # ==> array([ 0.1005, 0.5037, 0.3958], dtype=float32) + ``` + + Creates a 3-class distribution with the 2nd class being most likely. + Parameterized by [logits](https://en.wikipedia.org/wiki/Logit) rather than + probabilities. + + ```python + dist = Categorical(logits=np.log([0.1, 0.5, 0.4]) + n = 1e4 + empirical_prob = tf.cast( + tf.histogram_fixed_width( + dist.sample(int(n)), + [0., 2], + nbins=3), + dtype=tf.float32) / n + # ==> array([0.1045, 0.5047, 0.3908], dtype=float32) + ``` + + Creates a 3-class distribution with the 3rd class being most likely. + The distribution functions can be evaluated on counts. + + ```python + # counts is a scalar. + p = [0.1, 0.4, 0.5] + dist = Categorical(probs=p) + dist.prob(0) # Shape [] + + # p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match counts. + counts = [1, 0] + dist.prob(counts) # Shape [2] + + # p will be broadcast to shape [3, 5, 7, 3] to match counts. + counts = [[...]] # Shape [5, 7, 3] + dist.prob(counts) # Shape [5, 7, 3] + ``` + + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__( + self, + logits=None, + probs=None, + dtype=dtypes.int32, + validate_args=False, + allow_nan_stats=True, + name="Categorical"): + """Initialize Categorical distributions using class log-probabilities. + + Args: + logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities + of a set of Categorical distributions. 
The first `N - 1` dimensions + index into a batch of independent distributions and the last dimension + represents a vector of logits for each class. Only one of `logits` or + `probs` should be passed in. + probs: An N-D `Tensor`, `N >= 1`, representing the probabilities + of a set of Categorical distributions. The first `N - 1` dimensions + index into a batch of independent distributions and the last dimension + represents a vector of probabilities for each class. Only one of + `logits` or `probs` should be passed in. + dtype: The type of the event samples (default: int32). + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. 
+ """ + parameters = dict(locals()) + with ops.name_scope(name, values=[logits, probs]) as name: + self._logits, self._probs = distribution_util.get_logits_and_probs( + logits=logits, + probs=probs, + validate_args=validate_args, + multidimensional=True, + name=name) + + if validate_args: + self._logits = distribution_util.embed_check_categorical_event_shape( + self._logits) + + logits_shape_static = self._logits.get_shape().with_rank_at_least(1) + if logits_shape_static.ndims is not None: + self._batch_rank = ops.convert_to_tensor( + logits_shape_static.ndims - 1, + dtype=dtypes.int32, + name="batch_rank") + else: + with ops.name_scope(name="batch_rank"): + self._batch_rank = array_ops.rank(self._logits) - 1 + + logits_shape = array_ops.shape(self._logits, name="logits_shape") + if tensor_shape.dimension_value(logits_shape_static[-1]) is not None: + self._event_size = ops.convert_to_tensor( + logits_shape_static.dims[-1].value, + dtype=dtypes.int32, + name="event_size") + else: + with ops.name_scope(name="event_size"): + self._event_size = logits_shape[self._batch_rank] + + if logits_shape_static[:-1].is_fully_defined(): + self._batch_shape_val = constant_op.constant( + logits_shape_static[:-1].as_list(), + dtype=dtypes.int32, + name="batch_shape") + else: + with ops.name_scope(name="batch_shape"): + self._batch_shape_val = logits_shape[:-1] + super(Categorical, self).__init__( + dtype=dtype, + reparameterization_type=distribution.NOT_REPARAMETERIZED, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + parameters=parameters, + graph_parents=[self._logits, + self._probs], + name=name) + + @property + def event_size(self): + """Scalar `int32` tensor: the number of classes.""" + return self._event_size + + @property + def logits(self): + """Vector of coordinatewise logits.""" + return self._logits + + @property + def probs(self): + """Vector of coordinatewise probabilities.""" + return self._probs + + def _batch_shape_tensor(self): + return 
array_ops.identity(self._batch_shape_val) + + def _batch_shape(self): + return self.logits.get_shape()[:-1] + + def _event_shape_tensor(self): + return constant_op.constant([], dtype=dtypes.int32) + + def _event_shape(self): + return tensor_shape.TensorShape([]) + + def _sample_n(self, n, seed=None): + if self.logits.get_shape().ndims == 2: + logits_2d = self.logits + else: + logits_2d = array_ops.reshape(self.logits, [-1, self.event_size]) + sample_dtype = dtypes.int64 if self.dtype.size > 4 else dtypes.int32 + draws = random_ops.multinomial( + logits_2d, n, seed=seed, output_dtype=sample_dtype) + draws = array_ops.reshape( + array_ops.transpose(draws), + array_ops.concat([[n], self.batch_shape_tensor()], 0)) + return math_ops.cast(draws, self.dtype) + + def _cdf(self, k): + k = ops.convert_to_tensor(k, name="k") + if self.validate_args: + k = distribution_util.embed_check_integer_casting_closed( + k, target_dtype=dtypes.int32) + + k, probs = _broadcast_cat_event_and_params( + k, self.probs, base_dtype=self.dtype.base_dtype) + + # batch-flatten everything in order to use `sequence_mask()`. + batch_flattened_probs = array_ops.reshape(probs, + (-1, self._event_size)) + batch_flattened_k = array_ops.reshape(k, [-1]) + + to_sum_over = array_ops.where( + array_ops.sequence_mask(batch_flattened_k, self._event_size), + batch_flattened_probs, + array_ops.zeros_like(batch_flattened_probs)) + batch_flattened_cdf = math_ops.reduce_sum(to_sum_over, axis=-1) + # Reshape back to the shape of the argument. 
+ return array_ops.reshape(batch_flattened_cdf, array_ops.shape(k)) + + def _log_prob(self, k): + k = ops.convert_to_tensor(k, name="k") + if self.validate_args: + k = distribution_util.embed_check_integer_casting_closed( + k, target_dtype=dtypes.int32) + k, logits = _broadcast_cat_event_and_params( + k, self.logits, base_dtype=self.dtype.base_dtype) + + # pylint: disable=invalid-unary-operand-type + return -nn_ops.sparse_softmax_cross_entropy_with_logits( + labels=k, + logits=logits) + + def _entropy(self): + return -math_ops.reduce_sum( + nn_ops.log_softmax(self.logits) * self.probs, axis=-1) + + def _mode(self): + ret = math_ops.argmax(self.logits, axis=self._batch_rank) + ret = math_ops.cast(ret, self.dtype) + ret.set_shape(self.batch_shape) + return ret + + +@kullback_leibler.RegisterKL(Categorical, Categorical) +def _kl_categorical_categorical(a, b, name=None): + """Calculate the batched KL divergence KL(a || b) with a and b Categorical. + + Args: + a: instance of a Categorical distribution object. + b: instance of a Categorical distribution object. + name: (optional) Name to use for created operations. + default is "kl_categorical_categorical". 
+ + Returns: + Batchwise KL(a || b) + """ + with ops.name_scope(name, "kl_categorical_categorical", + values=[a.logits, b.logits]): + # sum(probs log(probs / (1 - probs))) + delta_log_probs1 = (nn_ops.log_softmax(a.logits) - + nn_ops.log_softmax(b.logits)) + return math_ops.reduce_sum(nn_ops.softmax(a.logits) * delta_log_probs1, + axis=-1) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet.py new file mode 100644 index 0000000000000000000000000000000000000000..cac99f8e0c071fd539fca7d22bd118c83a9ad5d2 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet.py @@ -0,0 +1,410 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""The Dirichlet distribution class.""" + +import numpy as np + +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops import special_math_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import kullback_leibler +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "Dirichlet", +] + + +_dirichlet_sample_note = """Note: `value` must be a non-negative tensor with +dtype `self.dtype` and be in the `(self.event_shape() - 1)`-simplex, i.e., +`tf.reduce_sum(value, -1) = 1`. It must have a shape compatible with +`self.batch_shape() + self.event_shape()`.""" + + +@tf_export(v1=["distributions.Dirichlet"]) +class Dirichlet(distribution.Distribution): + """Dirichlet distribution. + + The Dirichlet distribution is defined over the + [`(k-1)`-simplex](https://en.wikipedia.org/wiki/Simplex) using a positive, + length-`k` vector `concentration` (`k > 1`). The Dirichlet is identically the + Beta distribution when `k = 2`. + + #### Mathematical Details + + The Dirichlet is a distribution over the open `(k-1)`-simplex, i.e., + + ```none + S^{k-1} = { (x_0, ..., x_{k-1}) in R^k : sum_j x_j = 1 and all_j x_j > 0 }. 
+ ``` + + The probability density function (pdf) is, + + ```none + pdf(x; alpha) = prod_j x_j**(alpha_j - 1) / Z + Z = prod_j Gamma(alpha_j) / Gamma(sum_j alpha_j) + ``` + + where: + + * `x in S^{k-1}`, i.e., the `(k-1)`-simplex, + * `concentration = alpha = [alpha_0, ..., alpha_{k-1}]`, `alpha_j > 0`, + * `Z` is the normalization constant aka the [multivariate beta function]( + https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function), + and, + * `Gamma` is the [gamma function]( + https://en.wikipedia.org/wiki/Gamma_function). + + The `concentration` represents mean total counts of class occurrence, i.e., + + ```none + concentration = alpha = mean * total_concentration + ``` + + where `mean` in `S^{k-1}` and `total_concentration` is a positive real number + representing a mean total count. + + Distribution parameters are automatically broadcast in all functions; see + examples for details. + + Warning: Some components of the samples can be zero due to finite precision. + This happens more often when some of the concentrations are very small. + Make sure to round the samples to `np.finfo(dtype).tiny` before computing the + density. + + Samples of this distribution are reparameterized (pathwise differentiable). + The derivatives are computed using the approach described in + (Figurnov et al., 2018). + + #### Examples + + ```python + import tensorflow_probability as tfp + tfd = tfp.distributions + + # Create a single trivariate Dirichlet, with the 3rd class being three times + # more frequent than the first. I.e., batch_shape=[], event_shape=[3]. + alpha = [1., 2, 3] + dist = tfd.Dirichlet(alpha) + + dist.sample([4, 5]) # shape: [4, 5, 3] + + # x has one sample, one batch, three classes: + x = [.2, .3, .5] # shape: [3] + dist.prob(x) # shape: [] + + # x has two samples from one batch: + x = [[.1, .4, .5], + [.2, .3, .5]] + dist.prob(x) # shape: [2] + + # alpha will be broadcast to shape [5, 7, 3] to match x. 
+ x = [[...]] # shape: [5, 7, 3] + dist.prob(x) # shape: [5, 7] + ``` + + ```python + # Create batch_shape=[2], event_shape=[3]: + alpha = [[1., 2, 3], + [4, 5, 6]] # shape: [2, 3] + dist = tfd.Dirichlet(alpha) + + dist.sample([4, 5]) # shape: [4, 5, 2, 3] + + x = [.2, .3, .5] + # x will be broadcast as [[.2, .3, .5], + # [.2, .3, .5]], + # thus matching batch_shape [2, 3]. + dist.prob(x) # shape: [2] + ``` + + Compute the gradients of samples w.r.t. the parameters: + + ```python + alpha = tf.constant([1.0, 2.0, 3.0]) + dist = tfd.Dirichlet(alpha) + samples = dist.sample(5) # Shape [5, 3] + loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function + # Unbiased stochastic gradients of the loss function + grads = tf.gradients(loss, alpha) + ``` + + References: + Implicit Reparameterization Gradients: + [Figurnov et al., 2018] + (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) + ([pdf] + (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf)) + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + concentration, + validate_args=False, + allow_nan_stats=True, + name="Dirichlet"): + """Initialize a batch of Dirichlet distributions. + + Args: + concentration: Positive floating-point `Tensor` indicating mean number + of class occurrences; aka "alpha". Implies `self.dtype`, and + `self.batch_shape`, `self.event_shape`, i.e., if + `concentration.shape = [N1, N2, ..., Nm, k]` then + `batch_shape = [N1, N2, ..., Nm]` and + `event_shape = [k]`. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. 
When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + """ + parameters = dict(locals()) + with ops.name_scope(name, values=[concentration]) as name: + self._concentration = self._maybe_assert_valid_concentration( + ops.convert_to_tensor(concentration, name="concentration"), + validate_args) + self._total_concentration = math_ops.reduce_sum(self._concentration, -1) + super(Dirichlet, self).__init__( + dtype=self._concentration.dtype, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + reparameterization_type=distribution.FULLY_REPARAMETERIZED, + parameters=parameters, + graph_parents=[self._concentration, + self._total_concentration], + name=name) + + @property + def concentration(self): + """Concentration parameter; expected counts for that coordinate.""" + return self._concentration + + @property + def total_concentration(self): + """Sum of last dim of concentration parameter.""" + return self._total_concentration + + def _batch_shape_tensor(self): + return array_ops.shape(self.total_concentration) + + def _batch_shape(self): + return self.total_concentration.get_shape() + + def _event_shape_tensor(self): + return array_ops.shape(self.concentration)[-1:] + + def _event_shape(self): + return self.concentration.get_shape().with_rank_at_least(1)[-1:] + + def _sample_n(self, n, seed=None): + gamma_sample = random_ops.random_gamma( + shape=[n], + alpha=self.concentration, + dtype=self.dtype, + seed=seed) + return gamma_sample / math_ops.reduce_sum(gamma_sample, -1, keepdims=True) + + @distribution_util.AppendDocstring(_dirichlet_sample_note) + def _log_prob(self, x): + return self._log_unnormalized_prob(x) - 
self._log_normalization() + + @distribution_util.AppendDocstring(_dirichlet_sample_note) + def _prob(self, x): + return math_ops.exp(self._log_prob(x)) + + def _log_unnormalized_prob(self, x): + x = self._maybe_assert_valid_sample(x) + return math_ops.reduce_sum(math_ops.xlogy(self.concentration - 1., x), -1) + + def _log_normalization(self): + return special_math_ops.lbeta(self.concentration) + + def _entropy(self): + k = math_ops.cast(self.event_shape_tensor()[0], self.dtype) + return ( + self._log_normalization() + + ((self.total_concentration - k) + * math_ops.digamma(self.total_concentration)) + - math_ops.reduce_sum( + (self.concentration - 1.) * math_ops.digamma(self.concentration), + axis=-1)) + + def _mean(self): + return self.concentration / self.total_concentration[..., array_ops.newaxis] + + def _covariance(self): + x = self._variance_scale_term() * self._mean() + # pylint: disable=invalid-unary-operand-type + return array_ops.matrix_set_diag( + -math_ops.matmul( + x[..., array_ops.newaxis], + x[..., array_ops.newaxis, :]), # outer prod + self._variance()) + + def _variance(self): + scale = self._variance_scale_term() + x = scale * self._mean() + return x * (scale - x) + + def _variance_scale_term(self): + """Helper to `_covariance` and `_variance` which computes a shared scale.""" + return math_ops.rsqrt(1. + self.total_concentration[..., array_ops.newaxis]) + + @distribution_util.AppendDocstring( + """Note: The mode is undefined when any `concentration <= 1`. If + `self.allow_nan_stats` is `True`, `NaN` is used for undefined modes. If + `self.allow_nan_stats` is `False` an exception is raised when one or more + modes are undefined.""") + def _mode(self): + k = math_ops.cast(self.event_shape_tensor()[0], self.dtype) + mode = (self.concentration - 1.) 
/ ( + self.total_concentration[..., array_ops.newaxis] - k) + if self.allow_nan_stats: + nan = array_ops.fill( + array_ops.shape(mode), + np.array(np.nan, dtype=self.dtype.as_numpy_dtype()), + name="nan") + return array_ops.where_v2( + math_ops.reduce_all(self.concentration > 1., axis=-1), mode, nan) + return control_flow_ops.with_dependencies([ + check_ops.assert_less( + array_ops.ones([], self.dtype), + self.concentration, + message="Mode undefined when any concentration <= 1"), + ], mode) + + def _maybe_assert_valid_concentration(self, concentration, validate_args): + """Checks the validity of the concentration parameter.""" + if not validate_args: + return concentration + return control_flow_ops.with_dependencies([ + check_ops.assert_positive( + concentration, + message="Concentration parameter must be positive."), + check_ops.assert_rank_at_least( + concentration, 1, + message="Concentration parameter must have >=1 dimensions."), + check_ops.assert_less( + 1, array_ops.shape(concentration)[-1], + message="Concentration parameter must have event_size >= 2."), + ], concentration) + + def _maybe_assert_valid_sample(self, x): + """Checks the validity of a sample.""" + if not self.validate_args: + return x + return control_flow_ops.with_dependencies([ + check_ops.assert_positive(x, message="samples must be positive"), + check_ops.assert_near( + array_ops.ones([], dtype=self.dtype), + math_ops.reduce_sum(x, -1), + message="sample last-dimension must sum to `1`"), + ], x) + + +@kullback_leibler.RegisterKL(Dirichlet, Dirichlet) +def _kl_dirichlet_dirichlet(d1, d2, name=None): + """Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet. + + Args: + d1: instance of a Dirichlet distribution object. + d2: instance of a Dirichlet distribution object. + name: (optional) Name to use for created operations. + default is "kl_dirichlet_dirichlet". 
+ + Returns: + Batchwise KL(d1 || d2) + """ + with ops.name_scope(name, "kl_dirichlet_dirichlet", values=[ + d1.concentration, d2.concentration]): + # The KL between Dirichlet distributions can be derived as follows. We have + # + # Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)] + # + # where B(a) is the multivariate Beta function: + # + # B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n]) + # + # The KL is + # + # KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))} + # + # so we'll need to know the log density of the Dirichlet. This is + # + # log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a) + # + # The only term that matters for the expectations is the log(x[i]). To + # compute the expectation of this term over the Dirichlet density, we can + # use the following facts about the Dirichlet in exponential family form: + # 1. log(x[i]) is a sufficient statistic + # 2. expected sufficient statistics (of any exp family distribution) are + # equal to derivatives of the log normalizer with respect to + # corresponding natural parameters: E{T[i](x)} = dA/d(eta[i]) + # + # To proceed, we can rewrite the Dirichlet density in exponential family + # form as follows: + # + # Dir(x; a) = exp{eta(a) . T(x) - A(a)} + # + # where '.' 
is the dot product of vectors eta and T, and A is a scalar: + # + # eta[i](a) = a[i] - 1 + # T[i](x) = log(x[i]) + # A(a) = log B(a) + # + # Now, we can use fact (2) above to write + # + # E_Dir(x; a)[log(x[i])] + # = dA(a) / da[i] + # = d/da[i] log B(a) + # = d/da[i] (sum_j lgamma(a[j])) - lgamma(sum_j a[j]) + # = digamma(a[i])) - digamma(sum_j a[j]) + # + # Putting it all together, we have + # + # KL[Dir(x; a) || Dir(x; b)] + # = E_Dir(x; a){log(Dir(x; a) / Dir(x; b)} + # = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])} - (lbeta(a) - lbeta(b)) + # = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b) + # = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))] + # - lbeta(a) + lbeta(b)) + + digamma_sum_d1 = math_ops.digamma( + math_ops.reduce_sum(d1.concentration, axis=-1, keepdims=True)) + digamma_diff = math_ops.digamma(d1.concentration) - digamma_sum_d1 + concentration_diff = d1.concentration - d2.concentration + + return (math_ops.reduce_sum(concentration_diff * digamma_diff, axis=-1) - + special_math_ops.lbeta(d1.concentration) + + special_math_ops.lbeta(d2.concentration)) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet_multinomial.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet_multinomial.py new file mode 100644 index 0000000000000000000000000000000000000000..947801cf1bf66dc0b24adbfa706fc3b7d3db0e17 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/dirichlet_multinomial.py @@ -0,0 +1,353 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The DirichletMultinomial distribution class.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops import special_math_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "DirichletMultinomial", +] + + +_dirichlet_multinomial_sample_note = """For each batch of counts, +`value = [n_0, ..., n_{K-1}]`, `P[value]` is the probability that after +sampling `self.total_count` draws from this Dirichlet-Multinomial distribution, +the number of draws falling in class `j` is `n_j`. Since this definition is +[exchangeable](https://en.wikipedia.org/wiki/Exchangeable_random_variables); +different sequences have the same counts so the probability includes a +combinatorial coefficient. + +Note: `value` must be a non-negative tensor with dtype `self.dtype`, have no +fractional components, and such that +`tf.reduce_sum(value, -1) = self.total_count`. 
Its shape must be broadcastable +with `self.concentration` and `self.total_count`.""" + + +@tf_export(v1=["distributions.DirichletMultinomial"]) +class DirichletMultinomial(distribution.Distribution): + """Dirichlet-Multinomial compound distribution. + + The Dirichlet-Multinomial distribution is parameterized by a (batch of) + length-`K` `concentration` vectors (`K > 1`) and a `total_count` number of + trials, i.e., the number of trials per draw from the DirichletMultinomial. It + is defined over a (batch of) length-`K` vector `counts` such that + `tf.reduce_sum(counts, -1) = total_count`. The Dirichlet-Multinomial is + identically the Beta-Binomial distribution when `K = 2`. + + #### Mathematical Details + + The Dirichlet-Multinomial is a distribution over `K`-class counts, i.e., a + length-`K` vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`. + + The probability mass function (pmf) is, + + ```none + pmf(n; alpha, N) = Beta(alpha + n) / (prod_j n_j!) / Z + Z = Beta(alpha) / N! + ``` + + where: + + * `concentration = alpha = [alpha_0, ..., alpha_{K-1}]`, `alpha_j > 0`, + * `total_count = N`, `N` a positive integer, + * `N!` is `N` factorial, and, + * `Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the + [multivariate beta function]( + https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function), + and, + * `Gamma` is the [gamma function]( + https://en.wikipedia.org/wiki/Gamma_function). + + Dirichlet-Multinomial is a [compound distribution]( + https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e., its + samples are generated as follows. + + 1. Choose class probabilities: + `probs = [p_0,...,p_{K-1}] ~ Dir(concentration)` + 2. Draw integers: + `counts = [n_0,...,n_{K-1}] ~ Multinomial(total_count, probs)` + + The last `concentration` dimension parametrizes a single Dirichlet-Multinomial + distribution. 
When calling distribution functions (e.g., `dist.prob(counts)`), + `concentration`, `total_count` and `counts` are broadcast to the same shape. + The last dimension of `counts` corresponds single Dirichlet-Multinomial + distributions. + + Distribution parameters are automatically broadcast in all functions; see + examples for details. + + #### Pitfalls + + The number of classes, `K`, must not exceed: + - the largest integer representable by `self.dtype`, i.e., + `2**(mantissa_bits+1)` (IEE754), + - the maximum `Tensor` index, i.e., `2**31-1`. + + In other words, + + ```python + K <= min(2**31-1, { + tf.float16: 2**11, + tf.float32: 2**24, + tf.float64: 2**53 }[param.dtype]) + ``` + + Note: This condition is validated only when `self.validate_args = True`. + + #### Examples + + ```python + alpha = [1., 2., 3.] + n = 2. + dist = DirichletMultinomial(n, alpha) + ``` + + Creates a 3-class distribution, with the 3rd class is most likely to be + drawn. + The distribution functions can be evaluated on counts. + + ```python + # counts same shape as alpha. + counts = [0., 0., 2.] + dist.prob(counts) # Shape [] + + # alpha will be broadcast to [[1., 2., 3.], [1., 2., 3.]] to match counts. + counts = [[1., 1., 0.], [1., 0., 1.]] + dist.prob(counts) # Shape [2] + + # alpha will be broadcast to shape [5, 7, 3] to match counts. + counts = [[...]] # Shape [5, 7, 3] + dist.prob(counts) # Shape [5, 7] + ``` + + Creates a 2-batch of 3-class distributions. + + ```python + alpha = [[1., 2., 3.], [4., 5., 6.]] # Shape [2, 3] + n = [3., 3.] + dist = DirichletMultinomial(n, alpha) + + # counts will be broadcast to [[2., 1., 0.], [2., 1., 0.]] to match alpha. + counts = [2., 1., 0.] + dist.prob(counts) # Shape [2] + ``` + + """ + + # TODO(b/27419586) Change docstring for dtype of concentration once int + # allowed. + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). 
You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + total_count, + concentration, + validate_args=False, + allow_nan_stats=True, + name="DirichletMultinomial"): + """Initialize a batch of DirichletMultinomial distributions. + + Args: + total_count: Non-negative floating point tensor, whose dtype is the same + as `concentration`. The shape is broadcastable to `[N1,..., Nm]` with + `m >= 0`. Defines this as a batch of `N1 x ... x Nm` different + Dirichlet multinomial distributions. Its components should be equal to + integer values. + concentration: Positive floating point tensor, whose dtype is the + same as `n` with shape broadcastable to `[N1,..., Nm, K]` `m >= 0`. + Defines this as a batch of `N1 x ... x Nm` different `K` class Dirichlet + multinomial distributions. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + """ + parameters = dict(locals()) + with ops.name_scope(name, values=[total_count, concentration]) as name: + # Broadcasting works because: + # * The broadcasting convention is to prepend dimensions of size [1], and + # we use the last dimension for the distribution, whereas + # the batch dimensions are the leading dimensions, which forces the + # distribution dimension to be defined explicitly (i.e. it cannot be + # created automatically by prepending). This forces enough explicitness. 
+ # * All calls involving `counts` eventually require a broadcast between + # `counts` and concentration. + self._total_count = ops.convert_to_tensor(total_count, name="total_count") + if validate_args: + self._total_count = ( + distribution_util.embed_check_nonnegative_integer_form( + self._total_count)) + self._concentration = self._maybe_assert_valid_concentration( + ops.convert_to_tensor(concentration, + name="concentration"), + validate_args) + self._total_concentration = math_ops.reduce_sum(self._concentration, -1) + super(DirichletMultinomial, self).__init__( + dtype=self._concentration.dtype, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + reparameterization_type=distribution.NOT_REPARAMETERIZED, + parameters=parameters, + graph_parents=[self._total_count, + self._concentration], + name=name) + + @property + def total_count(self): + """Number of trials used to construct a sample.""" + return self._total_count + + @property + def concentration(self): + """Concentration parameter; expected prior counts for that coordinate.""" + return self._concentration + + @property + def total_concentration(self): + """Sum of last dim of concentration parameter.""" + return self._total_concentration + + def _batch_shape_tensor(self): + return array_ops.shape(self.total_concentration) + + def _batch_shape(self): + return self.total_concentration.get_shape() + + def _event_shape_tensor(self): + return array_ops.shape(self.concentration)[-1:] + + def _event_shape(self): + # Event shape depends only on total_concentration, not "n". 
+ return self.concentration.get_shape().with_rank_at_least(1)[-1:] + + def _sample_n(self, n, seed=None): + n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32) + k = self.event_shape_tensor()[0] + unnormalized_logits = array_ops.reshape( + math_ops.log(random_ops.random_gamma( + shape=[n], + alpha=self.concentration, + dtype=self.dtype, + seed=seed)), + shape=[-1, k]) + draws = random_ops.multinomial( + logits=unnormalized_logits, + num_samples=n_draws, + seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial")) + x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2) + final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0) + x = array_ops.reshape(x, final_shape) + return math_ops.cast(x, self.dtype) + + @distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note) + def _log_prob(self, counts): + counts = self._maybe_assert_valid_sample(counts) + ordered_prob = ( + special_math_ops.lbeta(self.concentration + counts) + - special_math_ops.lbeta(self.concentration)) + return ordered_prob + distribution_util.log_combinations( + self.total_count, counts) + + @distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note) + def _prob(self, counts): + return math_ops.exp(self._log_prob(counts)) + + def _mean(self): + return self.total_count * (self.concentration / + self.total_concentration[..., array_ops.newaxis]) + + @distribution_util.AppendDocstring( + """The covariance for each batch member is defined as the following: + + ```none + Var(X_j) = n * alpha_j / alpha_0 * (1 - alpha_j / alpha_0) * + (n + alpha_0) / (1 + alpha_0) + ``` + + where `concentration = alpha` and + `total_concentration = alpha_0 = sum_j alpha_j`. 
+ + The covariance between elements in a batch is defined as: + + ```none + Cov(X_i, X_j) = -n * alpha_i * alpha_j / alpha_0 ** 2 * + (n + alpha_0) / (1 + alpha_0) + ``` + """) + def _covariance(self): + x = self._variance_scale_term() * self._mean() + # pylint: disable=invalid-unary-operand-type + return array_ops.matrix_set_diag( + -math_ops.matmul( + x[..., array_ops.newaxis], + x[..., array_ops.newaxis, :]), # outer prod + self._variance()) + + def _variance(self): + scale = self._variance_scale_term() + x = scale * self._mean() + return x * (self.total_count * scale - x) + + def _variance_scale_term(self): + """Helper to `_covariance` and `_variance` which computes a shared scale.""" + # We must take care to expand back the last dim whenever we use the + # total_concentration. + c0 = self.total_concentration[..., array_ops.newaxis] + return math_ops.sqrt((1. + c0 / self.total_count) / (1. + c0)) + + def _maybe_assert_valid_concentration(self, concentration, validate_args): + """Checks the validity of the concentration parameter.""" + if not validate_args: + return concentration + concentration = distribution_util.embed_check_categorical_event_shape( + concentration) + return control_flow_ops.with_dependencies([ + check_ops.assert_positive( + concentration, + message="Concentration parameter must be positive."), + ], concentration) + + def _maybe_assert_valid_sample(self, counts): + """Check counts for proper shape, values, then return tensor version.""" + if not self.validate_args: + return counts + counts = distribution_util.embed_check_nonnegative_integer_form(counts) + return control_flow_ops.with_dependencies([ + check_ops.assert_equal( + self.total_count, math_ops.reduce_sum(counts, -1), + message="counts last-dimension must sum to `self.total_count`"), + ], counts) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distribution.py 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distribution.py new file mode 100644 index 0000000000000000000000000000000000000000..09d9e2a507d7ded2170735b72088c59ae15ab8fa --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distribution.py @@ -0,0 +1,1316 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base classes for probability distributions.""" + +import abc +import contextlib +import types + +import numpy as np + +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.distributions import kullback_leibler +from tensorflow.python.ops.distributions import util +from tensorflow.python.util import deprecation +from tensorflow.python.util import tf_inspect +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "ReparameterizationType", + "FULLY_REPARAMETERIZED", + "NOT_REPARAMETERIZED", + "Distribution", +] + +_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [ + "batch_shape", + 
"batch_shape_tensor", + "cdf", + "covariance", + "cross_entropy", + "entropy", + "event_shape", + "event_shape_tensor", + "kl_divergence", + "log_cdf", + "log_prob", + "log_survival_function", + "mean", + "mode", + "prob", + "sample", + "stddev", + "survival_function", + "variance", +] + + +class _BaseDistribution(metaclass=abc.ABCMeta): + """Abstract base class needed for resolving subclass hierarchy.""" + pass + + +def _copy_fn(fn): + """Create a deep copy of fn. + + Args: + fn: a callable + + Returns: + A `FunctionType`: a deep copy of fn. + + Raises: + TypeError: if `fn` is not a callable. + """ + if not callable(fn): + raise TypeError("fn is not callable: %s" % fn) + # The blessed way to copy a function. copy.deepcopy fails to create a + # non-reference copy. Since: + # types.FunctionType == type(lambda: None), + # and the docstring for the function type states: + # + # function(code, globals[, name[, argdefs[, closure]]]) + # + # Create a function object from a code object and a dictionary. + # ... + # + # Here we can use this to create a new function with the old function's + # code, globals, closure, etc. + return types.FunctionType( + code=fn.__code__, globals=fn.__globals__, + name=fn.__name__, argdefs=fn.__defaults__, + closure=fn.__closure__) + + +def _update_docstring(old_str, append_str): + """Update old_str by inserting append_str just before the "Args:" section.""" + old_str = old_str or "" + old_str_lines = old_str.split("\n") + + # Step 0: Prepend spaces to all lines of append_str. This is + # necessary for correct markdown generation. 
+ append_str = "\n".join(" %s" % line for line in append_str.split("\n")) + + # Step 1: Find mention of "Args": + has_args_ix = [ + ix for ix, line in enumerate(old_str_lines) + if line.strip().lower() == "args:"] + if has_args_ix: + final_args_ix = has_args_ix[-1] + return ("\n".join(old_str_lines[:final_args_ix]) + + "\n\n" + append_str + "\n\n" + + "\n".join(old_str_lines[final_args_ix:])) + else: + return old_str + "\n\n" + append_str + + +def _convert_to_tensor(value, name=None, preferred_dtype=None): + """Converts to tensor avoiding an eager bug that loses float precision.""" + # TODO(b/116672045): Remove this function. + if (context.executing_eagerly() and preferred_dtype is not None and + (preferred_dtype.is_integer or preferred_dtype.is_bool)): + v = ops.convert_to_tensor(value, name=name) + if v.dtype.is_floating: + return v + return ops.convert_to_tensor( + value, name=name, preferred_dtype=preferred_dtype) + + +class _DistributionMeta(abc.ABCMeta): + + def __new__(mcs, classname, baseclasses, attrs): + """Control the creation of subclasses of the Distribution class. + + The main purpose of this method is to properly propagate docstrings + from private Distribution methods, like `_log_prob`, into their + public wrappers as inherited by the Distribution base class + (e.g. `log_prob`). + + Args: + classname: The name of the subclass being created. + baseclasses: A tuple of parent classes. + attrs: A dict mapping new attributes to their values. + + Returns: + The class object. + + Raises: + TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or + the new class is derived via multiple inheritance and the first + parent class is not a subclass of `BaseDistribution`. + AttributeError: If `Distribution` does not implement e.g. `log_prob`. + ValueError: If a `Distribution` public method lacks a docstring. + """ + if not baseclasses: # Nothing to be done for Distribution + raise TypeError("Expected non-empty baseclass. 
Does Distribution " + "not subclass _BaseDistribution?") + which_base = [ + base for base in baseclasses + if base == _BaseDistribution or issubclass(base, Distribution)] + base = which_base[0] + if base == _BaseDistribution: # Nothing to be done for Distribution + return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs) + if not issubclass(base, Distribution): + raise TypeError("First parent class declared for %s must be " + "Distribution, but saw '%s'" % (classname, base.__name__)) + for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS: + special_attr = "_%s" % attr + class_attr_value = attrs.get(attr, None) + if attr in attrs: + # The method is being overridden, do not update its docstring + continue + base_attr_value = getattr(base, attr, None) + if not base_attr_value: + raise AttributeError( + "Internal error: expected base class '%s' to implement method '%s'" + % (base.__name__, attr)) + class_special_attr_value = attrs.get(special_attr, None) + if class_special_attr_value is None: + # No _special method available, no need to update the docstring. + continue + class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value) + if not class_special_attr_docstring: + # No docstring to append. + continue + class_attr_value = _copy_fn(base_attr_value) + class_attr_docstring = tf_inspect.getdoc(base_attr_value) + if class_attr_docstring is None: + raise ValueError( + "Expected base class fn to contain a docstring: %s.%s" + % (base.__name__, attr)) + class_attr_value.__doc__ = _update_docstring( + class_attr_value.__doc__, + ("Additional documentation from `%s`:\n\n%s" + % (classname, class_special_attr_docstring))) + attrs[attr] = class_attr_value + + return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs) + + +@tf_export(v1=["distributions.ReparameterizationType"]) +class ReparameterizationType: + """Instances of this class represent how sampling is reparameterized. 
+ + Two static instances exist in the distributions library, signifying + one of two possible properties for samples from a distribution: + + `FULLY_REPARAMETERIZED`: Samples from the distribution are fully + reparameterized, and straight-through gradients are supported. + + `NOT_REPARAMETERIZED`: Samples from the distribution are not fully + reparameterized, and straight-through gradients are either partially + unsupported or are not supported at all. In this case, for purposes of + e.g. RL or variational inference, it is generally safest to wrap the + sample results in a `stop_gradients` call and use policy + gradients / surrogate loss instead. + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, rep_type): + self._rep_type = rep_type + + def __repr__(self): + return "" % self._rep_type + + def __eq__(self, other): + """Determine if this `ReparameterizationType` is equal to another. + + Since ReparameterizationType instances are constant static global + instances, equality checks if two instances' id() values are equal. + + Args: + other: Object to compare against. + + Returns: + `self is other`. + """ + return self is other + + +# Fully reparameterized distribution: samples from a fully +# reparameterized distribution support straight-through gradients with +# respect to all parameters. +FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED") +tf_export(v1=["distributions.FULLY_REPARAMETERIZED"]).export_constant( + __name__, "FULLY_REPARAMETERIZED") + + +# Not reparameterized distribution: samples from a non- +# reparameterized distribution do not support straight-through gradients for +# at least some of the parameters. 
+NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED") +tf_export(v1=["distributions.NOT_REPARAMETERIZED"]).export_constant( + __name__, "NOT_REPARAMETERIZED") + + +@tf_export(v1=["distributions.Distribution"]) +class Distribution(_BaseDistribution, metaclass=_DistributionMeta): + """A generic probability distribution base class. + + `Distribution` is a base class for constructing and organizing properties + (e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian). + + #### Subclassing + + Subclasses are expected to implement a leading-underscore version of the + same-named function. The argument signature should be identical except for + the omission of `name="..."`. For example, to enable `log_prob(value, + name="log_prob")` a subclass should implement `_log_prob(value)`. + + Subclasses can append to public-level docstrings by providing + docstrings for their method specializations. For example: + + ```python + @util.AppendDocstring("Some other details.") + def _log_prob(self, value): + ... + ``` + + would add the string "Some other details." to the `log_prob` function + docstring. This is implemented as a simple decorator to avoid python + linter complaining about missing Args/Returns/Raises sections in the + partial docstrings. + + #### Broadcasting, batching, and shapes + + All distributions support batches of independent distributions of that type. + The batch shape is determined by broadcasting together the parameters. + + The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and + `log_prob` reflect this broadcasting, as does the return value of `sample` and + `sample_n`. + + `sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is + the shape of the `Tensor` returned from `sample_n`, `n` is the number of + samples, `batch_shape` defines how many independent distributions there are, + and `event_shape` defines the shape of samples from each of those independent + distributions. 
Samples are independent along the `batch_shape` dimensions, but + not necessarily so along the `event_shape` dimensions (depending on the + particulars of the underlying distribution). + + Using the `Uniform` distribution as an example: + + ```python + minval = 3.0 + maxval = [[4.0, 6.0], + [10.0, 12.0]] + + # Broadcasting: + # This instance represents 4 Uniform distributions. Each has a lower bound at + # 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape. + u = Uniform(minval, maxval) + + # `event_shape` is `TensorShape([])`. + event_shape = u.event_shape + # `event_shape_t` is a `Tensor` which will evaluate to []. + event_shape_t = u.event_shape_tensor() + + # Sampling returns a sample per distribution. `samples` has shape + # [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5, + # batch_shape=[2, 2], and event_shape=[]. + samples = u.sample_n(5) + + # The broadcasting holds across methods. Here we use `cdf` as an example. The + # same holds for `log_cdf` and the likelihood functions. + + # `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the + # shape of the `Uniform` instance. + cum_prob_broadcast = u.cdf(4.0) + + # `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting + # occurred. + cum_prob_per_dist = u.cdf([[4.0, 5.0], + [6.0, 7.0]]) + + # INVALID as the `value` argument is not broadcastable to the distribution's + # shape. + cum_prob_invalid = u.cdf([4.0, 5.0, 6.0]) + ``` + + #### Shapes + + There are three important concepts associated with TensorFlow Distributions + shapes: + - Event shape describes the shape of a single draw from the distribution; + it may be dependent across dimensions. For scalar distributions, the event + shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is + `[5]`. + - Batch shape describes independent, not identically distributed draws, aka a + "collection" or "bunch" of distributions. 
+ - Sample shape describes independent, identically distributed draws of batches + from the distribution family. + + The event shape and the batch shape are properties of a Distribution object, + whereas the sample shape is associated with a specific call to `sample` or + `log_prob`. + + For detailed usage examples of TensorFlow Distributions shapes, see + [this tutorial]( + https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb) + + #### Parameter values leading to undefined statistics or distributions. + + Some distributions do not have well-defined statistics for all initialization + parameter values. For example, the beta distribution is parameterized by + positive real numbers `concentration1` and `concentration0`, and does not have + well-defined mode if `concentration1 < 1` or `concentration0 < 1`. + + The user is given the option of raising an exception or returning `NaN`. + + ```python + a = tf.exp(tf.matmul(logits, weights_a)) + b = tf.exp(tf.matmul(logits, weights_b)) + + # Will raise exception if ANY batch member has a < 1 or b < 1. + dist = distributions.beta(a, b, allow_nan_stats=False) + mode = dist.mode().eval() + + # Will return NaN for batch members with either a < 1 or b < 1. + dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior + mode = dist.mode().eval() + ``` + + In all cases, an exception is raised if *invalid* parameters are passed, e.g. + + ```python + # Will raise an exception if any Op is run. + negative_a = -1.0 * a # beta distribution by definition has a > 0. + dist = distributions.beta(negative_a, b, allow_nan_stats=True) + dist.mean().eval() + ``` + + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). 
You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + dtype, + reparameterization_type, + validate_args, + allow_nan_stats, + parameters=None, + graph_parents=None, + name=None): + """Constructs the `Distribution`. + + **This is a private method for subclass use.** + + Args: + dtype: The type of the event samples. `None` implies no type-enforcement. + reparameterization_type: Instance of `ReparameterizationType`. + If `distributions.FULLY_REPARAMETERIZED`, this + `Distribution` can be reparameterized in terms of some standard + distribution with a function whose Jacobian is constant for the support + of the standard distribution. If `distributions.NOT_REPARAMETERIZED`, + then no such reparameterization is available. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + parameters: Python `dict` of parameters used to instantiate this + `Distribution`. + graph_parents: Python `list` of graph prerequisites of this + `Distribution`. + name: Python `str` name prefixed to Ops created by this class. Default: + subclass name. + + Raises: + ValueError: if any member of graph_parents is `None` or not a `Tensor`. + """ + graph_parents = [] if graph_parents is None else graph_parents + for i, t in enumerate(graph_parents): + if t is None or not tensor_util.is_tf_type(t): + raise ValueError("Graph parent item %d is not a Tensor; %s." 
% (i, t)) + if not name or name[-1] != "/": # `name` is not a name scope + non_unique_name = name or type(self).__name__ + with ops.name_scope(non_unique_name) as name: + pass + self._dtype = dtype + self._reparameterization_type = reparameterization_type + self._allow_nan_stats = allow_nan_stats + self._validate_args = validate_args + self._parameters = parameters or {} + self._graph_parents = graph_parents + self._name = name + + @property + def _parameters(self): + return self._parameter_dict + + @_parameters.setter + def _parameters(self, value): + """Intercept assignments to self._parameters to avoid reference cycles. + + Parameters are often created using locals(), so we need to clean out any + references to `self` before assigning it to an attribute. + + Args: + value: A dictionary of parameters to assign to the `_parameters` property. + """ + if "self" in value: + del value["self"] + self._parameter_dict = value + + @classmethod + def param_shapes(cls, sample_shape, name="DistributionParamShapes"): + """Shapes of parameters given the desired shape of a call to `sample()`. + + This is a class method that describes what key/value arguments are required + to instantiate the given `Distribution` so that a particular shape is + returned for that instance's call to `sample()`. + + Subclasses should override class method `_param_shapes`. + + Args: + sample_shape: `Tensor` or python list/tuple. Desired shape of a call to + `sample()`. + name: name to prepend ops with. + + Returns: + `dict` of parameter name to `Tensor` shapes. + """ + with ops.name_scope(name, values=[sample_shape]): + return cls._param_shapes(sample_shape) + + @classmethod + def param_static_shapes(cls, sample_shape): + """param_shapes with static (i.e. `TensorShape`) shapes. + + This is a class method that describes what key/value arguments are required + to instantiate the given `Distribution` so that a particular shape is + returned for that instance's call to `sample()`. 
Assumes that the sample's + shape is known statically. + + Subclasses should override class method `_param_shapes` to return + constant-valued tensors when constant values are fed. + + Args: + sample_shape: `TensorShape` or python list/tuple. Desired shape of a call + to `sample()`. + + Returns: + `dict` of parameter name to `TensorShape`. + + Raises: + ValueError: if `sample_shape` is a `TensorShape` and is not fully defined. + """ + if isinstance(sample_shape, tensor_shape.TensorShape): + if not sample_shape.is_fully_defined(): + raise ValueError("TensorShape sample_shape must be fully defined") + sample_shape = sample_shape.as_list() + + params = cls.param_shapes(sample_shape) + + static_params = {} + for name, shape in params.items(): + static_shape = tensor_util.constant_value(shape) + if static_shape is None: + raise ValueError( + "sample_shape must be a fully-defined TensorShape or list/tuple") + static_params[name] = tensor_shape.TensorShape(static_shape) + + return static_params + + @staticmethod + def _param_shapes(sample_shape): + raise NotImplementedError("_param_shapes not implemented") + + @property + def name(self): + """Name prepended to all ops created by this `Distribution`.""" + return self._name + + @property + def dtype(self): + """The `DType` of `Tensor`s handled by this `Distribution`.""" + return self._dtype + + @property + def parameters(self): + """Dictionary of parameters used to instantiate this `Distribution`.""" + # Remove "self", "__class__", or other special variables. These can appear + # if the subclass used: + # `parameters = dict(locals())`. + return {k: v for k, v in self._parameters.items() + if not k.startswith("__") and k != "self"} + + @property + def reparameterization_type(self): + """Describes how samples from the distribution are reparameterized. + + Currently this is one of the static instances + `distributions.FULLY_REPARAMETERIZED` + or `distributions.NOT_REPARAMETERIZED`. 
+ + Returns: + An instance of `ReparameterizationType`. + """ + return self._reparameterization_type + + @property + def allow_nan_stats(self): + """Python `bool` describing behavior when a stat is undefined. + + Stats return +/- infinity when it makes sense. E.g., the variance of a + Cauchy distribution is infinity. However, sometimes the statistic is + undefined, e.g., if a distribution's pdf does not achieve a maximum within + the support of the distribution, the mode is undefined. If the mean is + undefined, then by definition the variance is undefined. E.g. the mean for + Student's T for df = 1 is undefined (no clear way to say it is either + or - + infinity), so the variance = E[(X - mean)**2] is also undefined. + + Returns: + allow_nan_stats: Python `bool`. + """ + return self._allow_nan_stats + + @property + def validate_args(self): + """Python `bool` indicating possibly expensive checks are enabled.""" + return self._validate_args + + def copy(self, **override_parameters_kwargs): + """Creates a deep copy of the distribution. + + Note: the copy distribution may continue to depend on the original + initialization arguments. + + Args: + **override_parameters_kwargs: String/value dictionary of initialization + arguments to override with new values. + + Returns: + distribution: A new instance of `type(self)` initialized from the union + of self.parameters and override_parameters_kwargs, i.e., + `dict(self.parameters, **override_parameters_kwargs)`. + """ + parameters = dict(self.parameters, **override_parameters_kwargs) + return type(self)(**parameters) + + def _batch_shape_tensor(self): + raise NotImplementedError( + "batch_shape_tensor is not implemented: {}".format(type(self).__name__)) + + def batch_shape_tensor(self, name="batch_shape_tensor"): + """Shape of a single sample from a single event index as a 1-D `Tensor`. + + The batch dimensions are indexes into independent, non-identical + parameterizations of this distribution. 
+ + Args: + name: name to give to the op + + Returns: + batch_shape: `Tensor`. + """ + with self._name_scope(name): + if self.batch_shape.is_fully_defined(): + return ops.convert_to_tensor(self.batch_shape.as_list(), + dtype=dtypes.int32, + name="batch_shape") + return self._batch_shape_tensor() + + def _batch_shape(self): + return tensor_shape.TensorShape(None) + + @property + def batch_shape(self): + """Shape of a single sample from a single event index as a `TensorShape`. + + May be partially defined or unknown. + + The batch dimensions are indexes into independent, non-identical + parameterizations of this distribution. + + Returns: + batch_shape: `TensorShape`, possibly unknown. + """ + return tensor_shape.as_shape(self._batch_shape()) + + def _event_shape_tensor(self): + raise NotImplementedError( + "event_shape_tensor is not implemented: {}".format(type(self).__name__)) + + def event_shape_tensor(self, name="event_shape_tensor"): + """Shape of a single sample from a single batch as a 1-D int32 `Tensor`. + + Args: + name: name to give to the op + + Returns: + event_shape: `Tensor`. + """ + with self._name_scope(name): + if self.event_shape.is_fully_defined(): + return ops.convert_to_tensor(self.event_shape.as_list(), + dtype=dtypes.int32, + name="event_shape") + return self._event_shape_tensor() + + def _event_shape(self): + return tensor_shape.TensorShape(None) + + @property + def event_shape(self): + """Shape of a single sample from a single batch as a `TensorShape`. + + May be partially defined or unknown. + + Returns: + event_shape: `TensorShape`, possibly unknown. + """ + return tensor_shape.as_shape(self._event_shape()) + + def is_scalar_event(self, name="is_scalar_event"): + """Indicates that `event_shape == []`. + + Args: + name: Python `str` prepended to names of ops created by this function. + + Returns: + is_scalar_event: `bool` scalar `Tensor`. 
+ """ + with self._name_scope(name): + return ops.convert_to_tensor( + self._is_scalar_helper(self.event_shape, self.event_shape_tensor), + name="is_scalar_event") + + def is_scalar_batch(self, name="is_scalar_batch"): + """Indicates that `batch_shape == []`. + + Args: + name: Python `str` prepended to names of ops created by this function. + + Returns: + is_scalar_batch: `bool` scalar `Tensor`. + """ + with self._name_scope(name): + return ops.convert_to_tensor( + self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor), + name="is_scalar_batch") + + def _sample_n(self, n, seed=None): + raise NotImplementedError("sample_n is not implemented: {}".format( + type(self).__name__)) + + def _call_sample_n(self, sample_shape, seed, name, **kwargs): + with self._name_scope(name, values=[sample_shape]): + sample_shape = ops.convert_to_tensor( + sample_shape, dtype=dtypes.int32, name="sample_shape") + sample_shape, n = self._expand_sample_shape_to_vector( + sample_shape, "sample_shape") + samples = self._sample_n(n, seed, **kwargs) + batch_event_shape = array_ops.shape(samples)[1:] + final_shape = array_ops.concat([sample_shape, batch_event_shape], 0) + samples = array_ops.reshape(samples, final_shape) + samples = self._set_sample_static_shape(samples, sample_shape) + return samples + + def sample(self, sample_shape=(), seed=None, name="sample"): + """Generate samples of the specified shape. + + Note that a call to `sample()` without arguments will generate a single + sample. + + Args: + sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples. + seed: Python integer seed for RNG + name: name to give to the op. + + Returns: + samples: a `Tensor` with prepended dimensions `sample_shape`. 
+ """ + return self._call_sample_n(sample_shape, seed, name) + + def _log_prob(self, value): + raise NotImplementedError("log_prob is not implemented: {}".format( + type(self).__name__)) + + def _call_log_prob(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._log_prob(value, **kwargs) + except NotImplementedError as original_exception: + try: + return math_ops.log(self._prob(value, **kwargs)) + except NotImplementedError: + raise original_exception + + def log_prob(self, value, name="log_prob"): + """Log probability density/mass function. + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with + values of type `self.dtype`. + """ + return self._call_log_prob(value, name) + + def _prob(self, value): + raise NotImplementedError("prob is not implemented: {}".format( + type(self).__name__)) + + def _call_prob(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._prob(value, **kwargs) + except NotImplementedError as original_exception: + try: + return math_ops.exp(self._log_prob(value, **kwargs)) + except NotImplementedError: + raise original_exception + + def prob(self, value, name="prob"): + """Probability density/mass function. + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with + values of type `self.dtype`. 
+ """ + return self._call_prob(value, name) + + def _log_cdf(self, value): + raise NotImplementedError("log_cdf is not implemented: {}".format( + type(self).__name__)) + + def _call_log_cdf(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._log_cdf(value, **kwargs) + except NotImplementedError as original_exception: + try: + return math_ops.log(self._cdf(value, **kwargs)) + except NotImplementedError: + raise original_exception + + def log_cdf(self, value, name="log_cdf"): + """Log cumulative distribution function. + + Given random variable `X`, the cumulative distribution function `cdf` is: + + ```none + log_cdf(x) := Log[ P[X <= x] ] + ``` + + Often, a numerical approximation can be used for `log_cdf(x)` that yields + a more accurate answer than simply taking the logarithm of the `cdf` when + `x << -1`. + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with + values of type `self.dtype`. + """ + return self._call_log_cdf(value, name) + + def _cdf(self, value): + raise NotImplementedError("cdf is not implemented: {}".format( + type(self).__name__)) + + def _call_cdf(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._cdf(value, **kwargs) + except NotImplementedError as original_exception: + try: + return math_ops.exp(self._log_cdf(value, **kwargs)) + except NotImplementedError: + raise original_exception + + def cdf(self, value, name="cdf"): + """Cumulative distribution function. + + Given random variable `X`, the cumulative distribution function `cdf` is: + + ```none + cdf(x) := P[X <= x] + ``` + + Args: + value: `float` or `double` `Tensor`. 
+ name: Python `str` prepended to names of ops created by this function. + + Returns: + cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with + values of type `self.dtype`. + """ + return self._call_cdf(value, name) + + def _log_survival_function(self, value): + raise NotImplementedError( + "log_survival_function is not implemented: {}".format( + type(self).__name__)) + + def _call_log_survival_function(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._log_survival_function(value, **kwargs) + except NotImplementedError as original_exception: + try: + return math_ops.log1p(-self.cdf(value, **kwargs)) + except NotImplementedError: + raise original_exception + + def log_survival_function(self, value, name="log_survival_function"): + """Log survival function. + + Given random variable `X`, the survival function is defined: + + ```none + log_survival_function(x) = Log[ P[X > x] ] + = Log[ 1 - P[X <= x] ] + = Log[ 1 - cdf(x) ] + ``` + + Typically, different numerical approximations can be used for the log + survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`. + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type + `self.dtype`. + """ + return self._call_log_survival_function(value, name) + + def _survival_function(self, value): + raise NotImplementedError("survival_function is not implemented: {}".format( + type(self).__name__)) + + def _call_survival_function(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + try: + return self._survival_function(value, **kwargs) + except NotImplementedError as original_exception: + try: + return 1. 
- self.cdf(value, **kwargs) + except NotImplementedError: + raise original_exception + + def survival_function(self, value, name="survival_function"): + """Survival function. + + Given random variable `X`, the survival function is defined: + + ```none + survival_function(x) = P[X > x] + = 1 - P[X <= x] + = 1 - cdf(x). + ``` + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type + `self.dtype`. + """ + return self._call_survival_function(value, name) + + def _entropy(self): + raise NotImplementedError("entropy is not implemented: {}".format( + type(self).__name__)) + + def entropy(self, name="entropy"): + """Shannon entropy in nats.""" + with self._name_scope(name): + return self._entropy() + + def _mean(self): + raise NotImplementedError("mean is not implemented: {}".format( + type(self).__name__)) + + def mean(self, name="mean"): + """Mean.""" + with self._name_scope(name): + return self._mean() + + def _quantile(self, value): + raise NotImplementedError("quantile is not implemented: {}".format( + type(self).__name__)) + + def _call_quantile(self, value, name, **kwargs): + with self._name_scope(name, values=[value]): + value = _convert_to_tensor( + value, name="value", preferred_dtype=self.dtype) + return self._quantile(value, **kwargs) + + def quantile(self, value, name="quantile"): + """Quantile function. Aka "inverse cdf" or "percent point function". + + Given random variable `X` and `p in [0, 1]`, the `quantile` is: + + ```none + quantile(p) := x such that P[X <= x] == p + ``` + + Args: + value: `float` or `double` `Tensor`. + name: Python `str` prepended to names of ops created by this function. + + Returns: + quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with + values of type `self.dtype`. 
+ """ + return self._call_quantile(value, name) + + def _variance(self): + raise NotImplementedError("variance is not implemented: {}".format( + type(self).__name__)) + + def variance(self, name="variance"): + """Variance. + + Variance is defined as, + + ```none + Var = E[(X - E[X])**2] + ``` + + where `X` is the random variable associated with this distribution, `E` + denotes expectation, and `Var.shape = batch_shape + event_shape`. + + Args: + name: Python `str` prepended to names of ops created by this function. + + Returns: + variance: Floating-point `Tensor` with shape identical to + `batch_shape + event_shape`, i.e., the same shape as `self.mean()`. + """ + with self._name_scope(name): + try: + return self._variance() + except NotImplementedError as original_exception: + try: + return math_ops.square(self._stddev()) + except NotImplementedError: + raise original_exception + + def _stddev(self): + raise NotImplementedError("stddev is not implemented: {}".format( + type(self).__name__)) + + def stddev(self, name="stddev"): + """Standard deviation. + + Standard deviation is defined as, + + ```none + stddev = E[(X - E[X])**2]**0.5 + ``` + + where `X` is the random variable associated with this distribution, `E` + denotes expectation, and `stddev.shape = batch_shape + event_shape`. + + Args: + name: Python `str` prepended to names of ops created by this function. + + Returns: + stddev: Floating-point `Tensor` with shape identical to + `batch_shape + event_shape`, i.e., the same shape as `self.mean()`. + """ + + with self._name_scope(name): + try: + return self._stddev() + except NotImplementedError as original_exception: + try: + return math_ops.sqrt(self._variance()) + except NotImplementedError: + raise original_exception + + def _covariance(self): + raise NotImplementedError("covariance is not implemented: {}".format( + type(self).__name__)) + + def covariance(self, name="covariance"): + """Covariance. 
+ + Covariance is (possibly) defined only for non-scalar-event distributions. + + For example, for a length-`k`, vector-valued distribution, it is calculated + as, + + ```none + Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])] + ``` + + where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E` + denotes expectation. + + Alternatively, for non-vector, multivariate distributions (e.g., + matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices + under some vectorization of the events, i.e., + + ```none + Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above] + ``` + + where `Cov` is a (batch of) `k' x k'` matrices, + `0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function + mapping indices of this distribution's event dimensions to indices of a + length-`k'` vector. + + Args: + name: Python `str` prepended to names of ops created by this function. + + Returns: + covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']` + where the first `n` dimensions are batch coordinates and + `k' = reduce_prod(self.event_shape)`. + """ + with self._name_scope(name): + return self._covariance() + + def _mode(self): + raise NotImplementedError("mode is not implemented: {}".format( + type(self).__name__)) + + def mode(self, name="mode"): + """Mode.""" + with self._name_scope(name): + return self._mode() + + def _cross_entropy(self, other): + return kullback_leibler.cross_entropy( + self, other, allow_nan_stats=self.allow_nan_stats) + + def cross_entropy(self, other, name="cross_entropy"): + """Computes the (Shannon) cross entropy. + + Denote this distribution (`self`) by `P` and the `other` distribution by + `Q`. 
Assuming `P, Q` are absolutely continuous with respect to + one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shanon) + cross entropy is defined as: + + ```none + H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x) + ``` + + where `F` denotes the support of the random variable `X ~ P`. + + Args: + other: `tfp.distributions.Distribution` instance. + name: Python `str` prepended to names of ops created by this function. + + Returns: + cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]` + representing `n` different calculations of (Shanon) cross entropy. + """ + with self._name_scope(name): + return self._cross_entropy(other) + + def _kl_divergence(self, other): + return kullback_leibler.kl_divergence( + self, other, allow_nan_stats=self.allow_nan_stats) + + def kl_divergence(self, other, name="kl_divergence"): + """Computes the Kullback--Leibler divergence. + + Denote this distribution (`self`) by `p` and the `other` distribution by + `q`. Assuming `p, q` are absolutely continuous with respect to reference + measure `r`, the KL divergence is defined as: + + ```none + KL[p, q] = E_p[log(p(X)/q(X))] + = -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x) + = H[p, q] - H[p] + ``` + + where `F` denotes the support of the random variable `X ~ p`, `H[., .]` + denotes (Shanon) cross entropy, and `H[.]` denotes (Shanon) entropy. + + Args: + other: `tfp.distributions.Distribution` instance. + name: Python `str` prepended to names of ops created by this function. + + Returns: + kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]` + representing `n` different calculations of the Kullback-Leibler + divergence. 
+ """ + with self._name_scope(name): + return self._kl_divergence(other) + + def __str__(self): + return ("tfp.distributions.{type_name}(" + "\"{self_name}\"" + "{maybe_batch_shape}" + "{maybe_event_shape}" + ", dtype={dtype})".format( + type_name=type(self).__name__, + self_name=self.name, + maybe_batch_shape=(", batch_shape={}".format(self.batch_shape) + if self.batch_shape.ndims is not None + else ""), + maybe_event_shape=(", event_shape={}".format(self.event_shape) + if self.event_shape.ndims is not None + else ""), + dtype=self.dtype.name)) + + def __repr__(self): + return ("".format( + type_name=type(self).__name__, + self_name=self.name, + batch_shape=self.batch_shape, + event_shape=self.event_shape, + dtype=self.dtype.name)) + + @contextlib.contextmanager + def _name_scope(self, name=None, values=None): + """Helper function to standardize op scope.""" + with ops.name_scope(self.name): + with ops.name_scope(name, values=( + ([] if values is None else values) + self._graph_parents)) as scope: + yield scope + + def _expand_sample_shape_to_vector(self, x, name): + """Helper to `sample` which ensures input is 1D.""" + x_static_val = tensor_util.constant_value(x) + if x_static_val is None: + prod = math_ops.reduce_prod(x) + else: + prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype()) + + ndims = x.get_shape().ndims # != sample_ndims + if ndims is None: + # Maybe expand_dims. + ndims = array_ops.rank(x) + expanded_shape = util.pick_vector( + math_ops.equal(ndims, 0), + np.array([1], dtype=np.int32), array_ops.shape(x)) + x = array_ops.reshape(x, expanded_shape) + elif ndims == 0: + # Definitely expand_dims. 
+ if x_static_val is not None: + x = ops.convert_to_tensor( + np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()), + name=name) + else: + x = array_ops.reshape(x, [1]) + elif ndims != 1: + raise ValueError("Input is neither scalar nor vector.") + + return x, prod + + def _set_sample_static_shape(self, x, sample_shape): + """Helper to `sample`; sets static shape info.""" + # Set shape hints. + sample_shape = tensor_shape.TensorShape( + tensor_util.constant_value(sample_shape)) + + ndims = x.get_shape().ndims + sample_ndims = sample_shape.ndims + batch_ndims = self.batch_shape.ndims + event_ndims = self.event_shape.ndims + + # Infer rank(x). + if (ndims is None and + sample_ndims is not None and + batch_ndims is not None and + event_ndims is not None): + ndims = sample_ndims + batch_ndims + event_ndims + x.set_shape([None] * ndims) + + # Infer sample shape. + if ndims is not None and sample_ndims is not None: + shape = sample_shape.concatenate([None]*(ndims - sample_ndims)) + x.set_shape(x.get_shape().merge_with(shape)) + + # Infer event shape. + if ndims is not None and event_ndims is not None: + shape = tensor_shape.TensorShape( + [None]*(ndims - event_ndims)).concatenate(self.event_shape) + x.set_shape(x.get_shape().merge_with(shape)) + + # Infer batch shape. 
+ if batch_ndims is not None: + if ndims is not None: + if sample_ndims is None and event_ndims is not None: + sample_ndims = ndims - batch_ndims - event_ndims + elif event_ndims is None and sample_ndims is not None: + event_ndims = ndims - batch_ndims - sample_ndims + if sample_ndims is not None and event_ndims is not None: + shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate( + self.batch_shape).concatenate([None]*event_ndims) + x.set_shape(x.get_shape().merge_with(shape)) + + return x + + def _is_scalar_helper(self, static_shape, dynamic_shape_fn): + """Implementation for `is_scalar_batch` and `is_scalar_event`.""" + if static_shape.ndims is not None: + return static_shape.ndims == 0 + shape = dynamic_shape_fn() + if (shape.get_shape().ndims is not None and + shape.get_shape().dims[0].value is not None): + # If the static_shape is correctly written then we should never execute + # this branch. We keep it just in case there's some unimagined corner + # case. + return shape.get_shape().as_list() == [0] + return math_ops.equal(array_ops.shape(shape)[0], 0) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distributions.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..cdc015295f5872f14d1373ca1db10892789924e0 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/distributions.py @@ -0,0 +1,36 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Core module for TensorFlow distribution objects and helpers.""" +from tensorflow.python.util import deprecation + + +# pylint: disable=wildcard-import,unused-import,g-import-not-at-top +with deprecation.silence(): + from tensorflow.python.ops.distributions.bernoulli import Bernoulli + from tensorflow.python.ops.distributions.beta import Beta + from tensorflow.python.ops.distributions.categorical import Categorical + from tensorflow.python.ops.distributions.dirichlet import Dirichlet + from tensorflow.python.ops.distributions.dirichlet_multinomial import DirichletMultinomial + from tensorflow.python.ops.distributions.distribution import * + from tensorflow.python.ops.distributions.exponential import Exponential + from tensorflow.python.ops.distributions.gamma import Gamma + from tensorflow.python.ops.distributions.kullback_leibler import * + from tensorflow.python.ops.distributions.laplace import Laplace + from tensorflow.python.ops.distributions.multinomial import Multinomial + from tensorflow.python.ops.distributions.normal import Normal + from tensorflow.python.ops.distributions.student_t import StudentT + from tensorflow.python.ops.distributions.uniform import Uniform +# pylint: enable=wildcard-import,unused-import +del deprecation diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/exponential.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/exponential.py new 
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Exponential distribution class."""

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import gamma
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


__all__ = [
    "Exponential",
    "ExponentialWithSoftplusRate",
]


@tf_export(v1=["distributions.Exponential"])
class Exponential(gamma.Gamma):
  """Exponential distribution.

  The Exponential distribution is parameterized by an event `rate` parameter.

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; lambda, x > 0) = exp(-lambda x) / Z
  Z = 1 / lambda
  ```

  where `rate = lambda` and `Z` is the normalizing constant.

  The Exponential distribution is a special case of the Gamma distribution,
  i.e.,

  ```python
  Exponential(rate) = Gamma(concentration=1., rate)
  ```

  The Exponential distribution uses a `rate` parameter, or "inverse scale",
  which can be intuited as,

  ```none
  X ~ Exponential(rate=1)
  Y = X / rate
  ```

  """

  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="Exponential"):
    """Construct Exponential distribution with parameter `rate`.

    Args:
      rate: Floating point tensor, equivalent to `1 / mean`. Must contain only
        positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = dict(locals())
    # Even though all statistics of an exponential distribution are defined
    # for valid inputs, this is not true in the parent class "Gamma."
    # Therefore, passing allow_nan_stats=True through to the parent class
    # results in unnecessary asserts.
    with ops.name_scope(name, values=[rate]) as name:
      self._rate = ops.convert_to_tensor(rate, name="rate")
    # Delegate to Gamma with concentration fixed at 1, which is exactly the
    # Exponential distribution.
    super(Exponential, self).__init__(
        concentration=array_ops.ones([], dtype=self._rate.dtype),
        rate=self._rate,
        allow_nan_stats=allow_nan_stats,
        validate_args=validate_args,
        name=name)
    self._parameters = parameters
    self._graph_parents += [self._rate]

  @staticmethod
  def _param_shapes(sample_shape):
    return {"rate": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}

  @property
  def rate(self):
    """Rate parameter (`1 / mean`)."""
    return self._rate

  def _log_survival_function(self, value):
    # log S(x) = -rate * x = log_prob(x) - log(rate), since
    # log_prob(x) = log(rate) - rate * x.
    return self._log_prob(value) - math_ops.log(self._rate)

  def _sample_n(self, n, seed=None):
    """Draws `n` samples via inverse-CDF: X = -log(U) / rate, U ~ (0, 1)."""
    shape = array_ops.concat([[n], array_ops.shape(self._rate)], 0)
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
    # because it is the smallest, positive, "normal" number. A "normal" number
    # is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
    # 0.
    sampled = random_ops.random_uniform(
        shape,
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        seed=seed,
        dtype=self.dtype)
    return -math_ops.log(sampled) / self._rate


class ExponentialWithSoftplusRate(Exponential):
  """Exponential with softplus transform on `rate`.

  Applies `softplus` so an unconstrained real-valued `rate` argument is mapped
  to the required positive domain.
  """

  @deprecation.deprecated(
      "2019-01-01",
      "Use `tfd.Exponential(tf.nn.softplus(rate)).",
      warn_once=True)
  def __init__(self,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="ExponentialWithSoftplusRate"):
    parameters = dict(locals())
    with ops.name_scope(name, values=[rate]) as name:
      super(ExponentialWithSoftplusRate, self).__init__(
          rate=nn.softplus(rate, name="softplus_rate"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters
# ==============================================================================
"""The Gamma distribution class."""

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


__all__ = [
    "Gamma",
    "GammaWithSoftplusConcentrationRate",
]


@tf_export(v1=["distributions.Gamma"])
class Gamma(distribution.Distribution):
  """Gamma distribution.

  The Gamma distribution is defined over positive real numbers using
  parameters `concentration` (aka "alpha") and `rate` (aka "beta").

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; alpha, beta, x > 0) = x**(alpha - 1) exp(-x beta) / Z
  Z = Gamma(alpha) beta**(-alpha)
  ```

  where:

  * `concentration = alpha`, `alpha > 0`,
  * `rate = beta`, `beta > 0`,
  * `Z` is the normalizing constant, and,
  * `Gamma` is the [gamma function](
    https://en.wikipedia.org/wiki/Gamma_function).

  The cumulative density function (cdf) is,

  ```none
  cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta x) / Gamma(alpha)
  ```

  where `GammaInc` is the [lower incomplete Gamma function](
  https://en.wikipedia.org/wiki/Incomplete_gamma_function).

  The parameters can be intuited via their relationship to mean and stddev,

  ```none
  concentration = alpha = (mean / stddev)**2
  rate = beta = mean / stddev**2 = concentration / mean
  ```

  Distribution parameters are automatically broadcast in all functions; see
  examples for details.

  Warning: The samples of this distribution are always non-negative. However,
  the samples that are smaller than `np.finfo(dtype).tiny` are rounded
  to this value, so it appears more often than it should.
  This should only be noticeable when the `concentration` is very small, or the
  `rate` is very large. See note in `tf.random.gamma` docstring.

  Samples of this distribution are reparameterized (pathwise differentiable).
  The derivatives are computed using the approach described in
  (Figurnov et al., 2018).

  #### Examples

  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions

  dist = tfd.Gamma(concentration=3.0, rate=2.0)
  dist2 = tfd.Gamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
  ```

  Compute the gradients of samples w.r.t. the parameters:

  ```python
  concentration = tf.constant(3.0)
  rate = tf.constant(2.0)
  dist = tfd.Gamma(concentration, rate)
  samples = dist.sample(5)  # Shape [5]
  loss = tf.reduce_mean(tf.square(samples))  # Arbitrary loss function
  # Unbiased stochastic gradients of the loss function
  grads = tf.gradients(loss, [concentration, rate])
  ```

  References:
    Implicit Reparameterization Gradients:
      [Figurnov et al., 2018]
      (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients)
      ([pdf](http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))
  """

  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="Gamma"):
    """Construct Gamma with `concentration` and `rate` parameters.

    The parameters `concentration` and `rate` must be shaped in a way that
    supports broadcasting (e.g. `concentration + rate` is a valid operation).

    Args:
      concentration: Floating point tensor, the concentration params of the
        distribution(s). Must contain only positive values.
      rate: Floating point tensor, the inverse scale params of the
        distribution(s). Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `concentration` and `rate` are different dtypes.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[concentration, rate]) as name:
      # Positivity assertions are only wired in when validate_args is set;
      # otherwise the identity ops below carry no control dependencies.
      with ops.control_dependencies([
          check_ops.assert_positive(concentration),
          check_ops.assert_positive(rate),
      ] if validate_args else []):
        self._concentration = array_ops.identity(
            concentration, name="concentration")
        self._rate = array_ops.identity(rate, name="rate")
        check_ops.assert_same_float_dtype(
            [self._concentration, self._rate])
    super(Gamma, self).__init__(
        dtype=self._concentration.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        reparameterization_type=distribution.FULLY_REPARAMETERIZED,
        parameters=parameters,
        graph_parents=[self._concentration,
                       self._rate],
        name=name)

  @staticmethod
  def _param_shapes(sample_shape):
    return dict(
        zip(("concentration", "rate"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def concentration(self):
    """Concentration parameter."""
    return self._concentration

  @property
  def rate(self):
    """Rate parameter."""
    return self._rate

  def _batch_shape_tensor(self):
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.concentration),
        array_ops.shape(self.rate))

  def _batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.concentration.get_shape(),
        self.rate.get_shape())

  def _event_shape_tensor(self):
    # Scalar event: each draw is a single positive real.
    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.TensorShape([])

  @distribution_util.AppendDocstring(
      """Note: See `tf.random.gamma` docstring for sampling details and
      caveats.""")
  def _sample_n(self, n, seed=None):
    return random_ops.random_gamma(
        shape=[n],
        alpha=self.concentration,
        beta=self.rate,
        dtype=self.dtype,
        seed=seed)

  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()

  def _cdf(self, x):
    x = self._maybe_assert_valid_sample(x)
    # Note that igamma returns the regularized incomplete gamma function,
    # which is what we want for the CDF.
    return math_ops.igamma(self.concentration, self.rate * x)

  def _log_unnormalized_prob(self, x):
    # xlogy handles x == 0 gracefully when concentration == 1.
    x = self._maybe_assert_valid_sample(x)
    return math_ops.xlogy(self.concentration - 1., x) - self.rate * x

  def _log_normalization(self):
    # log Z = lgamma(alpha) - alpha * log(beta).
    return (math_ops.lgamma(self.concentration)
            - self.concentration * math_ops.log(self.rate))

  def _entropy(self):
    return (self.concentration
            - math_ops.log(self.rate)
            + math_ops.lgamma(self.concentration)
            + ((1. - self.concentration) *
               math_ops.digamma(self.concentration)))

  def _mean(self):
    return self.concentration / self.rate

  def _variance(self):
    return self.concentration / math_ops.square(self.rate)

  def _stddev(self):
    return math_ops.sqrt(self.concentration) / self.rate

  @distribution_util.AppendDocstring(
      """The mode of a gamma distribution is `(shape - 1) / rate` when
      `shape > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`,
      an exception will be raised rather than returning `NaN`.""")
  def _mode(self):
    mode = (self.concentration - 1.) / self.rate
    if self.allow_nan_stats:
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where_v2(self.concentration > 1., mode, nan)
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones([], self.dtype),
              self.concentration,
              message="mode not defined when any concentration <= 1"),
      ], mode)

  def _maybe_assert_valid_sample(self, x):
    """Attaches a positivity assertion to `x` when `validate_args` is set."""
    check_ops.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
    if not self.validate_args:
      return x
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(x),
    ], x)


class GammaWithSoftplusConcentrationRate(Gamma):
  """`Gamma` with softplus of `concentration` and `rate`.

  Applies `softplus` so unconstrained real-valued arguments are mapped into
  the required positive domain.
  """

  @deprecation.deprecated(
      "2019-01-01",
      "Use `tfd.Gamma(tf.nn.softplus(concentration), "
      "tf.nn.softplus(rate))` instead.",
      warn_once=True)
  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="GammaWithSoftplusConcentrationRate"):
    parameters = dict(locals())
    with ops.name_scope(name, values=[concentration, rate]) as name:
      super(GammaWithSoftplusConcentrationRate, self).__init__(
          concentration=nn.softplus(concentration,
                                    name="softplus_concentration"),
          rate=nn.softplus(rate, name="softplus_rate"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters


@kullback_leibler.RegisterKL(Gamma, Gamma)
def _kl_gamma_gamma(g0, g1, name=None):
  """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.

  Args:
    g0: instance of a Gamma distribution object.
    g1: instance of a Gamma distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_gamma_gamma".

  Returns:
    kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
  """
  with ops.name_scope(name, "kl_gamma_gamma", values=[
      g0.concentration, g0.rate, g1.concentration, g1.rate]):
    # Result from:
    #   http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    #   http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions   pylint: disable=line-too-long
    return (((g0.concentration - g1.concentration)
             * math_ops.digamma(g0.concentration))
            + math_ops.lgamma(g1.concentration)
            - math_ops.lgamma(g0.concentration)
            + g1.concentration * math_ops.log(g0.rate)
            - g1.concentration * math_ops.log(g1.rate)
            + g0.concentration * (g1.rate / g0.rate - 1.))
+# ============================================================================== +"""Identity bijector.""" + +from tensorflow.python.framework import constant_op +from tensorflow.python.ops.distributions import bijector +from tensorflow.python.util import deprecation + + +__all__ = [ + "Identity", +] + + +class Identity(bijector.Bijector): + """Compute Y = g(X) = X. + + Example Use: + + ```python + # Create the Y=g(X)=X transform which is intended for Tensors with 1 batch + # ndim and 1 event ndim (i.e., vector of vectors). + identity = Identity() + x = [[1., 2], + [3, 4]] + x == identity.forward(x) == identity.inverse(x) + ``` + + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, validate_args=False, name="identity"): + super(Identity, self).__init__( + forward_min_event_ndims=0, + is_constant_jacobian=True, + validate_args=validate_args, + name=name) + + def _forward(self, x): + return x + + def _inverse(self, y): + return y + + def _inverse_log_det_jacobian(self, y): + return constant_op.constant(0., dtype=y.dtype) + + def _forward_log_det_jacobian(self, x): + return constant_op.constant(0., dtype=x.dtype) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/kullback_leibler.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/kullback_leibler.py new file mode 100644 index 0000000000000000000000000000000000000000..c8dfb2157e9ff252cce3294c70b6059380d28a60 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/kullback_leibler.py @@ -0,0 +1,210 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registration and usage mechanisms for KL-divergences."""

from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_assert
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export


# Registry mapping (type_a, type_b) -> KL divergence function, populated by
# the RegisterKL decorator below.
_DIVERGENCES = {}


__all__ = [
    "RegisterKL",
    "kl_divergence",
]


def _registered_kl(type_a, type_b):
  """Get the KL function registered for classes a and b.

  Searches the MRO of both types for a registered pair and returns the
  function whose (parent_a, parent_b) pair has the smallest combined MRO
  distance to (type_a, type_b); `None` if nothing is registered.
  """
  hierarchy_a = tf_inspect.getmro(type_a)
  hierarchy_b = tf_inspect.getmro(type_b)
  dist_to_children = None
  kl_fn = None
  for mro_to_a, parent_a in enumerate(hierarchy_a):
    for mro_to_b, parent_b in enumerate(hierarchy_b):
      candidate_dist = mro_to_a + mro_to_b
      candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)
      # Take the first match found, then only replace it with a strictly
      # closer registered pair.
      if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):
        dist_to_children = candidate_dist
        kl_fn = candidate_kl_fn
  return kl_fn


@deprecation.deprecated(
    "2019-01-01",
    "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.distributions`.",
    warn_once=True)
@tf_export(v1=["distributions.kl_divergence"])
def kl_divergence(distribution_a, distribution_b,
                  allow_nan_stats=True, name=None):
  """Get the KL-divergence KL(distribution_a || distribution_b).

  If there is no KL method registered specifically for `type(distribution_a)`
  and `type(distribution_b)`, then the class hierarchies of these types are
  searched.

  If one KL method is registered between any pairs of classes in these two
  parent hierarchies, it is used.

  If more than one such registered method exists, the method whose registered
  classes have the shortest sum MRO paths to the input types is used.

  If more than one such shortest path exists, the first method
  identified in the search is used (favoring a shorter MRO distance to
  `type(distribution_a)`).

  Args:
    distribution_a: The first distribution.
    distribution_b: The second distribution.
    allow_nan_stats: Python `bool`, default `True`. When `True`,
      statistics (e.g., mean, mode, variance) use the value "`NaN`" to
      indicate the result is undefined. When `False`, an exception is raised
      if one or more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this class.

  Returns:
    A Tensor with the batchwise KL-divergence between `distribution_a`
    and `distribution_b`.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of `distribution_a` and `distribution_b`.
  """
  kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(distribution_a || distribution_b) registered for distribution_a "
        "type %s and distribution_b type %s"
        % (type(distribution_a).__name__, type(distribution_b).__name__))

  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(distribution_a, distribution_b, name=name)
    if allow_nan_stats:
      return kl_t

    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")

    with ops.control_dependencies([
        control_flow_assert.Assert(
            math_ops.logical_not(math_ops.reduce_any(math_ops.is_nan(kl_t))), [
                "KL calculation between %s and %s returned NaN values "
                "(and was called with allow_nan_stats=False). Values:" %
                (distribution_a.name, distribution_b.name), kl_t
            ])
    ]):
      return array_ops.identity(kl_t, name="checked_kl")


@deprecation.deprecated(
    "2019-01-01",
    "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.distributions`.",
    warn_once=True)
def cross_entropy(ref, other,
                  allow_nan_stats=True, name=None):
  """Computes the (Shannon) cross entropy.

  Denote two distributions by `P` (`ref`) and `Q` (`other`). Assuming `P, Q`
  are absolutely continuous with respect to one another and permit densities
  `p(x) dr(x)` and `q(x) dr(x)`, (Shannon) cross entropy is defined as:

  ```none
  H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
  ```

  where `F` denotes the support of the random variable `X ~ P`.

  Args:
    ref: `tfd.Distribution` instance.
    other: `tfd.Distribution` instance.
    allow_nan_stats: Python `bool`, default `True`. When `True`,
      statistics (e.g., mean, mode, variance) use the value "`NaN`" to
      indicate the result is undefined. When `False`, an exception is raised
      if one or more of the statistic's batch members are undefined.
    name: Python `str` prepended to names of ops created by this function.

  Returns:
    cross_entropy: `ref.dtype` `Tensor` with shape `[B1, ..., Bn]`
      representing `n` different calculations of (Shannon) cross entropy.
  """
  with ops.name_scope(name, "cross_entropy"):
    # H[P, Q] = H[P] + KL(P || Q).
    return ref.entropy() + kl_divergence(
        ref, other, allow_nan_stats=allow_nan_stats)


@tf_export(v1=["distributions.RegisterKL"])
class RegisterKL:
  """Decorator to register a KL divergence implementation function.

  Usage:

  @distributions.RegisterKL(distributions.Normal, distributions.Normal)
  def _kl_normal_mvn(norm_a, norm_b):
    # Return KL(norm_a || norm_b)
  """

  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self, dist_cls_a, dist_cls_b):
    """Initialize the KL registrar.

    Args:
      dist_cls_a: the class of the first argument of the KL divergence.
      dist_cls_b: the class of the second argument of the KL divergence.
    """
    self._key = (dist_cls_a, dist_cls_b)

  def __call__(self, kl_fn):
    """Perform the KL registration.

    Args:
      kl_fn: The function to use for the KL divergence.

    Returns:
      kl_fn

    Raises:
      TypeError: if kl_fn is not a callable.
      ValueError: if a KL divergence function has already been registered for
        the given argument classes.
    """
    if not callable(kl_fn):
      raise TypeError("kl_fn must be callable, received: %s" % kl_fn)
    if self._key in _DIVERGENCES:
      raise ValueError("KL(%s || %s) has already been registered to: %s"
                       % (self._key[0].__name__, self._key[1].__name__,
                          _DIVERGENCES[self._key]))
    _DIVERGENCES[self._key] = kl_fn
    return kl_fn
# ==============================================================================
"""The Laplace distribution class."""

import math

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


__all__ = [
    "Laplace",
    "LaplaceWithSoftplusScale",
]


@tf_export(v1=["distributions.Laplace"])
class Laplace(distribution.Distribution):
  """The Laplace distribution with location `loc` and `scale` parameters.

  #### Mathematical details

  The probability density function (pdf) of this distribution is,

  ```none
  pdf(x; mu, sigma) = exp(-|x - mu| / sigma) / Z
  Z = 2 sigma
  ```

  where `loc = mu`, `scale = sigma`, and `Z` is the normalization constant.

  Note that the Laplace distribution can be thought of as two exponential
  distributions spliced together "back-to-back."

  The Laplace distribution is a member of the [location-scale family](
  https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,

  ```none
  X ~ Laplace(loc=0, scale=1)
  Y = loc + scale * X
  ```

  """

  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="Laplace"):
    """Construct Laplace distribution with parameters `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g., `loc / scale` is a valid operation).

    Args:
      loc: Floating point tensor which characterizes the location (center)
        of the distribution.
      scale: Positive floating point tensor which characterizes the spread of
        the distribution.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `loc` and `scale` are of different dtype.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[loc, scale]) as name:
      # Only pay for the positivity assertion when validation is requested.
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        check_ops.assert_same_float_dtype([self._loc, self._scale])
      super(Laplace, self).__init__(
          dtype=self._loc.dtype,
          reparameterization_type=distribution.FULLY_REPARAMETERIZED,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=[self._loc, self._scale],
          name=name)

  @staticmethod
  def _param_shapes(sample_shape):
    return dict(
        zip(("loc", "scale"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def loc(self):
    """Distribution parameter for the location."""
    return self._loc

  @property
  def scale(self):
    """Distribution parameter for scale."""
    return self._scale

  def _batch_shape_tensor(self):
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.loc), array_ops.shape(self.scale))

  def _batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.loc.get_shape(), self.scale.get_shape())

  def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.TensorShape([])

  def _sample_n(self, n, seed=None):
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    # Uniform variates must be sampled from the open-interval `(-1, 1)` rather
    # than `[-1, 1)`. In the case of `(0, 1)` we'd use
    # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
    # positive, "normal" number. However, the concept of subnormality exists
    # only at zero; here we need the smallest usable number larger than -1,
    # i.e., `-1 + eps/2`.
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    # Inverse-CDF sampling: sign(u) * log1p(-|u|) maps the open uniform
    # interval onto Laplace(0, 1), which is then shifted/scaled.
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log1p(-math_ops.abs(uniform_samples)))

  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _log_cdf(self, x):
    return special_math.log_cdf_laplace(self._z(x))

  def _log_survival_function(self, x):
    return special_math.log_cdf_laplace(-self._z(x))

  def _cdf(self, x):
    z = self._z(x)
    return (0.5 + 0.5 * math_ops.sign(z) *
            (1. - math_ops.exp(-math_ops.abs(z))))

  def _log_unnormalized_prob(self, x):
    return -math_ops.abs(self._z(x))

  def _log_normalization(self):
    return math.log(2.) + math_ops.log(self.scale)

  def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast scale.
    scale = self.scale + array_ops.zeros_like(self.loc)
    return math.log(2.) + 1. + math_ops.log(scale)

  def _mean(self):
    return self.loc + array_ops.zeros_like(self.scale)

  def _stddev(self):
    return math.sqrt(2.) * self.scale + array_ops.zeros_like(self.loc)

  def _median(self):
    return self._mean()

  def _mode(self):
    return self._mean()

  def _z(self, x):
    """Standardize `x` with respect to `loc` and `scale`."""
    return (x - self.loc) / self.scale


class LaplaceWithSoftplusScale(Laplace):
  """Laplace with softplus applied to `scale`."""

  @deprecation.deprecated(
      "2019-01-01",
      "Use `tfd.Laplace(loc, tf.nn.softplus(scale)) "
      "instead.",
      warn_once=True)
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="LaplaceWithSoftplusScale"):
    parameters = dict(locals())
    with ops.name_scope(name, values=[loc, scale]) as name:
      super(LaplaceWithSoftplusScale, self).__init__(
          loc=loc,
          scale=nn.softplus(scale, name="softplus_scale"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters
# ==============================================================================
"""The Multinomial distribution class."""

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


__all__ = [
    "Multinomial",
]


_multinomial_sample_note = """For each batch of counts, `value = [n_0, ...
,n_{k-1}]`, `P[value]` is the probability that after sampling `self.total_count`
draws from this Multinomial distribution, the number of draws falling in class
`j` is `n_j`. Since this definition is [exchangeable](
https://en.wikipedia.org/wiki/Exchangeable_random_variables); different
sequences have the same counts so the probability includes a combinatorial
coefficient.

Note: `value` must be a non-negative tensor with dtype `self.dtype`, have no
fractional components, and such that
`tf.reduce_sum(value, -1) = self.total_count`. Its shape must be broadcastable
with `self.probs` and `self.total_count`."""


@tf_export(v1=["distributions.Multinomial"])
class Multinomial(distribution.Distribution):
  """Multinomial distribution.

  This Multinomial distribution is parameterized by `probs`, a (batch of)
  length-`K` `prob` (probability) vectors (`K > 1`) such that
  `tf.reduce_sum(probs, -1) = 1`, and a `total_count` number of trials, i.e.,
  the number of trials per draw from the Multinomial. It is defined over a
  (batch of) length-`K` vector `counts` such that
  `tf.reduce_sum(counts, -1) = total_count`. The Multinomial is identically the
  Binomial distribution when `K = 2`.

  #### Mathematical Details

  The Multinomial is a distribution over `K`-class counts, i.e., a length-`K`
  vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`.

  The probability mass function (pmf) is,

  ```none
  pmf(n; pi, N) = prod_j (pi_j)**n_j / Z
  Z = (prod_j n_j!) / N!
  ```

  where:
  * `probs = pi = [pi_0, ..., pi_{K-1}]`, `pi_j > 0`, `sum_j pi_j = 1`,
  * `total_count = N`, `N` a positive integer,
  * `Z` is the normalization constant, and,
  * `N!` denotes `N` factorial.

  Distribution parameters are automatically broadcast in all functions; see
  examples for details.

  #### Pitfalls

  The number of classes, `K`, must not exceed:
  - the largest integer representable by `self.dtype`, i.e.,
    `2**(mantissa_bits+1)` (IEEE754),
  - the maximum `Tensor` index, i.e., `2**31-1`.

  In other words,

  ```python
  K <= min(2**31-1, {
    tf.float16: 2**11,
    tf.float32: 2**24,
    tf.float64: 2**53 }[param.dtype])
  ```

  Note: This condition is validated only when `self.validate_args = True`.

  #### Examples

  Create a 3-class distribution, with the 3rd class most likely to be drawn,
  using logits.

  ```python
  logits = [-50., -43, 0]
  dist = Multinomial(total_count=4., logits=logits)
  ```

  Create a 3-class distribution, with the 3rd class most likely to be drawn.

  ```python
  p = [.2, .3, .5]
  dist = Multinomial(total_count=4., probs=p)
  ```

  The distribution functions can be evaluated on counts.

  ```python
  # counts same shape as p.
  counts = [1., 0, 3]
  dist.prob(counts)  # Shape []

  # p will be broadcast to [[.2, .3, .5], [.2, .3, .5]] to match counts.
  counts = [[1., 2, 1], [2, 2, 0]]
  dist.prob(counts)  # Shape [2]

  # p will be broadcast to shape [5, 7, 3] to match counts.
  counts = [[...]]  # Shape [5, 7, 3]
  dist.prob(counts)  # Shape [5, 7]
  ```

  Create a 2-batch of 3-class distributions.

  ```python
  p = [[.1, .2, .7], [.3, .3, .4]]  # Shape [2, 3]
  dist = Multinomial(total_count=[4., 5], probs=p)

  counts = [[2., 1, 1], [3, 1, 1]]
  dist.prob(counts)  # Shape [2]

  dist.sample(5)  # Shape [5, 2, 3]
  ```
  """

  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self,
               total_count,
               logits=None,
               probs=None,
               validate_args=False,
               allow_nan_stats=True,
               name="Multinomial"):
    """Initialize a batch of Multinomial distributions.

    Args:
      total_count: Non-negative floating point tensor with shape broadcastable
        to `[N1,..., Nm]` with `m >= 0`. Defines this as a batch of
        `N1 x ... x Nm` different Multinomial distributions. Its components
        should be equal to integer values.
      logits: Floating point tensor representing unnormalized log-probabilities
        of a positive event with shape broadcastable to
        `[N1,..., Nm, K]` `m >= 0`, and the same dtype as `total_count`. Defines
        this as a batch of `N1 x ... x Nm` different `K` class Multinomial
        distributions. Only one of `logits` or `probs` should be passed in.
      probs: Positive floating point tensor with shape broadcastable to
        `[N1,..., Nm, K]` `m >= 0` and same dtype as `total_count`. Defines
        this as a batch of `N1 x ... x Nm` different `K` class Multinomial
        distributions. `probs`'s components in the last portion of its shape
        should sum to `1`. Only one of `logits` or `probs` should be passed in.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[total_count, logits, probs]) as name:
      self._total_count = ops.convert_to_tensor(total_count, name="total_count")
      if validate_args:
        self._total_count = (
            distribution_util.embed_check_nonnegative_integer_form(
                self._total_count))
      self._logits, self._probs = distribution_util.get_logits_and_probs(
          logits=logits,
          probs=probs,
          multidimensional=True,
          validate_args=validate_args,
          name=name)
      # Cached mean, also used to derive batch/event shapes below.
      self._mean_val = self._total_count[..., array_ops.newaxis] * self._probs
      super(Multinomial, self).__init__(
          dtype=self._probs.dtype,
          reparameterization_type=distribution.NOT_REPARAMETERIZED,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=[self._total_count,
                         self._logits,
                         self._probs],
          name=name)

  @property
  def total_count(self):
    """Number of trials used to construct a sample."""
    return self._total_count

  @property
  def logits(self):
    """Vector of coordinatewise logits."""
    return self._logits

  @property
  def probs(self):
    """Probability of drawing a `1` in that coordinate."""
    return self._probs

  def _batch_shape_tensor(self):
    return array_ops.shape(self._mean_val)[:-1]

  def _batch_shape(self):
    return self._mean_val.get_shape().with_rank_at_least(1)[:-1]

  def _event_shape_tensor(self):
    return array_ops.shape(self._mean_val)[-1:]

  def _event_shape(self):
    return self._mean_val.get_shape().with_rank_at_least(1)[-1:]

  def _sample_n(self, n, seed=None):
    n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
    k = self.event_shape_tensor()[0]

    # broadcast the total_count and logits to same shape
    n_draws = array_ops.ones_like(
        self.logits[..., 0], dtype=n_draws.dtype) * n_draws
    logits = array_ops.ones_like(
        n_draws[..., array_ops.newaxis], dtype=self.logits.dtype) * self.logits

    # flatten the total_count and logits
    flat_logits = array_ops.reshape(logits, [-1, k])  # [B1B2...Bm, k]
    flat_ndraws = n * array_ops.reshape(n_draws, [-1])  # [B1B2...Bm]

    # computes each total_count and logits situation by map_fn
    def _sample_single(args):
      logits, n_draw = args[0], args[1]  # [K], []
      x = random_ops.multinomial(logits[array_ops.newaxis, ...], n_draw,
                                 seed)  # [1, n*n_draw]
      x = array_ops.reshape(x, shape=[n, -1])  # [n, n_draw]
      x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k), axis=-2)  # [n, k]
      return x

    x = map_fn.map_fn(
        _sample_single, [flat_logits, flat_ndraws],
        dtype=self.dtype)  # [B1B2...Bm, n, k]

    # reshape the results to proper shape
    x = array_ops.transpose(x, perm=[1, 0, 2])
    final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
    x = array_ops.reshape(x, final_shape)  # [n, B1, B2,..., Bm, k]
    return x

  @distribution_util.AppendDocstring(_multinomial_sample_note)
  def _log_prob(self, counts):
    return self._log_unnormalized_prob(counts) - self._log_normalization(counts)

  def _log_unnormalized_prob(self, counts):
    counts = self._maybe_assert_valid_sample(counts)
    return math_ops.reduce_sum(counts * nn_ops.log_softmax(self.logits), -1)

  def _log_normalization(self, counts):
    counts = self._maybe_assert_valid_sample(counts)
    return -distribution_util.log_combinations(self.total_count, counts)

  def _mean(self):
    return array_ops.identity(self._mean_val)

  def _covariance(self):
    p = self.probs * array_ops.ones_like(
        self.total_count)[..., array_ops.newaxis]
    # pylint: disable=invalid-unary-operand-type
    return array_ops.matrix_set_diag(
        -math_ops.matmul(
            self._mean_val[..., array_ops.newaxis],
            p[..., array_ops.newaxis, :]),  # outer product
        self._variance())

  def _variance(self):
    p = self.probs * array_ops.ones_like(
        self.total_count)[..., array_ops.newaxis]
    return self._mean_val - self._mean_val * p

  def _maybe_assert_valid_sample(self, counts):
    """Check counts for proper shape, values, then return tensor version."""
    if not self.validate_args:
      return counts
    counts = distribution_util.embed_check_nonnegative_integer_form(counts)
    return control_flow_ops.with_dependencies([
        check_ops.assert_equal(
            self.total_count, math_ops.reduce_sum(counts, -1),
            message="counts must sum to `self.total_count`"),
    ], counts)
# ==============================================================================
"""The Normal (Gaussian) distribution class."""

import math

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


__all__ = [
    "Normal",
    "NormalWithSoftplusScale",
]


@tf_export(v1=["distributions.Normal"])
class Normal(distribution.Distribution):
  """The Normal distribution with location `loc` and `scale` parameters.

  #### Mathematical details

  The probability density function (pdf) is,

  ```none
  pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z
  Z = (2 pi sigma**2)**0.5
  ```

  where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and, `Z`
  is the normalization constant.

  The Normal distribution is a member of the [location-scale family](
  https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,

  ```none
  X ~ Normal(loc=0, scale=1)
  Y = loc + scale * X
  ```

  #### Examples

  Examples of initialization of one or a batch of distributions.

  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions

  # Define a single scalar Normal distribution.
  dist = tfd.Normal(loc=0., scale=3.)

  # Evaluate the cdf at 1, returning a scalar.
  dist.cdf(1.)

  # Define a batch of two scalar valued Normals.
  # The first has mean 1 and standard deviation 11, the second 2 and 22.
  dist = tfd.Normal(loc=[1, 2.], scale=[11, 22.])

  # Evaluate the pdf of the first distribution on 0, and the second on 1.5,
  # returning a length two tensor.
  dist.prob([0, 1.5])

  # Get 3 samples, returning a 3 x 2 tensor.
  dist.sample([3])
  ```

  Arguments are broadcast when possible.

  ```python
  # Define a batch of two scalar valued Normals.
  # Both have mean 1, but different standard deviations.
  dist = tfd.Normal(loc=1., scale=[11, 22.])

  # Evaluate the pdf of both distributions on the same point, 3.0,
  # returning a length 2 tensor.
  dist.prob(3.0)
  ```

  """

  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="Normal"):
    """Construct Normal distributions with mean and stddev `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g. `loc + scale` is a valid operation).

    Args:
      loc: Floating point tensor; the means of the distribution(s).
      scale: Floating point tensor; the stddevs of the distribution(s).
        Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `loc` and `scale` have different `dtype`.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[loc, scale]) as name:
      # Only pay for the positivity assertion when validation is requested.
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        check_ops.assert_same_float_dtype([self._loc, self._scale])
      super(Normal, self).__init__(
          dtype=self._scale.dtype,
          reparameterization_type=distribution.FULLY_REPARAMETERIZED,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=[self._loc, self._scale],
          name=name)

  @staticmethod
  def _param_shapes(sample_shape):
    return dict(
        zip(("loc", "scale"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def loc(self):
    """Distribution parameter for the mean."""
    return self._loc

  @property
  def scale(self):
    """Distribution parameter for standard deviation."""
    return self._scale

  def _batch_shape_tensor(self):
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.loc),
        array_ops.shape(self.scale))

  def _batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.loc.get_shape(),
        self.scale.get_shape())

  def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.TensorShape([])

  def _sample_n(self, n, seed=None):
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    # Reparameterized sampling: standard normal, then shift and scale.
    sampled = random_ops.random_normal(
        shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed)
    return sampled * self.scale + self.loc

  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()

  def _log_cdf(self, x):
    return special_math.log_ndtr(self._z(x))

  def _cdf(self, x):
    return special_math.ndtr(self._z(x))

  def _log_survival_function(self, x):
    return special_math.log_ndtr(-self._z(x))

  def _survival_function(self, x):
    return special_math.ndtr(-self._z(x))

  def _log_unnormalized_prob(self, x):
    return -0.5 * math_ops.square(self._z(x))

  def _log_normalization(self):
    return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale)

  def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast scale.
    scale = self.scale * array_ops.ones_like(self.loc)
    return 0.5 * math.log(2. * math.pi * math.e) + math_ops.log(scale)

  def _mean(self):
    return self.loc * array_ops.ones_like(self.scale)

  def _quantile(self, p):
    return self._inv_z(special_math.ndtri(p))

  def _stddev(self):
    return self.scale * array_ops.ones_like(self.loc)

  def _mode(self):
    return self._mean()

  def _z(self, x):
    """Standardize input `x` to a unit normal."""
    with ops.name_scope("standardize", values=[x]):
      return (x - self.loc) / self.scale

  def _inv_z(self, z):
    """Reconstruct input `x` from its normalized version."""
    with ops.name_scope("reconstruct", values=[z]):
      return z * self.scale + self.loc


class NormalWithSoftplusScale(Normal):
  """Normal with softplus applied to `scale`."""

  @deprecation.deprecated(
      "2019-01-01",
      "Use `tfd.Normal(loc, tf.nn.softplus(scale)) "
      "instead.",
      warn_once=True)
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="NormalWithSoftplusScale"):
    parameters = dict(locals())
    with ops.name_scope(name, values=[scale]) as name:
      super(NormalWithSoftplusScale, self).__init__(
          loc=loc,
          scale=nn.softplus(scale, name="softplus_scale"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters


@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
  """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.

  Args:
    n_a: instance of a Normal distribution object.
    n_b: instance of a Normal distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_normal_normal".

  Returns:
    Batchwise KL(n_a || n_b)
  """
  with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]):
    one = constant_op.constant(1, dtype=n_a.dtype)
    two = constant_op.constant(2, dtype=n_a.dtype)
    half = constant_op.constant(0.5, dtype=n_a.dtype)
    s_a_squared = math_ops.square(n_a.scale)
    s_b_squared = math_ops.square(n_b.scale)
    ratio = s_a_squared / s_b_squared
    # Closed-form KL between two univariate Gaussians.
    return (math_ops.squared_difference(n_a.loc, n_b.loc) / (two * s_b_squared)
            + half * (ratio - one - math_ops.log(ratio)))
+# ============================================================================== + +# Functions "ndtr" and "ndtri" are derived from calculations made in: +# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html +# In the following email exchange, the author gives his consent to redistribute +# derived works under an Apache 2.0 license. +# +# From: Stephen Moshier +# Date: Sat, Jun 9, 2018 at 2:36 PM +# Subject: Re: Licensing cephes under Apache (BSD-like) license. +# To: rif +# +# +# +# Hello Rif, +# +# Yes, Google may distribute Cephes files under the Apache 2 license. +# +# If clarification is needed, I do not favor BSD over other free licenses. +# I would agree that Apache 2 seems to cover the concern you mentioned +# about sublicensees. +# +# Best wishes for good luck with your projects! +# Steve Moshier +# +# +# +# On Thu, 31 May 2018, rif wrote: +# +# > Hello Steve. +# > My name is Rif. I work on machine learning software at Google. +# > +# > Your cephes software continues to be incredibly useful and widely used. I +# > was wondering whether it would be permissible for us to use the Cephes code +# > under the Apache 2.0 license, which is extremely similar in permissions to +# > the BSD license (Wikipedia comparisons). This would be quite helpful to us +# > in terms of avoiding multiple licenses on software. +# > +# > I'm sorry to bother you with this (I can imagine you're sick of hearing +# > about this by now), but I want to be absolutely clear we're on the level and +# > not misusing your important software. In former conversation with Eugene +# > Brevdo (ebrevdo@google.com), you wrote "If your licensing is similar to BSD, +# > the formal way that has been handled is simply to add a statement to the +# > effect that you are incorporating the Cephes software by permission of the +# > author." 
I wanted to confirm that (a) we could use the Apache license, (b) +# > that we don't need to (and probably you don't want to) keep getting +# > contacted about individual uses, because your intent is generally to allow +# > this software to be reused under "BSD-like" license, and (c) you're OK +# > letting incorporators decide whether a license is sufficiently BSD-like? +# > +# > Best, +# > +# > rif +# > +# > +# > + +"""Special Math Ops.""" + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops + +__all__ = [ + "erfinv", + "ndtr", + "ndtri", + "log_ndtr", + "log_cdf_laplace", +] + + +# log_ndtr uses different functions over the ranges +# (-infty, lower](lower, upper](upper, infty) +# Lower bound values were chosen by examining where the support of ndtr +# appears to be zero, relative to scipy's (which is always 64bit). They were +# then made more conservative just to be safe. (Conservative means use the +# expansion more than we probably need to.) See `NdtrTest` in +# special_math_test.py. +LOGNDTR_FLOAT64_LOWER = np.array(-20, np.float64) +LOGNDTR_FLOAT32_LOWER = np.array(-10, np.float32) + +# Upper bound values were chosen by examining for which values of 'x' +# Log[cdf(x)] is 0, after which point we need to use the approximation +# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly +# conservative, meaning we use the approximation earlier than needed. +LOGNDTR_FLOAT64_UPPER = np.array(8, np.float64) +LOGNDTR_FLOAT32_UPPER = np.array(5, np.float32) + + +def ndtr(x, name="ndtr"): + """Normal distribution function. 
+ + Returns the area under the Gaussian probability density function, integrated + from minus infinity to x: + + ``` + 1 / x + ndtr(x) = ---------- | exp(-0.5 t**2) dt + sqrt(2 pi) /-inf + + = 0.5 (1 + erf(x / sqrt(2))) + = 0.5 erfc(x / sqrt(2)) + ``` + + Args: + x: `Tensor` of type `float32`, `float64`. + name: Python string. A name for the operation (default="ndtr"). + + Returns: + ndtr: `Tensor` with `dtype=x.dtype`. + + Raises: + TypeError: if `x` is not floating-type. + """ + + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + if x.dtype.as_numpy_dtype not in [np.float32, np.float64]: + raise TypeError( + "x.dtype=%s is not handled, see docstring for supported types." + % x.dtype) + return _ndtr(x) + + +def _ndtr(x): + """Implements ndtr core logic.""" + half_sqrt_2 = constant_op.constant( + 0.5 * np.sqrt(2.), dtype=x.dtype, name="half_sqrt_2") + w = x * half_sqrt_2 + z = math_ops.abs(w) + y = array_ops.where_v2( + math_ops.less(z, half_sqrt_2), 1. + math_ops.erf(w), + array_ops.where_v2( + math_ops.greater(w, 0.), 2. - math_ops.erfc(z), math_ops.erfc(z))) + return 0.5 * y + + +def ndtri(p, name="ndtri"): + """The inverse of the CDF of the Normal distribution function. + + Returns x such that the area under the pdf from minus infinity to x is equal + to p. + + A piece-wise rational approximation is done for the function. + This is a port of the implementation in netlib. + + Args: + p: `Tensor` of type `float32`, `float64`. + name: Python string. A name for the operation (default="ndtri"). + + Returns: + x: `Tensor` with `dtype=p.dtype`. + + Raises: + TypeError: if `p` is not floating-type. + """ + + with ops.name_scope(name, values=[p]): + p = ops.convert_to_tensor(p, name="p") + if p.dtype.as_numpy_dtype not in [np.float32, np.float64]: + raise TypeError( + "p.dtype=%s is not handled, see docstring for supported types." 
+ % p.dtype) + return _ndtri(p) + + +def _ndtri(p): + """Implements ndtri core logic.""" + + # Constants used in piece-wise rational approximations. Taken from the cephes + # library: + # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html + + p0 = [ + -1.23916583867381258016E0, 1.39312609387279679503E1, + -5.66762857469070293439E1, 9.80010754185999661536E1, + -5.99633501014107895267E1 + ] + q0 = [ + -1.18331621121330003142E0, 1.59056225126211695515E1, + -8.20372256168333339912E1, 2.00260212380060660359E2, + -2.25462687854119370527E2, 8.63602421390890590575E1, + 4.67627912898881538453E0, 1.95448858338141759834E0, 1.0 + ] + p1 = [ + -8.57456785154685413611E-4, -3.50424626827848203418E-2, + -1.40256079171354495875E-1, 2.18663306850790267539E0, + 1.46849561928858024014E1, 4.40805073893200834700E1, + 5.71628192246421288162E1, 3.15251094599893866154E1, + 4.05544892305962419923E0 + ] + q1 = [ + -9.33259480895457427372E-4, -3.80806407691578277194E-2, + -1.42182922854787788574E-1, 2.50464946208309415979E0, + 1.50425385692907503408E1, 4.13172038254672030440E1, + 4.53907635128879210584E1, 1.57799883256466749731E1, 1.0 + ] + p2 = [ + 6.23974539184983293730E-9, 2.65806974686737550832E-6, + 3.01581553508235416007E-4, 1.23716634817820021358E-2, + 2.01485389549179081538E-1, 1.33303460815807542389E0, + 3.93881025292474443415E0, 6.91522889068984211695E0, + 3.23774891776946035970E0 + ] + q2 = [ + 6.79019408009981274425E-9, 2.89247864745380683936E-6, + 3.28014464682127739104E-4, 1.34204006088543189037E-2, + 2.16236993594496635890E-1, 1.37702099489081330271E0, + 3.67983563856160859403E0, 6.02427039364742014255E0, 1.0 + ] + + def _create_polynomial(var, coeffs): + """Compute n_th order polynomial via Horner's method.""" + coeffs = np.array(coeffs, var.dtype.as_numpy_dtype) + if not coeffs.size: + return array_ops.zeros_like(var) + return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var + + maybe_complement_p = array_ops.where_v2(p > -np.expm1(-2.), 1. 
- p, p) + # Write in an arbitrary value in place of 0 for p since 0 will cause NaNs + # later on. The result from the computation when p == 0 is not used so any + # number that doesn't result in NaNs is fine. + sanitized_mcp = array_ops.where_v2( + maybe_complement_p <= 0., + array_ops.fill(array_ops.shape(p), np.array(0.5, p.dtype.as_numpy_dtype)), + maybe_complement_p) + + # Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2). + w = sanitized_mcp - 0.5 + ww = w ** 2 + x_for_big_p = w + w * ww * (_create_polynomial(ww, p0) + / _create_polynomial(ww, q0)) + x_for_big_p *= -np.sqrt(2. * np.pi) + + # Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z), + # where z = sqrt(-2. * log(p)), and P/Q are chosen between two different + # arrays based on whether p < exp(-32). + z = math_ops.sqrt(-2. * math_ops.log(sanitized_mcp)) + first_term = z - math_ops.log(z) / z + second_term_small_p = ( + _create_polynomial(1. / z, p2) / + _create_polynomial(1. / z, q2) / z) + second_term_otherwise = ( + _create_polynomial(1. / z, p1) / + _create_polynomial(1. / z, q1) / z) + x_for_small_p = first_term - second_term_small_p + x_otherwise = first_term - second_term_otherwise + + x = array_ops.where_v2( + sanitized_mcp > np.exp(-2.), x_for_big_p, + array_ops.where_v2(z >= 8.0, x_for_small_p, x_otherwise)) + + x = array_ops.where_v2(p > 1. - np.exp(-2.), x, -x) + infinity_scalar = constant_op.constant(np.inf, dtype=p.dtype) + infinity = array_ops.fill(array_ops.shape(p), infinity_scalar) + x_nan_replaced = array_ops.where_v2(p <= 0.0, -infinity, + array_ops.where_v2(p >= 1.0, infinity, x)) + return x_nan_replaced + + +def log_ndtr(x, series_order=3, name="log_ndtr"): + """Log Normal distribution function. + + For details of the Normal distribution function see `ndtr`. + + This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or + using an asymptotic series. 
Specifically: + - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on + `log(1-x) ~= -x, x << 1`. + - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique + and take a log. + - For `x <= lower_segment`, we use the series approximation of erf to compute + the log CDF directly. + + The `lower_segment` is set based on the precision of the input: + + ``` + lower_segment = { -20, x.dtype=float64 + { -10, x.dtype=float32 + upper_segment = { 8, x.dtype=float64 + { 5, x.dtype=float32 + ``` + + When `x < lower_segment`, the `ndtr` asymptotic series approximation is: + + ``` + ndtr(x) = scale * (1 + sum) + R_N + scale = exp(-0.5 x**2) / (-x sqrt(2 pi)) + sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N} + R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3}) + ``` + + where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a + [double-factorial](https://en.wikipedia.org/wiki/Double_factorial). + + + Args: + x: `Tensor` of type `float32`, `float64`. + series_order: Positive Python `integer`. Maximum depth to + evaluate the asymptotic expansion. This is the `N` above. + name: Python string. A name for the operation (default="log_ndtr"). + + Returns: + log_ndtr: `Tensor` with `dtype=x.dtype`. + + Raises: + TypeError: if `x.dtype` is not handled. + TypeError: if `series_order` is a not Python `integer.` + ValueError: if `series_order` is not in `[0, 30]`. 
+ """ + if not isinstance(series_order, int): + raise TypeError("series_order must be a Python integer.") + if series_order < 0: + raise ValueError("series_order must be non-negative.") + if series_order > 30: + raise ValueError("series_order must be <= 30.") + + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + + if x.dtype.as_numpy_dtype == np.float64: + lower_segment = LOGNDTR_FLOAT64_LOWER + upper_segment = LOGNDTR_FLOAT64_UPPER + elif x.dtype.as_numpy_dtype == np.float32: + lower_segment = LOGNDTR_FLOAT32_LOWER + upper_segment = LOGNDTR_FLOAT32_UPPER + else: + raise TypeError("x.dtype=%s is not supported." % x.dtype) + + # The basic idea here was ported from: + # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html + # We copy the main idea, with a few changes + # * For x >> 1, and X ~ Normal(0, 1), + # Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x], + # which extends the range of validity of this function. + # * We use one fixed series_order for all of 'x', rather than adaptive. + # * Our docstring properly reflects that this is an asymptotic series, not a + # Taylor series. We also provided a correct bound on the remainder. + # * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when + # x=0. This happens even though the branch is unchosen because when x=0 + # the gradient of a select involves the calculation 1*dy+0*(-inf)=nan + # regardless of whether dy is finite. Note that the minimum is a NOP if + # the branch is chosen. 
+ return array_ops.where_v2( + math_ops.greater(x, upper_segment), + -_ndtr(-x), # log(1-x) ~= -x, x << 1 # pylint: disable=invalid-unary-operand-type + array_ops.where_v2( + math_ops.greater(x, lower_segment), + math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))), + _log_ndtr_lower(math_ops.minimum(x, lower_segment), series_order))) + + +def _log_ndtr_lower(x, series_order): + """Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`.""" + x_2 = math_ops.square(x) + # Log of the term multiplying (1 + sum) + log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * np.log(2. * np.pi) + return log_scale + math_ops.log(_log_ndtr_asymptotic_series(x, series_order)) + + +def _log_ndtr_asymptotic_series(x, series_order): + """Calculates the asymptotic series used in log_ndtr.""" + dtype = x.dtype.as_numpy_dtype + if series_order <= 0: + return np.array(1, dtype) + x_2 = math_ops.square(x) + even_sum = array_ops.zeros_like(x) + odd_sum = array_ops.zeros_like(x) + x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1. + for n in range(1, series_order + 1): + y = np.array(_double_factorial(2 * n - 1), dtype) / x_2n + if n % 2: + odd_sum += y + else: + even_sum += y + x_2n *= x_2 + return 1. + even_sum - odd_sum + + +def erfinv(x, name="erfinv"): + """The inverse function for erf, the error function. + + Args: + x: `Tensor` of type `float32`, `float64`. + name: Python string. A name for the operation (default="erfinv"). + + Returns: + x: `Tensor` with `dtype=x.dtype`. + + Raises: + TypeError: if `x` is not floating-type. + """ + + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + if x.dtype.as_numpy_dtype not in [np.float32, np.float64]: + raise TypeError( + "x.dtype=%s is not handled, see docstring for supported types." 
+ % x.dtype) + return ndtri((x + 1.0) / 2.0) / np.sqrt(2) + + +def _double_factorial(n): + """The double factorial function for small Python integer `n`.""" + return np.prod(np.arange(n, 1, -2)) + + +def log_cdf_laplace(x, name="log_cdf_laplace"): + """Log Laplace distribution function. + + This function calculates `Log[L(x)]`, where `L(x)` is the cumulative + distribution function of the Laplace distribution, i.e. + + ```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt``` + + For numerical accuracy, `L(x)` is computed in different ways depending on `x`, + + ``` + x <= 0: + Log[L(x)] = Log[0.5] + x, which is exact + + 0 < x: + Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact + ``` + + Args: + x: `Tensor` of type `float32`, `float64`. + name: Python string. A name for the operation (default="log_ndtr"). + + Returns: + `Tensor` with `dtype=x.dtype`. + + Raises: + TypeError: if `x.dtype` is not handled. + """ + + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + + # For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x. + lower_solution = -np.log(2.) + x + + # safe_exp_neg_x = exp{-x} for x > 0, but is + # bounded above by 1, which avoids + # log[1 - 1] = -inf for x = log(1/2), AND + # exp{-x} --> inf, for x << -1 + safe_exp_neg_x = math_ops.exp(-math_ops.abs(x)) + + # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used + # internally by log1p, rather than being done explicitly here. 
+ upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x) + + return array_ops.where_v2(x < 0., lower_solution, upper_solution) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/student_t.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/student_t.py new file mode 100644 index 0000000000000000000000000000000000000000..63e4f73c2b0000825f58cb4ff0c5264a59b8946d --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/student_t.py @@ -0,0 +1,391 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Student's t distribution class.""" + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import random_ops +from tensorflow.python.ops import special_math_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +__all__ = [ + "StudentT", + "StudentTWithAbsDfSoftplusScale", +] + + +@tf_export(v1=["distributions.StudentT"]) +class StudentT(distribution.Distribution): + """Student's t-distribution. + + This distribution has parameters: degree of freedom `df`, location `loc`, + and `scale`. + + #### Mathematical details + + The probability density function (pdf) is, + + ```none + pdf(x; df, mu, sigma) = (1 + y**2 / df)**(-0.5 (df + 1)) / Z + where, + y = (x - mu) / sigma + Z = abs(sigma) sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1)) + ``` + + where: + * `loc = mu`, + * `scale = sigma`, and, + * `Z` is the normalization constant, and, + * `Gamma` is the [gamma function]( + https://en.wikipedia.org/wiki/Gamma_function). + + The StudentT distribution is a member of the [location-scale family]( + https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be + constructed as, + + ```none + X ~ StudentT(df, loc=0, scale=1) + Y = loc + scale * X + ``` + + Notice that `scale` has semantics more similar to standard deviation than + variance. However it is not actually the std. 
deviation; the Student's + t-distribution std. dev. is `scale sqrt(df / (df - 2))` when `df > 2`. + + Samples of this distribution are reparameterized (pathwise differentiable). + The derivatives are computed using the approach described in + (Figurnov et al., 2018). + + #### Examples + + Examples of initialization of one or a batch of distributions. + + ```python + import tensorflow_probability as tfp + tfd = tfp.distributions + + # Define a single scalar Student t distribution. + single_dist = tfd.StudentT(df=3) + + # Evaluate the pdf at 1, returning a scalar Tensor. + single_dist.prob(1.) + + # Define a batch of two scalar valued Student t's. + # The first has degrees of freedom 2, mean 1, and scale 11. + # The second 3, 2 and 22. + multi_dist = tfd.StudentT(df=[2, 3], loc=[1, 2.], scale=[11, 22.]) + + # Evaluate the pdf of the first distribution on 0, and the second on 1.5, + # returning a length two tensor. + multi_dist.prob([0, 1.5]) + + # Get 3 samples, returning a 3 x 2 tensor. + multi_dist.sample(3) + ``` + + Arguments are broadcast when possible. + + ```python + # Define a batch of two Student's t distributions. + # Both have df 2 and mean 1, but different scales. + dist = tfd.StudentT(df=2, loc=1, scale=[11, 22.]) + + # Evaluate the pdf of both distributions on the same point, 3.0, + # returning a length 2 tensor. + dist.prob(3.0) + ``` + + Compute the gradients of samples w.r.t. 
the parameters: + + ```python + df = tf.constant(2.0) + loc = tf.constant(2.0) + scale = tf.constant(11.0) + dist = tfd.StudentT(df=df, loc=loc, scale=scale) + samples = dist.sample(5) # Shape [5] + loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function + # Unbiased stochastic gradients of the loss function + grads = tf.gradients(loss, [df, loc, scale]) + ``` + + References: + Implicit Reparameterization Gradients: + [Figurnov et al., 2018] + (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) + ([pdf](http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf)) + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + df, + loc, + scale, + validate_args=False, + allow_nan_stats=True, + name="StudentT"): + """Construct Student's t distributions. + + The distributions have degree of freedom `df`, mean `loc`, and scale + `scale`. + + The parameters `df`, `loc`, and `scale` must be shaped in a way that + supports broadcasting (e.g. `df + loc + scale` is a valid operation). + + Args: + df: Floating-point `Tensor`. The degrees of freedom of the + distribution(s). `df` must contain only positive values. + loc: Floating-point `Tensor`. The mean(s) of the distribution(s). + scale: Floating-point `Tensor`. The scaling factor(s) for the + distribution(s). Note that `scale` is not technically the standard + deviation of this distribution but has semantics more similar to + standard deviation than variance. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. 
+ allow_nan_stats: Python `bool`, default `True`. When `True`, + statistics (e.g., mean, mode, variance) use the value "`NaN`" to + indicate the result is undefined. When `False`, an exception is raised + if one or more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + + Raises: + TypeError: if loc and scale are different dtypes. + """ + parameters = dict(locals()) + with ops.name_scope(name, values=[df, loc, scale]) as name: + with ops.control_dependencies([check_ops.assert_positive(df)] + if validate_args else []): + self._df = array_ops.identity(df, name="df") + self._loc = array_ops.identity(loc, name="loc") + self._scale = array_ops.identity(scale, name="scale") + check_ops.assert_same_float_dtype( + (self._df, self._loc, self._scale)) + super(StudentT, self).__init__( + dtype=self._scale.dtype, + reparameterization_type=distribution.FULLY_REPARAMETERIZED, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + parameters=parameters, + graph_parents=[self._df, self._loc, self._scale], + name=name) + + @staticmethod + def _param_shapes(sample_shape): + return dict( + zip(("df", "loc", "scale"), ( + [ops.convert_to_tensor( + sample_shape, dtype=dtypes.int32)] * 3))) + + @property + def df(self): + """Degrees of freedom in these Student's t distribution(s).""" + return self._df + + @property + def loc(self): + """Locations of these Student's t distribution(s).""" + return self._loc + + @property + def scale(self): + """Scaling factors of these Student's t distribution(s).""" + return self._scale + + def _batch_shape_tensor(self): + return array_ops.broadcast_dynamic_shape( + array_ops.shape(self.df), + array_ops.broadcast_dynamic_shape( + array_ops.shape(self.loc), array_ops.shape(self.scale))) + + def _batch_shape(self): + return array_ops.broadcast_static_shape( + array_ops.broadcast_static_shape(self.df.get_shape(), + self.loc.get_shape()), + self.scale.get_shape()) + + def 
_event_shape_tensor(self): + return constant_op.constant([], dtype=math_ops.int32) + + def _event_shape(self): + return tensor_shape.TensorShape([]) + + def _sample_n(self, n, seed=None): + # The sampling method comes from the fact that if: + # X ~ Normal(0, 1) + # Z ~ Chi2(df) + # Y = X / sqrt(Z / df) + # then: + # Y ~ StudentT(df). + shape = array_ops.concat([[n], self.batch_shape_tensor()], 0) + normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed) + df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) + gamma_sample = random_ops.random_gamma( + [n], + 0.5 * df, + beta=0.5, + dtype=self.dtype, + seed=distribution_util.gen_new_seed(seed, salt="student_t")) + samples = normal_sample * math_ops.rsqrt(gamma_sample / df) + return samples * self.scale + self.loc # Abs(scale) not wanted. + + def _log_prob(self, x): + return self._log_unnormalized_prob(x) - self._log_normalization() + + def _log_unnormalized_prob(self, x): + y = (x - self.loc) / self.scale # Abs(scale) superfluous. + return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df) + + def _log_normalization(self): + return (math_ops.log(math_ops.abs(self.scale)) + + 0.5 * math_ops.log(self.df) + + 0.5 * np.log(np.pi) + + math_ops.lgamma(0.5 * self.df) - + math_ops.lgamma(0.5 * (self.df + 1.))) + + def _cdf(self, x): + # Take Abs(scale) to make subsequent where work correctly. + y = (x - self.loc) / math_ops.abs(self.scale) + x_t = self.df / (y**2. + self.df) + neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t) + return array_ops.where_v2(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf) + + def _entropy(self): + v = array_ops.ones(self.batch_shape_tensor(), + dtype=self.dtype)[..., array_ops.newaxis] + u = v * self.df[..., array_ops.newaxis] + beta_arg = array_ops.concat([u, v], -1) / 2. + return (math_ops.log(math_ops.abs(self.scale)) + + 0.5 * math_ops.log(self.df) + + special_math_ops.lbeta(beta_arg) + + 0.5 * (self.df + 1.) 
* + (math_ops.digamma(0.5 * (self.df + 1.)) - + math_ops.digamma(0.5 * self.df))) + + @distribution_util.AppendDocstring( + """The mean of Student's T equals `loc` if `df > 1`, otherwise it is + `NaN`. If `self.allow_nan_stats=True`, then an exception will be raised + rather than returning `NaN`.""") + def _mean(self): + mean = self.loc * array_ops.ones(self.batch_shape_tensor(), + dtype=self.dtype) + if self.allow_nan_stats: + nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype()) + return array_ops.where_v2( + math_ops.greater( + self.df, + array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)), + mean, array_ops.fill(self.batch_shape_tensor(), nan, name="nan")) + else: + return control_flow_ops.with_dependencies( + [ + check_ops.assert_less( + array_ops.ones([], dtype=self.dtype), + self.df, + message="mean not defined for components of df <= 1"), + ], + mean) + + @distribution_util.AppendDocstring(""" + The variance for Student's T equals + + ``` + df / (df - 2), when df > 2 + infinity, when 1 < df <= 2 + NaN, when df <= 1 + ``` + """) + def _variance(self): + # We need to put the tf.where inside the outer tf.where to ensure we never + # hit a NaN in the gradient. + denom = array_ops.where_v2( + math_ops.greater(self.df, 2.), self.df - 2., + array_ops.ones_like(self.df)) + # Abs(scale) superfluous. + var = (array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) * + math_ops.square(self.scale) * self.df / denom) + # When 1 < df <= 2, variance is infinite. 
+ inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype()) + result_where_defined = array_ops.where_v2( + self.df > array_ops.fill(self.batch_shape_tensor(), 2.), var, + array_ops.fill(self.batch_shape_tensor(), inf, name="inf")) + + if self.allow_nan_stats: + nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype()) + return array_ops.where_v2( + math_ops.greater( + self.df, + array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)), + result_where_defined, + array_ops.fill(self.batch_shape_tensor(), nan, name="nan")) + else: + return control_flow_ops.with_dependencies( + [ + check_ops.assert_less( + array_ops.ones([], dtype=self.dtype), + self.df, + message="variance not defined for components of df <= 1"), + ], + result_where_defined) + + def _mode(self): + return array_ops.identity(self.loc) + + +class StudentTWithAbsDfSoftplusScale(StudentT): + """StudentT with `df = floor(abs(df))` and `scale = softplus(scale)`.""" + + @deprecation.deprecated( + "2019-01-01", + "Use `tfd.StudentT(tf.floor(tf.abs(df)), loc, " + "tf.nn.softplus(scale)) instead.", + warn_once=True) + def __init__(self, + df, + loc, + scale, + validate_args=False, + allow_nan_stats=True, + name="StudentTWithAbsDfSoftplusScale"): + parameters = dict(locals()) + with ops.name_scope(name, values=[df, scale]) as name: + super(StudentTWithAbsDfSoftplusScale, self).__init__( + df=math_ops.floor(math_ops.abs(df)), + loc=loc, + scale=nn.softplus(scale, name="softplus_scale"), + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + name=name) + self._parameters = parameters diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/uniform.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/uniform.py new file mode 100644 index 0000000000000000000000000000000000000000..9b6a8874ed1f94019b96640c0d046820ee0fdb40 --- /dev/null +++ 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/uniform.py @@ -0,0 +1,204 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The Uniform distribution class.""" + +import math + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.distributions import distribution +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@tf_export(v1=["distributions.Uniform"]) +class Uniform(distribution.Distribution): + """Uniform distribution with `low` and `high` parameters. + + #### Mathematical Details + + The probability density function (pdf) is, + + ```none + pdf(x; a, b) = I[a <= x < b] / Z + Z = b - a + ``` + + where + + - `low = a`, + - `high = b`, + - `Z` is the normalizing constant, and + - `I[predicate]` is the [indicator function]( + https://en.wikipedia.org/wiki/Indicator_function) for `predicate`. 
+ + The parameters `low` and `high` must be shaped in a way that supports + broadcasting (e.g., `high - low` is a valid operation). + + #### Examples + + ```python + # Without broadcasting: + u1 = Uniform(low=3.0, high=4.0) # a single uniform distribution [3, 4] + u2 = Uniform(low=[1.0, 2.0], + high=[3.0, 4.0]) # 2 distributions [1, 3], [2, 4] + u3 = Uniform(low=[[1.0, 2.0], + [3.0, 4.0]], + high=[[1.5, 2.5], + [3.5, 4.5]]) # 4 distributions + ``` + + ```python + # With broadcasting: + u1 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) # 3 distributions + ``` + + """ + + @deprecation.deprecated( + "2019-01-01", + "The TensorFlow Distributions library has moved to " + "TensorFlow Probability " + "(https://github.com/tensorflow/probability). You " + "should update all references to use `tfp.distributions` " + "instead of `tf.distributions`.", + warn_once=True) + def __init__(self, + low=0., + high=1., + validate_args=False, + allow_nan_stats=True, + name="Uniform"): + """Initialize a batch of Uniform distributions. + + Args: + low: Floating point tensor, lower boundary of the output interval. Must + have `low < high`. + high: Floating point tensor, upper boundary of the output interval. Must + have `low < high`. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + allow_nan_stats: Python `bool`, default `True`. When `True`, statistics + (e.g., mean, mode, variance) use the value "`NaN`" to indicate the + result is undefined. When `False`, an exception is raised if one or + more of the statistic's batch members are undefined. + name: Python `str` name prefixed to Ops created by this class. + + Raises: + InvalidArgumentError: if `low >= high` and `validate_args=False`. 
+ """ + parameters = dict(locals()) + with ops.name_scope(name, values=[low, high]) as name: + with ops.control_dependencies([ + check_ops.assert_less( + low, high, message="uniform not defined when low >= high.") + ] if validate_args else []): + self._low = array_ops.identity(low, name="low") + self._high = array_ops.identity(high, name="high") + check_ops.assert_same_float_dtype([self._low, self._high]) + super(Uniform, self).__init__( + dtype=self._low.dtype, + reparameterization_type=distribution.FULLY_REPARAMETERIZED, + validate_args=validate_args, + allow_nan_stats=allow_nan_stats, + parameters=parameters, + graph_parents=[self._low, + self._high], + name=name) + + @staticmethod + def _param_shapes(sample_shape): + return dict( + zip(("low", "high"), + ([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2))) + + @property + def low(self): + """Lower boundary of the output interval.""" + return self._low + + @property + def high(self): + """Upper boundary of the output interval.""" + return self._high + + def range(self, name="range"): + """`high - low`.""" + with self._name_scope(name): + return self.high - self.low + + def _batch_shape_tensor(self): + return array_ops.broadcast_dynamic_shape( + array_ops.shape(self.low), + array_ops.shape(self.high)) + + def _batch_shape(self): + return array_ops.broadcast_static_shape( + self.low.get_shape(), + self.high.get_shape()) + + def _event_shape_tensor(self): + return constant_op.constant([], dtype=dtypes.int32) + + def _event_shape(self): + return tensor_shape.TensorShape([]) + + def _sample_n(self, n, seed=None): + shape = array_ops.concat([[n], self.batch_shape_tensor()], 0) + samples = random_ops.random_uniform(shape=shape, + dtype=self.dtype, + seed=seed) + return self.low + self.range() * samples + + def _prob(self, x): + broadcasted_x = x * array_ops.ones( + self.batch_shape_tensor(), dtype=x.dtype) + return array_ops.where_v2( + math_ops.is_nan(broadcasted_x), broadcasted_x, + array_ops.where_v2( + 
math_ops.logical_or(broadcasted_x < self.low, + broadcasted_x >= self.high), + array_ops.zeros_like(broadcasted_x), + array_ops.ones_like(broadcasted_x) / self.range())) + + def _cdf(self, x): + broadcast_shape = array_ops.broadcast_dynamic_shape( + array_ops.shape(x), self.batch_shape_tensor()) + zeros = array_ops.zeros(broadcast_shape, dtype=self.dtype) + ones = array_ops.ones(broadcast_shape, dtype=self.dtype) + broadcasted_x = x * ones + result_if_not_big = array_ops.where_v2( + x < self.low, zeros, (broadcasted_x - self.low) / self.range()) + return array_ops.where_v2(x >= self.high, ones, result_if_not_big) + + def _entropy(self): + return math_ops.log(self.range()) + + def _mean(self): + return (self.low + self.high) / 2. + + def _variance(self): + return math_ops.square(self.range()) / 12. + + def _stddev(self): + return self.range() / math.sqrt(12.) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/util.py new file mode 100644 index 0000000000000000000000000000000000000000..62f91b7003a8a092c38e19f7b7cfbe0139337fa0 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/distributions/util.py @@ -0,0 +1,1448 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utilities for probability distributions.""" + +import functools +import hashlib + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import cond as tf_cond +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.util import tf_inspect + + +def assert_integer_form(x, + data=None, + summarize=None, + message=None, + int_dtype=None, + name="assert_integer_form"): + """Assert that x has integer components (or floats equal to integers). + + Args: + x: Floating-point `Tensor` + data: The tensors to print out if the condition is `False`. Defaults to + error message and first few entries of `x` and `y`. + summarize: Print this many entries of each tensor. + message: A string to prefix to the default message. + int_dtype: A `tf.dtype` used to cast the float to. The default (`None`) + implies the smallest possible signed int will be used for casting. + name: A name for this operation (optional). + + Returns: + Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`. 
+ """ + with ops.name_scope(name, values=[x, data]): + x = ops.convert_to_tensor(x, name="x") + if x.dtype.is_integer: + return control_flow_ops.no_op() + message = message or "{} has non-integer components".format(x) + if int_dtype is None: + try: + int_dtype = { + dtypes.float16: dtypes.int16, + dtypes.float32: dtypes.int32, + dtypes.float64: dtypes.int64, + }[x.dtype.base_dtype] + except KeyError: + raise TypeError("Unrecognized type {}".format(x.dtype.name)) + return check_ops.assert_equal( + x, + math_ops.cast(math_ops.cast(x, int_dtype), x.dtype), + data=data, + summarize=summarize, + message=message, + name=name) + + +def assert_symmetric(matrix): + matrix_t = array_ops.matrix_transpose(matrix) + return control_flow_ops.with_dependencies( + [check_ops.assert_equal(matrix, matrix_t)], matrix) + + +def embed_check_nonnegative_integer_form( + x, name="embed_check_nonnegative_integer_form"): + """Assert x is a non-negative tensor, and optionally of integers.""" + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + assertions = [ + check_ops.assert_non_negative( + x, message="'{}' must be non-negative.".format(x)), + ] + if not x.dtype.is_integer: + assertions += [ + assert_integer_form( + x, + message="'{}' cannot contain fractional components.".format(x)), + ] + return control_flow_ops.with_dependencies(assertions, x) + + +def same_dynamic_shape(a, b): + """Returns whether a and b have the same dynamic shape. + + Args: + a: `Tensor` + b: `Tensor` + + Returns: + `bool` `Tensor` representing if both tensors have the same shape. + """ + a = ops.convert_to_tensor(a, name="a") + b = ops.convert_to_tensor(b, name="b") + + # Here we can't just do math_ops.equal(a.shape, b.shape), since + # static shape inference may break the equality comparison between + # shape(a) and shape(b) in math_ops.equal. 
+ def all_shapes_equal(): + return math_ops.reduce_all( + math_ops.equal( + array_ops.concat( + [array_ops.shape(a), array_ops.shape(b)], 0), + array_ops.concat( + [array_ops.shape(b), array_ops.shape(a)], 0))) + + # One of the shapes isn't fully defined, so we need to use the dynamic + # shape. + return tf_cond.cond( + math_ops.equal(array_ops.rank(a), array_ops.rank(b)), + all_shapes_equal, lambda: constant_op.constant(False)) + + +def maybe_get_static_value(x, dtype=None): + """Helper which tries to return a static value. + + Given `x`, extract it's value statically, optionally casting to a specific + dtype. If this is not possible, None is returned. + + Args: + x: `Tensor` for which to extract a value statically. + dtype: Optional dtype to cast to. + + Returns: + Statically inferred value if possible, otherwise None. + """ + if x is None: + return x + try: + # This returns an np.ndarray. + x_ = tensor_util.constant_value(x) + except TypeError: + x_ = x + if x_ is None or dtype is None: + return x_ + return np.array(x_, dtype) + + +def get_logits_and_probs(logits=None, + probs=None, + multidimensional=False, + validate_args=False, + name="get_logits_and_probs", + dtype=None): + """Converts logit to probabilities (or vice-versa), and returns both. + + Args: + logits: Floating-point `Tensor` representing log-odds. + probs: Floating-point `Tensor` representing probabilities. + multidimensional: Python `bool`, default `False`. If `True`, represents + whether the last dimension of `logits` or `probs`, a `[N1, N2, ... k]` + dimensional tensor, representing the logit or probability of `shape[-1]` + classes. + validate_args: Python `bool`, default `False`. When `True`, either assert `0 + <= probs <= 1` (if not `multidimensional`) or that the last dimension of + `probs` sums to one. + name: A name for this operation (optional). + dtype: `tf.DType` to prefer when converting args to `Tensor`s. + + Returns: + logits, probs: Tuple of `Tensor`s. 
If `probs` has an entry that is `0` or + `1`, then the corresponding entry in the returned logit will be `-Inf` and + `Inf` respectively. + + Raises: + ValueError: if neither `probs` nor `logits` were passed in, or both were. + """ + with ops.name_scope(name, values=[probs, logits]): + if (probs is None) == (logits is None): + raise ValueError("Must pass probs or logits, but not both.") + + if probs is None: + logits = ops.convert_to_tensor(logits, name="logits", dtype=dtype) + if not logits.dtype.is_floating: + raise TypeError("logits must having floating type.") + # We can early return since we constructed probs and therefore know + # they're valid. + if multidimensional: + if validate_args: + logits = embed_check_categorical_event_shape(logits) + return logits, nn.softmax(logits, name="probs") + return logits, math_ops.sigmoid(logits, name="probs") + + probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype) + if not probs.dtype.is_floating: + raise TypeError("probs must having floating type.") + + if validate_args: + with ops.name_scope("validate_probs"): + one = constant_op.constant(1., probs.dtype) + dependencies = [check_ops.assert_non_negative(probs)] + if multidimensional: + probs = embed_check_categorical_event_shape(probs) + dependencies += [ + check_ops.assert_near( + math_ops.reduce_sum(probs, -1), + one, + message="probs does not sum to 1.") + ] + else: + dependencies += [ + check_ops.assert_less_equal( + probs, one, message="probs has components greater than 1.") + ] + probs = control_flow_ops.with_dependencies(dependencies, probs) + + with ops.name_scope("logits"): + if multidimensional: + # Here we don't compute the multidimensional case, in a manner + # consistent with respect to the unidimensional case. We do so + # following the TF convention. Typically, you might expect to see + # logits = log(probs) - log(probs[pivot]). 
A side-effect of + # being consistent with the TF approach is that the unidimensional case + # implicitly handles the second dimension but the multidimensional case + # explicitly keeps the pivot dimension. + return math_ops.log(probs), probs + return math_ops.log(probs) - math_ops.log1p(-1. * probs), probs + + +def _is_known_unsigned_by_dtype(dt): + """Helper returning True if dtype is known to be unsigned.""" + return { + dtypes.bool: True, + dtypes.uint8: True, + dtypes.uint16: True, + }.get(dt.base_dtype, False) + + +def _is_known_signed_by_dtype(dt): + """Helper returning True if dtype is known to be signed.""" + return { + dtypes.float16: True, + dtypes.float32: True, + dtypes.float64: True, + dtypes.int8: True, + dtypes.int16: True, + dtypes.int32: True, + dtypes.int64: True, + }.get(dt.base_dtype, False) + + +def _is_known_dtype(dt): + """Helper returning True if dtype is known.""" + return _is_known_unsigned_by_dtype(dt) or _is_known_signed_by_dtype(dt) + + +def _largest_integer_by_dtype(dt): + """Helper returning the largest integer exactly representable by dtype.""" + if not _is_known_dtype(dt): + raise TypeError("Unrecognized dtype: {}".format(dt.name)) + if dt.is_floating: + return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1)) + if dt.is_integer: + return np.iinfo(dt.as_numpy_dtype).max + if dt.base_dtype == dtypes.bool: + return int(1) + # We actually can't land here but keep the case for completeness. 
+ raise TypeError("Unrecognized dtype: {}".format(dt.name)) + + +def _smallest_integer_by_dtype(dt): + """Helper returning the smallest integer exactly representable by dtype.""" + if not _is_known_dtype(dt): + raise TypeError("Unrecognized dtype: {}".format(dt.name)) + if _is_known_unsigned_by_dtype(dt): + return 0 + return -1 * _largest_integer_by_dtype(dt) + + +def _is_integer_like_by_dtype(dt): + """Helper returning True if dtype.is_integer or is `bool`.""" + if not _is_known_dtype(dt): + raise TypeError("Unrecognized dtype: {}".format(dt.name)) + return dt.is_integer or dt.base_dtype == dtypes.bool + + +def embed_check_categorical_event_shape( + categorical_param, name="embed_check_categorical_event_shape"): + """Embeds checks that categorical distributions don't have too many classes. + + A categorical-type distribution is one which, e.g., returns the class label + rather than a one-hot encoding. E.g., `Categorical(probs)`. + + Since distributions output samples in the same dtype as the parameters, we + must ensure that casting doesn't lose precision. That is, the + `parameter.dtype` implies a maximum number of classes. However, since shape is + `int32` and categorical variables are presumed to be indexes into a `Tensor`, + we must also ensure that the number of classes is no larger than the largest + possible `int32` index, i.e., `2**31-1`. + + In other words the number of classes, `K`, must satisfy the following + condition: + + ```python + K <= min( + int(2**31 - 1), # Largest float as an index. + { + dtypes.float16: int(2**11), # Largest int as a float16. + dtypes.float32: int(2**24), + dtypes.float64: int(2**53), + }.get(categorical_param.dtype.base_dtype, 0)) + ``` + + Args: + categorical_param: Floating-point `Tensor` representing parameters of + distribution over categories. The rightmost shape is presumed to be the + number of categories. + name: A name for this operation (optional). 
+ + Returns: + categorical_param: Input `Tensor` with appropriate assertions embedded. + + Raises: + TypeError: if `categorical_param` has an unknown `dtype`. + ValueError: if we can statically identify `categorical_param` as being too + large (for being closed under int32/float casting). + """ + with ops.name_scope(name, values=[categorical_param]): + x = ops.convert_to_tensor(categorical_param, name="categorical_param") + # The size must not exceed both of: + # - The largest possible int32 (since categorical values are presumed to be + # indexes into a Tensor). + # - The largest possible integer exactly representable under the given + # floating-point dtype (since we need to cast to/from). + # + # The chosen floating-point thresholds are 2**(1 + mantissa_bits). + # For more details, see: + # https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation + x_dtype = x.dtype.base_dtype + max_event_size = ( + _largest_integer_by_dtype(x_dtype) if x_dtype.is_floating else 0) + if max_event_size == 0: + raise TypeError("Unable to validate size of unrecognized dtype " + "({}).".format(x_dtype.name)) + try: + x_shape_static = x.get_shape().with_rank_at_least(1) + except ValueError: + raise ValueError("A categorical-distribution parameter must have " + "at least 1 dimension.") + if tensor_shape.dimension_value(x_shape_static[-1]) is not None: + event_size = x_shape_static.dims[-1].value + if event_size < 2: + raise ValueError("A categorical-distribution parameter must have at " + "least 2 events.") + if event_size > max_event_size: + raise ValueError("Number of classes exceeds `dtype` precision, i.e., " + "{} implies shape ({}) cannot exceed {}.".format( + x_dtype.name, event_size, max_event_size)) + return x + else: + event_size = array_ops.shape(x, name="x_shape")[-1] + return control_flow_ops.with_dependencies([ + check_ops.assert_rank_at_least( + x, + 1, + message=("A categorical-distribution parameter must have " + "at least 1 dimension.")), + 
check_ops.assert_greater_equal( + array_ops.shape(x)[-1], + 2, + message=("A categorical-distribution parameter must have at " + "least 2 events.")), + check_ops.assert_less_equal( + event_size, + max_event_size, + message="Number of classes exceeds `dtype` precision, " + "i.e., {} dtype cannot exceed {} shape.".format( + x_dtype.name, max_event_size)), + ], x) + + +def embed_check_integer_casting_closed(x, + target_dtype, + assert_nonnegative=True, + name="embed_check_casting_closed"): + """Ensures integers remain unaffected despite casting to/from int/float types. + + Example integer-types: `uint8`, `int32`, `bool`. + Example floating-types: `float32`, `float64`. + + The largest possible integer representable by an IEEE754 floating-point is + `2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is + `2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have + integer-form values can be cast to some other type without loss of precision. + + The smallest representable integer is the negative of the largest + representable integer, except for types: `uint8`, `uint16`, `bool`. For these + types, the smallest representable integer is `0`. + + Args: + x: `Tensor` representing integer-form values. + target_dtype: TF `dtype` under which `x` should have identical values. + assert_nonnegative: `bool` indicating `x` should contain nonnegative values. + name: A name for this operation (optional). + + Returns: + x: Input `Tensor` with appropriate assertions embedded. + + Raises: + TypeError: if `x` is neither integer- nor floating-type. + TypeError: if `target_dtype` is neither integer- nor floating-type. + TypeError: if neither `x` nor `target_dtype` are integer-type. 
+ """ + + with ops.name_scope(name, values=[x]): + x = ops.convert_to_tensor(x, name="x") + if (not _is_integer_like_by_dtype(x.dtype) and not x.dtype.is_floating): + raise TypeError("{}.dtype must be floating- or " + "integer-type.".format(x.dtype.name)) + if (not _is_integer_like_by_dtype(target_dtype) and + not target_dtype.is_floating): + raise TypeError("target_dtype ({}) must be floating- or " + "integer-type.".format(target_dtype.name)) + if (not _is_integer_like_by_dtype(x.dtype) and + not _is_integer_like_by_dtype(target_dtype)): + raise TypeError("At least one of {}.dtype ({}) and target_dtype ({}) " + "must be integer-type.".format(x, x.dtype.name, + target_dtype.name)) + + assertions = [] + if assert_nonnegative: + assertions += [ + check_ops.assert_non_negative( + x, message="Elements must be non-negative."), + ] + + if x.dtype.is_floating: + # Being here means _is_integer_like_by_dtype(target_dtype) = True. + # Since this check implies the magnitude check below, we need only it. + assertions += [ + assert_integer_form( + x, + int_dtype=target_dtype, + message="Elements must be {}-equivalent.".format( + target_dtype.name)), + ] + else: + if (_largest_integer_by_dtype(x.dtype) > + _largest_integer_by_dtype(target_dtype)): + # Cast may lose integer precision. + assertions += [ + check_ops.assert_less_equal( + x, + _largest_integer_by_dtype(target_dtype), + message=("Elements cannot exceed {}.".format( + _largest_integer_by_dtype(target_dtype)))), + ] + if (not assert_nonnegative and (_smallest_integer_by_dtype( + x.dtype) < _smallest_integer_by_dtype(target_dtype))): + assertions += [ + check_ops.assert_greater_equal( + x, + _smallest_integer_by_dtype(target_dtype), + message=("Elements cannot be smaller than {}.".format( + _smallest_integer_by_dtype(target_dtype)))), + ] + + if not assertions: + return x + return control_flow_ops.with_dependencies(assertions, x) + + +def log_combinations(n, counts, name="log_combinations"): + """Multinomial coefficient. 
+ + Given `n` and `counts`, where `counts` has last dimension `k`, we compute + the multinomial coefficient as: + + ```n! / sum_i n_i!``` + + where `i` runs over all `k` classes. + + Args: + n: Floating-point `Tensor` broadcastable with `counts`. This represents `n` + outcomes. + counts: Floating-point `Tensor` broadcastable with `n`. This represents + counts in `k` classes, where `k` is the last dimension of the tensor. + name: A name for this operation (optional). + + Returns: + `Tensor` representing the multinomial coefficient between `n` and `counts`. + """ + # First a bit about the number of ways counts could have come in: + # E.g. if counts = [1, 2], then this is 3 choose 2. + # In general, this is (sum counts)! / sum(counts!) + # The sum should be along the last dimension of counts. This is the + # "distribution" dimension. Here n a priori represents the sum of counts. + with ops.name_scope(name, values=[n, counts]): + n = ops.convert_to_tensor(n, name="n") + counts = ops.convert_to_tensor(counts, name="counts") + total_permutations = math_ops.lgamma(n + 1) + counts_factorial = math_ops.lgamma(counts + 1) + redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1]) + return total_permutations - redundant_permutations + + +def matrix_diag_transform(matrix, transform=None, name=None): + """Transform diagonal of [batch-]matrix, leave rest of matrix unchanged. + + Create a trainable covariance defined by a Cholesky factor: + + ```python + # Transform network layer into 2 x 2 array. + matrix_values = tf.contrib.layers.fully_connected(activations, 4) + matrix = tf.reshape(matrix_values, (batch_size, 2, 2)) + + # Make the diagonal positive. If the upper triangle was zero, this would be a + # valid Cholesky factor. + chol = matrix_diag_transform(matrix, transform=tf.nn.softplus) + + # LinearOperatorLowerTriangular ignores the upper triangle. + operator = LinearOperatorLowerTriangular(chol) + ``` + + Example of heteroskedastic 2-D linear regression. 
+ + ```python + tfd = tfp.distributions + + # Get a trainable Cholesky factor. + matrix_values = tf.contrib.layers.fully_connected(activations, 4) + matrix = tf.reshape(matrix_values, (batch_size, 2, 2)) + chol = matrix_diag_transform(matrix, transform=tf.nn.softplus) + + # Get a trainable mean. + mu = tf.contrib.layers.fully_connected(activations, 2) + + # This is a fully trainable multivariate normal! + dist = tfd.MultivariateNormalTriL(mu, chol) + + # Standard log loss. Minimizing this will "train" mu and chol, and then dist + # will be a distribution predicting labels as multivariate Gaussians. + loss = -1 * tf.reduce_mean(dist.log_prob(labels)) + ``` + + Args: + matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are + equal. + transform: Element-wise function mapping `Tensors` to `Tensors`. To be + applied to the diagonal of `matrix`. If `None`, `matrix` is returned + unchanged. Defaults to `None`. + name: A name to give created ops. Defaults to "matrix_diag_transform". + + Returns: + A `Tensor` with same shape and `dtype` as `matrix`. + """ + with ops.name_scope(name, "matrix_diag_transform", [matrix]): + matrix = ops.convert_to_tensor(matrix, name="matrix") + if transform is None: + return matrix + # Replace the diag with transformed diag. + diag = array_ops.matrix_diag_part(matrix) + transformed_diag = transform(diag) + transformed_mat = array_ops.matrix_set_diag(matrix, transformed_diag) + + return transformed_mat + + +def rotate_transpose(x, shift, name="rotate_transpose"): + """Circularly moves dims left or right. + + Effectively identical to: + + ```python + numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift)) + ``` + + When `validate_args=False` additional graph-runtime checks are + performed. These checks entail moving data from to GPU to CPU. + + Example: + + ```python + x = tf.random.normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4]. 
+ rotate_transpose(x, -1).shape == [2, 3, 4, 1] + rotate_transpose(x, -2).shape == [3, 4, 1, 2] + rotate_transpose(x, 1).shape == [4, 1, 2, 3] + rotate_transpose(x, 2).shape == [3, 4, 1, 2] + rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1] + rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3] + ``` + + Args: + x: `Tensor`. + shift: `Tensor`. Number of dimensions to transpose left (shift<0) or + transpose right (shift>0). + name: Python `str`. The name to give this op. + + Returns: + rotated_x: Input `Tensor` with dimensions circularly rotated by shift. + + Raises: + TypeError: if shift is not integer type. + """ + with ops.name_scope(name, values=[x, shift]): + x = ops.convert_to_tensor(x, name="x") + shift = ops.convert_to_tensor(shift, name="shift") + # We do not assign back to preserve constant-ness. + check_ops.assert_integer(shift) + shift_value_static = tensor_util.constant_value(shift) + ndims = x.get_shape().ndims + if ndims is not None and shift_value_static is not None: + if ndims < 2: + return x + shift_value_static = np.sign(shift_value_static) * ( + abs(shift_value_static) % ndims) + if shift_value_static == 0: + return x + perm = np.roll(np.arange(ndims), shift_value_static) + return array_ops.transpose(x, perm=perm) + else: + # Consider if we always had a positive shift, and some specified + # direction. + # When shifting left we want the new array: + # last(x, n-shift) + first(x, shift) + # and if shifting right then we want: + # last(x, shift) + first(x, n-shift) + # Observe that last(a) == slice(a, n) and first(a) == slice(0, a). + # Also, we can encode direction and shift as one: direction * shift. + # Combining these facts, we have: + # a = cond(shift<0, -shift, n-shift) + # last(x, n-a) + first(x, a) == x[a:n] + x[0:a] + # Finally, we transform shift by modulo length so it can be specified + # independently from the array upon which it operates (like python). 
+ ndims = array_ops.rank(x) + shift = array_ops.where_v2( + math_ops.less(shift, 0), + math_ops.mod(-shift, ndims), # pylint: disable=invalid-unary-operand-type + ndims - math_ops.mod(shift, ndims)) + first = math_ops.range(0, shift) + last = math_ops.range(shift, ndims) + perm = array_ops.concat([last, first], 0) + return array_ops.transpose(x, perm=perm) + + +def pick_vector(cond, true_vector, false_vector, name="pick_vector"): + """Picks possibly different length row `Tensor`s based on condition. + + Value `Tensor`s should have exactly one dimension. + + If `cond` is a python Boolean or `tf.constant` then either `true_vector` or + `false_vector` is immediately returned. I.e., no graph nodes are created and + no validation happens. + + Args: + cond: `Tensor`. Must have `dtype=tf.bool` and be scalar. + true_vector: `Tensor` of one dimension. Returned when cond is `True`. + false_vector: `Tensor` of one dimension. Returned when cond is `False`. + name: Python `str`. The name to give this op. + Example: ```python pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, + 18)) # [10, 11] pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, + 18)) # [15, 16, 17] ``` + + Returns: + true_or_false_vector: `Tensor`. 
+ + Raises: + TypeError: if `cond.dtype != tf.bool` + TypeError: if `cond` is not a constant and + `true_vector.dtype != false_vector.dtype` + """ + with ops.name_scope(name, values=(cond, true_vector, false_vector)): + cond = ops.convert_to_tensor(cond, name="cond") + if cond.dtype != dtypes.bool: + raise TypeError("%s.dtype=%s which is not %s" % + (cond, cond.dtype, dtypes.bool)) + cond_value_static = tensor_util.constant_value(cond) + if cond_value_static is not None: + return true_vector if cond_value_static else false_vector + true_vector = ops.convert_to_tensor(true_vector, name="true_vector") + false_vector = ops.convert_to_tensor(false_vector, name="false_vector") + if true_vector.dtype != false_vector.dtype: + raise TypeError( + "%s.dtype=%s does not match %s.dtype=%s" % + (true_vector, true_vector.dtype, false_vector, false_vector.dtype)) + n = array_ops.shape(true_vector)[0] + return array_ops.slice( + array_ops.concat([true_vector, false_vector], 0), + [array_ops.where_v2(cond, 0, n)], [array_ops.where(cond, n, -1)]) + + +def prefer_static_broadcast_shape(shape1, + shape2, + name="prefer_static_broadcast_shape"): + """Convenience function which statically broadcasts shape when possible. + + Args: + shape1: `1-D` integer `Tensor`. Already converted to tensor! + shape2: `1-D` integer `Tensor`. Already converted to tensor! + name: A string name to prepend to created ops. + + Returns: + The broadcast shape, either as `TensorShape` (if broadcast can be done + statically), or as a `Tensor`. 
+ """ + with ops.name_scope(name, values=[shape1, shape2]): + + def make_shape_tensor(x): + return ops.convert_to_tensor(x, name="shape", dtype=dtypes.int32) + + def get_tensor_shape(s): + if isinstance(s, tensor_shape.TensorShape): + return s + s_ = tensor_util.constant_value(make_shape_tensor(s)) + if s_ is not None: + return tensor_shape.TensorShape(s_) + return None + + def get_shape_tensor(s): + if not isinstance(s, tensor_shape.TensorShape): + return make_shape_tensor(s) + if s.is_fully_defined(): + return make_shape_tensor(s.as_list()) + raise ValueError("Cannot broadcast from partially " + "defined `TensorShape`.") + + shape1_ = get_tensor_shape(shape1) + shape2_ = get_tensor_shape(shape2) + if shape1_ is not None and shape2_ is not None: + return array_ops.broadcast_static_shape(shape1_, shape2_) + + shape1_ = get_shape_tensor(shape1) + shape2_ = get_shape_tensor(shape2) + return array_ops.broadcast_dynamic_shape(shape1_, shape2_) + + +def prefer_static_rank(x): + """Return static rank of tensor `x` if available, else `tf.rank(x)`. + + Args: + x: `Tensor` (already converted). + + Returns: + Numpy array (if static rank is obtainable), else `Tensor`. + """ + return prefer_static_value(array_ops.rank(x)) + + +def prefer_static_shape(x): + """Return static shape of tensor `x` if available, else `tf.shape(x)`. + + Args: + x: `Tensor` (already converted). + + Returns: + Numpy array (if static shape is obtainable), else `Tensor`. + """ + return prefer_static_value(array_ops.shape(x)) + + +def prefer_static_value(x): + """Return static value of tensor `x` if available, else `x`. + + Args: + x: `Tensor` (already converted). + + Returns: + Numpy array (if static value is obtainable), else `Tensor`. 
+ """ + static_x = tensor_util.constant_value(x) + if static_x is not None: + return static_x + return x + + +def gen_new_seed(seed, salt): + """Generate a new seed, from the given seed and salt.""" + if seed is None: + return None + string = (str(seed) + salt).encode("utf-8") + return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF + + +def fill_triangular(x, upper=False, name=None): + """Creates a (batch of) triangular matrix from a vector of inputs. + + Created matrix can be lower- or upper-triangular. (It is more efficient to + create the matrix as upper or lower, rather than transpose.) + + Triangular matrix elements are filled in a clockwise spiral. See example, + below. + + If `x.get_shape()` is `[b1, b2, ..., bB, d]` then the output shape is + `[b1, b2, ..., bB, n, n]` where `n` is such that `d = n(n+1)/2`, i.e., + `n = int(np.sqrt(0.25 + 2. * m) - 0.5)`. + + Example: + + ```python + fill_triangular([1, 2, 3, 4, 5, 6]) + # ==> [[4, 0, 0], + # [6, 5, 0], + # [3, 2, 1]] + + fill_triangular([1, 2, 3, 4, 5, 6], upper=True) + # ==> [[1, 2, 3], + # [0, 5, 6], + # [0, 0, 4]] + ``` + + For comparison, a pure numpy version of this function can be found in + `util_test.py`, function `_fill_triangular`. + + Args: + x: `Tensor` representing lower (or upper) triangular elements. + upper: Python `bool` representing whether output matrix should be upper + triangular (`True`) or lower triangular (`False`, default). + name: Python `str`. The name to give this op. + + Returns: + tril: `Tensor` with lower (or upper) triangular elements filled from `x`. + + Raises: + ValueError: if `x` cannot be mapped to a triangular matrix. + """ + + with ops.name_scope(name, "fill_triangular", values=[x]): + x = ops.convert_to_tensor(x, name="x") + if tensor_shape.dimension_value( + x.shape.with_rank_at_least(1)[-1]) is not None: + # Formula derived by solving for n: m = n(n+1)/2. + m = np.int32(x.shape.dims[-1].value) + n = np.sqrt(0.25 + 2. 
* m) - 0.5 + if n != np.floor(n): + raise ValueError("Input right-most shape ({}) does not " + "correspond to a triangular matrix.".format(m)) + n = np.int32(n) + static_final_shape = x.shape[:-1].concatenate([n, n]) + else: + m = array_ops.shape(x)[-1] + # For derivation, see above. Casting automatically lops off the 0.5, so we + # omit it. We don't validate n is an integer because this has + # graph-execution cost; an error will be thrown from the reshape, below. + n = math_ops.cast( + math_ops.sqrt(0.25 + math_ops.cast(2 * m, dtype=dtypes.float32)), + dtype=dtypes.int32) + static_final_shape = x.shape.with_rank_at_least(1)[:-1].concatenate( + [None, None]) + # We now concatenate the "tail" of `x` to `x` (and reverse one of them). + # + # We do this based on the insight that the input `x` provides `ceil(n/2)` + # rows of an `n x n` matrix, some of which will get zeroed out being on the + # wrong side of the diagonal. The first row will not get zeroed out at all, + # and we need `floor(n/2)` more rows, so the first is what we omit from + # `x_tail`. If we then stack those `ceil(n/2)` rows with the `floor(n/2)` + # rows provided by a reversed tail, it is exactly the other set of elements + # of the reversed tail which will be zeroed out for being on the wrong side + # of the diagonal further up/down the matrix. And, in doing-so, we've filled + # the triangular matrix in a clock-wise spiral pattern. Neat! + # + # Try it out in numpy: + # n = 3 + # x = np.arange(n * (n + 1) / 2) + # m = x.shape[0] + # n = np.int32(np.sqrt(.25 + 2 * m) - .5) + # x_tail = x[(m - (n**2 - m)):] + # np.concatenate([x_tail, x[::-1]], 0).reshape(n, n) # lower + # # ==> array([[3, 4, 5], + # [5, 4, 3], + # [2, 1, 0]]) + # np.concatenate([x, x_tail[::-1]], 0).reshape(n, n) # upper + # # ==> array([[0, 1, 2], + # [3, 4, 5], + # [5, 4, 3]]) + # + # Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't + # correctly handle `m == n == 1`. Hence, we do nonnegative indexing. 
+ # Furthermore observe that: + # m - (n**2 - m) + # = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2) + # = 2 (n**2 / 2 + n / 2) - n**2 + # = n**2 + n - n**2 + # = n + ndims = prefer_static_rank(x) + if upper: + x_list = [x, array_ops.reverse(x[..., n:], axis=[ndims - 1])] + else: + x_list = [x[..., n:], array_ops.reverse(x, axis=[ndims - 1])] + new_shape = ( + static_final_shape.as_list() if static_final_shape.is_fully_defined() + else array_ops.concat([array_ops.shape(x)[:-1], [n, n]], axis=0)) + x = array_ops.reshape(array_ops.concat(x_list, axis=-1), new_shape) + x = array_ops.matrix_band_part( + x, num_lower=(0 if upper else -1), num_upper=(-1 if upper else 0)) + x.set_shape(static_final_shape) + return x + + +def fill_triangular_inverse(x, upper=False, name=None): + """Creates a vector from a (batch of) triangular matrix. + + The vector is created from the lower-triangular or upper-triangular portion + depending on the value of the parameter `upper`. + + If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is + `[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`. + + Example: + + ```python + fill_triangular_inverse( + [[4, 0, 0], + [6, 5, 0], + [3, 2, 1]]) + + # ==> [1, 2, 3, 4, 5, 6] + + fill_triangular_inverse( + [[1, 2, 3], + [0, 5, 6], + [0, 0, 4]], upper=True) + + # ==> [1, 2, 3, 4, 5, 6] + ``` + + Args: + x: `Tensor` representing lower (or upper) triangular elements. + upper: Python `bool` representing whether output matrix should be upper + triangular (`True`) or lower triangular (`False`, default). + name: Python `str`. The name to give this op. + + Returns: + flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower + (or upper) triangular elements from `x`. 
+ """ + + with ops.name_scope(name, "fill_triangular_inverse", values=[x]): + x = ops.convert_to_tensor(x, name="x") + if tensor_shape.dimension_value( + x.shape.with_rank_at_least(2)[-1]) is not None: + n = np.int32(x.shape.dims[-1].value) + m = np.int32((n * (n + 1)) // 2) + static_final_shape = x.shape[:-2].concatenate([m]) + else: + n = array_ops.shape(x)[-1] + m = (n * (n + 1)) // 2 + static_final_shape = x.shape.with_rank_at_least(2)[:-2].concatenate( + [None]) + ndims = prefer_static_rank(x) + if upper: + initial_elements = x[..., 0, :] + triangular_portion = x[..., 1:, :] + else: + initial_elements = array_ops.reverse(x[..., -1, :], axis=[ndims - 2]) + triangular_portion = x[..., :-1, :] + rotated_triangular_portion = array_ops.reverse( + array_ops.reverse(triangular_portion, axis=[ndims - 1]), + axis=[ndims - 2]) + consolidated_matrix = triangular_portion + rotated_triangular_portion + end_sequence = array_ops.reshape( + consolidated_matrix, + array_ops.concat([array_ops.shape(x)[:-2], [n * (n - 1)]], axis=0)) + y = array_ops.concat([initial_elements, end_sequence[..., :m - n]], axis=-1) + y.set_shape(static_final_shape) + return y + + +def tridiag(below=None, diag=None, above=None, name=None): + """Creates a matrix with values set above, below, and on the diagonal. + + Example: + + ```python + tridiag(below=[1., 2., 3.], + diag=[4., 5., 6., 7.], + above=[8., 9., 10.]) + # ==> array([[ 4., 8., 0., 0.], + # [ 1., 5., 9., 0.], + # [ 0., 2., 6., 10.], + # [ 0., 0., 3., 7.]], dtype=float32) + ``` + + Warning: This Op is intended for convenience, not efficiency. + + Args: + below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below + diagonal part. `None` is logically equivalent to `below = 0`. + diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal + part. `None` is logically equivalent to `diag = 0`. + above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above + diagonal part. 
`None` is logically equivalent to `above = 0`. + name: Python `str`. The name to give this op. + + Returns: + tridiag: `Tensor` with values set above, below and on the diagonal. + + Raises: + ValueError: if all inputs are `None`. + """ + + def _pad(x): + """Prepends and appends a zero to every vector in a batch of vectors.""" + shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0) + z = array_ops.zeros(shape, dtype=x.dtype) + return array_ops.concat([z, x, z], axis=-1) + + def _add(*x): + """Adds list of Tensors, ignoring `None`.""" + s = None + for y in x: + if y is None: + continue + elif s is None: + s = y + else: + s += y + if s is None: + raise ValueError("Must specify at least one of `below`, `diag`, `above`.") + return s + + with ops.name_scope(name, "tridiag", [below, diag, above]): + if below is not None: + below = ops.convert_to_tensor(below, name="below") + below = array_ops.matrix_diag(_pad(below))[..., :-1, 1:] + if diag is not None: + diag = ops.convert_to_tensor(diag, name="diag") + diag = array_ops.matrix_diag(diag) + if above is not None: + above = ops.convert_to_tensor(above, name="above") + above = array_ops.matrix_diag(_pad(above))[..., 1:, :-1] + # TODO(jvdillon): Consider using scatter_nd instead of creating three full + # matrices. + return _add(below, diag, above) + + +def reduce_weighted_logsumexp(logx, + w=None, + axis=None, + keep_dims=False, + return_sign=False, + name=None): + """Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`. + + If all weights `w` are known to be positive, it is more efficient to directly + use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.math.log(w))` is + more + efficient than `du.reduce_weighted_logsumexp(logx, w)`. + + Reduces `input_tensor` along the dimensions given in `axis`. + Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each + entry in `axis`. If `keep_dims` is true, the reduced dimensions + are retained with length 1. 
+ + If `axis` has no entries, all dimensions are reduced, and a + tensor with a single element is returned. + + This function is more numerically stable than log(sum(w * exp(input))). It + avoids overflows caused by taking the exp of large inputs and underflows + caused by taking the log of small inputs. + + For example: + + ```python + x = tf.constant([[0., 0, 0], + [0, 0, 0]]) + + w = tf.constant([[-1., 1, 1], + [1, 1, 1]]) + + du.reduce_weighted_logsumexp(x, w) + # ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4) + + du.reduce_weighted_logsumexp(x, w, axis=0) + # ==> [log(-1+1), log(1+1), log(1+1)] + + du.reduce_weighted_logsumexp(x, w, axis=1) + # ==> [log(-1+1+1), log(1+1+1)] + + du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True) + # ==> [[log(-1+1+1)], [log(1+1+1)]] + + du.reduce_weighted_logsumexp(x, w, axis=[0, 1]) + # ==> log(-1+5) + ``` + + Args: + logx: The tensor to reduce. Should have numeric type. + w: The weight tensor. Should have numeric type identical to `logx`. + axis: The dimensions to reduce. If `None` (the default), reduces all + dimensions. Must be in the range `[-rank(input_tensor), + rank(input_tensor))`. + keep_dims: If true, retains reduced dimensions with length 1. + return_sign: If `True`, returns the sign of the result. + name: A name for the operation (optional). + + Returns: + lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor. + sign: (Optional) The sign of `sum(weight * exp(x))`. 
+ """ + with ops.name_scope(name, "reduce_weighted_logsumexp", [logx, w]): + logx = ops.convert_to_tensor(logx, name="logx") + if w is None: + lswe = math_ops.reduce_logsumexp(logx, axis=axis, keepdims=keep_dims) + if return_sign: + sgn = array_ops.ones_like(lswe) + return lswe, sgn + return lswe + w = ops.convert_to_tensor(w, dtype=logx.dtype, name="w") + log_absw_x = logx + math_ops.log(math_ops.abs(w)) + max_log_absw_x = math_ops.reduce_max(log_absw_x, axis=axis, keepdims=True) + # If the largest element is `-inf` or `inf` then we don't bother subtracting + # off the max. We do this because otherwise we'd get `inf - inf = NaN`. That + # this is ok follows from the fact that we're actually free to subtract any + # value we like, so long as we add it back after taking the `log(sum(...))`. + max_log_absw_x = array_ops.where_v2( + math_ops.is_inf(max_log_absw_x), array_ops.zeros_like(max_log_absw_x), + max_log_absw_x) + wx_over_max_absw_x = ( + math_ops.sign(w) * math_ops.exp(log_absw_x - max_log_absw_x)) + sum_wx_over_max_absw_x = math_ops.reduce_sum( + wx_over_max_absw_x, axis=axis, keepdims=keep_dims) + if not keep_dims: + max_log_absw_x = array_ops.squeeze(max_log_absw_x, axis) + sgn = math_ops.sign(sum_wx_over_max_absw_x) + lswe = max_log_absw_x + math_ops.log(sgn * sum_wx_over_max_absw_x) + if return_sign: + return lswe, sgn + return lswe + + +# TODO(jvdillon): Merge this test back into: +# tensorflow/python/ops/softplus_op_test.py +# once TF core is accepting new ops. +def softplus_inverse(x, name=None): + """Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)). + + Mathematically this op is equivalent to: + + ```none + softplus_inverse = log(exp(x) - 1.) + ``` + + Args: + x: `Tensor`. Non-negative (not enforced), floating-point. + name: A name for the operation (optional). + + Returns: + `Tensor`. Has the same type/shape as input `x`. 
+ """ + with ops.name_scope(name, "softplus_inverse", values=[x]): + x = ops.convert_to_tensor(x, name="x") + # We begin by deriving a more numerically stable softplus_inverse: + # x = softplus(y) = Log[1 + exp{y}], (which means x > 0). + # ==> exp{x} = 1 + exp{y} (1) + # ==> y = Log[exp{x} - 1] (2) + # = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}] + # = Log[(1 - exp{-x}) / 1] + Log[exp{x}] + # = Log[1 - exp{-x}] + x (3) + # (2) is the "obvious" inverse, but (3) is more stable than (2) for large x. + # For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will + # be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0. + # + # In addition to the numerically stable derivation above, we clamp + # small/large values to be congruent with the logic in: + # tensorflow/core/kernels/softplus_op.h + # + # Finally, we set the input to one whenever the input is too large or too + # small. This ensures that no unchosen codepath is +/- inf. This is + # necessary to ensure the gradient doesn't get NaNs. Recall that the + # gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false` + # thus an `inf` in an unselected path results in `0*inf=nan`. We are careful + # to overwrite `x` with ones only when we will never actually use this + # value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`. + threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2. + is_too_small = math_ops.less(x, np.exp(threshold)) + is_too_large = math_ops.greater(x, -threshold) + too_small_value = math_ops.log(x) + too_large_value = x + # This `where` will ultimately be a NOP because we won't select this + # codepath whenever we used the surrogate `ones_like`. 
+ x = array_ops.where_v2( + math_ops.logical_or(is_too_small, is_too_large), array_ops.ones_like(x), + x) + y = x + math_ops.log(-math_ops.expm1(-x)) # == log(expm1(x)) + return array_ops.where_v2( + is_too_small, too_small_value, + array_ops.where_v2(is_too_large, too_large_value, y)) + + +# TODO(b/35290280): Add unit-tests. +def dimension_size(x, axis): + """Returns the size of a specific dimension.""" + # Since tf.gather isn't "constant-in, constant-out", we must first check the + # static shape or fallback to dynamic shape. + s = tensor_shape.dimension_value( + x.shape.with_rank_at_least(np.abs(axis))[axis]) + if s is not None: + return s + return array_ops.shape(x)[axis] + + +def process_quadrature_grid_and_probs(quadrature_grid_and_probs, + dtype, + validate_args, + name=None): + """Validates quadrature grid, probs or computes them as necessary. + + Args: + quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s + representing the sample points and the corresponding (possibly + normalized) weight. When `None`, defaults to: + `np.polynomial.hermite.hermgauss(deg=8)`. + dtype: The expected `dtype` of `grid` and `probs`. + validate_args: Python `bool`, default `False`. When `True` distribution + parameters are checked for validity despite possibly degrading runtime + performance. When `False` invalid inputs may silently render incorrect + outputs. + name: Python `str` name prefixed to Ops created by this class. + + Returns: + quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s + representing the sample points and the corresponding (possibly + normalized) weight. 
+ + Raises: + ValueError: if `quadrature_grid_and_probs is not None` and + `len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])` + """ + with ops.name_scope(name, "process_quadrature_grid_and_probs", + [quadrature_grid_and_probs]): + if quadrature_grid_and_probs is None: + grid, probs = np.polynomial.hermite.hermgauss(deg=8) + grid = grid.astype(dtype.as_numpy_dtype) + probs = probs.astype(dtype.as_numpy_dtype) + probs /= np.linalg.norm(probs, ord=1, keepdims=True) + grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype) + probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype) + return grid, probs + + grid, probs = tuple(quadrature_grid_and_probs) + grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype) + probs = ops.convert_to_tensor(probs, name="unnormalized_probs", dtype=dtype) + probs /= linalg_ops.norm(probs, ord=1, axis=-1, keepdims=True, name="probs") + + def _static_event_size(x): + """Returns the static size of a specific dimension or `None`.""" + return tensor_shape.dimension_value(x.shape.with_rank_at_least(1)[-1]) + + m, n = _static_event_size(probs), _static_event_size(grid) + if m is not None and n is not None: + if m != n: + raise ValueError("`quadrature_grid_and_probs` must be a `tuple` of " + "same-length zero-th-dimension `Tensor`s " + "(saw lengths {}, {})".format(m, n)) + elif validate_args: + assertions = [ + check_ops.assert_equal( + dimension_size(probs, axis=-1), + dimension_size(grid, axis=-1), + message=("`quadrature_grid_and_probs` must be a `tuple` of " + "same-length zero-th-dimension `Tensor`s")), + ] + with ops.control_dependencies(assertions): + grid = array_ops.identity(grid) + probs = array_ops.identity(probs) + return grid, probs + + +def pad(x, axis, front=False, back=False, value=0, count=1, name=None): + """Pads `value` to the front and/or back of a `Tensor` dim, `count` times. + + Args: + x: `Tensor` input. + axis: Scalar `int`-like `Tensor` representing the single dimension to pad. 
+ (Negative indexing is supported.) + front: Python `bool`; if `True` the beginning of the `axis` dimension is + padded with `value`, `count` times. If `False` no front padding is made. + back: Python `bool`; if `True` the end of the `axis` dimension is padded + with `value`, `count` times. If `False` no end padding is made. + value: Scalar `int`-like `Tensor` representing the actual value added to the + front and/or back of the `axis` dimension of `x`. + count: Scalar `int`-like `Tensor` representing number of elements added to + the front and/or back of the `axis` dimension of `x`. E.g., if `front = + back = True` then `2 * count` elements are added. + name: Python `str` name prefixed to Ops created by this function. + + Returns: + pad: The padded version of input `x`. + + Raises: + ValueError: if both `front` and `back` are `False`. + TypeError: if `count` is not `int`-like. + """ + with ops.name_scope(name, "pad", [x, value, count]): + x = ops.convert_to_tensor(x, name="x") + value = ops.convert_to_tensor(value, dtype=x.dtype, name="value") + count = ops.convert_to_tensor(count, name="count") + if not count.dtype.is_integer: + raise TypeError("`count.dtype` (`{}`) must be `int`-like.".format( + count.dtype.name)) + if not front and not back: + raise ValueError("At least one of `front`, `back` must be `True`.") + ndims = ( + x.shape.ndims if x.shape.ndims is not None else array_ops.rank( + x, name="ndims")) + axis = ops.convert_to_tensor(axis, name="axis") + axis_ = tensor_util.constant_value(axis) + if axis_ is not None: + axis = axis_ + if axis < 0: + axis = ndims + axis + count_ = tensor_util.constant_value(count) + if axis_ >= 0 or x.shape.ndims is not None: + head = x.shape[:axis] + middle = tensor_shape.TensorShape(None if count_ is None else ( + tensor_shape.dimension_at_index(x.shape, axis) + count_ * + (front + back))) + tail = x.shape[axis + 1:] + final_shape = head.concatenate(middle.concatenate(tail)) + else: + final_shape = None + else: + axis = 
array_ops.where_v2(axis < 0, ndims + axis, axis) + final_shape = None + x = array_ops.pad( + x, + paddings=array_ops.one_hot( + indices=array_ops_stack.stack( + [axis if front else -1, axis if back else -1]), + depth=ndims, + axis=0, + on_value=count, + dtype=dtypes.int32), + constant_values=value) + if final_shape is not None: + x.set_shape(final_shape) + return x + + +def parent_frame_arguments(): + """Returns parent frame arguments. + + When called inside a function, returns a dictionary with the caller's function + arguments. These are positional arguments and keyword arguments (**kwargs), + while variable arguments (*varargs) are excluded. + + When called at global scope, this will return an empty dictionary, since there + are no arguments. + + WARNING: If caller function argument names are overloaded before invoking + this method, then values will reflect the overloaded value. For this reason, + we recommend calling `parent_frame_arguments` at the beginning of the + function. + """ + # All arguments and the names used for *varargs, and **kwargs + arg_names, variable_arg_name, keyword_arg_name, local_vars = ( + tf_inspect._inspect.getargvalues( # pylint: disable=protected-access + # Get the first frame of the caller of this method. + tf_inspect._inspect.stack()[1][0])) # pylint: disable=protected-access + + # Remove the *varargs, and flatten the **kwargs. Both are + # nested lists. + local_vars.pop(variable_arg_name, {}) + keyword_args = local_vars.pop(keyword_arg_name, {}) + + final_args = {} + # Copy over arguments and their values. In general, local_vars + # may contain more than just the arguments, since this method + # can be called anywhere in a function. + for arg_name in arg_names: + final_args[arg_name] = local_vars.pop(arg_name) + final_args.update(keyword_args) + + return final_args + + +class AppendDocstring: + """Helper class to promote private subclass docstring to public counterpart. 
+ + Example: + + ```python + class TransformedDistribution(Distribution): + @distribution_util.AppendDocstring( + additional_note="A special note!", + kwargs_dict={"foo": "An extra arg."}) + def _prob(self, y, foo=None): + pass + ``` + + In this case, the `AppendDocstring` decorator appends the `additional_note` to + the docstring of `prob` (not `_prob`) and adds a new `kwargs` + section with each dictionary item as a bullet-point. + + For a more detailed example, see `TransformedDistribution`. + """ + + def __init__(self, additional_note="", kwargs_dict=None): + """Initializes the AppendDocstring object. + + Args: + additional_note: Python string added as additional docstring to public + version of function. + kwargs_dict: Python string/string dictionary representing specific kwargs + expanded from the **kwargs input. + + Raises: + ValueError: if kwargs_dict.key contains whitespace. + ValueError: if kwargs_dict.value contains newlines. + """ + self._additional_note = additional_note + if kwargs_dict: + bullets = [] + for key in sorted(kwargs_dict.keys()): + value = kwargs_dict[key] + if any(x.isspace() for x in key): + raise ValueError("Parameter name \"%s\" contains whitespace." % key) + value = value.lstrip() + if "\n" in value: + raise ValueError( + "Parameter description for \"%s\" contains newlines." 
% key) + bullets.append("* `%s`: %s" % (key, value)) + self._additional_note += ("\n\n##### `kwargs`:\n\n" + "\n".join(bullets)) + + def __call__(self, fn): + + @functools.wraps(fn) + def _fn(*args, **kwargs): + return fn(*args, **kwargs) + + if _fn.__doc__ is None: + _fn.__doc__ = self._additional_note + else: + _fn.__doc__ += "\n%s" % self._additional_note + return _fn diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffb3126c6e255c75b572088fbec663a66e1741af Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/losses.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/losses.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2ad8d0baf70eba0b275d08e31528064d47ba6ec Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/losses.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/losses_impl.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/losses_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..547003793ef2c6f4797010d6c7db5b0d8106af99 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/losses_impl.cpython-310.pyc differ diff 
--git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a8e9316c224cf39019cf53f7d0e2f6fdd880408 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/losses/__pycache__/util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..983351a7d159872a8c83619467ddacfffffa8d78 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__init__.py @@ -0,0 +1,37 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Signal processing operations. + +See the [tf.signal](https://tensorflow.org/api_guides/python/contrib.signal) +guide. 
+ +@@frame +@@hamming_window +@@hann_window +@@inverse_stft +@@inverse_stft_window_fn +@@mfccs_from_log_mel_spectrograms +@@linear_to_mel_weight_matrix +@@overlap_and_add +@@stft + +[hamming]: https://en.wikipedia.org/wiki/Window_function#Hamming_window +[hann]: https://en.wikipedia.org/wiki/Window_function#Hann_window +[mel]: https://en.wikipedia.org/wiki/Mel_scale +[mfcc]: https://en.wikipedia.org/wiki/Mel-frequency_cepstrum +[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform + +API docstring: tensorflow.signal +""" diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9bd40d107eba9f6ff8b6a6e8d4f567dec45ef81 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/dct_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/dct_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c71d31908c7a096166f2d765700f970815dd367b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/dct_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/fft_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/fft_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7d874bceb4bb63d3545e5f0698e75c13ee4fe00 Binary 
files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/fft_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mel_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mel_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcdfdd48e0aef9f3788756738c3531ddac2d9da4 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mel_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mfcc_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mfcc_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..201b2db8baf8f3be49d769c3dbbba6d561731479 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mfcc_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/reconstruction_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/reconstruction_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..003a1ca62d2d5b4919bc205da85948712588107a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/reconstruction_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/shape_ops.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/shape_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6c8f30dfc33d73c760fb471e9e3c63f4c45f2e2 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/shape_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/signal.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/signal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9265a9e60cdb8bcf517324792d8bddd3f2f0515c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/signal.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/spectral_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/spectral_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..980131f311d8c81d6b32a1d74879ac745d55e718 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/spectral_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/util_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/util_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..177c6f4864f53d5b4fd359b0ef7eb66a067095ea Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/util_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/window_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/window_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66f157773a9c690c5c613e3340a48482df294229 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/window_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/dct_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/dct_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a47080b1edb77d3e7d0fc1949617aebe895a70e4 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/dct_ops.py @@ -0,0 +1,256 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ==============================================================================
"""Discrete Cosine Transform ops."""
import math as _math

from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export


def _validate_dct_arguments(input_tensor, dct_type, n, axis, norm):
  """Checks that DCT/IDCT arguments are compatible and well formed.

  Raises `NotImplementedError` for unsupported `axis`, and `ValueError` for an
  invalid `n`, `dct_type`, `norm`, or a too-short input for the Type-I DCT.
  Shared by `dct` and `idct`.
  """
  # Only the inner-most axis is supported (see the TODO on `dct`).
  if axis != -1:
    raise NotImplementedError("axis must be -1. Got: %s" % axis)
  if n is not None and n < 1:
    raise ValueError("n should be a positive integer or None")
  if dct_type not in (1, 2, 3, 4):
    raise ValueError("Types I, II, III and IV (I)DCT are supported.")
  if dct_type == 1:
    if norm == "ortho":
      raise ValueError("Normalization is not supported for the Type-I DCT.")
    # The Type-I DCT mirrors interior samples (input[..., -2:0:-1] below), so
    # it needs at least two samples along the transformed axis. Only checked
    # when the static dimension is known.
    if input_tensor.shape[-1] is not None and input_tensor.shape[-1] < 2:
      raise ValueError(
          "Type-I DCT requires the dimension to be greater than one.")

  if norm not in (None, "ortho"):
    raise ValueError(
        "Unknown normalization. Expected None or 'ortho', got: %s" % norm)


# TODO(rjryan): Implement `axis` parameter.
@tf_export("signal.dct", v1=["signal.dct", "spectral.dct"])
@dispatch.add_dispatch_support
def dct(input, type=2, n=None, axis=-1, norm=None, name=None):  # pylint: disable=redefined-builtin
  """Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.

  Types I, II, III and IV are supported.
  Type I is implemented using a length `2N` padded `tf.signal.rfft`.
  Type II is implemented using a length `2N` padded `tf.signal.rfft`, as
  described here: [Type 2 DCT using 2N FFT padded (Makhoul)]
  (https://dsp.stackexchange.com/a/10606).
  Type III is a fairly straightforward inverse of Type II
  (i.e. using a length `2N` padded `tf.signal.irfft`).
  Type IV is calculated through 2N length DCT2 of padded signal and
  picking the odd indices.

  @compatibility(scipy)
  Equivalent to [scipy.fftpack.dct]
  (https://docs.scipy.org/doc/scipy-1.4.0/reference/generated/scipy.fftpack.dct.html)
  for Type-I, Type-II, Type-III and Type-IV DCT.
  @end_compatibility

  Args:
    input: A `[..., samples]` `float32`/`float64` `Tensor` containing the
      signals to take the DCT of.
    type: The DCT type to perform. Must be 1, 2, 3 or 4.
    n: The length of the transform. If length is less than sequence length,
      only the first n elements of the sequence are considered for the DCT.
      If n is greater than the sequence length, zeros are padded and then
      the DCT is computed as usual.
    axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
    norm: The normalization to apply. `None` for no normalization or `'ortho'`
      for orthonormal normalization.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `float32`/`float64` `Tensor` containing the DCT of
    `input`.

  Raises:
    ValueError: If `type` is not `1`, `2`, `3` or `4`, `axis` is
      not `-1`, `n` is not `None` or greater than 0,
      or `norm` is not `None` or `'ortho'`.
    ValueError: If `type` is `1` and `norm` is `ortho`.

  [dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform
  """
  # Validation happens here; `_dct_internal` deliberately skips it (see below).
  _validate_dct_arguments(input, type, n, axis, norm)
  return _dct_internal(input, type, n, axis, norm, name)


def _dct_internal(input, type=2, n=None, axis=-1, norm=None, name=None):  # pylint: disable=redefined-builtin
  """Computes the 1D Discrete Cosine Transform (DCT) of `input`.

  This internal version of `dct` does not perform any validation and accepts a
  dynamic value for `n` in the form of a rank 0 tensor.

  Args:
    input: A `[..., samples]` `float32`/`float64` `Tensor` containing the
      signals to take the DCT of.
    type: The DCT type to perform. Must be 1, 2, 3 or 4.
    n: The length of the transform. If length is less than sequence length,
      only the first n elements of the sequence are considered for the DCT.
      If n is greater than the sequence length, zeros are padded and then
      the DCT is computed as usual. Can be an int or rank 0 tensor.
    axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
    norm: The normalization to apply. `None` for no normalization or `'ortho'`
      for orthonormal normalization.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `float32`/`float64` `Tensor` containing the DCT of
    `input`.
  """
  with _ops.name_scope(name, "dct", [input]):
    input = _ops.convert_to_tensor(input)
    zero = _ops.convert_to_tensor(0.0, dtype=input.dtype)

    # Prefer the static dimension when available; fall back to dynamic shape.
    seq_len = (
        tensor_shape.dimension_value(input.shape[-1]) or
        _array_ops.shape(input)[-1])
    if n is not None:

      def truncate_input():
        # n <= seq_len: keep only the first n samples.
        return input[..., 0:n]

      def pad_input():
        # n > seq_len: zero-pad the last axis up to length n.
        rank = len(input.shape)
        padding = [[0, 0] for _ in range(rank)]
        padding[rank - 1][1] = n - seq_len
        padding = _ops.convert_to_tensor(padding, dtype=_dtypes.int32)
        return _array_ops.pad(input, paddings=padding)

      # smart_cond resolves statically when `n` and `seq_len` are constants.
      input = smart_cond.smart_cond(n <= seq_len, truncate_input, pad_input)

    # Re-read the (possibly changed) transform length after truncate/pad.
    axis_dim = (tensor_shape.dimension_value(input.shape[-1])
                or _array_ops.shape(input)[-1])
    axis_dim_float = _math_ops.cast(axis_dim, input.dtype)

    if type == 1:
      # Type-I DCT: RFFT of the even extension [x0..xN-1, xN-2..x1].
      dct1_input = _array_ops.concat([input, input[..., -2:0:-1]], axis=-1)
      dct1 = _math_ops.real(fft_ops.rfft(dct1_input))
      return dct1

    if type == 2:
      # Makhoul's method: 2N zero-padded RFFT times a complex twiddle factor
      # exp(-i*pi*k/(2N)); the real part is the DCT-II.
      scale = 2.0 * _math_ops.exp(
          _math_ops.complex(
              zero, -_math_ops.range(axis_dim_float) * _math.pi * 0.5 /
              axis_dim_float))

      # TODO(rjryan): Benchmark performance and memory usage of the various
      # approaches to computing a DCT via the RFFT.
      dct2 = _math_ops.real(
          fft_ops.rfft(
              input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)

      if norm == "ortho":
        n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)
        n2 = n1 * _math.sqrt(2.0)
        # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
        weights = _array_ops.pad(
            _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
            constant_values=n2)
        dct2 *= weights

      return dct2

    elif type == 3:
      # Inverse of Type-II: pre-scale, multiply by exp(+i*pi*k/(2N)), IRFFT.
      if norm == "ortho":
        n1 = _math_ops.sqrt(axis_dim_float)
        n2 = n1 * _math.sqrt(0.5)
        # Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
        weights = _array_ops.pad(
            _array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
            constant_values=n2)
        input *= weights
      else:
        input *= axis_dim_float
      scale = 2.0 * _math_ops.exp(
          _math_ops.complex(
              zero,
              _math_ops.range(axis_dim_float) * _math.pi * 0.5 /
              axis_dim_float))
      dct3 = _math_ops.real(
          fft_ops.irfft(
              scale * _math_ops.complex(input, zero),
              fft_length=[2 * axis_dim]))[..., :axis_dim]

      return dct3

    elif type == 4:
      # DCT-2 of 2N length zero-padded signal, unnormalized.
      dct2 = _dct_internal(input, type=2, n=2*axis_dim, axis=axis, norm=None)
      # Get odd indices of DCT-2 of zero padded 2N signal to obtain
      # DCT-4 of the original N length signal.
      dct4 = dct2[..., 1::2]
      if norm == "ortho":
        dct4 *= _math.sqrt(0.5) * _math_ops.rsqrt(axis_dim_float)

      return dct4


# TODO(rjryan): Implement `n` and `axis` parameters.
@tf_export("signal.idct", v1=["signal.idct", "spectral.idct"])
@dispatch.add_dispatch_support
def idct(input, type=2, n=None, axis=-1, norm=None, name=None):  # pylint: disable=redefined-builtin
  """Computes the 1D [Inverse Discrete Cosine Transform (DCT)][idct] of `input`.

  Currently Types I, II, III, IV are supported. Type III is the inverse of
  Type II, and vice versa.

  Note that you must re-normalize by 1/(2n) to obtain an inverse if `norm` is
  not `'ortho'`. That is:
  `signal == idct(dct(signal)) * 0.5 / signal.shape[-1]`.
  When `norm='ortho'`, we have:
  `signal == idct(dct(signal, norm='ortho'), norm='ortho')`.

  @compatibility(scipy)
  Equivalent to [scipy.fftpack.idct]
  (https://docs.scipy.org/doc/scipy-1.4.0/reference/generated/scipy.fftpack.idct.html)
  for Type-I, Type-II, Type-III and Type-IV DCT.
  @end_compatibility

  Args:
    input: A `[..., samples]` `float32`/`float64` `Tensor` containing the
      signals to take the DCT of.
    type: The IDCT type to perform. Must be 1, 2, 3 or 4.
    n: For future expansion. The length of the transform. Must be `None`.
    axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
    norm: The normalization to apply. `None` for no normalization or `'ortho'`
      for orthonormal normalization.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `float32`/`float64` `Tensor` containing the IDCT of
    `input`.

  Raises:
    ValueError: If `type` is not `1`, `2` or `3`, `n` is not `None, `axis` is
      not `-1`, or `norm` is not `None` or `'ortho'`.

  [idct]:
  https://en.wikipedia.org/wiki/Discrete_cosine_transform#Inverse_transforms
  """
  _validate_dct_arguments(input, type, n, axis, norm)
  # Types I and IV are self-inverse (up to scale); II and III invert each
  # other, so the IDCT is just a forward DCT of the mapped type.
  inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]
  return _dct_internal(
      input, type=inverse_type, n=n, axis=axis, norm=norm, name=name)
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/fft_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/fft_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a292dc672a5b49df7d9f44dfe63414c2234813b
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/fft_ops.py
@@ -0,0 +1,696 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fast-Fourier Transform ops."""
import re

import numpy as np

from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import tensor_util as _tensor_util
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import array_ops_stack as _array_ops_stack
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export


def _infer_fft_length_for_fftn(input_tensor):
  """Returns the dynamic shape of `input_tensor` as the default fft_length."""
  return _array_ops.shape(input_tensor)[-len(input_tensor.shape) :]


def _infer_fft_length_for_irfftn(input_tensor):
  """Infers fft_length for IRFFTN: last dim maps from n//2+1 back to 2*(n-1)."""
  # NOTE(review): uses the *static* shape; presumably callers have fully
  # defined shapes here — `as_list()` would yield None entries otherwise.
  fft_shape = input_tensor.get_shape()[-len(input_tensor.shape) :]
  fft_length = fft_shape.as_list()
  fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))
  return _ops.convert_to_tensor(fft_length, _dtypes.int32)


def _infer_axes_for_fftn(input_tensor):
  """Returns [0, 1, ..., rank-1] as the default `axes` for the N-D FFTs."""
  return _ops.convert_to_tensor(
      np.arange(len(input_tensor.shape)), _dtypes.int32
  )


def _process_empty_axes(input_tensor, axes):
  """Normalizes `axes`: defaults to all axes, otherwise converts to int32."""
  if axes is None:
    axes = _infer_axes_for_fftn(input_tensor)
  else:
    axes = _ops.convert_to_tensor(axes, _dtypes.int32)
  return axes


def _infer_fft_length_for_rfft(input_tensor, fft_rank):
  """Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`."""
  # A TensorShape for the inner fft_rank dimensions.
  fft_shape = input_tensor.get_shape()[-fft_rank:]

  # If any dim is unknown, fall back to tensor-based math.
  if not fft_shape.is_fully_defined():
    return _array_ops.shape(input_tensor)[-fft_rank:]

  # Otherwise, return a constant.
  return _ops.convert_to_tensor(fft_shape.as_list(), _dtypes.int32)


def _infer_fft_length_for_irfft(input_tensor, fft_rank):
  """Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`."""
  # A TensorShape for the inner fft_rank dimensions.
  fft_shape = input_tensor.get_shape()[-fft_rank:]

  # If any dim is unknown, fall back to tensor-based math.
  if not fft_shape.is_fully_defined():
    fft_length = _array_ops_stack.unstack(
        _array_ops.shape(input_tensor)[-fft_rank:])
    # RFFT output has n//2+1 bins, so the real length is 2*(bins-1).
    fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))
    return _array_ops_stack.stack(fft_length)

  # Otherwise, return a constant.
  fft_length = fft_shape.as_list()
  if fft_length:
    fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))
  return _ops.convert_to_tensor(fft_length, _dtypes.int32)


def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False):
  """Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims."""
  fft_shape = _tensor_util.constant_value_as_shape(fft_length)

  # Edge case: skip padding empty tensors.
  if (input_tensor.shape.ndims is not None and
      any(dim.value == 0 for dim in input_tensor.shape.dims)):
    return input_tensor

  # If we know the shapes ahead of time, we can either skip or pre-compute the
  # appropriate paddings. Otherwise, fall back to computing paddings in
  # TensorFlow.
  if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None:
    # Slice the last FFT-rank dimensions from input_tensor's shape.
    input_fft_shape = input_tensor.shape[-fft_shape.ndims:]  # pylint: disable=invalid-unary-operand-type

    if input_fft_shape.is_fully_defined():
      # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
      if is_reverse:
        fft_shape = fft_shape[:-1].concatenate(
            fft_shape.dims[-1].value // 2 + 1)

      paddings = [[0, max(fft_dim.value - input_dim.value, 0)]
                  for fft_dim, input_dim in zip(
                      fft_shape.dims, input_fft_shape.dims)]
      if any(pad > 0 for _, pad in paddings):
        # Batch (outer) dims are never padded — only the FFT dims.
        outer_paddings = [[0, 0]] * max((input_tensor.shape.ndims -
                                         fft_shape.ndims), 0)
        return _array_ops.pad(input_tensor, outer_paddings + paddings)
      return input_tensor

  # If we can't determine the paddings ahead of time, then we have to pad. If
  # the paddings end up as zero, tf.pad has a special-case that does no work.
  input_rank = _array_ops.rank(input_tensor)
  input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:]
  outer_dims = _math_ops.maximum(0, input_rank - fft_rank)
  outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype)
  # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
  if is_reverse:
    fft_length = _array_ops.concat([fft_length[:-1],
                                    fft_length[-1:] // 2 + 1], 0)
  fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape)
  paddings = _array_ops.concat([outer_paddings, fft_paddings], 0)
  paddings = _array_ops_stack.stack(
      [_array_ops.zeros_like(paddings), paddings], axis=1)
  return _array_ops.pad(input_tensor, paddings)


def _rfft_wrapper(fft_fn, fft_rank, default_name):
  """Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""

  def _rfft(input_tensor, fft_length=None, name=None):
    """Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
    with _ops.name_scope(name, default_name,
                         [input_tensor, fft_length]) as name:
      input_tensor = _ops.convert_to_tensor(input_tensor,
                                            preferred_dtype=_dtypes.float32)
      if input_tensor.dtype not in (_dtypes.float32, _dtypes.float64):
        raise ValueError(
            "RFFT requires tf.float32 or tf.float64 inputs, got: %s" %
            input_tensor)
      real_dtype = input_tensor.dtype
      if real_dtype == _dtypes.float32:
        complex_dtype = _dtypes.complex64
      else:
        assert real_dtype == _dtypes.float64
        complex_dtype = _dtypes.complex128
      input_tensor.shape.with_rank_at_least(fft_rank)
      if fft_length is None:
        fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)
      else:
        fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
      input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)

      # Prefer a static fft_length so the op gets a constant attribute.
      fft_length_static = _tensor_util.constant_value(fft_length)
      if fft_length_static is not None:
        fft_length = fft_length_static
      return fft_fn(input_tensor, fft_length, Tcomplex=complex_dtype, name=name)
  # Strip the internal `Tcomplex` attr from the generated-op docstring.
  _rfft.__doc__ = re.sub(" Tcomplex.*?\n", "", fft_fn.__doc__)
  return _rfft


def _irfft_wrapper(ifft_fn, fft_rank, default_name):
  """Wrapper around gen_spectral_ops.irfft* that infers fft_length argument."""

  def _irfft(input_tensor, fft_length=None, name=None):
    """Wrapper irfft* that infers fft_length argument."""
    with _ops.name_scope(name, default_name,
                         [input_tensor, fft_length]) as name:
      input_tensor = _ops.convert_to_tensor(input_tensor,
                                            preferred_dtype=_dtypes.complex64)
      input_tensor.shape.with_rank_at_least(fft_rank)
      if input_tensor.dtype not in (_dtypes.complex64, _dtypes.complex128):
        raise ValueError(
            "IRFFT requires tf.complex64 or tf.complex128 inputs, got: %s" %
            input_tensor)
      complex_dtype = input_tensor.dtype
      real_dtype = complex_dtype.real_dtype
      if fft_length is None:
        fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)
      else:
        fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
      # is_reverse: inner-most dim is padded to fft_length // 2 + 1 bins.
      input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length,
                                         is_reverse=True)
      fft_length_static = _tensor_util.constant_value(fft_length)
      if fft_length_static is not None:
        fft_length = fft_length_static
      return ifft_fn(input_tensor, fft_length, Treal=real_dtype, name=name)

  # Strip the internal `Treal` attr and fix the arg name in the docstring.
  _irfft.__doc__ = re.sub("`input`", "`input_tensor`",
                          re.sub(" Treal.*?\n", "", ifft_fn.__doc__))
  return _irfft


def _fftn_wrapper(fft_n, default_name):
  """Wrapper around gen_spectral_ops.fftn."""

  def _fftn(input_tensor, fft_length=None, axes=None, norm=None, name=None):
    """Wrapper around gen_spectral_ops.*fft that infers fft_length and axes arguments."""
    with _ops.name_scope(
        name, default_name, [input_tensor, fft_length, axes]
    ) as name:
      axes = _process_empty_axes(input_tensor, axes)
      fft_rank = axes.shape[0]
      input_tensor = _ops.convert_to_tensor(
          input_tensor, preferred_dtype=_dtypes.complex64
      )
      input_tensor.shape.with_rank_at_least(fft_rank)
      if fft_length is None:
        fft_length = _infer_fft_length_for_fftn(input_tensor)
      else:
        fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
      input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)

      fft_length_static = _tensor_util.constant_value(fft_length)
      if fft_length_static is not None:
        fft_length = fft_length_static
      # Normalization follows the numpy convention: "backward" (default) does
      # nothing on the forward transform; "forward" divides by n;
      # "ortho" divides by sqrt(n), where n = prod(fft_length).
      # NOTE(review): the loop requires `fft_length` to be iterable — i.e. a
      # static value; verify behavior when fft_length stays a dynamic tensor.
      if norm is None:
        norm = "backward"
      n = 1
      if norm != "backward":
        for fft_length_i in fft_length:
          n *= fft_length_i
      if norm == "forward":
        input_tensor /= n
      elif norm == "ortho":
        input_tensor /= np.sqrt(n)  # should be sqrt(N)
      return fft_n(input_tensor, fft_length, axes, name=name)

  _fftn.__doc__ = re.sub(r" Tcomplex.*?\n", "", fft_n.__doc__)
  return _fftn


def _ifftn_wrapper(ifft_n, default_name):
  """Wrapper around gen_spectral_ops.ifftn."""

  def _ifftn(input_tensor, fft_length=None, axes=None, norm=None, name=None):
    """Wrapper around gen_spectral_ops.*fft that infers fft_length and axes arguments."""
    with _ops.name_scope(
        name, default_name, [input_tensor, fft_length, axes]
    ) as name:
      axes = _process_empty_axes(input_tensor, axes)
      fft_rank = axes.shape[0]
      input_tensor = _ops.convert_to_tensor(
          input_tensor, preferred_dtype=_dtypes.complex64
      )
      input_tensor.shape.with_rank_at_least(fft_rank)
      if fft_length is None:
        fft_length = _infer_fft_length_for_fftn(input_tensor)
      else:
        fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
      input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)

      fft_length_static = _tensor_util.constant_value(fft_length)
      if fft_length_static is not None:
        fft_length = fft_length_static
      # Inverse transform: multiply (not divide) to undo forward scaling.
      if norm is None:
        norm = "backward"
      n = 1
      if norm != "backward":
        for fft_length_i in fft_length:
          n *= fft_length_i
      if norm == "forward":
        input_tensor *= n
      elif norm == "ortho":
        input_tensor *= np.sqrt(n)  # should be sqrt(N)
      return ifft_n(input_tensor, fft_length, axes, name=name)

  _ifftn.__doc__ = re.sub(r" Tcomplex.*?\n", "", ifft_n.__doc__)
  return _ifftn


def _rfftn_wrapper(rfft_n, default_name):
  """Wrapper around gen_spectral_ops.rfftn."""

  def _rfftn(input_tensor, fft_length=None, axes=None, norm=None, name=None):
    """Wrapper around gen_spectral_ops.*fft that infers fft_length and axes arguments."""
    with _ops.name_scope(
        name, default_name, [input_tensor, fft_length, axes]
    ) as name:
      axes = _process_empty_axes(input_tensor, axes)
      fft_rank = axes.shape[0]
      input_tensor = _ops.convert_to_tensor(
          input_tensor, preferred_dtype=_dtypes.float32
      )
      if input_tensor.dtype not in (_dtypes.float32, _dtypes.float64):
        raise ValueError(
            "RFFT requires tf.float32 or tf.float64 inputs, got: %s"
            % input_tensor
        )
      real_dtype = input_tensor.dtype
      if real_dtype == _dtypes.float32:
        complex_dtype = _dtypes.complex64
      else:
        assert real_dtype == _dtypes.float64
        complex_dtype = _dtypes.complex128
      input_tensor.shape.with_rank_at_least(fft_rank)
      if fft_length is None:
        fft_length = _infer_fft_length_for_fftn(input_tensor)
      else:
        fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
      input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)

      fft_length_static = _tensor_util.constant_value(fft_length)
      if fft_length_static is not None:
        fft_length = fft_length_static
      # Same normalization convention as _fftn_wrapper above.
      if norm is None:
        norm = "backward"
      n = 1
      if norm != "backward":
        for fft_length_i in fft_length:
          n *= fft_length_i
      if norm == "forward":
        input_tensor /= n
      elif norm == "ortho":
        input_tensor /= np.sqrt(n)  # should be sqrt(N)
      return rfft_n(
          input_tensor,
          fft_length,
          axes,
          Tcomplex=complex_dtype,
          name=name,
      )

  _rfftn.__doc__ = re.sub(r" Tcomplex.*?\n", "", rfft_n.__doc__)
  return _rfftn


def _irfftn_wrapper(irfft_n, default_name):
  """Wrapper around gen_spectral_ops.irfftn."""

  def _irfftn(input_tensor, fft_length=None, axes=None, norm=None, name=None):
    """Wrapper irfft* that infers fft_length argument."""
    with _ops.name_scope(
        name, default_name, [input_tensor, fft_length]
    ) as name:
      axes = _process_empty_axes(input_tensor, axes)
      fft_rank = axes.shape[0]
      input_tensor = _ops.convert_to_tensor(
          input_tensor, preferred_dtype=_dtypes.complex64
      )
      input_tensor.shape.with_rank_at_least(fft_rank)
      if input_tensor.dtype not in (_dtypes.complex64, _dtypes.complex128):
        raise ValueError(
            "IRFFT requires tf.complex64 or tf.complex128 inputs, got: %s"
            % input_tensor
        )
      complex_dtype = input_tensor.dtype
      real_dtype = complex_dtype.real_dtype
      if fft_length is None:
        fft_length = _infer_fft_length_for_irfftn(input_tensor)
      else:
        fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
      input_tensor = _maybe_pad_for_rfft(
          input_tensor, fft_rank, fft_length, is_reverse=True
      )
      fft_length_static = _tensor_util.constant_value(fft_length)
      if fft_length_static is not None:
        fft_length = fft_length_static

      # Inverse transform: multiply to undo the forward scaling.
      if norm is None:
        norm = "backward"
      n = 1
      if norm != "backward":
        for fft_length_i in fft_length:
          n *= fft_length_i
      if norm == "forward":
        input_tensor *= n
      elif norm == "ortho":
        input_tensor *= np.sqrt(n)  # should be sqrt(N)
      return irfft_n(
          input_tensor, fft_length, axes, Treal=real_dtype, name=name
      )

  _irfftn.__doc__ = re.sub(
      "`input`",
      "`input_tensor`",
      re.sub(r" Treal.*?\n", "", irfft_n.__doc__),
  )
  return _irfftn


# FFT/IFFT 1/2/3D are exported via
# third_party/tensorflow/core/api_def/python_api/
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
fftnd = _fftn_wrapper(gen_spectral_ops.fftnd, "fftnd")
tf_export("signal.fftnd")(
    dispatch.add_dispatch_support(fftnd)
)
ifftnd = _ifftn_wrapper(gen_spectral_ops.ifftnd, "ifftnd")
tf_export("signal.ifftnd")(
    dispatch.add_dispatch_support(ifftnd)
)
rfft = _rfft_wrapper(gen_spectral_ops.rfft, 1, "rfft")
tf_export("signal.rfft", v1=["signal.rfft", "spectral.rfft"])(
    dispatch.add_dispatch_support(rfft))
irfft = _irfft_wrapper(gen_spectral_ops.irfft, 1, "irfft")
tf_export("signal.irfft", v1=["signal.irfft", "spectral.irfft"])(
    dispatch.add_dispatch_support(irfft))
rfft2d = _rfft_wrapper(gen_spectral_ops.rfft2d, 2, "rfft2d")
tf_export("signal.rfft2d", v1=["signal.rfft2d", "spectral.rfft2d"])(
    dispatch.add_dispatch_support(rfft2d))
irfft2d = _irfft_wrapper(gen_spectral_ops.irfft2d, 2, "irfft2d")
tf_export("signal.irfft2d", v1=["signal.irfft2d", "spectral.irfft2d"])(
    dispatch.add_dispatch_support(irfft2d))
rfft3d = _rfft_wrapper(gen_spectral_ops.rfft3d, 3, "rfft3d")
tf_export("signal.rfft3d", v1=["signal.rfft3d", "spectral.rfft3d"])(
    dispatch.add_dispatch_support(rfft3d))
irfft3d = _irfft_wrapper(gen_spectral_ops.irfft3d, 3, "irfft3d")
tf_export("signal.irfft3d", v1=["signal.irfft3d", "spectral.irfft3d"])(
    dispatch.add_dispatch_support(irfft3d))
rfftnd = _rfftn_wrapper(gen_spectral_ops.rfftnd, "rfftnd")
tf_export("signal.rfftnd")(
    dispatch.add_dispatch_support(rfftnd)
)
irfftnd = _irfftn_wrapper(gen_spectral_ops.irfftnd, "irfftnd")
tf_export("signal.irfftnd")(
    dispatch.add_dispatch_support(irfftnd)
)


def _fft_size_for_grad(grad, rank):
  """Product of the inner `rank` dims of `grad` (the FFT size)."""
  return _math_ops.reduce_prod(_array_ops.shape(grad)[-rank:])


@_ops.RegisterGradient("FFT")
def _fft_grad(_, grad):
  # d/dx FFT(x) applied to grad: IFFT scaled back up by the FFT size.
  size = _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype)
  return ifft(grad) * size


@_ops.RegisterGradient("IFFT")
def _ifft_grad(_, grad):
  rsize = _math_ops.cast(
      1. / _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype.real_dtype),
      grad.dtype)
  return fft(grad) * rsize


@_ops.RegisterGradient("FFT2D")
def _fft2d_grad(_, grad):
  size = _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype)
  return ifft2d(grad) * size


@_ops.RegisterGradient("IFFT2D")
def _ifft2d_grad(_, grad):
  rsize = _math_ops.cast(
      1. / _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype.real_dtype),
      grad.dtype)
  return fft2d(grad) * rsize


@_ops.RegisterGradient("FFT3D")
def _fft3d_grad(_, grad):
  size = _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype)
  return ifft3d(grad) * size


@_ops.RegisterGradient("IFFT3D")
def _ifft3d_grad(_, grad):
  rsize = _math_ops.cast(
      1. / _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype.real_dtype),
      grad.dtype)
  return fft3d(grad) * rsize


def _rfft_grad_helper(rank, irfft_fn):
  """Returns a gradient function for an RFFT of the provided rank."""
  # Can't happen because we don't register a gradient for RFFT3D.
  assert rank in (1, 2), "Gradient for RFFT3D is not implemented."

  def _grad(op, grad):
    """A gradient function for RFFT with the provided `rank` and `irfft_fn`."""
    fft_length = op.inputs[1]
    complex_dtype = grad.dtype
    real_dtype = complex_dtype.real_dtype
    input_shape = _array_ops.shape(op.inputs[0])
    # 1+0j when the FFT length is even, 0+0j when odd: gates the Nyquist-bin
    # correction terms below.
    is_even = _math_ops.cast(1 - (fft_length[-1] % 2), complex_dtype)

    def _tile_for_broadcasting(matrix, t):
      # Reshape `matrix` to [1, ..., 1, rows, cols] then tile it over the
      # batch dims of `t` so the inner 2 dims can be batch-matmuled.
      expanded = _array_ops.reshape(
          matrix,
          _array_ops.concat([
              _array_ops.ones([_array_ops.rank(t) - 2], _dtypes.int32),
              _array_ops.shape(matrix)
          ], 0))
      return _array_ops.tile(
          expanded, _array_ops.concat([_array_ops.shape(t)[:-2], [1, 1]], 0))

    def _mask_matrix(length):
      """Computes t_n = exp(sqrt(-1) * pi * n^2 / line_len)."""
      # TODO(rjryan): Speed up computation of twiddle factors using the
      # following recurrence relation and cache them across invocations of RFFT.
      #
      # t_n = exp(sqrt(-1) * pi * n^2 / line_len)
      # for n = 0, 1,..., line_len-1.
      # For n > 2, use t_n = t_{n-1}^2 / t_{n-2} * t_1^2
      a = _array_ops.tile(
          _array_ops.expand_dims(_math_ops.range(length), 0), (length, 1))
      b = _array_ops.transpose(a, [1, 0])
      return _math_ops.exp(
          -2j * np.pi * _math_ops.cast(a * b, complex_dtype) /
          _math_ops.cast(length, complex_dtype))

    def _ymask(length):
      """A sequence of [1+0j, -1+0j, 1+0j, -1+0j, ...] with length `length`."""
      return _math_ops.cast(1 - 2 * (_math_ops.range(length) % 2),
                            complex_dtype)

    y0 = grad[..., 0:1]
    if rank == 1:
      # Correct for the DC bin, and (for even lengths) the Nyquist bin.
      ym = grad[..., -1:]
      extra_terms = y0 + is_even * ym * _ymask(input_shape[-1])
    elif rank == 2:
      # Create a mask matrix for y0 and ym.
      base_mask = _mask_matrix(input_shape[-2])

      # Tile base_mask to match y0 in shape so that we can batch-matmul the
      # inner 2 dimensions.
      tiled_mask = _tile_for_broadcasting(base_mask, y0)

      y0_term = _math_ops.matmul(tiled_mask, _math_ops.conj(y0))
      extra_terms = y0_term

      ym = grad[..., -1:]
      ym_term = _math_ops.matmul(tiled_mask, _math_ops.conj(ym))

      inner_dim = input_shape[-1]
      ym_term = _array_ops.tile(
          ym_term,
          _array_ops.concat([
              _array_ops.ones([_array_ops.rank(grad) - 1], _dtypes.int32),
              [inner_dim]
          ], 0)) * _ymask(inner_dim)

      extra_terms += is_even * ym_term

    # The gradient of RFFT is the IRFFT of the incoming gradient times a scaling
    # factor, plus some additional terms to make up for the components dropped
    # due to Hermitian symmetry.
    input_size = _math_ops.cast(
        _fft_size_for_grad(op.inputs[0], rank), real_dtype)
    the_irfft = irfft_fn(grad, fft_length)
    # Second return value is None: no gradient flows to `fft_length`.
    return 0.5 * (the_irfft * input_size + _math_ops.real(extra_terms)), None

  return _grad


def _irfft_grad_helper(rank, rfft_fn):
  """Returns a gradient function for an IRFFT of the provided rank."""
  # Can't happen because we don't register a gradient for IRFFT3D.
  assert rank in (1, 2), "Gradient for IRFFT3D is not implemented."

  def _grad(op, grad):
    """A gradient function for IRFFT with the provided `rank` and `rfft_fn`."""
    # Generate a simple mask like [1.0, 2.0, ..., 2.0, 1.0] for even-length FFTs
    # and [1.0, 2.0, ..., 2.0] for odd-length FFTs. To reduce extra ops in the
    # graph we special-case the situation where the FFT length and last
    # dimension of the input are known at graph construction time.
    fft_length = op.inputs[1]
    fft_length_static = _tensor_util.constant_value(fft_length)
    if fft_length_static is not None:
      fft_length = fft_length_static
    real_dtype = grad.dtype
    if real_dtype == _dtypes.float32:
      complex_dtype = _dtypes.complex64
    elif real_dtype == _dtypes.float64:
      complex_dtype = _dtypes.complex128
    is_odd = _math_ops.mod(fft_length[-1], 2)
    input_last_dimension = _array_ops.shape(op.inputs[0])[-1]
    mask = _array_ops.concat(
        [[1.0], 2.0 * _array_ops.ones(
            [input_last_dimension - 2 + is_odd], real_dtype),
         _array_ops.ones([1 - is_odd], real_dtype)], 0)

    rsize = _math_ops.reciprocal(_math_ops.cast(
        _fft_size_for_grad(grad, rank), real_dtype))

    # The gradient of IRFFT is the RFFT of the incoming gradient times a scaling
    # factor and a mask. The mask scales the gradient for the Hermitian
    # symmetric components of the RFFT by a factor of two, since these
    # components are de-duplicated in the RFFT.
    the_rfft = rfft_fn(grad, fft_length)
    return the_rfft * _math_ops.cast(rsize * mask, complex_dtype), None

  return _grad


@tf_export("signal.fftshift")
@dispatch.add_dispatch_support
def fftshift(x, axes=None, name=None):
  """Shift the zero-frequency component to the center of the spectrum.

  This function swaps half-spaces for all axes listed (defaults to all).
  Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.

  @compatibility(numpy)
  Equivalent to numpy.fft.fftshift.
  https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fftshift.html
  @end_compatibility

  For example:

  ```python
  x = tf.signal.fftshift([ 0.,  1.,  2.,  3.,  4., -5., -4., -3., -2., -1.])
  x.numpy() # array([-5., -4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.])
  ```

  Args:
    x: `Tensor`, input tensor.
    axes: `int` or shape `tuple`, optional Axes over which to shift.  Default is
      None, which shifts all axes.
    name: An optional name for the operation.

  Returns:
    A `Tensor`, The shifted tensor.
  """
  with _ops.name_scope(name, "fftshift") as name:
    x = _ops.convert_to_tensor(x)
    if axes is None:
      axes = tuple(range(x.shape.ndims))
      shift = _array_ops.shape(x) // 2
    elif isinstance(axes, int):
      shift = _array_ops.shape(x)[axes] // 2
    else:
      rank = _array_ops.rank(x)
      # allows negative axis
      axes = _array_ops.where(_math_ops.less(axes, 0), axes + rank, axes)
      shift = _array_ops.gather(_array_ops.shape(x), axes) // 2

    return manip_ops.roll(x, shift, axes, name)


@tf_export("signal.ifftshift")
@dispatch.add_dispatch_support
def ifftshift(x, axes=None, name=None):
  """The inverse of fftshift.

  Although identical for even-length x,
  the functions differ by one sample for odd-length x.

  @compatibility(numpy)
  Equivalent to numpy.fft.ifftshift.
  https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifftshift.html
  @end_compatibility

  For example:

  ```python
  x = tf.signal.ifftshift([[ 0.,  1.,  2.],[ 3.,  4., -4.],[-3., -2., -1.]])
  x.numpy() # array([[ 4., -4.,  3.],[-2., -1., -3.],[ 1.,  2.,  0.]])
  ```

  Args:
    x: `Tensor`, input tensor.
    axes: `int` or shape `tuple` Axes over which to calculate. Defaults to None,
      which shifts all axes.
    name: An optional name for the operation.

  Returns:
    A `Tensor`, The shifted tensor.
  """
  with _ops.name_scope(name, "ifftshift") as name:
    x = _ops.convert_to_tensor(x)
    if axes is None:
      axes = tuple(range(x.shape.ndims))
      shift = -(_array_ops.shape(x) // 2)
    elif isinstance(axes, int):
      shift = -(_array_ops.shape(x)[axes] // 2)
    else:
      rank = _array_ops.rank(x)
      # allows negative axis
      axes = _array_ops.where(_math_ops.less(axes, 0), axes + rank, axes)
      shift = -(_array_ops.gather(_array_ops.shape(x), axes) // 2)

    return manip_ops.roll(x, shift, axes, name)


# RFFT/IRFFT gradients must be registered after the wrapped ops above exist.
_ops.RegisterGradient("RFFT")(_rfft_grad_helper(1, irfft))
_ops.RegisterGradient("IRFFT")(_irfft_grad_helper(1, rfft))
_ops.RegisterGradient("RFFT2D")(_rfft_grad_helper(2, irfft2d))
_ops.RegisterGradient("IRFFT2D")(_irfft_grad_helper(2, rfft2d))
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/mel_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/mel_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..47d85859ddd9b4b2266af5a3c0700f55be8c8dad
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/mel_ops.py
@@ -0,0 +1,216 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""mel conversion ops."""

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import shape_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export


# mel spectrum constants.
# 700.0 is the break frequency of the HTK mel formula; 1127.0 is its
# natural-log scale factor (presumably 2595 / ln(10) — see the
# mel(f) = 2595 * log10(1 + f/700) formula documented below).
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0


def _mel_to_hertz(mel_values, name=None):
  """Converts frequencies in `mel_values` from the mel scale to linear scale.

  Exact algebraic inverse of `_hertz_to_mel` below.

  Args:
    mel_values: A `Tensor` of frequencies in the mel scale.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same shape and type as `mel_values` containing linear
    scale frequencies in Hertz.
  """
  with ops.name_scope(name, 'mel_to_hertz', [mel_values]):
    mel_values = ops.convert_to_tensor(mel_values)
    return _MEL_BREAK_FREQUENCY_HERTZ * (
        math_ops.exp(mel_values / _MEL_HIGH_FREQUENCY_Q) - 1.0
    )


def _hertz_to_mel(frequencies_hertz, name=None):
  """Converts frequencies in `frequencies_hertz` in Hertz to the mel scale.

  Args:
    frequencies_hertz: A `Tensor` of frequencies in Hertz.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of the same shape and type of `frequencies_hertz` containing
    frequencies in the mel scale.
  """
  with ops.name_scope(name, 'hertz_to_mel', [frequencies_hertz]):
    frequencies_hertz = ops.convert_to_tensor(frequencies_hertz)
    return _MEL_HIGH_FREQUENCY_Q * math_ops.log(
        1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ))


def _validate_arguments(num_mel_bins, sample_rate,
                        lower_edge_hertz, upper_edge_hertz, dtype):
  """Checks the inputs to linear_to_mel_weight_matrix."""
  if num_mel_bins <= 0:
    raise ValueError('num_mel_bins must be positive. Got: %s' % num_mel_bins)
  if lower_edge_hertz < 0.0:
    raise ValueError('lower_edge_hertz must be non-negative. Got: %s' %
                     lower_edge_hertz)
  if lower_edge_hertz >= upper_edge_hertz:
    raise ValueError('lower_edge_hertz %.1f >= upper_edge_hertz %.1f' %
                     (lower_edge_hertz, upper_edge_hertz))
  # Sample-rate-dependent checks only apply when sample_rate is a Python
  # number; a symbolic Tensor sample_rate cannot be validated here.
  if not isinstance(sample_rate, tensor.Tensor):
    if sample_rate <= 0.0:
      raise ValueError('sample_rate must be positive. Got: %s' % sample_rate)
    if upper_edge_hertz > sample_rate / 2:
      raise ValueError('upper_edge_hertz must not be larger than the Nyquist '
                       'frequency (sample_rate / 2). Got %s for sample_rate: %s'
                       % (upper_edge_hertz, sample_rate))
  if not dtype.is_floating:
    raise ValueError('dtype must be a floating point type. Got: %s' % dtype)


@tf_export('signal.linear_to_mel_weight_matrix')
@dispatch.add_dispatch_support
def linear_to_mel_weight_matrix(num_mel_bins=20,
                                num_spectrogram_bins=129,
                                sample_rate=8000,
                                lower_edge_hertz=125.0,
                                upper_edge_hertz=3800.0,
                                dtype=dtypes.float32,
                                name=None):
  r"""Returns a matrix to warp linear scale spectrograms to the [mel scale][mel].

  Returns a weight matrix that can be used to re-weight a `Tensor` containing
  `num_spectrogram_bins` linearly sampled frequency information from
  `[0, sample_rate / 2]` into `num_mel_bins` frequency information from
  `[lower_edge_hertz, upper_edge_hertz]` on the [mel scale][mel].

  This function follows the [Hidden Markov Model Toolkit
  (HTK)](http://htk.eng.cam.ac.uk/) convention, defining the mel scale in
  terms of a frequency in hertz according to the following formula:

      $$\textrm{mel}(f) = 2595 * \textrm{log}_{10}(1 + \frac{f}{700})$$

  In the returned matrix, all the triangles (filterbanks) have a peak value
  of 1.0.

  For example, the returned matrix `A` can be used to right-multiply a
  spectrogram `S` of shape `[frames, num_spectrogram_bins]` of linear
  scale spectrum values (e.g. STFT magnitudes) to generate a "mel spectrogram"
  `M` of shape `[frames, num_mel_bins]`.

      # `S` has shape [frames, num_spectrogram_bins]
      # `M` has shape [frames, num_mel_bins]
      M = tf.matmul(S, A)

  The matrix can be used with `tf.tensordot` to convert an arbitrary rank
  `Tensor` of linear-scale spectral bins into the mel scale.

      # S has shape [..., num_spectrogram_bins].
      # M has shape [..., num_mel_bins].
      M = tf.tensordot(S, A, 1)

  Args:
    num_mel_bins: Python int. How many bands in the resulting mel spectrum.
    num_spectrogram_bins: An integer `Tensor`. How many bins there are in the
      source spectrogram data, which is understood to be `fft_size // 2 + 1`,
      i.e. the spectrogram only contains the nonredundant FFT bins.
    sample_rate: An integer or float `Tensor`. Samples per second of the input
      signal used to create the spectrogram. Used to figure out the frequencies
      corresponding to each spectrogram bin, which dictates how they are mapped
      into the mel scale.
    lower_edge_hertz: Python float. Lower bound on the frequencies to be
      included in the mel spectrum. This corresponds to the lower edge of the
      lowest triangular band.
    upper_edge_hertz: Python float. The desired top edge of the highest
      frequency band.
    dtype: The `DType` of the result matrix. Must be a floating point type.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of shape `[num_spectrogram_bins, num_mel_bins]`.
+ + Raises: + ValueError: If `num_mel_bins`/`num_spectrogram_bins`/`sample_rate` are not + positive, `lower_edge_hertz` is negative, frequency edges are incorrectly + ordered, `upper_edge_hertz` is larger than the Nyquist frequency. + + [mel]: https://en.wikipedia.org/wiki/Mel_scale + """ + with ops.name_scope(name, 'linear_to_mel_weight_matrix') as name: + # Convert Tensor `sample_rate` to float, if possible. + if isinstance(sample_rate, tensor.Tensor): + maybe_const_val = tensor_util.constant_value(sample_rate) + if maybe_const_val is not None: + sample_rate = maybe_const_val + + # Note: As num_spectrogram_bins is passed to `math_ops.linspace` + # and the validation is already done in linspace (both in shape function + # and in kernel), there is no need to validate num_spectrogram_bins here. + _validate_arguments(num_mel_bins, sample_rate, + lower_edge_hertz, upper_edge_hertz, dtype) + + # This function can be constant folded by graph optimization since there are + # no Tensor inputs. + sample_rate = math_ops.cast( + sample_rate, dtype, name='sample_rate') + lower_edge_hertz = ops.convert_to_tensor( + lower_edge_hertz, dtype, name='lower_edge_hertz') + upper_edge_hertz = ops.convert_to_tensor( + upper_edge_hertz, dtype, name='upper_edge_hertz') + zero = ops.convert_to_tensor(0.0, dtype) + + # HTK excludes the spectrogram DC bin. + bands_to_zero = 1 + nyquist_hertz = sample_rate / 2.0 + linear_frequencies = math_ops.linspace( + zero, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:] + spectrogram_bins_mel = array_ops.expand_dims( + _hertz_to_mel(linear_frequencies), 1) + + # Compute num_mel_bins triples of (lower_edge, center, upper_edge). The + # center of each band is the lower and upper edge of the adjacent bands. + # Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into + # num_mel_bins + 2 pieces. 
+ band_edges_mel = shape_ops.frame( + math_ops.linspace(_hertz_to_mel(lower_edge_hertz), + _hertz_to_mel(upper_edge_hertz), + num_mel_bins + 2), frame_length=3, frame_step=1) + + # Split the triples up and reshape them into [1, num_mel_bins] tensors. + lower_edge_mel, center_mel, upper_edge_mel = tuple(array_ops.reshape( + t, [1, num_mel_bins]) for t in array_ops.split( + band_edges_mel, 3, axis=1)) + + # Calculate lower and upper slopes for every spectrogram bin. + # Line segments are linear in the mel domain, not Hertz. + lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / ( + center_mel - lower_edge_mel) + upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / ( + upper_edge_mel - center_mel) + + # Intersect the line segments with each other and zero. + mel_weights_matrix = math_ops.maximum( + zero, math_ops.minimum(lower_slopes, upper_slopes)) + + # Re-add the zeroed lower bins we sliced out above. + return array_ops.pad( + mel_weights_matrix, [[bands_to_zero, 0], [0, 0]], name=name) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/mfcc_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/mfcc_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..4ec83b674aef258b6be2e147f7656d137e637bd8 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/mfcc_ops.py @@ -0,0 +1,107 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Mel-Frequency Cepstral Coefficients (MFCCs) ops.""" + +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.signal import dct_ops +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export('signal.mfccs_from_log_mel_spectrograms') +@dispatch.add_dispatch_support +def mfccs_from_log_mel_spectrograms(log_mel_spectrograms, name=None): + """Computes [MFCCs][mfcc] of `log_mel_spectrograms`. + + Implemented with GPU-compatible ops and supports gradients. + + [Mel-Frequency Cepstral Coefficient (MFCC)][mfcc] calculation consists of + taking the DCT-II of a log-magnitude mel-scale spectrogram. [HTK][htk]'s MFCCs + use a particular scaling of the DCT-II which is almost orthogonal + normalization. We follow this convention. + + All `num_mel_bins` MFCCs are returned and it is up to the caller to select + a subset of the MFCCs based on their application. For example, it is typical + to only use the first few for speech recognition, as this results in + an approximately pitch-invariant representation of the signal. + + For example: + + ```python + batch_size, num_samples, sample_rate = 32, 32000, 16000.0 + # A Tensor of [batch_size, num_samples] mono PCM samples in the range [-1, 1]. + pcm = tf.random.normal([batch_size, num_samples], dtype=tf.float32) + + # A 1024-point STFT with frames of 64 ms and 75% overlap. + stfts = tf.signal.stft(pcm, frame_length=1024, frame_step=256, + fft_length=1024) + spectrograms = tf.abs(stfts) + + # Warp the linear scale spectrograms into the mel-scale. 
+ num_spectrogram_bins = stfts.shape[-1].value + lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, 80 + linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix( + num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz, + upper_edge_hertz) + mel_spectrograms = tf.tensordot( + spectrograms, linear_to_mel_weight_matrix, 1) + mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate( + linear_to_mel_weight_matrix.shape[-1:])) + + # Compute a stabilized log to get log-magnitude mel-scale spectrograms. + log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6) + + # Compute MFCCs from log_mel_spectrograms and take the first 13. + mfccs = tf.signal.mfccs_from_log_mel_spectrograms( + log_mel_spectrograms)[..., :13] + ``` + + Args: + log_mel_spectrograms: A `[..., num_mel_bins]` `float32`/`float64` `Tensor` + of log-magnitude mel-scale spectrograms. + name: An optional name for the operation. + Returns: + A `[..., num_mel_bins]` `float32`/`float64` `Tensor` of the MFCCs of + `log_mel_spectrograms`. + + Raises: + ValueError: If `num_mel_bins` is not positive. + + [mfcc]: https://en.wikipedia.org/wiki/Mel-frequency_cepstrum + [htk]: https://en.wikipedia.org/wiki/HTK_(software) + """ + with ops.name_scope(name, 'mfccs_from_log_mel_spectrograms', + [log_mel_spectrograms]): + # Compute the DCT-II of the resulting log-magnitude mel-scale spectrogram. + # The DCT used in HTK scales every basis vector by sqrt(2/N), which is the + # scaling required for an "orthogonal" DCT-II *except* in the 0th bin, where + # the true orthogonal DCT (as implemented by scipy) scales by sqrt(1/N). For + # this reason, we don't apply orthogonal normalization and scale the DCT by + # `0.5 * sqrt(2/N)` manually. 
+ log_mel_spectrograms = ops.convert_to_tensor(log_mel_spectrograms) + if (log_mel_spectrograms.shape.ndims and + log_mel_spectrograms.shape.dims[-1].value is not None): + num_mel_bins = log_mel_spectrograms.shape.dims[-1].value + if num_mel_bins == 0: + raise ValueError('num_mel_bins must be positive. Got: %s' % + log_mel_spectrograms) + else: + num_mel_bins = array_ops.shape(log_mel_spectrograms)[-1] + + dct2 = dct_ops.dct(log_mel_spectrograms, type=2) + return dct2 * math_ops.rsqrt( + math_ops.cast(num_mel_bins, dct2.dtype) * 2.0) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/reconstruction_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/reconstruction_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..fdef247b2a78a649cdb689818d4bbf1204761007 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/reconstruction_ops.py @@ -0,0 +1,163 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Signal reconstruction via overlapped addition of frames.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("signal.overlap_and_add") +@dispatch.add_dispatch_support +def overlap_and_add(signal, frame_step, name=None): + """Reconstructs a signal from a framed representation. + + Adds potentially overlapping frames of a signal with shape + `[..., frames, frame_length]`, offsetting subsequent frames by `frame_step`. + The resulting tensor has shape `[..., output_size]` where + + output_size = (frames - 1) * frame_step + frame_length + + Args: + signal: A [..., frames, frame_length] `Tensor`. All dimensions may be + unknown, and rank must be at least 2. + frame_step: An integer or scalar `Tensor` denoting overlap offsets. Must be + less than or equal to `frame_length`. + name: An optional name for the operation. + + Returns: + A `Tensor` with shape `[..., output_size]` containing the overlap-added + frames of `signal`'s inner-most two dimensions. + + Raises: + ValueError: If `signal`'s rank is less than 2, or `frame_step` is not a + scalar integer. + """ + with ops.name_scope(name, "overlap_and_add", [signal, frame_step]): + signal = ops.convert_to_tensor(signal, name="signal") + signal.shape.with_rank_at_least(2) + frame_step = ops.convert_to_tensor(frame_step, name="frame_step") + frame_step.shape.assert_has_rank(0) + if not frame_step.dtype.is_integer: + raise ValueError("frame_step must be an integer. 
Got %s" % + frame_step.dtype) + frame_step_static = tensor_util.constant_value(frame_step) + frame_step_is_static = frame_step_static is not None + frame_step = frame_step_static if frame_step_is_static else frame_step + + signal_shape = array_ops.shape(signal) + signal_shape_static = tensor_util.constant_value(signal_shape) + if signal_shape_static is not None: + signal_shape = signal_shape_static + + # All dimensions that are not part of the overlap-and-add. Can be empty for + # rank 2 inputs. + outer_dimensions = signal_shape[:-2] + outer_rank = array_ops.size(outer_dimensions) + outer_rank_static = tensor_util.constant_value(outer_rank) + if outer_rank_static is not None: + outer_rank = outer_rank_static + + def full_shape(inner_shape): + return array_ops.concat([outer_dimensions, inner_shape], 0) + + frame_length = signal_shape[-1] + frames = signal_shape[-2] + + # Compute output length. + output_length = frame_length + frame_step * (frames - 1) + + # If frame_length is equal to frame_step, there's no overlap so just + # reshape the tensor. + if (frame_step_is_static and signal.shape.dims is not None and + frame_step == signal.shape.dims[-1].value): + output_shape = full_shape([output_length]) + return array_ops.reshape(signal, output_shape, name="fast_path") + + # The following code is documented using this example: + # + # frame_step = 2 + # signal.shape = (3, 5) + # a b c d e + # f g h i j + # k l m n o + + # Compute the number of segments, per frame. + segments = -(-frame_length // frame_step) # Divide and round up. + + # Pad the frame_length dimension to a multiple of the frame step. 
+ # Pad the frames dimension by `segments` so that signal.shape = (6, 6) + # a b c d e 0 + # f g h i j 0 + # k l m n o 0 + # 0 0 0 0 0 0 + # 0 0 0 0 0 0 + # 0 0 0 0 0 0 + paddings = [[0, segments], [0, segments * frame_step - frame_length]] + outer_paddings = array_ops.zeros([outer_rank, 2], dtypes.int32) + paddings = array_ops.concat([outer_paddings, paddings], 0) + signal = array_ops.pad(signal, paddings) + + # Reshape so that signal.shape = (3, 6, 2) + # ab cd e0 + # fg hi j0 + # kl mn o0 + # 00 00 00 + # 00 00 00 + # 00 00 00 + shape = full_shape([frames + segments, segments, frame_step]) + signal = array_ops.reshape(signal, shape) + + # Transpose dimensions so that signal.shape = (3, 6, 2) + # ab fg kl 00 00 00 + # cd hi mn 00 00 00 + # e0 j0 o0 00 00 00 + perm = array_ops.concat( + [math_ops.range(outer_rank), outer_rank + [1, 0, 2]], 0) + perm_static = tensor_util.constant_value(perm) + perm = perm_static if perm_static is not None else perm + signal = array_ops.transpose(signal, perm) + + # Reshape so that signal.shape = (18, 2) + # ab fg kl 00 00 00 cd hi mn 00 00 00 e0 j0 o0 00 00 00 + shape = full_shape([(frames + segments) * segments, frame_step]) + signal = array_ops.reshape(signal, shape) + + # Truncate so that signal.shape = (15, 2) + # ab fg kl 00 00 00 cd hi mn 00 00 00 e0 j0 o0 + signal = signal[..., :(frames + segments - 1) * segments, :] + + # Reshape so that signal.shape = (3, 5, 2) + # ab fg kl 00 00 + # 00 cd hi mn 00 + # 00 00 e0 j0 o0 + shape = full_shape([segments, (frames + segments - 1), frame_step]) + signal = array_ops.reshape(signal, shape) + + # Now, reduce over the columns, to achieve the desired sum. + signal = math_ops.reduce_sum(signal, -3) + + # Flatten the array. + shape = full_shape([(frames + segments - 1) * frame_step]) + signal = array_ops.reshape(signal, shape) + + # Truncate to final length. 
+ signal = signal[..., :output_length] + + return signal diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/shape_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/shape_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..e4846b90a2f6db02cea4eba470b4504cf025bf4f --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/shape_ops.py @@ -0,0 +1,231 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""General shape ops for frames.""" + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.signal import util_ops +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + + +def _infer_frame_shape(signal, frame_length, frame_step, pad_end, axis): + """Infers the shape of the return value of `frame`.""" + frame_length = tensor_util.constant_value(frame_length) + frame_step = tensor_util.constant_value(frame_step) + axis = tensor_util.constant_value(axis) + if signal.shape.ndims is None: + return None + if axis is None: + return [None] * (signal.shape.ndims + 1) + + signal_shape = signal.shape.as_list() + num_frames = None + frame_axis = signal_shape[axis] + outer_dimensions = signal_shape[:axis] + inner_dimensions = signal_shape[axis:][1:] + if signal_shape and frame_axis is not None: + if frame_step is not None and pad_end: + # Double negative is so that we round up. + num_frames = max(0, -(-frame_axis // frame_step)) + elif frame_step is not None and frame_length is not None: + assert not pad_end + num_frames = max( + 0, (frame_axis - frame_length + frame_step) // frame_step) + return outer_dimensions + [num_frames, frame_length] + inner_dimensions + + +@tf_export("signal.frame") +@dispatch.add_dispatch_support +def frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1, + name=None): + """Expands `signal`'s `axis` dimension into frames of `frame_length`. + + Slides a window of size `frame_length` over `signal`'s `axis` dimension + with a stride of `frame_step`, replacing the `axis` dimension with + `[frames, frame_length]` frames. 
+ + If `pad_end` is True, window positions that are past the end of the `axis` + dimension are padded with `pad_value` until the window moves fully past the + end of the dimension. Otherwise, only window positions that fully overlap the + `axis` dimension are produced. + + For example: + + >>> # A batch size 3 tensor of 9152 audio samples. + >>> audio = tf.random.normal([3, 9152]) + >>> + >>> # Compute overlapping frames of length 512 with a step of 180 (frames overlap + >>> # by 332 samples). By default, only 49 frames are generated since a frame + >>> # with start position j*180 for j > 48 would overhang the end. + >>> frames = tf.signal.frame(audio, 512, 180) + >>> frames.shape.assert_is_compatible_with([3, 49, 512]) + >>> + >>> # When pad_end is enabled, the final two frames are kept (padded with zeros). + >>> frames = tf.signal.frame(audio, 512, 180, pad_end=True) + >>> frames.shape.assert_is_compatible_with([3, 51, 512]) + + If the dimension along `axis` is N, and `pad_end=False`, the number of frames + can be computed by: + ```python + num_frames = 1 + (N - frame_size) // frame_step + ``` + If `pad_end=True`, the number of frames can be computed by: + ```python + num_frames = -(-N // frame_step) # ceiling division + ``` + + Args: + signal: A `[..., samples, ...]` `Tensor`. The rank and dimensions + may be unknown. Rank must be at least 1. + frame_length: The frame length in samples. An integer or scalar `Tensor`. + frame_step: The frame hop size in samples. An integer or scalar `Tensor`. + pad_end: Whether to pad the end of `signal` with `pad_value`. + pad_value: An optional scalar `Tensor` to use where the input signal + does not exist when `pad_end` is True. + axis: A scalar integer `Tensor` indicating the axis to frame. Defaults to + the last axis. Supports negative values for indexing from the end. + name: An optional name for the operation. + + Returns: + A `Tensor` of frames with shape `[..., num_frames, frame_length, ...]`. 
+ + Raises: + ValueError: If `frame_length`, `frame_step`, `pad_value`, or `axis` are not + scalar. + """ + with ops.name_scope(name, "frame", [signal, frame_length, frame_step, + pad_value]): + signal = ops.convert_to_tensor(signal, name="signal") + frame_length = ops.convert_to_tensor(frame_length, name="frame_length") + frame_step = ops.convert_to_tensor(frame_step, name="frame_step") + axis = ops.convert_to_tensor(axis, name="axis") + + signal.shape.with_rank_at_least(1) + frame_length.shape.assert_has_rank(0) + frame_step.shape.assert_has_rank(0) + axis.shape.assert_has_rank(0) + + result_shape = _infer_frame_shape(signal, frame_length, frame_step, pad_end, + axis) + + def maybe_constant(val): + val_static = tensor_util.constant_value(val) + return (val_static, True) if val_static is not None else (val, False) + + signal_shape, signal_shape_is_static = maybe_constant( + array_ops.shape(signal)) + axis, axis_is_static = maybe_constant(axis) + + if signal_shape_is_static and axis_is_static: + # Axis can be negative. Convert it to positive. + axis = range(len(signal_shape))[axis] + outer_dimensions, length_samples, inner_dimensions = np.split( + signal_shape, indices_or_sections=[axis, axis + 1]) + length_samples = length_samples.item() + else: + signal_rank = array_ops.rank(signal) + # Axis can be negative. Convert it to positive. + axis = math_ops.range(signal_rank)[axis] + outer_dimensions, length_samples, inner_dimensions = array_ops.split( + signal_shape, [axis, 1, signal_rank - 1 - axis]) + length_samples = array_ops.reshape(length_samples, []) + num_outer_dimensions = array_ops.size(outer_dimensions) + num_inner_dimensions = array_ops.size(inner_dimensions) + + # If padding is requested, pad the input signal tensor with pad_value. + if pad_end: + pad_value = ops.convert_to_tensor(pad_value, signal.dtype) + pad_value.shape.assert_has_rank(0) + + # Calculate number of frames, using double negatives to round up. 
+ num_frames = -(-length_samples // frame_step) + + # Pad the signal by up to frame_length samples based on how many samples + # are remaining starting from last_frame_position. + pad_samples = math_ops.maximum( + 0, frame_length + frame_step * (num_frames - 1) - length_samples) + + # Pad the inner dimension of signal by pad_samples. + paddings = array_ops.concat([ + array_ops.zeros([num_outer_dimensions, 2], dtype=pad_samples.dtype), + ops.convert_to_tensor([[0, pad_samples]]), + array_ops.zeros([num_inner_dimensions, 2], dtype=pad_samples.dtype) + ], 0) + signal = array_ops.pad(signal, paddings, constant_values=pad_value) + + signal_shape = array_ops.shape(signal) + length_samples = signal_shape[axis] + else: + num_frames = math_ops.maximum( + constant_op.constant(0, dtype=frame_length.dtype), + 1 + (length_samples - frame_length) // frame_step) + + subframe_length, _ = maybe_constant(util_ops.gcd(frame_length, frame_step)) + subframes_per_frame = frame_length // subframe_length + subframes_per_hop = frame_step // subframe_length + num_subframes = length_samples // subframe_length + + slice_shape = array_ops.concat([outer_dimensions, + [num_subframes * subframe_length], + inner_dimensions], 0) + subframe_shape = array_ops.concat([outer_dimensions, + [num_subframes, subframe_length], + inner_dimensions], 0) + subframes = array_ops.reshape(array_ops.strided_slice( + signal, array_ops.zeros_like(signal_shape), + slice_shape), subframe_shape) + + # frame_selector is a [num_frames, subframes_per_frame] tensor + # that indexes into the appropriate frame in subframes. For example: + # [[0, 0, 0, 0], [2, 2, 2, 2], [4, 4, 4, 4]] + frame_selector = array_ops.reshape( + math_ops.range(num_frames, dtype=frame_length.dtype) * + subframes_per_hop, [num_frames, 1]) + + # subframe_selector is a [num_frames, subframes_per_frame] tensor + # that indexes into the appropriate subframe within a frame. 
For example: + # [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]] + subframe_selector = array_ops.reshape( + math_ops.range(subframes_per_frame, dtype=frame_length.dtype), + [1, subframes_per_frame]) + + # Adding the 2 selector tensors together produces a [num_frames, + # subframes_per_frame] tensor of indices to use with tf.gather to select + # subframes from subframes. We then reshape the inner-most + # subframes_per_frame dimension to stitch the subframes together into + # frames. For example: [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]]. + selector = frame_selector + subframe_selector + + # Dtypes have to match. + outer_dimensions = ops.convert_to_tensor(outer_dimensions) + inner_dimensions = ops.convert_to_tensor( + inner_dimensions, dtype=outer_dimensions.dtype) + mid_dimensions = ops.convert_to_tensor([num_frames, frame_length], + dtype=outer_dimensions.dtype) + + frames = array_ops.reshape( + array_ops.gather(subframes, selector, axis=axis), + array_ops.concat([outer_dimensions, mid_dimensions, inner_dimensions], + 0)) + + if result_shape: + frames.set_shape(result_shape) + return frames diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/signal.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/signal.py new file mode 100644 index 0000000000000000000000000000000000000000..53d9e1f0a7bcafad341eca66fc89b0ac51aef985 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/signal.py @@ -0,0 +1,44 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Signal processing operations.""" + +# pylint: disable=unused-import +from tensorflow.python.ops import signal +from tensorflow.python.ops.signal.dct_ops import dct +from tensorflow.python.ops.signal.fft_ops import fft +from tensorflow.python.ops.signal.fft_ops import fft2d +from tensorflow.python.ops.signal.fft_ops import fft3d +from tensorflow.python.ops.signal.fft_ops import fftshift +from tensorflow.python.ops.signal.fft_ops import rfft +from tensorflow.python.ops.signal.fft_ops import rfft2d +from tensorflow.python.ops.signal.fft_ops import rfft3d +from tensorflow.python.ops.signal.dct_ops import idct +from tensorflow.python.ops.signal.fft_ops import ifft +from tensorflow.python.ops.signal.fft_ops import ifft2d +from tensorflow.python.ops.signal.fft_ops import ifft3d +from tensorflow.python.ops.signal.fft_ops import ifftshift +from tensorflow.python.ops.signal.fft_ops import irfft +from tensorflow.python.ops.signal.fft_ops import irfft2d +from tensorflow.python.ops.signal.fft_ops import irfft3d +from tensorflow.python.ops.signal.mel_ops import linear_to_mel_weight_matrix +from tensorflow.python.ops.signal.mfcc_ops import mfccs_from_log_mel_spectrograms +from tensorflow.python.ops.signal.reconstruction_ops import overlap_and_add +from tensorflow.python.ops.signal.shape_ops import frame +from tensorflow.python.ops.signal.spectral_ops import inverse_stft +from tensorflow.python.ops.signal.spectral_ops import inverse_stft_window_fn +from 
tensorflow.python.ops.signal.spectral_ops import stft +from tensorflow.python.ops.signal.window_ops import hamming_window +from tensorflow.python.ops.signal.window_ops import hann_window +# pylint: enable=unused-import diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/spectral_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/spectral_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f10826b655d1f6553aa4c89efb705731f186b793 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/signal/spectral_ops.py @@ -0,0 +1,449 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Spectral operations (e.g. 
@tf_export('signal.stft')
@dispatch.add_dispatch_support
def stft(signals, frame_length, frame_step, fft_length=None,
         window_fn=window_ops.hann_window,
         pad_end=False, name=None):
  """Computes the [Short-time Fourier Transform][stft] of `signals`.

  Implemented with TPU/GPU-compatible ops and supports gradients.

  Args:
    signals: A `[..., samples]` `float32`/`float64` `Tensor` of real-valued
      signals.
    frame_length: An integer scalar `Tensor`. The window length in samples.
    frame_step: An integer scalar `Tensor`. The number of samples to step.
    fft_length: An integer scalar `Tensor`. The size of the FFT to apply.
      If not provided, uses the smallest power of 2 enclosing `frame_length`.
    window_fn: A callable that takes a window length and a `dtype` keyword
      argument and returns a `[window_length]` `Tensor` of samples in the
      provided datatype. If set to `None`, no windowing is used.
    pad_end: Whether to pad the end of `signals` with zeros when the provided
      frame length and step produces a frame that lies partially past its end.
    name: An optional name for the operation.

  Returns:
    A `[..., frames, fft_unique_bins]` `Tensor` of `complex64`/`complex128`
    STFT values where `fft_unique_bins` is `fft_length // 2 + 1` (the unique
    components of the FFT).

  Raises:
    ValueError: If `signals` is not at least rank 1, `frame_length` is
      not scalar, or `frame_step` is not scalar.

  [stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
  """
  with ops.name_scope(name, 'stft', [signals, frame_length, frame_step]):
    # Validate ranks: signals is a (batch of) 1-D waveform(s); the frame
    # parameters must be scalars.
    signals = ops.convert_to_tensor(signals, name='signals')
    signals.shape.with_rank_at_least(1)
    frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
    frame_length.shape.assert_has_rank(0)
    frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
    frame_step.shape.assert_has_rank(0)

    if fft_length is None:
      fft_length = _enclosing_power_of_two(frame_length)
    else:
      fft_length = ops.convert_to_tensor(fft_length, name='fft_length')

    # Slice the signals into overlapping frames and window each frame.
    frames = shape_ops.frame(
        signals, frame_length, frame_step, pad_end=pad_end)
    if window_fn is not None:
      frames *= window_fn(frame_length, dtype=frames.dtype)

    # rfft yields the fft_length // 2 + 1 unique bins of each real frame.
    return fft_ops.rfft(frames, [fft_length])
@tf_export('signal.inverse_stft_window_fn')
@dispatch.add_dispatch_support
def inverse_stft_window_fn(frame_step,
                           forward_window_fn=window_ops.hann_window,
                           name=None):
  """Generates a window function that can be used in `inverse_stft`.

  Constructs a window that is equal to the forward window with a further
  pointwise amplitude correction. `inverse_stft_window_fn` is equivalent to
  `forward_window_fn` in the case where it would produce an exact inverse.

  See examples in `inverse_stft` documentation for usage.

  Args:
    frame_step: An integer scalar `Tensor`. The number of samples to step.
    forward_window_fn: window_fn used in the forward transform, `stft`.
    name: An optional name for the operation.

  Returns:
    A callable that takes a window length and a `dtype` keyword argument and
    returns a `[window_length]` `Tensor` of samples in the provided datatype.
    The returned window is suitable for reconstructing original waveform in
    inverse_stft.
  """
  def inverse_stft_window_fn_inner(frame_length, dtype):
    """Computes a window that can be used in `inverse_stft`.

    Args:
      frame_length: An integer scalar `Tensor`. The window length in samples.
      dtype: Data type of waveform passed to `stft`.

    Returns:
      A window suitable for reconstructing original waveform in `inverse_stft`.

    Raises:
      ValueError: If `frame_length` or `frame_step` is not scalar, or
        `forward_window_fn` is not a callable that takes a window length and
        a `dtype` keyword argument and returns a `[window_length]` `Tensor`
        of samples in the provided datatype.
    """
    with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
      step = ops.convert_to_tensor(frame_step, name='frame_step')
      step.shape.assert_has_rank(0)
      length = ops.convert_to_tensor(frame_length, name='frame_length')
      length.shape.assert_has_rank(0)

      # Equation 7 from Griffin & Lim: divide the forward window by the sum
      # of squared window values over all frames that overlap each sample.
      fwd = forward_window_fn(length, dtype=dtype)
      overlaps = -(-length // step)  # Ceiling division.  pylint: disable=invalid-unary-operand-type
      squared = math_ops.square(fwd)
      # Pad to a whole number of steps, fold into [overlaps, step], and sum
      # across overlapping frames; then broadcast the per-step sums back out.
      squared = array_ops.pad(squared, [(0, overlaps * step - length)])
      per_step = math_ops.reduce_sum(
          array_ops.reshape(squared, [overlaps, step]), 0, keepdims=True)
      denom = array_ops.reshape(
          array_ops.tile(per_step, [overlaps, 1]), [overlaps * step])

      return fwd / denom[:length]
  return inverse_stft_window_fn_inner
@tf_export('signal.inverse_stft')
@dispatch.add_dispatch_support
def inverse_stft(stfts,
                 frame_length,
                 frame_step,
                 fft_length=None,
                 window_fn=window_ops.hann_window,
                 name=None):
  """Computes the inverse [Short-time Fourier Transform][stft] of `stfts`.

  To reconstruct an original waveform, a complementary window function should
  be used with `inverse_stft`. Such a window function can be constructed with
  `tf.signal.inverse_stft_window_fn`. Example:

  ```python
  frame_length = 400
  frame_step = 160
  waveform = tf.random.normal(dtype=tf.float32, shape=[1000])
  stft = tf.signal.stft(waveform, frame_length, frame_step)
  inverse_stft = tf.signal.inverse_stft(
      stft, frame_length, frame_step,
      window_fn=tf.signal.inverse_stft_window_fn(frame_step))
  ```

  If a custom `window_fn` is used with `tf.signal.stft`, it must be passed to
  `tf.signal.inverse_stft_window_fn` via its `forward_window_fn` argument.

  Implemented with TPU/GPU-compatible ops and supports gradients.

  Args:
    stfts: A `complex64`/`complex128` `[..., frames, fft_unique_bins]`
      `Tensor` of STFT bins representing a batch of `fft_length`-point STFTs
      where `fft_unique_bins` is `fft_length // 2 + 1`
    frame_length: An integer scalar `Tensor`. The window length in samples.
    frame_step: An integer scalar `Tensor`. The number of samples to step.
    fft_length: An integer scalar `Tensor`. The size of the FFT that produced
      `stfts`. If not provided, uses the smallest power of 2 enclosing
      `frame_length`.
    window_fn: A callable that takes a window length and a `dtype` keyword
      argument and returns a `[window_length]` `Tensor` of samples in the
      provided datatype. If set to `None`, no windowing is used.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `Tensor` of `float32`/`float64` signals representing
    the inverse STFT for each input STFT in `stfts`.

  Raises:
    ValueError: If `stfts` is not at least rank 2, `frame_length` is not
      scalar, `frame_step` is not scalar, or `fft_length` is not scalar.

  [stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
  """
  with ops.name_scope(name, 'inverse_stft', [stfts]):
    stfts = ops.convert_to_tensor(stfts, name='stfts')
    stfts.shape.with_rank_at_least(2)
    frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
    frame_length.shape.assert_has_rank(0)
    frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
    frame_step.shape.assert_has_rank(0)
    if fft_length is None:
      fft_length = _enclosing_power_of_two(frame_length)
    else:
      fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
      fft_length.shape.assert_has_rank(0)

    frames = fft_ops.irfft(stfts, [fft_length])

    # fft_length may differ from frame_length, so pad or truncate each
    # reconstructed frame to exactly frame_length samples.
    length_static = tensor_util.constant_value(frame_length)
    inner_dim = (None if frames.shape.ndims is None
                 else frames.shape.as_list()[-1])
    if length_static is None or inner_dim is None:
      # Dynamic case: truncate first, then zero-pad up to frame_length.
      frames = frames[..., :frame_length]
      frames_rank = array_ops.rank(frames)
      frames_shape = array_ops.shape(frames)
      paddings = array_ops.concat(
          [array_ops.zeros([frames_rank - 1, 2], dtype=frame_length.dtype),
           [[0, math_ops.maximum(0, frame_length - frames_shape[-1])]]], 0)
      frames = array_ops.pad(frames, paddings)
    elif inner_dim > length_static:
      # Static case: the frame is too long -- truncate.
      frames = frames[..., :length_static]
    elif inner_dim < length_static:
      # Static case: the frame is too short -- zero-pad on the right.
      pad_amount = length_static - inner_dim
      frames = array_ops.pad(frames,
                             [[0, 0]] * (frames.shape.ndims - 1) +
                             [[0, pad_amount]])

    # The dynamic pad/slice above can lose static shape information;
    # restore it when the frame length is statically known.
    if length_static is not None and frames.shape.ndims is not None:
      frames.set_shape([None] * (frames.shape.ndims - 1) + [length_static])

    # Optionally window, then overlap-add the inner 2 dimensions into a
    # single [samples] dimension.
    if window_fn is not None:
      frames *= window_fn(frame_length, dtype=stfts.dtype.real_dtype)
    return reconstruction_ops.overlap_and_add(frames, frame_step)
def _enclosing_power_of_two(value):
  """Return 2**N for the smallest integer N such that 2**N >= value.

  Args:
    value: A positive integer scalar `Tensor` (or value convertible to one).

  Returns:
    A scalar `Tensor` of dtype `value.dtype` holding the enclosing power of
    two.
  """
  value_static = tensor_util.constant_value(value)
  if value_static is not None:
    # Use exact integer arithmetic rather than float logs. With floats,
    # round-off can push log2 of an exact power of two epsilon above an
    # integer, making the ceiling (and therefore the FFT size) twice as
    # large as intended. (n - 1).bit_length() == ceil(log2(n)) exactly for
    # positive integers, and matches the original for n == 1 (returns 1).
    n = int(value_static)
    return constant_op.constant(
        1 if n <= 1 else 2**(n - 1).bit_length(), value.dtype)
  # Dynamic case: compute 2**ceil(log2(value)) with float graph ops.
  return math_ops.cast(
      math_ops.pow(
          2.0,
          math_ops.ceil(
              math_ops.log(math_ops.cast(value, dtypes.float32)) /
              math_ops.log(2.0))), value.dtype)
@tf_export('signal.mdct')
@dispatch.add_dispatch_support
def mdct(signals, frame_length, window_fn=window_ops.vorbis_window,
         pad_end=False, norm=None, name=None):
  """Computes the [Modified Discrete Cosine Transform][mdct] of `signals`.

  Implemented with TPU/GPU-compatible ops and supports gradients.

  Args:
    signals: A `[..., samples]` `float32`/`float64` `Tensor` of real-valued
      signals.
    frame_length: An integer scalar `Tensor`. The window length in samples
      which must be divisible by 4.
    window_fn: A callable that takes a frame_length and a `dtype` keyword
      argument and returns a `[frame_length]` `Tensor` of samples in the
      provided datatype. If set to `None`, a rectangular window with a scale
      of 1/sqrt(2) is used. For perfect reconstruction of a signal from
      `mdct` followed by `inverse_mdct`, please use `tf.signal.vorbis_window`,
      `tf.signal.kaiser_bessel_derived_window` or `None`. Any other window
      must satisfy w[n]^2 + w[n + frame_length // 2]^2 = 1 and
      w[n] = w[frame_length - n - 1] for n = 0,...,frame_length // 2 - 1 to
      achieve perfect reconstruction.
    pad_end: Whether to pad the end of `signals` with zeros when the provided
      frame length and step produces a frame that lies partially past its end.
    norm: If it is None, unnormalized dct4 is used, if it is "ortho"
      orthonormal dct4 is used.
    name: An optional name for the operation.

  Returns:
    A `[..., frames, frame_length // 2]` `Tensor` of `float32`/`float64`
    MDCT values where `frames` is roughly `samples // (frame_length // 2)`
    when `pad_end=False`.

  Raises:
    ValueError: If `signals` is not at least rank 1, `frame_length` is
      not scalar, or `frame_length` is not a multiple of `4`.

  [mdct]: https://en.wikipedia.org/wiki/Modified_discrete_cosine_transform
  """
  with ops.name_scope(name, 'mdct', [signals, frame_length]):
    signals = ops.convert_to_tensor(signals, name='signals')
    signals.shape.with_rank_at_least(1)
    frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
    frame_length.shape.assert_has_rank(0)

    # The MDCT hop is half a frame. frame_length must be divisible by 4 so
    # each frame splits evenly into the four quarters folded below; this can
    # only be checked when the length is statically known.
    frame_length_static = tensor_util.constant_value(frame_length)
    if frame_length_static is None:
      frame_step = frame_length // 2
    else:
      if frame_length_static % 4 != 0:
        raise ValueError('The frame length must be a multiple of 4.')
      frame_step = ops.convert_to_tensor(frame_length_static // 2,
                                         dtype=frame_length.dtype)

    framed = shape_ops.frame(
        signals, frame_length, frame_step, pad_end=pad_end)

    # Window each frame, or rescale by 1/sqrt(2) when no window is given
    # (keeps the rectangular-window transform power-complementary).
    if window_fn is None:
      framed *= 1.0 / np.sqrt(2)
    else:
      framed *= window_fn(frame_length, dtype=framed.dtype)

    # Fold the four quarters of each frame into a half-length sequence so
    # that a type-IV DCT of the folded frame equals the MDCT of the frame.
    quarters = array_ops.split(framed, 4, axis=-1)
    first_half = -array_ops.reverse(quarters[2], [-1]) - quarters[3]
    second_half = quarters[0] - array_ops.reverse(quarters[1], [-1])
    folded = array_ops.concat((first_half, second_half), axis=-1)
    # The DCT-IV below yields the frame_length // 2 unique MDCT components.
    return dct_ops.dct(folded, type=4, norm=norm)
@tf_export('signal.inverse_mdct')
@dispatch.add_dispatch_support
def inverse_mdct(mdcts,
                 window_fn=window_ops.vorbis_window,
                 norm=None,
                 name=None):
  """Computes the inverse modified DCT of `mdcts`.

  To reconstruct an original waveform, the same window function should
  be used with `mdct` and `inverse_mdct` (see the `mdct` documentation for
  the perfect-reconstruction window conditions and a round-trip example).

  Implemented with TPU/GPU-compatible ops and supports gradients.

  Args:
    mdcts: A `float32`/`float64` `[..., frames, frame_length // 2]`
      `Tensor` of MDCT bins representing a batch of `frame_length // 2`-point
      MDCTs.
    window_fn: A callable that takes a frame_length and a `dtype` keyword
      argument and returns a `[frame_length]` `Tensor` of samples in the
      provided datatype. If set to `None`, a rectangular window with a scale
      of 1/sqrt(2) is used. For perfect reconstruction, use the same window
      as in the forward `mdct`.
    norm: If "ortho", orthonormal inverse DCT4 is performed, if it is None,
      a regular dct4 followed by scaling of `1/frame_length` is performed.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `Tensor` of `float32`/`float64` signals representing
    the inverse MDCT for each input MDCT in `mdcts` where `samples` is
    `(frames - 1) * (frame_length // 2) + frame_length`.

  Raises:
    ValueError: If `mdcts` is not at least rank 2, or if `norm` is neither
      `None` nor `"ortho"`.

  [mdct]: https://en.wikipedia.org/wiki/Modified_discrete_cosine_transform
  """
  with ops.name_scope(name, 'inverse_mdct', [mdcts]):
    mdcts = ops.convert_to_tensor(mdcts, name='mdcts')
    mdcts.shape.with_rank_at_least(2)
    half_len = math_ops.cast(mdcts.shape[-1], dtype=dtypes.int32)

    if norm is None:
      half_len_float = math_ops.cast(half_len, dtype=mdcts.dtype)
      result_idct4 = (0.5 / half_len_float) * dct_ops.dct(mdcts, type=4)
    elif norm == 'ortho':
      result_idct4 = dct_ops.dct(mdcts, type=4, norm='ortho')
    else:
      # Previously an unsupported `norm` fell through both branches and
      # crashed later with an opaque UnboundLocalError on `result_idct4`;
      # fail fast with an actionable message instead.
      raise ValueError('norm must be None or "ortho". Got: %r' % (norm,))

    # Unfold the half-length DCT-IV output back into full-length frames
    # (the inverse of the folding performed in `mdct`).
    split_result = array_ops.split(result_idct4, 2, axis=-1)
    real_frames = array_ops.concat(
        (split_result[1],
         -array_ops.reverse(split_result[1], [-1]),
         -array_ops.reverse(split_result[0], [-1]),
         -split_result[0]), axis=-1)

    # Optionally window (or rescale) and overlap-add the inner 2 dimensions
    # of real_frames into a single [samples] dimension.
    if window_fn is not None:
      window = window_fn(2 * half_len, dtype=mdcts.dtype)
      real_frames *= window
    else:
      real_frames *= 1.0 / np.sqrt(2)
    return reconstruction_ops.overlap_and_add(real_frames, half_len)
def gcd(a, b, name=None):
  """Returns the greatest common divisor via Euclid's algorithm.

  Args:
    a: The dividend. A scalar integer `Tensor`.
    b: The divisor. A scalar integer `Tensor`.
    name: An optional name for the operation.

  Returns:
    A scalar `Tensor` representing the greatest common divisor between `a` and
    `b`.

  Raises:
    ValueError: If `a` or `b` are not scalar integers.
  """
  with ops.name_scope(name, 'gcd', [a, b]):
    a = ops.convert_to_tensor(a)
    b = ops.convert_to_tensor(b)

    a.shape.assert_has_rank(0)
    b.shape.assert_has_rank(0)

    if not a.dtype.is_integer:
      raise ValueError('a must be an integer type. Got: %s' % a.dtype)
    if not b.dtype.is_integer:
      raise ValueError('b must be an integer type. Got: %s' % b.dtype)

    # TPU requires static shape inference. GCD is used for subframe size
    # computation, so we should prefer static computation where possible.
    const_a = tensor_util.constant_value(a)
    const_b = tensor_util.constant_value(b)
    if const_a is not None and const_b is not None:
      # math.gcd is always available on Python 3. The old
      # `sys.version_info.major < 3` fallback to fractions.gcd was dead code
      # here (and would crash if taken: fractions.gcd was removed in
      # Python 3.9). int() normalizes numpy scalar values; math.gcd returns
      # a plain Python int either way, so the result dtype is unchanged.
      return ops.convert_to_tensor(math.gcd(int(const_a), int(const_b)))

    # Dynamic fallback: iterative Euclid's algorithm in the graph.
    cond = lambda _, b: math_ops.greater(b, array_ops.zeros_like(b))
    body = lambda a, b: [b, math_ops.mod(a, b)]
    a, b = while_loop.while_loop(cond, body, [a, b], back_prop=False)
    return a
def _check_params(window_length, dtype):
  """Validates window parameters shared by the window functions.

  Args:
    window_length: A scalar value or `Tensor`.
    dtype: The data type to produce. Must be a floating point type.

  Returns:
    `window_length` converted to a scalar `Tensor` of type int32.

  Raises:
    ValueError: If `dtype` is not a floating point type or window_length is
      not a scalar.
  """
  if not dtype.is_floating:
    raise ValueError('dtype must be a floating point type. Found %s' % dtype)
  length = ops.convert_to_tensor(window_length, dtype=dtypes.int32)
  length.shape.assert_has_rank(0)
  return length
@tf_export('signal.kaiser_window')
@dispatch.add_dispatch_support
def kaiser_window(window_length, beta=12., dtype=dtypes.float32, name=None):
  """Generate a [Kaiser window][kaiser].

  Args:
    window_length: A scalar `Tensor` indicating the window length to generate.
    beta: Beta parameter for Kaiser window, see reference below.
    dtype: The data type to produce. Must be a floating point type.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of shape `[window_length]` of type `dtype`.

  [kaiser]:
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.kaiser.html
  """
  with ops.name_scope(name, 'kaiser_window'):
    window_length = _check_params(window_length, dtype)
    if tensor_util.constant_value(window_length) == 1:
      # A length-1 window is all ones by definition.
      return array_ops.ones([1], dtype=dtype)
    # tf.range has no float16 kernel, so build the sample grid in float32
    # and cast to the requested dtype afterwards.
    half = (math_ops.cast(window_length, dtype=dtypes.float32) - 1.0) / 2.0
    grid = math_ops.range(-half, half + 0.1, dtype=dtypes.float32)
    grid = math_ops.cast(grid, dtype=dtype)
    beta = math_ops.cast(beta, dtype=dtype)
    one = math_ops.cast(1.0, dtype=dtype)
    half = math_ops.cast(half, dtype=dtype)
    # relu clamps tiny negative round-off under the square root.
    num = beta * math_ops.sqrt(
        nn_ops.relu(one - math_ops.square(grid / half)))
    # I0(num) / I0(beta) computed via the exponentially scaled bessel_i0e
    # for numerical stability: I0(x) = exp(|x|) * i0e(x).
    return math_ops.exp(num - beta) * (
        special_math_ops.bessel_i0e(num) / special_math_ops.bessel_i0e(beta))
@tf_export('signal.kaiser_bessel_derived_window')
@dispatch.add_dispatch_support
def kaiser_bessel_derived_window(window_length, beta=12.,
                                 dtype=dtypes.float32, name=None):
  """Generate a [Kaiser Bessel derived window][kbd].

  Args:
    window_length: A scalar `Tensor` indicating the window length to generate.
    beta: Beta parameter for Kaiser window.
    dtype: The data type to produce. Must be a floating point type.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of shape `[window_length]` of type `dtype`.

  [kbd]:
    https://en.wikipedia.org/wiki/Kaiser_window#Kaiser%E2%80%93Bessel-derived_(KBD)_window
  """
  with ops.name_scope(name, 'kaiser_bessel_derived_window'):
    window_length = _check_params(window_length, dtype)
    halflen = window_length // 2
    # Build the KBD half-window from the normalized cumulative sum of a
    # Kaiser window of length halflen + 1, then mirror it for the full
    # (symmetric) window.
    kaiserw = kaiser_window(halflen + 1, beta, dtype=dtype)
    csum = math_ops.cumsum(kaiserw)
    halfw = math_ops.sqrt(csum[:-1] / csum[-1])
    return array_ops.concat((halfw, halfw[::-1]), axis=0)


@tf_export('signal.vorbis_window')
@dispatch.add_dispatch_support
def vorbis_window(window_length, dtype=dtypes.float32, name=None):
  """Generate a [Vorbis power complementary window][vorbis].

  Args:
    window_length: A scalar `Tensor` indicating the window length to generate.
    dtype: The data type to produce. Must be a floating point type.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of shape `[window_length]` of type `dtype`.

  [vorbis]:
    https://en.wikipedia.org/wiki/Modified_discrete_cosine_transform#Window_functions
  """
  with ops.name_scope(name, 'vorbis_window'):
    window_length = _check_params(window_length, dtype)
    length_float = math_ops.cast(window_length, dtype=dtype)
    arg = math_ops.cast(math_ops.range(window_length), dtype=dtype)
    # w[k] = sin(pi/2 * sin^2(pi * (k + 0.5) / N))
    return math_ops.sin(
        np.pi / 2.0 * math_ops.pow(
            math_ops.sin(np.pi / length_float * (arg + 0.5)), 2.0))
@tf_export('signal.hann_window')
@dispatch.add_dispatch_support
def hann_window(window_length, periodic=True, dtype=dtypes.float32, name=None):
  """Generate a [Hann window][hann].

  Args:
    window_length: A scalar `Tensor` indicating the window length to generate.
    periodic: A bool `Tensor` indicating whether to generate a periodic or
      symmetric window. Periodic windows are typically used for spectral
      analysis while symmetric windows are typically used for digital
      filter design.
    dtype: The data type to produce. Must be a floating point type.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of shape `[window_length]` of type `dtype`.

  Raises:
    ValueError: If `dtype` is not a floating point type.

  [hann]: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
  """
  # Hann is the raised cosine with coefficients a = b = 0.5.
  return _raised_cosine_window(name, 'hann_window', window_length, periodic,
                               dtype, 0.5, 0.5)


@tf_export('signal.hamming_window')
@dispatch.add_dispatch_support
def hamming_window(window_length, periodic=True, dtype=dtypes.float32,
                   name=None):
  """Generate a [Hamming][hamming] window.

  Args:
    window_length: A scalar `Tensor` indicating the window length to generate.
    periodic: A bool `Tensor` indicating whether to generate a periodic or
      symmetric window. Periodic windows are typically used for spectral
      analysis while symmetric windows are typically used for digital
      filter design.
    dtype: The data type to produce. Must be a floating point type.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of shape `[window_length]` of type `dtype`.

  Raises:
    ValueError: If `dtype` is not a floating point type.

  [hamming]:
    https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
  """
  # Hamming is the raised cosine with coefficients a = 0.54, b = 0.46.
  return _raised_cosine_window(name, 'hamming_window', window_length, periodic,
                               dtype, 0.54, 0.46)


def _raised_cosine_window(name, default_name, window_length, periodic,
                          dtype, a, b):
  """Helper function for computing a raised cosine window.

  Args:
    name: Name to use for the scope.
    default_name: Default name to use for the scope.
    window_length: A scalar `Tensor` or integer indicating the window length.
    periodic: A bool `Tensor` indicating whether to generate a periodic or
      symmetric window.
    dtype: A floating point `DType`.
    a: The alpha parameter to the raised cosine window.
    b: The beta parameter to the raised cosine window.

  Returns:
    A `Tensor` of shape `[window_length]` of type `dtype`.

  Raises:
    ValueError: If `dtype` is not a floating point type or `window_length` is
      not scalar or `periodic` is not scalar.
  """
  if not dtype.is_floating:
    raise ValueError('dtype must be a floating point type. Found %s' % dtype)

  with ops.name_scope(name, default_name, [window_length, periodic]):
    window_length = ops.convert_to_tensor(window_length, dtype=dtypes.int32,
                                          name='window_length')
    window_length.shape.assert_has_rank(0)
    window_length_const = tensor_util.constant_value(window_length)
    if window_length_const == 1:
      # Length-1 window: all ones (the cosine term is undefined at n = 0).
      return array_ops.ones([1], dtype=dtype)
    periodic = math_ops.cast(
        ops.convert_to_tensor(periodic, dtype=dtypes.bool, name='periodic'),
        dtypes.int32)
    periodic.shape.assert_has_rank(0)
    even = 1 - math_ops.mod(window_length, 2)

    # A periodic window of even length is the first window_length samples of
    # a symmetric window of length window_length + 1.
    n = math_ops.cast(window_length + periodic * even - 1, dtype=dtype)
    count = math_ops.cast(math_ops.range(window_length), dtype)
    cos_arg = constant_op.constant(2 * np.pi, dtype=dtype) * count / n

    if window_length_const is not None:
      return math_ops.cast(a - b * math_ops.cos(cos_arg), dtype=dtype)
    # Dynamically-shaped length: guard the length == 1 case at graph run
    # time, since it divides by n == 0 above.
    return cond.cond(
        math_ops.equal(window_length, 1),
        lambda: array_ops.ones([window_length], dtype=dtype),
        lambda: math_ops.cast(a - b * math_ops.cos(cos_arg), dtype=dtype))
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_array_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_array_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c9f500af5fc1e934a7d779d2f5604b0b0d66a66 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_array_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc9d7144a397c6b311f4cbfe3fa7e62b94820441 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63a48550e5f0453c71418c0a942f98acd4ab5dce Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor_dynamic.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor_dynamic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02da0c871fc9efe9ea3f20b9e6b3fed98c03ff37 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/__pycache__/structured_tensor_dynamic.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_array_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_array_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..9805418517399b622025884b9660ccd4f3415a87 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_array_ops.py @@ -0,0 +1,624 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""StructuredTensor array ops.""" + +from typing import Sequence + +from tensorflow.core.config import flags +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.ragged import dynamic_ragged_shape +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged.row_partition import RowPartition +from tensorflow.python.ops.structured.structured_tensor import StructuredTensor +from tensorflow.python.util import deprecation +from tensorflow.python.util import dispatch + + +@dispatch.dispatch_for_api(array_ops.shape_v2) +def shape_v2(input: StructuredTensor, out_type=dtypes.int32, # pylint: disable=redefined-builtin + name=None) -> dynamic_ragged_shape.DynamicRaggedShape: + """Returns a DynamicRaggedShape containing the shape of the input.""" + del name + return input._ragged_shape.with_dtype(out_type) # pylint: disable=protected-access + + +@dispatch.dispatch_for_api(array_ops.shape) +def shape_v1(input: StructuredTensor, name=None, # pylint: disable=redefined-builtin + out_type=dtypes.int32) -> dynamic_ragged_shape.DynamicRaggedShape: + """Returns a DynamicRaggedShape containing the shape of the input.""" + del name + return input._ragged_shape.with_dtype(out_type) # pylint: disable=protected-access + + +@dispatch.dispatch_for_types(array_ops.expand_dims, StructuredTensor) +@deprecation.deprecated_args(None, 'Use the `axis` argument instead', 'dim') +def expand_dims(input, axis=None, name=None, dim=None): # pylint: disable=redefined-builtin + """Creates a StructuredTensor with a length 1 axis inserted at index `axis`. 
+ + This is an implementation of tf.expand_dims for StructuredTensor. Note + that the `axis` must be less than or equal to rank. + + >>> st = StructuredTensor.from_pyval([[{"x": 1}, {"x": 2}], [{"x": 3}]]) + >>> tf.expand_dims(st, 0).to_pyval() + [[[{'x': 1}, {'x': 2}], [{'x': 3}]]] + >>> tf.expand_dims(st, 1).to_pyval() + [[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]] + >>> tf.expand_dims(st, 2).to_pyval() + [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]] + >>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2 + [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]] + + Args: + input: the original StructuredTensor. + axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank` + name: the name of the op. + dim: deprecated: use axis. + + Returns: + a new structured tensor with larger rank. + + Raises: + an error if `axis < -(rank + 1)` or `rank < axis`. + """ + axis = deprecation.deprecated_argument_lookup('axis', axis, 'dim', dim) + return _expand_dims_impl(input, axis, name=name) + + +@dispatch.dispatch_for_types(array_ops.expand_dims_v2, StructuredTensor) +def expand_dims_v2(input, axis, name=None): # pylint: disable=redefined-builtin + """Creates a StructuredTensor with a length 1 axis inserted at index `axis`. + + This is an implementation of tf.expand_dims for StructuredTensor. Note + that the `axis` must be less than or equal to rank. + + >>> st = StructuredTensor.from_pyval([[{"x": 1}, {"x": 2}], [{"x": 3}]]) + >>> tf.expand_dims(st, 0).to_pyval() + [[[{'x': 1}, {'x': 2}], [{'x': 3}]]] + >>> tf.expand_dims(st, 1).to_pyval() + [[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]] + >>> tf.expand_dims(st, 2).to_pyval() + [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]] + >>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2 + [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]] + + Args: + input: the original StructuredTensor. + axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank` + name: the name of the op. + + Returns: + a new structured tensor with larger rank. 
+ + Raises: + an error if `axis < -(rank + 1)` or `rank < axis`. + """ + return _expand_dims_impl(input, axis, name=name) + + +@dispatch.dispatch_for_types(array_ops.gather, StructuredTensor) +def gather(params, + indices, + validate_indices=None, + name=None, + axis=None, + batch_dims=0): + """tf.gather for structured tensors. + + Does not support (yet) checks on illegal axis values, et cetera. + + Indices must be a ragged or dense tensor. + Args: + params: a structured tensor to be gathered + indices: a ragged tensor or tensor to gather by. + validate_indices: whether to validate the indices + name: the name of the op(s). + axis: the axis in params to gather on. + batch_dims: the number of batch dimensions. + + Returns: + the params reorganized according to indices. + """ + if name is None: + name = 'gather' + with ops.name_scope(name): + if axis is None: + axis = batch_dims + axis = array_ops.get_positive_axis(axis, params.shape.rank, + ndims_name='params.shape.rank') + indices = ragged_tensor.convert_to_tensor_or_ragged_tensor( + indices, name='indices') + + def leaf_op(p): + return array_ops.gather( + p, + indices, + validate_indices=validate_indices, + axis=axis, + batch_dims=batch_dims, + name=None) + + return _extend_op_single(params, leaf_op) + + +@dispatch.dispatch_for_types(array_ops.concat, StructuredTensor) +def concat(values, axis, name: str = 'concat'): + """tf.concat for structured tensors. + + Does not support (yet) checks on illegal axis values, et cetera. + + Args: + values: a sequence of StructuredTensors. + axis: an axis to concatenate upon. + name: the name of the op(s). + + Returns: + the params reorganized according to indices. + """ + if name is None: + name = 'concat' + _assert_concat_compatible_structured_tensors(values) + def leaf_op(values): + return array_ops.concat(values, axis) + # TODO(martinz): handle axis when it is a tensor. 
+ axis = array_ops.get_positive_axis(axis, values[0].rank) + with ops.name_scope(name, 'StructuredConcat', values): + return _extend_op(values, leaf_op) + + +@dispatch.dispatch_for_types(random_ops.random_shuffle, StructuredTensor) +def random_shuffle(value, seed=None, name=None): + """Shuffle a structured tensor on the zeroth axis. + + Args: + value: a structured tensor of rank at least one. + seed: the seed for shuffling. + name: the name for shuffle. + + Returns: + The shuffled structured tensor. + """ + with ops.name_scope(name, 'shuffle', [value, seed]): + if value.rank == 0: + raise ValueError('Cannot shuffle a scalar StructuredTensor') + first_dimension = value.nrows() + index = random_ops.random_shuffle(math_ops.range(first_dimension), + seed=seed) + return gather(value, index, axis=0) + + +@dispatch.dispatch_for_types(array_ops.size_v2, StructuredTensor) +def size_v2(input, out_type=None, name=None): + # pylint: disable=redefined-builtin + """Returns the size of a tensor.""" + if out_type is None: + if flags.config().tf_shape_default_int64.value(): + out_type = dtypes.int64 + else: + out_type = dtypes.int32 + return size(input, name=name, out_type=out_type) + + +# pylint: disable=protected-access +@dispatch.dispatch_for_types(array_ops.size, StructuredTensor) +def size(input, name=None, out_type=None): + # pylint: disable=redefined-builtin + """Returns the size of a tensor.""" + if out_type is None: + if flags.config().tf_shape_default_int64.value(): + out_type = dtypes.int64 + else: + out_type = dtypes.int32 + with ops.name_scope(name, 'size', [input]) as name: + if not input.row_partitions: + if input.nrows() is not None: + return math_ops.cast(input.nrows(), out_type) # vector. + else: + return math_ops.cast(1, out_type) # scalar. + # 2D and up. 
+ nvals = input.row_partitions[-1].nvals() + if nvals is None or out_type is None: + return nvals + return math_ops.cast(nvals, dtype=out_type) + + +# pylint: disable=protected-access +@dispatch.dispatch_for_types(array_ops.zeros_like, StructuredTensor) +def zeros_like(tensor, dtype=None, name=None, optimize=True): + """Implementation of zeros_like for StructuredTensor for TF v1.""" + del optimize + return zeros_like_v2(tensor, dtype=dtype, name=name) + + +# pylint: disable=protected-access +@dispatch.dispatch_for_types(array_ops.zeros_like_v2, StructuredTensor) +def zeros_like_v2(input, dtype=None, name=None, layout=None): # pylint: disable=redefined-builtin + """Replace every object with a zero. + + Example: + >>> st = StructuredTensor.from_pyval([{"x":[3]}, {"x":[4,5]}]) + >>> tf.zeros_like(st) + + >>> st = StructuredTensor.from_pyval([[{"x":[3]}], [{"x":[4,5]}, {"x":[]}]]) + >>> tf.zeros_like(st, dtype=tf.int32) + + + Args: + input: a structured tensor. + dtype: the dtype of the resulting zeros. (default is tf.float32) + name: a name for the op. + layout: Optional Layout. Only supports replicated layout. + + Returns: + a tensor of zeros of the same shape. + """ + if layout is not None and not layout.is_fully_replicated(): + raise ValueError( + f'StructuredTensor only allows replicated layout. got {layout}' + ) + if dtype is None: + dtype = dtypes.float32 + with ops.name_scope(name, 'zeros_like', [input]) as name: + if not input.row_partitions: + if input.nrows() is not None: + return array_ops.zeros([input.nrows()], dtype, layout=layout) # vector. + else: + return array_ops.zeros([], dtype, layout=layout) # scalar. + # 2D and up. 
+ last_row_partition = input.row_partitions[-1] + + result = ragged_tensor.RaggedTensor._from_nested_row_partitions( + array_ops.zeros(last_row_partition.nvals(), dtype=dtype), + input.row_partitions) + return result + + +# pylint: disable=protected-access +@dispatch.dispatch_for_types(array_ops.ones_like, StructuredTensor) +def ones_like(tensor, dtype=None, name=None, optimize=True): + """Implementation of zeros_like for StructuredTensor for TF v1.""" + del optimize + return ones_like_v2(tensor, dtype=dtype, name=name) + + +# pylint: disable=protected-access +@dispatch.dispatch_for_types(array_ops.ones_like_v2, StructuredTensor) +def ones_like_v2(input, dtype=None, name=None, layout=None): # pylint: disable=redefined-builtin + """Replace every object with a zero. + + Example: + >>> st = StructuredTensor.from_pyval([{"x":[3]}, {"x":[4,5]}]) + >>> tf.ones_like(st) + + >>> st = StructuredTensor.from_pyval([[{"x":[3]}], [{"x":[4,5]}, {"x":[]}]]) + >>> tf.ones_like(st, dtype=tf.int32) + + + Args: + input: a structured tensor. + dtype: the dtype of the resulting zeros. (default is tf.float32) + name: a name for the op. + layout: Optional Layout. Only supports replicated layout. + + Returns: + a tensor of zeros of the same shape. + """ + if layout is not None and not layout.is_fully_replicated(): + raise ValueError( + f'StructuredTensor only allows replicated layout. got {layout}' + ) + + if dtype is None: + dtype = dtypes.float32 + with ops.name_scope(name, 'ones_like', [input]) as name: + if not input.row_partitions: + if input.nrows() is not None: + return array_ops.ones([input.nrows()], dtype, layout=layout) # vector. + else: + return array_ops.ones([], dtype, layout=layout) # scalar. + # 2D and up. 
+ last_row_partition = input.row_partitions[-1] + + result = ragged_tensor.RaggedTensor._from_nested_row_partitions( + array_ops.ones(last_row_partition.nvals(), dtype=dtype), + input.row_partitions) + return result + + +@dispatch.dispatch_for_types(array_ops.rank, StructuredTensor) +def rank(input, name=None): + # pylint: disable=redefined-builtin + """Returns the rank of a tensor.""" + with ops.name_scope(name, 'rank', [input]) as name: + return constant_op.constant(input.rank, dtype=dtypes.int32) + + +def _expand_dims_impl(st, axis, name=None): # pylint: disable=redefined-builtin + """Creates a StructuredTensor with a length 1 axis inserted at index `axis`. + + This is an implementation of tf.expand_dims for StructuredTensor. Note + that the `axis` must be less than or equal to rank. + + >>> st = StructuredTensor.from_pyval([[{"x": 1}, {"x": 2}], [{"x": 3}]]) + >>> tf.expand_dims(st, 0).to_pyval() + [[[{'x': 1}, {'x': 2}], [{'x': 3}]]] + >>> tf.expand_dims(st, 1).to_pyval() + [[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]] + >>> tf.expand_dims(st, 2).to_pyval() + [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]] + >>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2 + [[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]] + + Args: + st: the original StructuredTensor. + axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank` + name: the name of the op. + + Returns: + a new structured tensor with larger rank. + + Raises: + an error if `axis < -(rank + 1)` or `rank < axis`. 
+ """ + axis = array_ops.get_positive_axis( + axis, st.rank + 1, axis_name='axis', ndims_name='rank(st)') + with ops.name_scope(name, 'ExpandDims', [st, axis]): + new_fields = { + k: array_ops.expand_dims(v, axis) for (k, v) in st._fields.items() + } + new_shape = st.shape[:axis] + (1,) + st.shape[axis:] + new_row_partitions = _expand_st_row_partitions(st, axis) + new_nrows = st.nrows() if (axis > 0) else 1 + return StructuredTensor.from_fields( + new_fields, + shape=new_shape, + row_partitions=new_row_partitions, + nrows=new_nrows) + + +def _expand_st_row_partitions(st, axis): + """Create the row_partitions for expand_dims.""" + if axis == 0: + if st.shape.rank == 0: + return () + nvals = st.nrows() + new_partition = RowPartition.from_uniform_row_length( + nvals, nvals, nrows=1, validate=False) + return (new_partition,) + st.row_partitions + elif axis == st.rank: + nvals = ( + st.row_partitions[axis - 2].nvals() if (axis - 2 >= 0) else st.nrows()) + return st.row_partitions + (RowPartition.from_uniform_row_length( + 1, nvals, nrows=nvals, validate=False),) + else: + nvals = ( + st.row_partitions[axis - 1].nrows() if (axis - 1 >= 0) else st.nrows()) + return st.row_partitions[:axis - 1] + (RowPartition.from_uniform_row_length( + 1, nvals, nrows=nvals, validate=False),) + st.row_partitions[axis - 1:] + + +# TODO(martinz): consider allowing values to be nested. +def _extend_op(values, leaf_op, empty_st_op=None): + """Extend an op from RaggedTensor and Tensor to StructuredTensor. + + Visits all children of the structured tensor, and children of children, + applying leaf_op whenever it reaches a leaf, and empty_st_op whenever + it reaches an internal node without children. + + Args: + values: a list of structured tensors, ragged tensors, or tensors. All must + have the same type. If they are structured tensors, they must have the + same paths. + leaf_op: an op for handling non-structured tensor. + empty_st_op: op to create a structured tensor without fields. 
+ + Returns: + the result of the extended op (a StructuredTensor, RaggedTensor, or Tensor) + + Raises: + ValueError: + If values is not a Sequence or is empty. + """ + if not isinstance(values, Sequence): + raise ValueError('Expected a list') + + if not values: + raise ValueError('List cannot be empty') + + if empty_st_op is None: + empty_st_op = empty_st_op_like_zeros(leaf_op) + # Use the structure of the first StructuredTensor. They are all assumed to + # be the same. + value = values[0] + + if isinstance(value, StructuredTensor): + # TODO(martinz): Calling empty_st_op may add unnecessary ops. Revisit later. + empty_result = empty_st_op(values) + if not value.field_names(): + return empty_result + new_fields = {} + for k in value.field_names(): + new_fields[k] = _extend_op([v.field_value(k) for v in values], leaf_op, + empty_st_op) + return StructuredTensor.from_fields(new_fields, shape=empty_result.shape) + else: + return leaf_op(values) + + +def _extend_op_single(value, leaf_op, empty_st_op=None): + """Extend an op to a value instead of a list of values.""" + + def to_list_op(element_op): + if element_op is None: + return None + + def list_op(values): + [value] = values + return element_op(value) + + return list_op + + return _extend_op([value], to_list_op(leaf_op), to_list_op(empty_st_op)) + + +def empty_st_op_like_zeros(leaf_op): + + def empty_st_op(values): + as_zeros = [ + zeros_like_v2(value, dtype=dtypes.int32) for value in values + ] + result = leaf_op(as_zeros) + return _structured_tensor_like(result) + + return empty_st_op + + +def _structured_tensor_from_dense_tensor(t): + """Create a structured tensor with the shape of a dense tensor.""" + # Note: If a tensor will have rank 0, + # it either has a fully defined shape or has unknown rank. 
+ if t.shape.is_fully_defined(): + return StructuredTensor.from_fields({}, shape=t.shape) + elif t.shape.rank is None: + raise ValueError("Can't build StructuredTensor w/ unknown rank") + elif t.shape.rank == 1: + return StructuredTensor.from_fields({}, shape=t.shape, + nrows=array_ops.shape(t)[0]) + else: + rt = ragged_tensor.RaggedTensor.from_tensor(t) + return _structured_tensor_from_row_partitions(t.shape, + rt._nested_row_partitions) + + +def _structured_tensor_from_row_partitions(shape, row_partitions): + return StructuredTensor.from_fields({}, + shape=shape, + row_partitions=row_partitions) + + +# pylint: disable=protected_access +def _all_nested_row_partitions(rt): + """Returns all nested row partitions in rt, including for dense dimensions.""" + if isinstance(rt, tensor_lib.Tensor): + if rt.shape.rank <= 1: + return () + else: + rt2 = ragged_tensor.RaggedTensor.from_tensor(rt) + return rt2._nested_row_partitions + else: + tail_partitions = _all_nested_row_partitions(rt.flat_values) + head_partitions = rt._nested_row_partitions # pylint: disable=protected_access + return head_partitions + tail_partitions + + +def _structured_tensor_like(t): + """Create a StructuredTensor with the shape of a (composite) tensor.""" + if isinstance(t, tensor_lib.Tensor): + return _structured_tensor_from_dense_tensor(t) + if ragged_tensor.is_ragged(t): + return StructuredTensor.from_fields( + {}, shape=t.get_shape(), row_partitions=_all_nested_row_partitions(t)) + # here, it is a StructuredTensor + return StructuredTensor.from_fields({}, + shape=t.shape, + row_partitions=t.row_partitions, + nrows=t.nrows()) + + +def _get_all_paths(st): + """Get all the paths from a StructuredTensor.""" + fields = st.field_names() + all_paths = {()} + for k in fields: + v = st.field_value(k) + if isinstance(v, StructuredTensor): + all_paths = all_paths.union([(k,) + p for p in _get_all_paths(v)]) + else: + all_paths.add((k,)) + return all_paths + + +def _get_all_ranks(st): + """Get ranks of all 
submessages of a StructuredTensor.""" + fields = st.field_names() + all_ranks = {(): st.rank} + for k in fields: + v = st.field_value(k) + if isinstance(v, StructuredTensor): + for (k2, v2) in _get_all_ranks(v).items(): + all_ranks[(k,) + k2] = v2 + return all_ranks + + +def _assert_all_paths_match(values): + """Raises an error if the paths are not identical.""" + paths = [_get_all_paths(st) for st in values] + path_diff = set() + for other_paths in paths[1:]: + path_diff = path_diff.union(paths[0].symmetric_difference(other_paths)) + if path_diff: + raise ValueError( + 'Some paths are present in some, but not all, structured tensors: %r' % + (path_diff,)) + + +def _assert_all_ranks_match(values): + """Raises an error if the ranks of submessages are not identical.""" + ranks = [_get_all_ranks(st) for st in values] + for other_ranks in ranks[1:]: + if other_ranks != ranks[0]: + # TODO(martinz): If this becomes common, we can provide more detail. + # e.g.: which path is inconsistent. + raise ValueError('Ranks of sub-message do not match') + + +def _assert_concat_compatible_structured_tensors(values): + """Sometimes raises an error if concat doesn't make sense statically on values. + + values must be a sequence, and each element in values must be a structured + tensor, and must have the same paths. Additionally, each path that is a + submessage must have the same rank. + + These constraints are sufficient for concat on the fields to be the same + as concat on structured tensors. This is meant to capture scenarios like + paths that are not in the first structured tensor, but are in later + structured tensors, which will just be ignored by the recursive algorithm. + + If the rank of a submessage was different for two structured tensors, + then that is also a non-sensical merge. + + Note that all of these checks are static, as paths and submessage ranks + are known. + + Args: + values: a Sequence of StructuredTensors. 
+ + Raises: + ValueError: if there is any inconsistency as described above. + """ + if not isinstance(values, Sequence): + raise ValueError('values must be a list of StructuredTensors (not a list)') + if not values: + raise ValueError('values must not be an empty list') + for st in values: + if not isinstance(st, StructuredTensor): + raise ValueError('values must be a list of StructuredTensors') + _assert_all_paths_match(values) + _assert_all_ranks_match(values) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0f9f52a29d710d04630f16e90275197219dd44 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_ops.py @@ -0,0 +1,24 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Import all modules in the `structured` package that define exported symbols. + +We don't import these modules from structured/__init__.py, since we want to +avoid circular dependencies. 
+""" + + +# pylint: disable=unused-import +from tensorflow.python.ops.structured import structured_array_ops +from tensorflow.python.ops.structured import structured_tensor diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_tensor.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..752d29895fe1a32bb841363632d9b581bbf4ccf5 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/ops/structured/structured_tensor.py @@ -0,0 +1,1782 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Structured Tensors.""" + +import re +from typing import Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import extension_type +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import type_spec +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.ragged import dynamic_ragged_shape +from tensorflow.python.ops.ragged import ragged_factory_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.ops.ragged.row_partition import RowPartition +from tensorflow.python.util import compat +from tensorflow.python.util import nest +from tensorflow.python.util.tf_export import tf_export + +# Each field may contain one of the following types of Tensors. +_FieldValue = Union[ + tensor.Tensor, + ragged_tensor.RaggedTensor, + 'StructuredTensor', + extension_type.ExtensionType +] +# Function that takes a FieldValue as input and returns the transformed +# FieldValue. +_FieldFn = Callable[[_FieldValue], _FieldValue] + + +@tf_export('experimental.StructuredTensor') +class StructuredTensor(extension_type.BatchableExtensionType): + """A multidimensional collection of structures with the same schema. + + A **`StructuredTensor`** is a multi-dimensional collection of ***structures*** + with the same ***schema***, where: + + * A ***schema*** is a collection of fields, each of which has a name and type. + * A ***structure*** maps each field in the schema to a tensor value (which + could be a nested StructuredTensor). 
+ + As an important special case, a 1D `StructuredTensor` encodes a 2D table, + where columns are heterogeneous `Tensor`s, and rows are the aligned elements + in each of those `Tensor`s. + + Internally, StructuredTensors use a "field-major" encoding: for each leaf + field, there is a single tensor that stores the value of that field for all + structures in the `StructuredTensor`. + + ### Examples + + >>> # A scalar StructuredTensor describing a single person. + >>> s1 = tf.experimental.StructuredTensor.from_pyval( + ... {"age": 82, "nicknames": ["Bob", "Bobby"]}) + >>> s1.shape + TensorShape([]) + >>> s1["age"] + + + >>> # A vector StructuredTensor describing three people. + >>> s2 = tf.experimental.StructuredTensor.from_pyval([ + ... {"age": 12, "nicknames": ["Josaphine"]}, + ... {"age": 82, "nicknames": ["Bob", "Bobby"]}, + ... {"age": 42, "nicknames": ["Elmo"]}]) + >>> s2.shape + TensorShape([3]) + >>> s2[0]["age"] + + + + ### Field Paths + + A *field path* is a tuple of field names, specifying the path to a nested + field. + """ + _fields: Mapping[str, _FieldValue] + _ragged_shape: dynamic_ragged_shape.DynamicRaggedShape + + __name__ = 'tf.StructuredTensor' + #============================================================================= + # Common Types + #============================================================================= + # pylint: disable=invalid-name + # Field names work as key, and they can be a sequence to refer to the + # sub-levels (embedded) StructuredTensor's. 
+ FieldName = Union[str, Sequence[str]] + + # pylint: enable=invalid-name + + #============================================================================= + # Constructor & Factory Methods + #============================================================================= + def __init__(self, fields: Mapping[str, _FieldValue], + ragged_shape: dynamic_ragged_shape.DynamicRaggedShape): + self._fields = fields + self._ragged_shape = ragged_shape + + @classmethod + def _old_init(cls, fields, shape, nrows, row_partitions, internal=False): + """Private constructor -- use factory methods to create StructuredTensors. + + This constructor builds a `StructuredTensor` from the given attributes, + performing minimal validation. + + Args: + fields: A dictionary mapping from string to `Tensor`, `RaggedTensor`, or + `StructuredTensor`. (This dict is not copied, so the caller must ensure + that it does not get mutated via leaked references.) + shape: `tf.TensorShape` with statically known rank. + nrows: scalar integer `tf.Tensor`, or `None` if `shape.rank==0`. + row_partitions: tuple of `RowPartition`s, with length `shape.rank-1`. + internal: ignored argument. + + Returns: + a StructuredTensor. + """ + assert isinstance(fields, dict), fields + assert isinstance(shape, tensor_shape.TensorShape), shape + assert nrows is None or isinstance(nrows, tensor.Tensor), nrows + assert row_partitions is None or isinstance(row_partitions, + tuple), row_partitions + return StructuredTensor( + fields=fields, + ragged_shape=_dynamic_ragged_shape_init(fields, shape, nrows, + row_partitions)) + + @classmethod + def from_shape( + cls, ragged_shape: dynamic_ragged_shape.DynamicRaggedShape + ) -> 'StructuredTensor': + """Creates a `StructuredTensor` with no fields and ragged_shape. + + Args: + ragged_shape: the shape of the structured tensor. + + Returns: + a StructuredTensor with no fields and ragged_shape. 
+ """ + return StructuredTensor(fields={}, ragged_shape=ragged_shape) + + @classmethod + def from_fields(cls, + fields, + shape=(), + nrows=None, + row_partitions=None, + validate=False): + """Creates a `StructuredTensor` from a dictionary of fields. + + Args: + fields: A dictionary mapping from string to `Tensor`, `RaggedTensor`, or + `StructuredTensor`, providing the values for individual fields in each + structure. If `shape.rank > 0`, then every tensor in `fields` must have + the same shape in the first `shape.rank` dimensions; and that shape must + be compatible with `shape`; and `result[i1...iN][key] = + fields[key][i1...iN]` (where `N==shape.rank`). + shape: A `TensorShape`: static information about the shape of the + `StructuredTensor`. Must have a known `rank`. Defaults to scalar shape + (i.e. `rank=0`). + nrows: scalar integer tensor containing the number of rows in this + `StructuredTensor`. Should only be specified if `shape.rank > 0`. + Default value is inferred from the `fields` values. If `fields` is + empty, then this must be specified. + row_partitions: A list of `RowPartition`s describing the (possibly ragged) + shape of this `StructuredTensor`. Should only be specified if + `shape.rank > 1`. Default value is inferred from the `fields` values. + If `fields` is empty, then this must be specified. + validate: If true, then add runtime validation ops that check that the + field values all have compatible shapes in the outer `shape.rank` + dimensions. + + Returns: + A `StructuredTensor`. + + Examples: + + >>> tf.experimental.StructuredTensor.from_fields({'x': 1, 'y': [1, 2, 3]}) + + + >>> tf.experimental.StructuredTensor.from_fields( + ... 
{'foo': [1, 2], 'bar': [3, 4]}, shape=[2]) + + """ + shape = tensor_shape.as_shape(shape) + rank = shape.rank + if rank is None: + raise ValueError("StructuredTensor's shape must have known rank.") + if not isinstance(fields, dict): + raise TypeError('fields must be a dictionary, got %s' % + type(fields).__name__) + if rank < 2 and row_partitions: + raise ValueError('row_partitions must be None or [] if shape.rank<2') + if rank == 0 and nrows is not None: + raise ValueError('nrows must be None if shape.rank==0') + if row_partitions is not None: + row_partitions = tuple(row_partitions) + if len(row_partitions) != max(0, rank - 1): + raise ValueError('len(row_partitions) must be shape.rank-1') + elif rank < 2: + row_partitions = () + + fields = dict(fields) # Make a private copy. + with ops.name_scope(None, 'StructuredTensor', fields.values()): + # TODO(martinz): Make this have better errors. + shape = _dynamic_ragged_shape_init(fields, shape, nrows, row_partitions) + + # TODO(martinz): This may not need to be done if all fields are dense. + if shape.rank > 1: + shape = shape._with_num_row_partitions(shape.rank - 1) + + # Validate keys and convert field values to tensors. + for key, value in fields.items(): + if not isinstance(key, str): + raise TypeError(f'Unexpected type for key in `fields`: {key}') + if not _FIELD_NAME_RE.match(key): + raise ValueError('Field name %r is not currently allowed.' % key) + fields[key] = _convert_to_structured_field_value(value) + + fields = dict([(k, _replace_row_partitions(v, row_partitions)) + for (k, v) in fields.items()]) + return cls(fields=fields, ragged_shape=shape) + + @classmethod + def from_fields_and_rank( + cls, + fields: Mapping[str, _FieldValue], + rank: int, + validate: bool = False, + dtype: Optional[dtypes.DType] = None) -> 'StructuredTensor': + """Creates a `StructuredTensor` from a nonempty dictionary of fields. 
+ + Note that if the shape dtype is not specified, the shape dtype will be + inferred from any fields that have a shape dtype. If fields differ, then + int64 will be preferred to int32, because coercing from int32 to int64 is + safer than coercing from int64 to int32. + + If there are no ragged fields, then it will be int64 by default, but this + will be changed to int32 in the future. + + Args: + fields: A dictionary mapping from string to `Tensor`, `RaggedTensor`, or + `StructuredTensor`, providing the values for individual fields in each + structure. If `rank > 0`, then every tensor in `fields` must have the + same shape in the first `rank` dimensions. Cannot be empty. + rank: The rank of the resulting structured tensor. + validate: If true, then add runtime validation ops that check that the + field values all have compatible shapes in the outer `rank` dimensions. + dtype: If specified, then forces dtype of the shape to be this. + + Returns: + A `StructuredTensor`. + Examples: + >>> tf.experimental.StructuredTensor.from_fields_and_rank( + ... {'x': 1, 'y': [1, 2, 3]}, 0) + + >>> StructuredTensor.from_fields_and_rank({'foo': [1, 2], 'bar': [3, 4]}, + ... 
1) + + """ + if not fields: + raise ValueError('Must provide at least one field') + if not isinstance(rank, int): + raise ValueError('rank must be an integer') + if rank < 0: + raise ValueError('rank must be nonnegative') + fields = { + k: _convert_to_structured_field_value(v) for (k, v) in fields.items() + } + if dtype is None: + dtype = _find_shape_dtype(fields, None, None) + fields = _fields_with_dtype(fields, dtype) + + shape = _shape_from_fields(fields, rank, dtype) + if rank > 1: + shape = shape._with_num_row_partitions(rank - 1) + new_rp = shape._row_partitions # pylint: disable=protected-access + fields = { + k: _replace_row_partitions(v, new_rp) for (k, v) in fields.items() + } + return StructuredTensor(fields=fields, ragged_shape=shape) + + def with_updates(self, + updates: Dict[FieldName, Union[_FieldValue, _FieldFn, None]], + validate: bool = False) -> 'StructuredTensor': + """Creates a new `StructuredTensor` with the updated fields. + + If this `StructuredTensor` is a scalar, and `k` is the `FieldName` being + updated and `v` the new value, then: + + ``` + result[k] = v # If (k, v) is in updates and v is a FieldValue + result[k] = f(self[k]) # If (k, f) is in updates and f is a FieldFn + result[k] = self[k] # If k is in self.field_names but not in updates + ``` + + If this `StructuredTensor` has rank `N` and shape `[D1...DN]`, then each + FieldValue `v` in `updates` must have shape `[D1...DN, ...]`, that is, + prefixed with the same shape as the `StructuredTensor`. Then the resulting + `StructuredTensor` will have: + + ``` + result[i1...iN][k] = v[i1...iN] # (k, v) in updates + result[i1...iN][k] = f(self.field_value(k))[i1...iN] # (k, f) in updates + result[i1...iN][k] = self[i1...iN][k] # k not in updates + ``` + + Note that `result.shape` is always equal to `self.shape` (but the shapes + of nested StructuredTensors may be changed if they are updated with new + values). 
+ + Args: + updates: A dictionary mapping `FieldName` to either a `FieldValue` to be + used to update, or a `FieldFn` that will transform the value for the + given `FieldName`. `FieldName` can be a string for a direct field, or a + sequence of strings to refer to a nested sub-field. `FieldFn` is a + function that takes a `FieldValue` as input and should return a + `FieldValue`. All other fields are copied over to the new + `StructuredTensor`. New `FieldName` can be given (to add new fields), + but only to existing `StructuredTensor`, it won't automatically create + new nested structures -- but one can create a whole `StructureTensor` + sub-structure and set that into an existing structure. If the new value + is set to `None`, it is removed. + validate: If true, then add runtime validation ops that check that the + field values all have compatible shapes in the outer `shape.rank` + dimensions. + + Returns: + A `StructuredTensor`. + + Raises: + `ValueError`: If the any of the `FieldName` keys points to non-existent + sub-structures, if parent and child nodes are updated, if shapes + change, if a delete update is given for a non-existent field, or if a + `FieldFn` transforming function is given for a `FieldName` that doesn't + yet exist. + + Examples: + + >>> shoes_us = tf.experimental.StructuredTensor.from_pyval([ + ... {"age": 12, "nicknames": ["Josaphine"], + ... "shoes": {"sizes": [8.0, 7.5, 7.5]}}, + ... {"age": 82, "nicknames": ["Bob", "Bobby"], + ... "shoes": {"sizes": [11.0, 11.5, 12.0]}}, + ... {"age": 42, "nicknames": ["Elmo"], + ... "shoes": {"sizes": [9.0, 9.5, 10.0]}}]) + >>> def us_to_europe(t): + ... return tf.round(t * 2.54 + 17.0) # Rough approximation. 
+ >>> shoe_sizes_key = ("shoes", "sizes") + >>> shoes_eu = shoes_us.with_updates({shoe_sizes_key: us_to_europe}) + >>> shoes_eu.field_value(shoe_sizes_key) + + """ + updates_items = [(_normalize_field_name_to_tuple(name), value) + for name, value in updates.items()] + + # Sort by keys and check for updates of both parent and child nodes. + updates_items = sorted(updates_items) + for i in range(1, len(updates_items)): + # Parent of a node would precede node in the sorted order. + name = updates_items[i][0] # item[0] is the name, item[1] is the value. + prev_name = updates_items[i - 1][0] + if name[:len(prev_name)] == prev_name: + raise ValueError( + '`StructuredTensor.with_updates` does not allow both parent and ' + 'child nodes to be updated: parent={}, child={}. If needed you can ' + 'update child nodes in the parent update value.'.format( + prev_name, name)) + return self._with_updates_impl((), updates_items, validate) + + def _with_updates_impl(self, error_prefix: Tuple[str, ...], + updates: List[Tuple[FieldName, Union[_FieldValue, + _FieldFn]]], + validate: bool) -> 'StructuredTensor': + """Recursive part of `with_updates` implementation.""" + # Get current fields. + new_fields = dict(self._fields) + + # Convert field name to string with full path for error messages. + def name_fullpath(name: Sequence[str]) -> str: + return str(error_prefix + (name,)) + + # Apply value if a function or the value itself. + def apply_value(name: str, value: Union[_FieldValue, + _FieldFn]) -> _FieldValue: + if callable(value): + # `value` is actually a transforming function. + if name not in new_fields: + raise ValueError( + '`StructuredTensor.with_updates` cannot update the field {} ' + 'because a transforming function was given, but that field ' + 'does not already exist.'.format(name_fullpath(name))) + value = value(new_fields[name]) + return value + + # Merge updates. 
+ for name, value in updates: + if not name or not name[0]: + raise ValueError( + '`StructuredTensor.with_updates` does not allow empty names ' + '{}.'.format(name_fullpath(name))) + + if len(name) == 1: + name = name[0] + if value is None: + if name not in new_fields: + raise ValueError( + '`StructuredTensor.with_updates` cannot delete field ' + '{} because it is not present.'.format(name_fullpath(name))) + new_fields.pop(name) + else: + new_fields[name] = apply_value(name, value) + else: + # Recursive + prefix = name[0] + suffix = name[1:] + if prefix not in new_fields: + raise ValueError( + '`StructuredTensor.with_updates` cannot create new sub-field ' + '{} if parent field {} is not set.'.format( + error_prefix + tuple(name), name_fullpath(prefix))) + current_value = new_fields[prefix] + if not isinstance(current_value, StructuredTensor): + raise ValueError( + '`StructuredTensor.with_updates` cannot create new sub-field ' + '{} if parent structure {} is not a `StructuredTensor` that ' + 'can contain sub-structures -- it is a `{}`.'.format( + error_prefix + tuple(name), name_fullpath(prefix), + type(current_value))) + one_update = [(suffix, value)] + + # Accessing protected member in recursion. + # FutureWork: optimize by aggregating the recursions, instead of + # calling one at a time. + # pylint: disable=protected-access + value = current_value._with_updates_impl(error_prefix + (prefix,), + one_update, validate) + # pylint: enable=protected-access + new_fields[prefix] = value + + # TODO(edloper): When validate=True, only validate the modified fields. 
+ try: + return StructuredTensor.from_fields( + new_fields, + shape=self.shape, + row_partitions=self.row_partitions, + nrows=self.nrows(), + validate=validate) + + except ValueError as e: + msg = '`StructuredTensor.with_updates` failed' + if error_prefix: + msg = '{} for field {}'.format(msg, error_prefix) + raise ValueError(msg) from e + + def _promote_helper(self, source_path, new_parent_path): + """Creates a promoted field without adding it to the structure. + + Args: + source_path: the source path in the structured tensor. + new_parent_path: the new parent path. Must be a prefix of source_path. + + Returns: + a composite tensor of source_path promoted. + Raises: + ValueError: if the shape of the field is unknown and the right strategy + cannot be determined. + """ + current_field = self.field_value(source_path) + new_parent_rank = self.field_value(new_parent_path).rank + parent_rank = self.field_value(source_path[:-1]).rank + if new_parent_rank == parent_rank: + return current_field + current_field_rank = current_field.shape.rank + if current_field_rank is None: + raise ValueError('Cannot determine if dimensions should be merged.') + inner_dim = min(parent_rank, current_field_rank - 1) + if inner_dim <= new_parent_rank: + return current_field + return _merge_dims_generic(current_field, new_parent_rank, inner_dim) + + def promote(self, source_path, new_name): + """Promotes a field, merging dimensions between grandparent and parent. + + >>> d = [ + ... {'docs': [{'tokens':[1, 2]}, {'tokens':[3]}]}, + ... {'docs': [{'tokens':[7]}]}] + >>> st = tf.experimental.StructuredTensor.from_pyval(d) + >>> st2 =st.promote(('docs','tokens'), 'docs_tokens') + >>> st2[0]['docs_tokens'] + + >>> st2[1]['docs_tokens'] + + + Args: + source_path: the path of the field or substructure to promote; must have + length at least 2. + new_name: the name of the new field (must be a string). 
+ + Returns: + a modified structured tensor with the new field as a child of the + grandparent of the source_path. + + Raises: + ValueError: if source_path is not a list or a tuple or has a length + less than two, or new_name is not a string, or the rank + of source_path is unknown and it is needed. + """ + if not isinstance(new_name, str): + raise ValueError('new_name is not a string') + if not isinstance(source_path, (list, tuple)): + raise ValueError('source_path must be a list or tuple') + + if len(source_path) < 2: + raise ValueError('source_path must have length at least two') + + grandparent_path = source_path[:-2] + new_field = self._promote_helper(source_path, grandparent_path) + new_path = grandparent_path + (new_name,) + return self.with_updates({new_path: new_field}) + + #============================================================================= + # Properties + #============================================================================= + + @property + def rank(self): + """The rank of this StructuredTensor. Guaranteed not to be `None`.""" + return self._ragged_shape.rank + + @property + def shape(self): + """The static shape of this StructuredTensor. + + The returned `TensorShape` is guaranteed to have a known rank, but the + individual dimension sizes may be unknown. + + Returns: + `tf.TensorShape` + """ + return self._ragged_shape._to_tensor_shape() # pylint: disable=protected-access + + # TODO(martinz): for backwards compatibility + @property + def _row_partitions(self): + """Deprecated form of row_partitions.""" + return self.row_partitions + + # TODO(edloper): Make this a func instead of a property? Or make nrows + # a property instead of a func? Seems like these should be consistent. + @property + def row_partitions(self): + """A tuple of `RowPartition`s defining the shape of this `StructuredTensor`. + + When `self.rank <= 1`, this tuple will be empty. 
+ + When `self.rank > 1`, these `RowPartitions` define the shape of the + `StructuredTensor` by describing how a flat (1D) list of structures can be + repeatedly partitioned to form a higher-dimensional object. In particular, + the flat list is first partitioned into sublists using `row_partitions[-1]`, + and then those sublists are further partitioned using `row_partitions[-2]`, + etc. The following examples show the row partitions used to describe + several different `StructuredTensor`, each of which contains 8 copies of + the same structure (`x`): + + >>> x = {'a': 1, 'b': ['foo', 'bar', 'baz']} # shape = [] (scalar) + + >>> s1 = [[x, x, x, x], [x, x, x, x]] # shape = [2, 4] + >>> tf.experimental.StructuredTensor.from_pyval(s1).row_partitions + (tf.RowPartition(row_splits=[0 4 8]),) + + >>> s2 = [[x, x], [x, x], [x, x], [x, x]] # shape = [4, 2] + >>> tf.experimental.StructuredTensor.from_pyval(s2).row_partitions + (tf.RowPartition(row_splits=[0 2 4 6 8]),) + + >>> s3 = [[x, x, x], [], [x, x, x, x], [x]] # shape = [2, None] + >>> tf.experimental.StructuredTensor.from_pyval(s3).row_partitions + (tf.RowPartition(row_splits=[0 3 3 7 8]),) + + >>> s4 = [[[x, x], [x, x]], [[x, x], [x, x]]] # shape = [2, 2, 2] + >>> tf.experimental.StructuredTensor.from_pyval(s4).row_partitions + (tf.RowPartition(row_splits=[0 2 4]), + tf.RowPartition(row_splits=[0 2 4 6 8])) + + + >>> s5 = [[[x, x], [x]], [[x, x]], [[x, x], [x]]] # shape = [3, None, None] + >>> tf.experimental.StructuredTensor.from_pyval(s5).row_partitions + (tf.RowPartition(row_splits=[0 2 3 5]), + tf.RowPartition(row_splits=[0 2 3 5 7 8])) + + Note that shapes for nested fields (such as `x['b']` in the above example) + are not considered part of the shape of a `StructuredTensor`, and are not + included in `row_partitions`. 

    If this `StructuredTensor` has a ragged shape (i.e., if any of the
    `row_partitions` is not uniform in size), then all fields will be encoded
    as either `RaggedTensor`s or `StructuredTensor`s with these `RowPartition`s
    used to define their outermost `self.rank` dimensions.

    Returns:
      A `tuple` of `RowPartition` objects with length `self.rank - 1`
      (or `0` if `self.rank < 2`)

    """
    if self.rank < 2:
      return ()
    return self._ragged_shape._as_row_partitions()  # pylint:disable=protected-access

  def nrows(self):
    """The number of rows in this StructuredTensor (if rank>0).

    This means the length of the outer-most dimension of the StructuredTensor.

    Notice that if `self.rank > 1`, then this equals the number of rows
    of the first row partition. That is,
    `self.nrows() == self.row_partitions[0].nrows()`.

    Otherwise `self.nrows()` will be the first dimension of the field values.

    Returns:
      A scalar integer `Tensor` (or `None` if `self.rank == 0`).
    """
    if self.rank == 0:
      return None
    return self._ragged_shape[0]

  def with_shape_dtype(self, dtype: dtypes.DType) -> 'StructuredTensor':
    """Returns a copy of this StructuredTensor with the given shape dtype.

    Args:
      dtype: the dtype for the ragged shape and for the row partitions of
        every field (presumably `tf.int32` or `tf.int64` -- confirm against
        `DynamicRaggedShape.with_dtype`).

    Returns:
      `self` if the shape dtype already matches; otherwise a new
      `StructuredTensor` with the shape and all fields converted to `dtype`.
    """
    if dtype == self._ragged_shape.dtype:
      return self
    return StructuredTensor(
        fields=_fields_with_dtype(self._fields, dtype),
        ragged_shape=self._ragged_shape.with_dtype(dtype))

  def _is_eager(self):
    """True if all fields are composed of eager tensors."""
    # Flattening with expand_composites=True reaches the component tensors of
    # nested RaggedTensors/StructuredTensors, not just the top-level fields.
    tensors = nest.flatten(self, expand_composites=True)
    return all(isinstance(t, ops.EagerTensor) for t in tensors)

  #=============================================================================
  # Encoding
  #=============================================================================

  def field_names(self):
    """Returns the string field names for this `StructuredTensor`."""
    return tuple(self._fields.keys())

  def field_value(self, field_name):
    """Returns the tensor value for the specified field or path.
+ + If `field_name` is a `string`, then it names a field directly owned by this + `StructuredTensor`. If this `StructuredTensor` has shape `[D1...DN]`, then + the returned tensor will have shape `[D1...DN, V1...VM]`, where the slice + `result[d1...dN]` contains the field value for the structure at + `self[d1...dN]`. + + If `field_name` is a `tuple` of `string`, then it specifies a path to a + field owned by nested `StructuredTensor`. In particular, + `struct.field_value((f1, f2, ..., fN))` is equivalent to + `struct.field_value(f1).field_value(f2)....field_value(fN)` + + Args: + field_name: `string` or `tuple` of `string`: The field whose values should + be returned. + + Returns: + `Tensor`, `StructuredTensor`, or `RaggedTensor`. + + Raises: + KeyError: If the given field_name is not found. + """ + if isinstance(field_name, (list, tuple)): + value = self + for f in field_name: + if not isinstance(value, StructuredTensor): + raise KeyError('Field path {} not found in {}'.format( + field_name, self)) + value = value.field_value(f) + return value + return self._fields[field_name] + + #============================================================================= + # Operators + #============================================================================= + + # TODO(edloper): Add support for ellipsis and/or newaxis? + def __getitem__(self, key): + """Returns the specified piece of this StructuredTensor. + + * If `struct_tensor` is scalar (i.e., a single structure), then + `struct_tensor[f]` returns the value of field `f` (where `f` must be a + string). + + * If `struct_tensor` is non-scalar (i.e., a vector or higher-dimensional + tensor of structures), `struct_tensor[i]` selects an element or slice of + the tensor using standard Python semantics (e.g., negative values index + from the end). 
`i` may have any of the following types: + + * `int` constant + * `string` constant + * scalar integer `Tensor` + * `slice` containing integer constants and/or scalar integer + `Tensor`s + + #### Multidimensional indexing + + `StructuredTensor` supports multidimensional indexing. I.e., `key` may be a + `tuple` of values, indexing or slicing multiple dimensions at once. For + example, if `people` is a vector of structures, each of which has a vector- + valued `names` field, then `people[3, 'names', 0]` is equivalent to + `people[3]['names'][0]`; and `people[:, 'names', :]` will return a (possibly + ragged) matrix of names, with shape `[num_people, num_names_per_person]`. + + Args: + key: Indicates which piece of the StructuredTensor to return. + + Returns: + A `Tensor`, `StructuredTensor`, or `RaggedTensor`. + """ + if isinstance(key, list): + key = tuple(key) + elif not isinstance(key, tuple): + key = (key,) + if not key: + return self + + if self.rank == 0: + return self._scalar_getitem(key) + else: + return self._tensor_getitem(key) + + def _scalar_getitem(self, key): + if (isinstance(key[0], slice) and key[0].start is None and + key[0].stop is None and key[0].step is None): + fields = dict((field_name, field_value.__getitem__(key[1:])) + for (field_name, field_value) in self._fields.items()) + return StructuredTensor.from_fields(fields, self.shape) + + elif not isinstance(key[0], compat.bytes_or_text_types): + raise ValueError('Key for indexing a StructuredTensor must be a ' + "string or a full slice (':')") + + return self._fields[key[0]].__getitem__(key[1:]) + + def _tensor_getitem(self, key): + rank = self.rank + if len(key) <= rank: + new_fields = dict((field_name, field_value.__getitem__(key)) + for (field_name, field_value) in self._fields.items()) + result_shape = self.shape.as_list() + for d, k in enumerate(key): + if isinstance(k, slice): + if not (k.start is None and k.stop is None and k.step is None): + # TODO(edloper): Better static shape analysis 
here. + result_shape[d] = None + elif isinstance(k, (int, tensor.Tensor)): + result_shape[d] = -1 # mark for deletion + elif k is None: + raise ValueError('Slicing not supported for tf.newaxis') + else: + # Ellipsis, tf.newaxis: + raise ValueError('Slicing not supported for %r' % k) + result_shape = [d for d in result_shape if d != -1] + return StructuredTensor.from_fields(new_fields, result_shape) + + else: + if not isinstance(key[rank], compat.bytes_or_text_types): + # TODO(edloper): Also support full slice here? + raise ValueError('Key for indexing a StructuredTensor must be a string') + return self._fields[key[rank]].__getitem__(key[:rank] + key[rank + 1:]) + + def __repr__(self): + fields = sorted(self._fields.items()) + fields = ((k, str(v).replace('\n', '\n ')) for k, v in fields) + fields = ('"{}": {}'.format(k, v) for k, v in fields) + dict_repr = ',\n '.join(fields) + return ('' % (dict_repr, self.shape)) + + #============================================================================= + # Conversion + #============================================================================= + + def to_pyval(self): + """Returns this StructuredTensor as a nested Python dict or list of dicts. + + Converts this `StructuredTensor` to a nested python value: + + * `StructTensors` with `rank=0` are converted into a dictionary, with an + entry for each field. Field names are used as keys and field values are + converted to python values. In particular: + + * Scalar Tensor fields are converted to simple values (such as + `int` or `float` or `string`) + * Non-scalar Tensor fields and RaggedTensor fields are converted to + nested lists of simple values. + * StructuredTensor fields are converted recursively using `to_pyval`. + + * `StructTensors` with `rank>0` are converted to nested python `list`s, + containing one dictionary for each structure (where each structure's + dictionary is defined as described above). + + Requires that all fields are Eager tensors. 

    >>> tf.experimental.StructuredTensor.from_fields(
    ...     {'a': [1, 2, 3]}, [3]).to_pyval()
    [{'a': 1}, {'a': 2}, {'a': 3}]

    Note that `StructuredTensor.from_pyval(pyval).to_pyval() == pyval`.

    Returns:
      A nested Python dict or list of dicts.
    """
    if not self._is_eager():
      raise ValueError(
          'StructuredTensor.to_pyval() is only supported in eager mode.')

    # Convert each field value to a nested list.
    result = {}
    for (key, value) in self._fields.items():
      if isinstance(value, ops.EagerTensor):
        value = value.numpy()
      if isinstance(value, np.ndarray):
        value = value.tolist()
      elif isinstance(value, ragged_tensor.RaggedTensor):
        value = value.to_list()
      elif isinstance(value, StructuredTensor):
        value = value.to_pyval()
      # TODO(edloper): Throw an exception if value is an unexpected type.
      result[key] = value

    # If rank>0, then re-group each value from dict-of-list to list-of-dict.
    if len(self.shape) > 0:  # pylint: disable=g-explicit-length-test
      if not result:  # special-case for StructuredTensors w/ no fields.
        return _empty_dict_pylist_from_row_partitions(self.row_partitions,
                                                      self.nrows())
      return _pyval_field_major_to_node_major(
          list(result.keys()), list(result.values()), self.rank)
    else:
      return result

  @classmethod
  def from_pyval(cls, pyval, typespec=None):
    """Constructs a StructuredTensor from a nested Python structure.

    >>> tf.experimental.StructuredTensor.from_pyval(
    ...     {'a': [1, 2, 3], 'b': [[4, 5], [6, 7]]})
    <StructuredTensor(
        fields={
            "a": tf.Tensor([1 2 3], shape=(3,), dtype=int32),
            "b": <tf.RaggedTensor [[4, 5], [6, 7]]>},
        shape=())>

    Note that `StructuredTensor.from_pyval(pyval).to_pyval() == pyval`.

    Args:
      pyval: The nested Python structure that should be used to create the new
        `StructuredTensor`.
      typespec: A `StructuredTensor.Spec` specifying the expected type for each
        field. If not specified, then all nested dictionaries are turned into
        StructuredTensors, and all nested lists are turned into Tensors (if
        rank<2) or RaggedTensors (if rank>=2).

    Returns:
      A `StructuredTensor`.
+ """ + return cls._from_pyval(pyval, typespec, ()) + + @classmethod + def _from_pyval(cls, pyval, typespec, path_so_far): + """Helper function for from_pyval. + + + Args: + pyval: The nested Python structure that should be used to create the new + `StructuredTensor`. + typespec: A `StructuredTensor.Spec` specifying the expected type for each + field. If not specified, then all nested dictionaries are turned into + StructuredTensors, and all nested lists are turned into Tensors (if + rank<2) or RaggedTensors (if rank>=2). + path_so_far: the path of fields that led here (for error messages). + + Returns: + A `StructuredTensor`. + """ + if isinstance(pyval, dict): + return cls._from_pydict(pyval, typespec, path_so_far) + elif isinstance(pyval, (list, tuple)): + keys = set() + rank = _pyval_find_struct_keys_and_depth(pyval, keys) + if rank is not None: + return cls._from_pylist_of_dict(pyval, keys, rank, typespec, + path_so_far) + else: + return cls._from_pylist_of_value(pyval, typespec, path_so_far) + else: + return cls._from_pyscalar(pyval, typespec, path_so_far) + + @classmethod + def _from_pydict(cls, pyval, typespec, path_so_far): + """Converts python dictionary `pyval` to a StructuredTensor with rank=0.""" + if typespec is None: + fields = dict((k, cls._from_pyval(v, None, path_so_far + (k,))) + for (k, v) in pyval.items()) + else: + spec_shape = typespec._shape # pylint: disable=protected-access + field_specs = typespec._field_specs # pylint: disable=protected-access + if not (isinstance(typespec, StructuredTensor.Spec) and + spec_shape.rank == 0 and set(pyval) == set(field_specs)): + raise ValueError('Value at %r does not match typespec: %r vs %r' % + (path_so_far, pyval, typespec)) + fields = dict((k, cls._from_pyval(v, field_specs[k], path_so_far + (k,))) + for (k, v) in pyval.items()) + return StructuredTensor.from_fields(fields=fields, shape=(), validate=False) + + @classmethod + def _from_pylist_of_dict(cls, pyval, keys, rank, typespec, path_so_far): + 
"""Converts python list `pyval` to a StructuredTensor with rank>1.""" + fields = dict((key, []) for key in keys) + for child in pyval: + _pyval_update_fields(child, fields, 1) + if typespec is None: + shape = tensor_shape.TensorShape([None] * rank) + for (key, target) in fields.items(): + fields[key] = cls._from_pyval(target, None, path_so_far + (key,)) + else: + field_specs = typespec._fields # pylint: disable=protected-access + if ((not isinstance(typespec, StructuredTensor.Spec)) or # pylint: disable=superfluous-parens + (set(fields) - set(field_specs))): + raise ValueError('Value at %r does not match typespec: %r vs %r' % + (path_so_far, pyval, typespec)) + shape = typespec._shape + if shape.rank < rank: + raise ValueError('Value at %r does not match typespec (rank mismatch): ' + '%r vs %r' % (path_so_far, pyval, typespec)) + for (key, spec) in field_specs.items(): + fields[key] = cls._from_pyval( + fields.get(key, []), spec, path_so_far + (key,)) + try: + if not fields and typespec is None: + # TODO(b/183245576): handle cases where the typespec is known + # but the dictionary is empty. 
+ return StructuredTensor._from_pylist_of_empty_dict(pyval, rank) + return StructuredTensor.from_fields( + fields=fields, shape=shape, validate=False) + except Exception as exc: + raise ValueError('Error parsing path %r' % (path_so_far,)) from exc + + @classmethod + def _from_pylist_of_empty_dict(cls, pyval, rank): + """Converts a pylist of empty dictionaries to StructuredTensors.""" + if rank == 0: + return StructuredTensor.from_fields(fields={}, shape=(), validate=False) + elif rank == 1: + nrows = len(pyval) + shape = (nrows,) + return StructuredTensor.from_fields(fields={}, shape=shape, nrows=nrows) + elif rank > 1: + ragged_zeros = ragged_factory_ops.constant(_dicts_to_zeros(pyval)) + nrows = len(pyval) + shape = tensor_shape.TensorShape([len(pyval)] + ([None] * (rank - 1))) + return StructuredTensor.from_fields( + fields={}, + shape=shape, + row_partitions=ragged_zeros._nested_row_partitions, # pylint:disable=protected-access + nrows=nrows) + + @classmethod + def _from_pylist_of_value(cls, pyval, typespec, path_so_far): + """Converts python list `pyval` to a Tensor or RaggedTensor with rank>1.""" + if typespec is None: + try: + return ragged_factory_ops.constant(pyval) + except Exception as exc: + raise ValueError('Error parsing path %r' % (path_so_far,)) from exc + elif isinstance(typespec, tensor.TensorSpec): + try: + result = constant_op.constant(pyval, typespec.dtype) + except Exception as exc: + raise ValueError('Error parsing path %r' % (path_so_far,)) from exc + if not typespec.shape.is_compatible_with(result.shape): + raise ValueError('Value at %r does not match typespec: %r vs %r' % + (path_so_far, typespec, pyval)) + return result + elif isinstance(typespec, ragged_tensor.RaggedTensorSpec): + # pylint: disable=protected-access + try: + return ragged_factory_ops.constant( + pyval, + dtype=typespec._dtype, + ragged_rank=typespec._ragged_rank, + row_splits_dtype=typespec._row_splits_dtype, + inner_shape=typespec._shape[typespec._ragged_rank + 1:]) + 
except Exception as exc: + raise ValueError('Error parsing path %r' % (path_so_far,)) from exc + elif isinstance(typespec, StructuredTensor.Spec): + empty_rank = _pyval_empty_list_depth(pyval) + if empty_rank is None: + raise ValueError('Value at %r does not match typespec: %r vs %r' % + (path_so_far, typespec, pyval)) + else: + return cls._from_pylist_of_dict(pyval, set(), empty_rank, typespec, + path_so_far) + else: + raise ValueError('Value at %r does not match typespec: %r vs %r' % + (path_so_far, typespec, pyval)) + + @classmethod + def _from_pyscalar(cls, pyval, typespec, path_so_far): + """Converts python scalar value `pyval` to a Tensor.""" + if typespec is None: + try: + return constant_op.constant(pyval) + except Exception as exc: + raise ValueError('Error parsing path %r' % (path_so_far,)) from exc + else: + if not (isinstance(typespec, tensor.TensorSpec) and + typespec.shape.rank == 0): + raise ValueError('Value at %r does not match typespec: %r vs %r' % + (path_so_far, typespec, pyval)) + # TODO(edloper): Check that typespec.shape matches. + return constant_op.constant(pyval, typespec.dtype) + + #============================================================================= + # Transforms + #============================================================================= + + # TODO(edloper): Add a 'validate' option here? + # TODO(edloper): Unify nomenclature with RaggedTensor. Should RaggedTensor + # have a partition_outer_dimension method? + def partition_outer_dimension(self, row_partition): + """Partitions the outer dimension of this StructuredTensor. + + Returns a new `StructuredTensor` with the same values as `self`, where + the outer dimension is partitioned into two (possibly ragged) dimensions. + Requires that this StructuredTensor have an outer dimension (i.e., + `self.shape.rank > 0`). + + >>> st = tf.experimental.StructuredTensor.from_pyval( + ... 
  def merge_dims(self, outer_axis, inner_axis):
    """Merges outer_axis...inner_axis into a single dimension.

    Returns a copy of this RaggedTensor with the specified range of dimensions
    flattened into a single dimension, with elements in row-major order.

    >>> st = tf.experimental.StructuredTensor.from_pyval(
    ...     [[{'foo': 12}, {'foo': 33}], [], [{'foo': 99}]])
    >>> st.merge_dims(0, 1)
    <StructuredTensor(
        fields={
            "foo": tf.Tensor([12 33 99], shape=(3,), dtype=int32)},
        shape=(3,))>

    Args:
      outer_axis: `int`: The first dimension in the range of dimensions to
        merge. May be negative (to index from the last dimension).
      inner_axis: `int`: The last dimension in the range of dimensions to merge.
        May be negative (to index from the last dimension).

    Returns:
      A copy of this tensor, with the specified dimensions merged into a
      single dimension.  The shape of the returned tensor will be
      `self.shape[:outer_axis] + [N] + self.shape[inner_axis + 1:]`, where `N`
      is the total number of slices in the merged dimensions.
    """
    # Normalize negative axes before comparing them.
    outer_axis = array_ops.get_positive_axis(
        outer_axis,
        self.shape.rank,
        axis_name='outer_axis',
        ndims_name='rank(self)')
    inner_axis = array_ops.get_positive_axis(
        inner_axis,
        self.shape.rank,
        axis_name='inner_axis',
        ndims_name='rank(self)')
    if not outer_axis <= inner_axis:
      raise ValueError('Expected outer_axis (%d) to be less than or equal to '
                       'inner_axis (%d)' % (outer_axis, inner_axis))
    return _merge_dims(self, outer_axis, inner_axis)

  class Spec:
    """A spec for StructuredTensor."""

    def __validate__(self):
      # Invariant: a Spec always carries a ragged shape.
      assert self._ragged_shape is not None

    @classmethod
    def _from_fields_and_rank(cls, fields, rank):
      """Creates a spec of a StructuredTensor with fields and rank.

      Each field spec's shape is truncated to `rank` dims and merged with the
      others, so all fields must agree on the first `rank` dimensions.
      """
      shape = None
      for (k, v) in fields.items():
        field_shape_untruncated = _dynamic_ragged_shape_spec_from_spec(v)
        if field_shape_untruncated is None:
          raise ValueError(f'Cannot convert spec of {k}.')
        untruncated_rank = field_shape_untruncated.rank
        if (untruncated_rank is not None and untruncated_rank < rank):
          raise ValueError(f'Rank of field {k} is {untruncated_rank}, '
                           f'but must be at least {rank}.')
        field_shape = field_shape_untruncated._truncate(rank)  # pylint: disable=protected-access
        if shape is None:
          shape = field_shape
        else:
          shape = shape._merge_with(field_shape)
      return StructuredTensor.Spec(_ragged_shape=shape, _fields=fields)

    @classmethod
    def _from_shape(
        cls, shape: dynamic_ragged_shape.DynamicRaggedShape
    ) -> 'StructuredTensor.Spec':
      """Creates the spec of an empty StructuredTensor."""
      return StructuredTensor.Spec(_ragged_shape=shape, _fields={})

    # For backwards compatibility
    @property
    def _shape(self) -> tensor_shape.TensorShape:
      return self._ragged_shape._to_tensor_shape()  # pylint: disable=protected-access

    # For backwards compatibility
    @property
    def _field_specs(self) -> Dict[str, type_spec.TypeSpec]:
      return self._fields

    # For backwards compatibility
    @property
    def shape(self) -> tensor_shape.TensorShape:
      return self._shape

    # For backwards compatibility
    @property
    def rank(self):
      return self._ragged_shape.rank


# Regular expression used to determine whether a string is a valid field name.
# Note: we plan to relax (or possibly eliminate) this in the future; you
# should not rely on the fact that some field names are currently disallowed.
_FIELD_NAME_RE = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

#=============================================================================
# Helper functions
#=============================================================================
# TODO(edloper): Move some of these helpers to row_partition.py?


def _convert_to_structured_field_value(value):
  """Converts `value` to a Tensor, RaggedTensor, or StructuredTensor."""
  if isinstance(value,
                (tensor.Tensor, ragged_tensor.RaggedTensor, StructuredTensor)):
    return value
  elif ragged_tensor.is_ragged(value):
    return ragged_tensor.convert_to_tensor_or_ragged_tensor(value)
  elif isinstance(value, extension_type.ExtensionType):
    # Extension types are passed through unchanged.
    return value
  else:
    try:
      return ops.convert_to_tensor(value)
    except (ValueError, TypeError) as e:
      raise TypeError('Unexpected type for value in `fields`: %r' %
                      value) from e


def _find_shape_dtype(
    fields: Mapping[str, _FieldValue], nrows: Optional[tensor.Tensor],
    row_partitions: Optional[Sequence[RowPartition]]) -> dtypes.DType:
  """Return a consistent dtype for fields, nrows, & row_partitions.

  In the future, the default will switch from int64 to int32, but for now,
  we stick with int64.

  Args:
    fields: the fields of the StructuredTensor.
    nrows: the nrows of the StructuredTensor
    row_partitions: the row_partitions of the StructuredTensor.

  Returns:
    If anything requires int64, then return int64.
    If int32 is explicitly specified, return int32. Otherwise, return int64.
  """
  # Fields without an explicit row-splits dtype contribute None entries;
  # the membership tests below simply ignore them.
  field_dtypes = [_field_shape_dtype(v) for v in fields.values()]
  nrows_dtypes = [nrows.dtype] if isinstance(nrows, tensor.Tensor) else []
  rp_dtypes = [] if row_partitions is None else [
      rp.dtype for rp in row_partitions
  ]

  all_dtypes = field_dtypes + nrows_dtypes + rp_dtypes

  if dtypes.int64 in all_dtypes:
    return dtypes.int64
  if dtypes.int32 in all_dtypes:
    return dtypes.int32

  # TODO(martinz): Eventually, shift this to tf.int32.
  return dtypes.int64
def _merge_nrows(nrows, static_nrows, value, dtype, validate):
  """Merges `nrows` with `nrows(value)`.

  Checks that `value` has the expected number of rows (`nrows`), and returns
  `nrows`.  If `validate` is true, then add validation ops that check that
  the `nrows` values match.

  Args:
    nrows: scalar integer Tensor.
    static_nrows: tf.Dimension: static value of nrows, if known.
    value: Tensor or RaggedTensor or StructuredTensor
    dtype: dtype for `nrows`.
    validate: bool -- whether to add validation ops.

  Returns:
    A tuple `(nrows, static_nrows)`.
  """
  static_value_nrows = tensor_shape.dimension_at_index(value.shape, 0)
  if isinstance(value, tensor.Tensor):
    value_nrows = array_ops.shape(value, out_type=dtype)[0]
  else:
    # RaggedTensor and StructuredTensor both expose `nrows()`.
    value_nrows = value.nrows()
  if nrows is None:
    nrows = value_nrows
  elif (static_value_nrows.value is not None and
        static_nrows.value is not None):
    # Both sizes are statically known: check compatibility eagerly and skip
    # the runtime assertion op.
    if not static_value_nrows.is_compatible_with(static_nrows):
      raise ValueError('fields have incompatible nrows')
    nrows = value_nrows  # No need to add an assertion op.
  elif validate:
    nrows = control_flow_ops.with_dependencies([
        check_ops.assert_equal(
            nrows, value_nrows, message='fields have incompatible nrows')
    ], nrows)
  return nrows, static_nrows._merge_with(static_value_nrows)  # pylint: disable=protected-access


def _merge_row_partitions(row_partitions, value, rank, dtype, validate):
  """Merges `row_partitions` with `row_partitions(value)`.

  Args:
    row_partitions: An existing tuple of `RowPartition`s, or None.
    value: Tensor, RaggedTensor, or StructuredTensor whose partitions are
      extracted and merged.
    rank: The number of dimensions to partition (yields rank - 1 partitions).
    dtype: dtype for newly constructed row partitions.
    validate: bool -- whether merged partitions get validation ops.
  """
  if isinstance(value, tensor.Tensor):
    value_row_partitions = _row_partitions_for_tensor(value, rank, dtype)

  elif isinstance(value, ragged_tensor.RaggedTensor):
    value_row_partitions = _row_partitions_for_ragged_tensor(value, rank, dtype)

  else:
    assert isinstance(value, StructuredTensor), type(value)
    value_row_partitions = value.row_partitions[:rank - 1]

  assert len(value_row_partitions) == rank - 1
  if row_partitions is None:
    return tuple(value_row_partitions)
  else:
    # Pairwise-merge each partition so precomputed encodings are shared.
    return tuple([
        p1._merge_precomputed_encodings(p2, validate)  # pylint: disable=protected-access
        for (p1, p2) in zip(row_partitions, value_row_partitions)
    ])


def _row_partitions_for_tensor(value, rank, dtype):
  """Returns the row partitions for a tf.Tensor."""
  shape = array_ops.shape(value, out_type=dtype)
  return _row_partitions_for_uniform_shape(shape, rank)


def _row_partitions_for_ragged_tensor(value, rank, dtype):
  """Returns the row partitions for a tf.RaggedTensor.

  If the ragged tensor has fewer than `rank - 1` nested partitions, the
  remaining (uniform) partitions are synthesized from `flat_values`' shape.
  """
  assert rank > 1
  value_row_partitions = value._nested_row_partitions[:rank - 1]  # pylint: disable=protected-access
  if len(value_row_partitions) < (rank - 1):
    value_row_partitions += _row_partitions_for_tensor(
        value.flat_values, rank - len(value_row_partitions), dtype)
  assert len(value_row_partitions) == rank - 1
  return value_row_partitions


def _row_partitions_for_uniform_shape(shape, rank):
  """Returns row partitions for the given shape Tensor.

  Args:
    shape: A vector describing a uniform shape.
    rank: The number of dimensions to generate row partitions for

  Returns:
    A list of (rank-1) `RowPartition`s with uniform row length.
  """
  # shape_cumprod[i] is the number of cells spanned by the first i+1 dims;
  # it supplies both nrows and nvals for each uniform partition.
  shape_cumprod = math_ops.cumprod(shape[:rank])
  # pylint: disable=g-complex-comprehension
  return tuple([
      RowPartition.from_uniform_row_length(
          uniform_row_length=shape[i + 1],
          nvals=shape_cumprod[i + 1],
          nrows=shape_cumprod[i]) for i in range(rank - 1)
  ])
def _pyval_field_major_to_node_major(keys, values, depth):
  """Regroup each field (k, v) from dict-of-list to list-of-dict.

  Given a "field-major" encoding of the StructuredTensor (which maps each key
  to a single nested list containing the values for all structs), return a
  corresponding "node-major" encoding, consisting of a nested list of dicts.

  Args:
    keys: The field names (list of string). Must not be empty.
    values: The field values (list of python values). Must have the same length
      as `keys`.
    depth: The list depth at which dictionaries should be created.

  Returns:
    A nested list of dict, with depth `depth`.
  """
  assert keys
  if depth == 0:
    return dict(zip(keys, values))
  # All field columns must cover the same number of nodes at this level.
  expected_len = len(values[0])
  assert all(len(column) == expected_len for column in values[1:])
  result = []
  for row in zip(*values):
    result.append(_pyval_field_major_to_node_major(keys, row, depth - 1))
  return result


def _empty_dict_pylist_from_row_partitions(row_partitions, nrows):
  """Returns a python list of empty dicts from the given row partitions.

  Args:
    row_partitions: The row-partitions describing the ragged shape of the
      result.
    nrows: The number of rows in the outermost row-partition.  (Or if
      `len(row_partitions)==0`, then the number of empty dicts to return.)

  Returns:
    A nested python list whose leaves (if any) are empty python dicts.
  """
  if not row_partitions:
    return [{} for _ in range(nrows)]
  outer = row_partitions[0]
  # Recursively build the flattened inner values, then regroup them using
  # this level's row splits.
  inner_values = _empty_dict_pylist_from_row_partitions(
      row_partitions[1:], outer.row_splits()[-1])
  split_points = outer.row_splits()
  return [
      inner_values[split_points[i]:split_points[i + 1]]
      for i in range(len(split_points) - 1)
  ]


def _pyval_find_struct_keys_and_depth(pyval, keys):
  """Finds the keys & depth of nested dictionaries in `pyval`.

  Args:
    pyval: A nested structure of lists, tuples, and dictionaries.
    keys: (output parameter) A set, which will be updated with any keys that
      are found in the nested dictionaries.

  Returns:
    The nesting depth of dictionaries in `pyval`, or `None` if `pyval` does
    not contain any dictionaries.
  Raises:
    ValueError: If dictionaries have inconsistent depth.
  """
  if isinstance(pyval, dict):
    keys.update(pyval.keys())
    return 0
  if not isinstance(pyval, (list, tuple)):
    return None
  depth = None
  for element in pyval:
    element_depth = _pyval_find_struct_keys_and_depth(element, keys)
    if element_depth is None:
      continue
    if depth is None:
      depth = element_depth + 1
    elif depth != element_depth + 1:
      raise ValueError('Inconsistent depth of dictionaries')
  return depth


def _pyval_update_fields(pyval, fields, depth):
  """Append the field values from `pyval` to `fields`.

  Args:
    pyval: A python `dict`, or nested list/tuple of `dict`, whose value(s)
      should be appended to `fields`.
    fields: A dictionary mapping string keys to field values.  Field values
      extracted from `pyval` are appended to this dictionary's values.
    depth: The depth at which `pyval` should be appended to the field values.
  """
  if not isinstance(pyval, (dict, list, tuple)):
    raise ValueError('Expected dict or nested list/tuple of dict')

  is_dict = isinstance(pyval, dict)
  for key, bucket in fields.items():
    # Walk down to the most recently added sublist at level `depth`, then
    # append either this dict's value or a fresh sublist for a list node.
    for _ in range(depth - 1):
      bucket = bucket[-1]
    bucket.append(pyval[key] if is_dict else [])

  if not is_dict:
    for child in pyval:
      _pyval_update_fields(child, fields, depth + 1)


def _pyval_empty_list_depth(pyval):
  """Find the max depth for nested empty lists.

  Args:
    pyval: A nested python list.

  Returns:
    The maximum depth of empty lists in `pyval`, or None if `pyval` contains
    anything other than nested empty lists.
  """
  if not isinstance(pyval, list):
    return None
  if not pyval:
    return 1
  child_depths = [_pyval_empty_list_depth(child) for child in pyval]
  if None in child_depths:
    return None
  return max(child_depths) + 1


def _replace_row_partitions(value, new_partitions):
  """Updates `value` to use `new_partitions` as its (outer) row partitions.

  This is used to ensure that all fields in a `StructuredTensor` use identical
  `RowPartition` objects for the shared dimensions.  In particular,
  `StructuredTensor.from_fields` first merges all of the row partitions from
  any fields, and then replaces the outer row partitions of all fields with
  the merged row partitions (using this function).

  Args:
    value: A `Tensor`, `RaggedTensor`, or `StructuredTensor`.
    new_partitions: A list of row-partitions that should be used by `value`.
      Must be equivalent to `value`'s current row partitions.

  Returns:
    A value that is equivalent to `value`, where outer row partitions have
    been replaced by `new_partitions`.
  """
  if isinstance(value, tensor.Tensor) or not new_partitions:
    return value

  if isinstance(value, ragged_tensor.RaggedTensor):
    return ragged_tensor.RaggedTensor._from_row_partition(  # pylint: disable=protected-access
        values=_replace_row_partitions(value.values, new_partitions[1:]),
        row_partition=new_partitions[0])

  assert isinstance(value, StructuredTensor)
  updated_fields = {
      name: _replace_row_partitions(field, new_partitions)
      for (name, field) in value._fields.items()  # pylint: disable=protected-access
  }
  return StructuredTensor._old_init(  # pylint: disable=protected-access
      fields=updated_fields,
      shape=value.shape,
      nrows=value.nrows(),
      row_partitions=tuple(new_partitions) +
      tuple(value.row_partitions[len(new_partitions):]))
def _partition_outer_dimension(value, row_partition):
  """Partitions the outer dimension of `value` using `row_partitions`.

  Examples:

    >>> partition = RowPartition.from_row_lengths([2, 0, 1])
    >>> _partition_outer_dimension(tf.constant([1, 2, 3]), partition)
    <tf.RaggedTensor [[1, 2], [], [3]]>

    >>> struct_value = tf.experimental.StructuredTensor.from_pyval(
    ...     [{'x': 1}, {'x': 2}, {'x': 3}])
    >>> _partition_outer_dimension(struct_value, partition)
    <StructuredTensor(
        fields={
            "x": <tf.RaggedTensor [[1, 2], [], [3]]>},
        shape=(3, None))>

  Args:
    value: Tensor, RaggedTensor, or StructuredTensor
    row_partition: RowPartition

  Returns:
    A value with the same type as `value`, where
    `result.rank = value.rank + 1`.
  """
  is_ragged = row_partition.uniform_row_length() is None
  if isinstance(value, tensor.Tensor) and not is_ragged:
    # Dense value + uniform partition: a plain reshape suffices.
    new_shape = array_ops.concat(
        [[row_partition.nrows(),
          row_partition.uniform_row_length()],
         array_ops.shape(value, out_type=row_partition.dtype)[1:]],
        axis=0)
    return array_ops.reshape(value, new_shape)
  elif isinstance(value, (tensor.Tensor, ragged_tensor.RaggedTensor)):
    return ragged_tensor.RaggedTensor._from_row_partition(  # pylint: disable=protected-access
        value, row_partition)
  else:
    assert isinstance(value, StructuredTensor)
    nrows = row_partition.static_nrows
    ncols = row_partition.static_uniform_row_length
    shape = tensor_shape.TensorShape([nrows,
                                      ncols]).concatenate(value.shape[1:])
    # Partition every field with the same row_partition so they stay aligned.
    fields = dict((k, _partition_outer_dimension(v, row_partition))
                  for (k, v) in value._fields.items())
    return StructuredTensor._old_init(  # pylint: disable=protected-access
        fields, shape, row_partition.nrows(),
        (row_partition,) + value.row_partitions)


def _merge_dims(value, outer_axis, inner_axis):
  """Merges `outer_axis...inner_axis` of `value` into a single dimension."""
  assert outer_axis < inner_axis
  if isinstance(value, (tensor.Tensor, ragged_tensor.RaggedTensor)):
    return ragged_tensor.merge_dims(value, outer_axis, inner_axis)
  else:
    assert isinstance(value, StructuredTensor)
    # Merge the fields and the shape in lockstep.
    fields = dict((k, _merge_dims(v, outer_axis, inner_axis))
                  for (k, v) in value._fields.items())
    ragged_shape = value._ragged_shape._merge_dims(  # pylint: disable=protected-access
        outer_axis, inner_axis)
    return StructuredTensor(fields, ragged_shape)


_structured_tensor_factory_key = object()  # unique private object


def _dynamic_ragged_shape_spec_from_spec(
    spec: Union[dynamic_ragged_shape.DynamicRaggedShape.Spec,
                ragged_tensor.RaggedTensorSpec, StructuredTensor.Spec,
                tensor.TensorSpec]
) -> dynamic_ragged_shape.DynamicRaggedShape.Spec:
  """Extracts a DynamicRaggedShape.Spec from a tensor-like TypeSpec."""
  if isinstance(spec, StructuredTensor.Spec):
    return spec._ragged_shape  # pylint: disable=protected-access
  else:
    return dynamic_ragged_shape.DynamicRaggedShape.Spec._from_spec(spec)  # pylint: disable=protected-access


def _normalize_field_name_to_tuple(name: 'FieldName') -> Sequence[str]:
  """FieldName can be given also as string, this normalizes it to a tuple."""
  if isinstance(name, str):
    return (name,)
  if isinstance(name, list):
    return tuple(name)
  assert isinstance(name, tuple)
  return name


def _dicts_to_zeros(pyval):
  """Replaces dictionaries with zeros in a pylist."""
  if isinstance(pyval, dict):
    return 0
  return [_dicts_to_zeros(x) for x in pyval]


def _merge_dims_generic(source, outer, inner):
  """Merges outer_axis...inner_axis into a single dimension.

  If outer == inner, this is a NOOP. If inner < outer, then this fails.
  If inner >= source.shape.rank, then the behavior is undefined.

  Args:
    source: a tensor, ragged tensor, or structured tensor.
    outer: a python int, indicating the first dimension to compress (must be
      nonnegative).
    inner: a python int, indicating the first dimension to keep (of the tail)
      (must be nonnegative).

  Returns:
    source with outer_axis...inner_axis merged into a single dimension.

  """
  if isinstance(source, StructuredTensor):
    return source.merge_dims(outer, inner)
  else:
    return ragged_tensor.merge_dims(source, outer, inner)


def _dynamic_ragged_shape_from_tensor(
    field, dtype=None) -> dynamic_ragged_shape.DynamicRaggedShape:
  """Extension of DynamicRaggedShape.from_tensor to support StructuredTensor."""
  if isinstance(field, StructuredTensor):
    return field._ragged_shape  # pylint: disable=protected-access
  shape = array_ops.shape_v2(field, out_type=dtype)

  if isinstance(shape, tensor.Tensor):
    return dynamic_ragged_shape.DynamicRaggedShape(
        row_partitions=[], inner_shape=shape)
  elif isinstance(shape, dynamic_ragged_shape.DynamicRaggedShape):
    return shape
  # TODO(martinz): add a test for the following line.
  raise TypeError(f'Expected shape tf.shape({field}) to return a Tensor or a '
                  f'DynamicRaggedShape. Instead, got: {shape}.')
def _merge_with_optional(
    a: Optional[dynamic_ragged_shape.DynamicRaggedShape],
    b: Optional[dynamic_ragged_shape.DynamicRaggedShape]
) -> Optional[dynamic_ragged_shape.DynamicRaggedShape]:
  """Merges two optional shapes; None acts as the identity element."""
  if a is None:
    return b
  if b is None:
    return a
  return a._merge_with(b)  # pylint: disable=protected-access


def _shape_from_fields(
    fields, rank: int,
    dtype: dtypes.DType) -> Optional[dynamic_ragged_shape.DynamicRaggedShape]:
  """Given fields, rank, and dtype, create a shape."""

  field_shape = None
  for (k, field) in fields.items():
    try:
      next_field_shape_raw = _dynamic_ragged_shape_from_tensor(
          field, dtype=dtype)
      # Only the first `rank` dims are shared with the StructuredTensor.
      next_field_shape = next_field_shape_raw[:rank]
      field_shape = _merge_with_optional(field_shape, next_field_shape)
    except Exception as err:
      raise ValueError(f'Error in shape of {k}') from err

  return field_shape


def _field_shape_dtype(field: _FieldValue) -> Optional[dtypes.DType]:
  """Returns the row-splits/shape dtype of a field, or None for dense Tensors."""
  if isinstance(field, ragged_tensor.RaggedTensor):
    return field._row_partition.dtype  # pylint: disable=protected-access
  if isinstance(field, StructuredTensor):
    return field._ragged_shape.dtype  # pylint: disable=protected-access
  return None


def _field_with_shape_dtype(field: _FieldValue,
                            dtype: dtypes.DType) -> _FieldValue:
  """Casts a field's shape/row-splits dtype; dense Tensors pass through."""
  if isinstance(field, ragged_tensor.RaggedTensor):
    return field.with_row_splits_dtype(dtype)
  if isinstance(field, StructuredTensor):
    return field.with_shape_dtype(dtype)

  return field


def _fields_with_dtype(fields: Mapping[str, _FieldValue],
                       dtype: dtypes.DType) -> Mapping[str, _FieldValue]:
  """Applies `_field_with_shape_dtype` to every field."""
  return {k: _field_with_shape_dtype(v, dtype) for (k, v) in fields.items()}


# pylint:disable=protected-access
def _dynamic_ragged_shape_init(fields, shape, nrows, row_partitions):
  """Produce a DynamicRaggedShape for StructuredTensor.

  Merges shape information from `shape`, the fields' shapes, `nrows`, and
  `row_partitions` into a single consistent `DynamicRaggedShape`.

  Raises:
    TypeError: if `shape` has unknown rank.
    ValueError: if not enough information is available to determine the shape.
  """
  assert isinstance(fields, dict), fields
  assert isinstance(shape, tensor_shape.TensorShape), shape
  assert nrows is None or isinstance(nrows, tensor.Tensor) or isinstance(
      nrows, int), nrows
  assert row_partitions is None or isinstance(row_partitions,
                                              tuple), row_partitions
  rank = shape.rank

  if rank is None:
    raise TypeError("StructuredTensor's shape must have known rank.")

  # TODO(martinz): figure out whether to validate.
  dtype = _find_shape_dtype(fields, nrows, row_partitions)

  fields = _fields_with_dtype(fields, dtype)

  result = None
  if shape.is_fully_defined():
    result = dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape(
        shape.as_list(), dtype=dtype)

  # NOTE(review): when rank == 0 the fully-defined `result` computed above is
  # discarded; the rank-0 early return could precede it.
  if rank == 0:
    return dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape(
        array_ops.zeros((0,), dtype=dtype))

  result = _merge_with_optional(result, _shape_from_fields(fields, rank, dtype))
  if rank == 1:
    # A statically-known outer dimension takes precedence over `nrows`.
    alt_value = tensor_shape.dimension_value(shape[0])
    if alt_value is not None:
      nrows = alt_value
    if nrows is not None:
      result = _merge_with_optional(
          result,
          dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape(
              [nrows], dtype=dtype))
    if result is None:
      raise ValueError('Must specify `nrows`, a fully specified `shape`,' +
                       ' or have `fields` if `rank=1`')

    return result

  if row_partitions:
    result = _merge_with_optional(
        result,
        dynamic_ragged_shape.DynamicRaggedShape.from_row_partitions(
            row_partitions, dtype=dtype))

  if result is None:
    raise ValueError('Must specify row_partitions, a fully specified shape, ' +
                     'or have fields if rank > 1')
  return result


# TODO(martinz): Drop this method or rename.
def StructuredTensorSpec(shape, field_specs):  # pylint:disable=invalid-name
  """A placeholder for the old StructuredTensorSpec.

  Builds a `StructuredTensor.Spec` from an outer shape plus a dictionary of
  field specs, after validating the field specs against the requested rank.

  Args:
    shape: A `tf.TensorShape`-convertible outer shape for the spec.
    field_specs: A dict mapping field names (str) to `tf.TypeSpec` values.

  Returns:
    A `StructuredTensor.Spec`.

  Raises:
    TypeError: if `field_specs` is not a dict of str -> TypeSpec, or if
      `shape` has unknown rank.
    ValueError: if a field spec cannot be converted, or its rank is smaller
      than the outer shape's rank.
  """
  if not isinstance(field_specs, dict):
    raise TypeError('field_specs must be a dictionary.')
  # Validate all keys first, then all values, preserving the original
  # error-reporting order.
  if not all(isinstance(key, str) for key in field_specs):
    raise TypeError('field_specs must be a dictionary with string keys.')
  if not all(
      isinstance(spec, type_spec.TypeSpec) for spec in field_specs.values()):
    raise TypeError('field_specs must be a dictionary with TypeSpec values.')

  merged_shape = dynamic_ragged_shape.DynamicRaggedShape.Spec._from_tensor_shape(
      tensor_shape.as_shape(shape), 0, dtypes.int32)
  rank = merged_shape.rank
  if rank is None:
    raise TypeError("StructuredTensor's shape must have known rank.")
  for field_name, spec in field_specs.items():
    spec_shape = _dynamic_ragged_shape_spec_from_spec(spec)
    if spec_shape is None:
      raise ValueError(f'Cannot convert spec of {field_name}.')
    spec_rank = spec_shape.rank
    if spec_rank is not None and spec_rank < rank:
      raise ValueError(f'Rank of field {field_name} is {spec_rank},'
                       f' but must be at least {rank}.')
    # Only the first `rank` dimensions are shared with the outer shape.
    merged_shape = merged_shape._merge_with(spec_shape._truncate(rank))
  return StructuredTensor.Spec(_ragged_shape=merged_shape, _fields=field_specs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dynamic shape for structured Tensors."""

from tensorflow.python.framework import tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import dynamic_ragged_shape
from tensorflow.python.ops.structured.structured_tensor import _find_shape_dtype


# pylint:disable=protected-access
def _dynamic_ragged_shape_init(fields, shape, nrows, row_partitions):
  """Produce a DynamicRaggedShape for StructuredTensor.

  NOTE(review): unlike the `_dynamic_ragged_shape_init` in
  structured_tensor.py, this variant does not accept an `int` for `nrows`
  and requires `row_partitions` to be a tuple (None is rejected by the
  asserts below) -- confirm callers rely on these stricter preconditions.
  """
  assert isinstance(fields, dict), fields
  assert isinstance(shape, tensor_shape.TensorShape), shape
  assert nrows is None or isinstance(nrows, tensor.Tensor), nrows
  assert isinstance(row_partitions, tuple), row_partitions

  rank = shape.rank
  if rank is None:
    raise TypeError("StructuredTensor's shape must have known rank.")

  # TODO(martinz): figure out whether to validate.
  dtype = _find_shape_dtype(fields, nrows, row_partitions)
  if rank == 0:
    # Scalar: an empty inner shape.
    return dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape(
        array_ops.zeros((0,), dtype=dtype))

  if rank == 1:
    # Prefer a statically-known outer dimension over `nrows`.
    alt_value = shape[0]
    if isinstance(alt_value, tensor_shape.Dimension):
      alt_value = alt_value.value
    if alt_value is not None:
      nrows = alt_value
    # NOTE(review): assumes `nrows` is non-None here when the static
    # dimension is unknown -- confirm against callers.
    return dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape(
        [nrows], dtype=dtype)

  # rank > 1: the row partitions fully determine the ragged shape.
  return dynamic_ragged_shape.DynamicRaggedShape.from_row_partitions(
      row_partitions, dtype=dtype)