diff --git a/llava_next/share/terminfo/v/v200-nam b/llava_next/share/terminfo/v/v200-nam new file mode 100644 index 0000000000000000000000000000000000000000..175a8d991c9656f547cccd08000cdecd68ef9162 Binary files /dev/null and b/llava_next/share/terminfo/v/v200-nam differ diff --git a/llava_next/share/terminfo/v/vapple b/llava_next/share/terminfo/v/vapple new file mode 100644 index 0000000000000000000000000000000000000000..4d99501c6f79046c696936f43821c4d442096882 Binary files /dev/null and b/llava_next/share/terminfo/v/vapple differ diff --git a/llava_next/share/terminfo/v/vc203 b/llava_next/share/terminfo/v/vc203 new file mode 100644 index 0000000000000000000000000000000000000000..915744ebc67b5212822fb6190949e449060dd024 Binary files /dev/null and b/llava_next/share/terminfo/v/vc203 differ diff --git a/llava_next/share/terminfo/v/vc403a b/llava_next/share/terminfo/v/vc403a new file mode 100644 index 0000000000000000000000000000000000000000..c41964b2150b1e036bd0268c133e7644455e28dc Binary files /dev/null and b/llava_next/share/terminfo/v/vc403a differ diff --git a/llava_next/share/terminfo/v/vi500 b/llava_next/share/terminfo/v/vi500 new file mode 100644 index 0000000000000000000000000000000000000000..4f30cec246c78a8a7c68de18bae48a7f7be0f1b9 Binary files /dev/null and b/llava_next/share/terminfo/v/vi500 differ diff --git a/llava_next/share/terminfo/v/viewdata-rv b/llava_next/share/terminfo/v/viewdata-rv new file mode 100644 index 0000000000000000000000000000000000000000..b5c10cdd83b385f4c80802b1b41004637a8ecaec Binary files /dev/null and b/llava_next/share/terminfo/v/viewdata-rv differ diff --git a/llava_next/share/terminfo/v/viewpoint3a+ b/llava_next/share/terminfo/v/viewpoint3a+ new file mode 100644 index 0000000000000000000000000000000000000000..4f1888487ab4d21aa913e841f71a5a74626f1e9a Binary files /dev/null and b/llava_next/share/terminfo/v/viewpoint3a+ differ diff --git a/llava_next/share/terminfo/v/visa50 b/llava_next/share/terminfo/v/visa50 new file mode 
100644 index 0000000000000000000000000000000000000000..a6a1e76cc2f7ec17741fe4a73717104ea8d10a4a Binary files /dev/null and b/llava_next/share/terminfo/v/visa50 differ diff --git a/llava_next/share/terminfo/v/vitty b/llava_next/share/terminfo/v/vitty new file mode 100644 index 0000000000000000000000000000000000000000..56d4b55983c8d6e7b6e8c08d4ab3c7ffc05cf090 Binary files /dev/null and b/llava_next/share/terminfo/v/vitty differ diff --git a/llava_next/share/terminfo/v/vp60 b/llava_next/share/terminfo/v/vp60 new file mode 100644 index 0000000000000000000000000000000000000000..3c28adffab491960f3d4811c3467e035adbd887e Binary files /dev/null and b/llava_next/share/terminfo/v/vp60 differ diff --git a/llava_next/share/terminfo/v/vt100+4bsd b/llava_next/share/terminfo/v/vt100+4bsd new file mode 100644 index 0000000000000000000000000000000000000000..ac983ebf7f41ddc084924b2f1253bffec7b7e9b2 Binary files /dev/null and b/llava_next/share/terminfo/v/vt100+4bsd differ diff --git a/llava_next/share/terminfo/v/vt100+keypad b/llava_next/share/terminfo/v/vt100+keypad new file mode 100644 index 0000000000000000000000000000000000000000..bf8eebc4e591a265dffdd0952354e756aca8229e Binary files /dev/null and b/llava_next/share/terminfo/v/vt100+keypad differ diff --git a/llava_next/share/terminfo/v/vt100-bm-o b/llava_next/share/terminfo/v/vt100-bm-o new file mode 100644 index 0000000000000000000000000000000000000000..7911b36dc3084cf83eca1e012775260b975ee99b Binary files /dev/null and b/llava_next/share/terminfo/v/vt100-bm-o differ diff --git a/llava_next/share/terminfo/v/vt100-nam b/llava_next/share/terminfo/v/vt100-nam new file mode 100644 index 0000000000000000000000000000000000000000..6cf9e3c84ca081568f036e0c5304df55b52f9c66 Binary files /dev/null and b/llava_next/share/terminfo/v/vt100-nam differ diff --git a/llava_next/share/terminfo/v/vt100-s b/llava_next/share/terminfo/v/vt100-s new file mode 100644 index 
0000000000000000000000000000000000000000..14bbdd1f2416f49e05a0309f2403a1b743189ca8 Binary files /dev/null and b/llava_next/share/terminfo/v/vt100-s differ diff --git a/llava_next/share/terminfo/v/vt100-w b/llava_next/share/terminfo/v/vt100-w new file mode 100644 index 0000000000000000000000000000000000000000..1a41c3e95b2cee5d36016f1a0f67a6cfd14de014 Binary files /dev/null and b/llava_next/share/terminfo/v/vt100-w differ diff --git a/llava_next/share/terminfo/v/vt100-w-nam b/llava_next/share/terminfo/v/vt100-w-nam new file mode 100644 index 0000000000000000000000000000000000000000..27a402710d7ad79350fbade85dcf215e37ec4a58 Binary files /dev/null and b/llava_next/share/terminfo/v/vt100-w-nam differ diff --git a/llava_next/share/terminfo/v/vt102-nsgr b/llava_next/share/terminfo/v/vt102-nsgr new file mode 100644 index 0000000000000000000000000000000000000000..212a24619da0b52717c84a3bdc1fe613ca75ca41 Binary files /dev/null and b/llava_next/share/terminfo/v/vt102-nsgr differ diff --git a/llava_next/share/terminfo/v/vt131 b/llava_next/share/terminfo/v/vt131 new file mode 100644 index 0000000000000000000000000000000000000000..0cedd381c3d472d630bba8383c580231252a0a01 Binary files /dev/null and b/llava_next/share/terminfo/v/vt131 differ diff --git a/llava_next/share/terminfo/v/vt200 b/llava_next/share/terminfo/v/vt200 new file mode 100644 index 0000000000000000000000000000000000000000..9f3e105ee9103936f4dfc405e916a2878e7674f5 Binary files /dev/null and b/llava_next/share/terminfo/v/vt200 differ diff --git a/llava_next/share/terminfo/v/vt200-8bit b/llava_next/share/terminfo/v/vt200-8bit new file mode 100644 index 0000000000000000000000000000000000000000..a78d94e64c56a676dcab33728454bafbafbc5451 Binary files /dev/null and b/llava_next/share/terminfo/v/vt200-8bit differ diff --git a/llava_next/share/terminfo/v/vt200-w b/llava_next/share/terminfo/v/vt200-w new file mode 100644 index 0000000000000000000000000000000000000000..6bf1c037a90a9cddd6b91c1d7d7869d33e735479 Binary files 
/dev/null and b/llava_next/share/terminfo/v/vt200-w differ diff --git a/llava_next/share/terminfo/v/vt220+pcedit b/llava_next/share/terminfo/v/vt220+pcedit new file mode 100644 index 0000000000000000000000000000000000000000..bd0d1c332309e2f683f75621b7df288080858cf2 Binary files /dev/null and b/llava_next/share/terminfo/v/vt220+pcedit differ diff --git a/llava_next/share/terminfo/v/vt220+vtedit b/llava_next/share/terminfo/v/vt220+vtedit new file mode 100644 index 0000000000000000000000000000000000000000..047924c0386b54fadcc5d2192761e2c9d0e2ea23 Binary files /dev/null and b/llava_next/share/terminfo/v/vt220+vtedit differ diff --git a/llava_next/share/terminfo/v/vt220-w b/llava_next/share/terminfo/v/vt220-w new file mode 100644 index 0000000000000000000000000000000000000000..6bf1c037a90a9cddd6b91c1d7d7869d33e735479 Binary files /dev/null and b/llava_next/share/terminfo/v/vt220-w differ diff --git a/llava_next/share/terminfo/v/vt300-w-nam b/llava_next/share/terminfo/v/vt300-w-nam new file mode 100644 index 0000000000000000000000000000000000000000..f456469349cb5599287adcb8cd845cf13aed9e39 Binary files /dev/null and b/llava_next/share/terminfo/v/vt300-w-nam differ diff --git a/llava_next/share/terminfo/v/vt320-w-nam b/llava_next/share/terminfo/v/vt320-w-nam new file mode 100644 index 0000000000000000000000000000000000000000..f456469349cb5599287adcb8cd845cf13aed9e39 Binary files /dev/null and b/llava_next/share/terminfo/v/vt320-w-nam differ diff --git a/llava_next/share/terminfo/v/vt420+lrmm b/llava_next/share/terminfo/v/vt420+lrmm new file mode 100644 index 0000000000000000000000000000000000000000..3ed9d34824132093313b1e0bc9371e90c26a9936 Binary files /dev/null and b/llava_next/share/terminfo/v/vt420+lrmm differ diff --git a/llava_next/share/terminfo/v/vt420pc b/llava_next/share/terminfo/v/vt420pc new file mode 100644 index 0000000000000000000000000000000000000000..d145e4cc61a43602e8be4ae42c55e59204c52fd5 Binary files /dev/null and b/llava_next/share/terminfo/v/vt420pc 
differ diff --git a/llava_next/share/terminfo/v/vt510 b/llava_next/share/terminfo/v/vt510 new file mode 100644 index 0000000000000000000000000000000000000000..847ee69d32834ba006ac7e34f37064266baed23b Binary files /dev/null and b/llava_next/share/terminfo/v/vt510 differ diff --git a/llava_next/share/terminfo/v/vt52 b/llava_next/share/terminfo/v/vt52 new file mode 100644 index 0000000000000000000000000000000000000000..83f379727c535e52b874719ec425f84f7ae09c96 Binary files /dev/null and b/llava_next/share/terminfo/v/vt52 differ diff --git a/llava_next/share/terminfo/v/vt61 b/llava_next/share/terminfo/v/vt61 new file mode 100644 index 0000000000000000000000000000000000000000..820ce7d3ffdc727406009e2a61434d20984b53d1 Binary files /dev/null and b/llava_next/share/terminfo/v/vt61 differ diff --git a/llava_next/share/terminfo/v/vte b/llava_next/share/terminfo/v/vte new file mode 100644 index 0000000000000000000000000000000000000000..8110fc478c7f023760b93f81710ef54971aa4dd7 Binary files /dev/null and b/llava_next/share/terminfo/v/vte differ diff --git a/llava_next/share/terminfo/v/vte-2018 b/llava_next/share/terminfo/v/vte-2018 new file mode 100644 index 0000000000000000000000000000000000000000..687b3d613c1e17267bee749572812d9a8ffe45f0 Binary files /dev/null and b/llava_next/share/terminfo/v/vte-2018 differ diff --git a/llava_next/share/terminfo/v/vte-direct b/llava_next/share/terminfo/v/vte-direct new file mode 100644 index 0000000000000000000000000000000000000000..862188d2b65807b4fa01ac8dc2168cc3159ade2a Binary files /dev/null and b/llava_next/share/terminfo/v/vte-direct differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linalg.py 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..057a4cc30cc582db9dbac5aa6dcdae53ad74aab4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linalg.py @@ -0,0 +1,49 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Public API for tf.linalg namespace.""" + +# go/tf-wildcard-import +# pylint: disable=wildcard-import,unused-import +from tensorflow.python.ops.linalg.linalg_impl import * +from tensorflow.python.ops.linalg.linear_operator import * +from tensorflow.python.ops.linalg.linear_operator_adjoint import * +from tensorflow.python.ops.linalg.linear_operator_block_diag import * +from tensorflow.python.ops.linalg.linear_operator_block_lower_triangular import * +from tensorflow.python.ops.linalg.linear_operator_circulant import * +from tensorflow.python.ops.linalg.linear_operator_composition import * +from tensorflow.python.ops.linalg.linear_operator_diag import * +from tensorflow.python.ops.linalg.linear_operator_full_matrix import * +from tensorflow.python.ops.linalg.linear_operator_householder import * +from tensorflow.python.ops.linalg.linear_operator_identity import * +from tensorflow.python.ops.linalg.linear_operator_inversion import * +from 
tensorflow.python.ops.linalg.linear_operator_kronecker import * +from tensorflow.python.ops.linalg.linear_operator_low_rank_update import * +from tensorflow.python.ops.linalg.linear_operator_lower_triangular import * +from tensorflow.python.ops.linalg.linear_operator_permutation import * +from tensorflow.python.ops.linalg.linear_operator_toeplitz import * +from tensorflow.python.ops.linalg.linear_operator_tridiag import * +from tensorflow.python.ops.linalg.linear_operator_zeros import * +# pylint: enable=wildcard-import + +# Seal API. +# pylint: disable=undefined-variable +del ops +del array_ops +del gen_linalg_ops +del linalg_ops +del math_ops +del special_math_ops +del tf_export +# pylint: enable=undefined-variable diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linalg_impl.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linalg_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..66f0a5ac12a12f08b9b0bcef6c2f4afc3456afbe --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linalg_impl.py @@ -0,0 +1,1588 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Operations for linear algebra.""" + +import numpy as np + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import cond as tf_cond +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import gen_linalg_ops +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import map_fn +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import special_math_ops +from tensorflow.python.ops import stateless_random_ops +from tensorflow.python.ops import while_loop +from tensorflow.python.util import dispatch +from tensorflow.python.util.tf_export import tf_export + +# Linear algebra ops. 
+band_part = array_ops.matrix_band_part +cholesky = linalg_ops.cholesky +cholesky_solve = linalg_ops.cholesky_solve +det = linalg_ops.matrix_determinant +slogdet = gen_linalg_ops.log_matrix_determinant +tf_export('linalg.slogdet')(dispatch.add_dispatch_support(slogdet)) +diag = array_ops.matrix_diag +diag_part = array_ops.matrix_diag_part +eigh = linalg_ops.self_adjoint_eig +eigvalsh = linalg_ops.self_adjoint_eigvals +einsum = special_math_ops.einsum +eye = linalg_ops.eye +inv = linalg_ops.matrix_inverse +logm = gen_linalg_ops.matrix_logarithm +lu = gen_linalg_ops.lu +tf_export('linalg.logm')(dispatch.add_dispatch_support(logm)) +lstsq = linalg_ops.matrix_solve_ls +norm = linalg_ops.norm +qr = linalg_ops.qr +set_diag = array_ops.matrix_set_diag +solve = linalg_ops.matrix_solve +sqrtm = linalg_ops.matrix_square_root +svd = linalg_ops.svd +tensordot = math_ops.tensordot +trace = math_ops.trace +transpose = array_ops.matrix_transpose +triangular_solve = linalg_ops.matrix_triangular_solve + + +@tf_export('linalg.logdet') +@dispatch.add_dispatch_support +def logdet(matrix, name=None): + """Computes log of the determinant of a hermitian positive definite matrix. + + ```python + # Compute the determinant of a matrix while reducing the chance of over- or + underflow: + A = ... # shape 10 x 10 + det = tf.exp(tf.linalg.logdet(A)) # scalar + ``` + + Args: + matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, + or `complex128` with shape `[..., M, M]`. + name: A name to give this `Op`. Defaults to `logdet`. + + Returns: + The natural log of the determinant of `matrix`. + + @compatibility(numpy) + Equivalent to numpy.linalg.slogdet, although no sign is returned since only + hermitian positive definite matrices are supported. + @end_compatibility + """ + # This uses the property that the log det(A) = 2*sum(log(real(diag(C)))) + # where C is the cholesky decomposition of A. 
+ with ops.name_scope(name, 'logdet', [matrix]): + chol = gen_linalg_ops.cholesky(matrix) + return 2.0 * math_ops.reduce_sum( + math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))), + axis=[-1]) + + +@tf_export('linalg.adjoint') +@dispatch.add_dispatch_support +def adjoint(matrix, name=None): + """Transposes the last two dimensions of and conjugates tensor `matrix`. + + For example: + + ```python + x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j], + [4 + 4j, 5 + 5j, 6 + 6j]]) + tf.linalg.adjoint(x) # [[1 - 1j, 4 - 4j], + # [2 - 2j, 5 - 5j], + # [3 - 3j, 6 - 6j]] + ``` + + Args: + matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, + or `complex128` with shape `[..., M, M]`. + name: A name to give this `Op` (optional). + + Returns: + The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of + matrix. + """ + with ops.name_scope(name, 'adjoint', [matrix]): + matrix = ops.convert_to_tensor(matrix, name='matrix') + return array_ops.matrix_transpose(matrix, conjugate=True) + + +# This section is ported nearly verbatim from Eigen's implementation: +# https://eigen.tuxfamily.org/dox/unsupported/MatrixExponential_8h_source.html +def _matrix_exp_pade3(matrix): + """3rd-order Pade approximant for matrix exponential.""" + b = [120.0, 60.0, 12.0] + b = [constant_op.constant(x, matrix.dtype) for x in b] + ident = linalg_ops.eye( + array_ops.shape(matrix)[-2], + batch_shape=array_ops.shape(matrix)[:-2], + dtype=matrix.dtype) + matrix_2 = math_ops.matmul(matrix, matrix) + tmp = matrix_2 + b[1] * ident + matrix_u = math_ops.matmul(matrix, tmp) + matrix_v = b[2] * matrix_2 + b[0] * ident + return matrix_u, matrix_v + + +def _matrix_exp_pade5(matrix): + """5th-order Pade approximant for matrix exponential.""" + b = [30240.0, 15120.0, 3360.0, 420.0, 30.0] + b = [constant_op.constant(x, matrix.dtype) for x in b] + ident = linalg_ops.eye( + array_ops.shape(matrix)[-2], + batch_shape=array_ops.shape(matrix)[:-2], + dtype=matrix.dtype) + matrix_2 = 
math_ops.matmul(matrix, matrix) + matrix_4 = math_ops.matmul(matrix_2, matrix_2) + tmp = matrix_4 + b[3] * matrix_2 + b[1] * ident + matrix_u = math_ops.matmul(matrix, tmp) + matrix_v = b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident + return matrix_u, matrix_v + + +def _matrix_exp_pade7(matrix): + """7th-order Pade approximant for matrix exponential.""" + b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0, 56.0] + b = [constant_op.constant(x, matrix.dtype) for x in b] + ident = linalg_ops.eye( + array_ops.shape(matrix)[-2], + batch_shape=array_ops.shape(matrix)[:-2], + dtype=matrix.dtype) + matrix_2 = math_ops.matmul(matrix, matrix) + matrix_4 = math_ops.matmul(matrix_2, matrix_2) + matrix_6 = math_ops.matmul(matrix_4, matrix_2) + tmp = matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident + matrix_u = math_ops.matmul(matrix, tmp) + matrix_v = b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident + return matrix_u, matrix_v + + +def _matrix_exp_pade9(matrix): + """9th-order Pade approximant for matrix exponential.""" + b = [ + 17643225600.0, 8821612800.0, 2075673600.0, 302702400.0, 30270240.0, + 2162160.0, 110880.0, 3960.0, 90.0 + ] + b = [constant_op.constant(x, matrix.dtype) for x in b] + ident = linalg_ops.eye( + array_ops.shape(matrix)[-2], + batch_shape=array_ops.shape(matrix)[:-2], + dtype=matrix.dtype) + matrix_2 = math_ops.matmul(matrix, matrix) + matrix_4 = math_ops.matmul(matrix_2, matrix_2) + matrix_6 = math_ops.matmul(matrix_4, matrix_2) + matrix_8 = math_ops.matmul(matrix_6, matrix_2) + tmp = ( + matrix_8 + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + + b[1] * ident) + matrix_u = math_ops.matmul(matrix, tmp) + matrix_v = ( + b[8] * matrix_8 + b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + + b[0] * ident) + return matrix_u, matrix_v + + +def _matrix_exp_pade13(matrix): + """13th-order Pade approximant for matrix exponential.""" + b = [ + 64764752532480000.0, 32382376266240000.0, 7771770303897600.0, + 
1187353796428800.0, 129060195264000.0, 10559470521600.0, 670442572800.0, + 33522128640.0, 1323241920.0, 40840800.0, 960960.0, 16380.0, 182.0 + ] + b = [constant_op.constant(x, matrix.dtype) for x in b] + ident = linalg_ops.eye( + array_ops.shape(matrix)[-2], + batch_shape=array_ops.shape(matrix)[:-2], + dtype=matrix.dtype) + matrix_2 = math_ops.matmul(matrix, matrix) + matrix_4 = math_ops.matmul(matrix_2, matrix_2) + matrix_6 = math_ops.matmul(matrix_4, matrix_2) + tmp_u = ( + math_ops.matmul(matrix_6, matrix_6 + b[11] * matrix_4 + b[9] * matrix_2) + + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident) + matrix_u = math_ops.matmul(matrix, tmp_u) + tmp_v = b[12] * matrix_6 + b[10] * matrix_4 + b[8] * matrix_2 + matrix_v = ( + math_ops.matmul(matrix_6, tmp_v) + b[6] * matrix_6 + b[4] * matrix_4 + + b[2] * matrix_2 + b[0] * ident) + return matrix_u, matrix_v + + +@tf_export('linalg.expm') +@dispatch.add_dispatch_support +def matrix_exponential(input, name=None): # pylint: disable=redefined-builtin + r"""Computes the matrix exponential of one or more square matrices. + + $$exp(A) = \sum_{n=0}^\infty A^n/n!$$ + + The exponential is computed using a combination of the scaling and squaring + method and the Pade approximation. Details can be found in: + Nicholas J. Higham, "The scaling and squaring method for the matrix + exponential revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005. + + The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + form square matrices. The output is a tensor of the same shape as the input + containing the exponential for all input submatrices `[..., :, :]`. + + Args: + input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or + `complex128` with shape `[..., M, M]`. + name: A name to give this `Op` (optional). + + Returns: + the matrix exponential of the input. + + Raises: + ValueError: An unsupported type is provided as input. 
+ + @compatibility(scipy) + Equivalent to scipy.linalg.expm + @end_compatibility + """ + with ops.name_scope(name, 'matrix_exponential', [input]): + matrix = ops.convert_to_tensor(input, name='input') + if matrix.shape[-2:] == [0, 0]: + return matrix + batch_shape = matrix.shape[:-2] + if not batch_shape.is_fully_defined(): + batch_shape = array_ops.shape(matrix)[:-2] + + # reshaping the batch makes the where statements work better + matrix = array_ops.reshape( + matrix, array_ops.concat(([-1], array_ops.shape(matrix)[-2:]), axis=0)) + l1_norm = math_ops.reduce_max( + math_ops.reduce_sum( + math_ops.abs(matrix), + axis=array_ops.size(array_ops.shape(matrix)) - 2), + axis=-1)[..., array_ops.newaxis, array_ops.newaxis] + + const = lambda x: constant_op.constant(x, l1_norm.dtype) + + def _nest_where(vals, cases): + assert len(vals) == len(cases) - 1 + if len(vals) == 1: + return array_ops.where_v2( + math_ops.less(l1_norm, const(vals[0])), cases[0], cases[1]) + else: + return array_ops.where_v2( + math_ops.less(l1_norm, const(vals[0])), cases[0], + _nest_where(vals[1:], cases[1:])) + + if matrix.dtype in [dtypes.float16, dtypes.float32, dtypes.complex64]: + maxnorm = const(3.925724783138660) + squarings = math_ops.maximum( + math_ops.floor( + math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0) + u3, v3 = _matrix_exp_pade3(matrix) + u5, v5 = _matrix_exp_pade5(matrix) + u7, v7 = _matrix_exp_pade7( + matrix / + math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype)) + conds = (4.258730016922831e-001, 1.880152677804762e+000) + u = _nest_where(conds, (u3, u5, u7)) + v = _nest_where(conds, (v3, v5, v7)) + elif matrix.dtype in [dtypes.float64, dtypes.complex128]: + maxnorm = const(5.371920351148152) + squarings = math_ops.maximum( + math_ops.floor( + math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0) + u3, v3 = _matrix_exp_pade3(matrix) + u5, v5 = _matrix_exp_pade5(matrix) + u7, v7 = _matrix_exp_pade7(matrix) + u9, v9 = 
_matrix_exp_pade9(matrix) + u13, v13 = _matrix_exp_pade13( + matrix / + math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype)) + conds = (1.495585217958292e-002, 2.539398330063230e-001, + 9.504178996162932e-001, 2.097847961257068e+000) + u = _nest_where(conds, (u3, u5, u7, u9, u13)) + v = _nest_where(conds, (v3, v5, v7, v9, v13)) + else: + raise ValueError('tf.linalg.expm does not support matrices of type %s' % + matrix.dtype) + + is_finite = math_ops.is_finite(math_ops.reduce_max(l1_norm)) + nan = constant_op.constant(np.nan, matrix.dtype) + result = tf_cond.cond( + is_finite, lambda: linalg_ops.matrix_solve(-u + v, u + v), + lambda: array_ops.fill(array_ops.shape(matrix), nan)) + max_squarings = math_ops.reduce_max(squarings) + i = const(0.0) + + def c(i, _): + return tf_cond.cond(is_finite, + lambda: math_ops.less(i, max_squarings), + lambda: constant_op.constant(False)) + + def b(i, r): + return i + 1, array_ops.where_v2( + math_ops.less(i, squarings), math_ops.matmul(r, r), r) + + _, result = while_loop.while_loop(c, b, [i, result]) + if not matrix.shape.is_fully_defined(): + return array_ops.reshape( + result, + array_ops.concat((batch_shape, array_ops.shape(result)[-2:]), axis=0)) + return array_ops.reshape(result, batch_shape.concatenate(result.shape[-2:])) + + +@tf_export('linalg.banded_triangular_solve', v1=[]) +def banded_triangular_solve( + bands, + rhs, + lower=True, + adjoint=False, # pylint: disable=redefined-outer-name + name=None): + r"""Solve triangular systems of equations with a banded solver. + + `bands` is a tensor of shape `[..., K, M]`, where `K` represents the number + of bands stored. This corresponds to a batch of `M` by `M` matrices, whose + `K` subdiagonals (when `lower` is `True`) are stored. + + This operator broadcasts the batch dimensions of `bands` and the batch + dimensions of `rhs`. + + + Examples: + + Storing 2 bands of a 3x3 matrix. 
+ Note that first element in the second row is ignored due to + the 'LEFT_RIGHT' padding. + + >>> x = [[2., 3., 4.], [1., 2., 3.]] + >>> x2 = [[2., 3., 4.], [10000., 2., 3.]] + >>> y = tf.zeros([3, 3]) + >>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(-1, 0)) + >>> z + + >>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([3, 1])) + >>> soln + + >>> are_equal = soln == tf.linalg.banded_triangular_solve(x2, tf.ones([3, 1])) + >>> tf.reduce_all(are_equal).numpy() + True + >>> are_equal = soln == tf.linalg.triangular_solve(z, tf.ones([3, 1])) + >>> tf.reduce_all(are_equal).numpy() + True + + Storing 2 superdiagonals of a 4x4 matrix. Because of the 'LEFT_RIGHT' padding + the last element of the first row is ignored. + + >>> x = [[2., 3., 4., 5.], [-1., -2., -3., -4.]] + >>> y = tf.zeros([4, 4]) + >>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(0, 1)) + >>> z + + >>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([4, 1]), lower=False) + >>> soln + + >>> are_equal = (soln == tf.linalg.triangular_solve( + ... z, tf.ones([4, 1]), lower=False)) + >>> tf.reduce_all(are_equal).numpy() + True + + + Args: + bands: A `Tensor` describing the bands of the left hand side, with shape + `[..., K, M]`. The `K` rows correspond to the diagonal to the `K - 1`-th + diagonal (the diagonal is the top row) when `lower` is `True` and + otherwise the `K - 1`-th superdiagonal to the diagonal (the diagonal is + the bottom row) when `lower` is `False`. The bands are stored with + 'LEFT_RIGHT' alignment, where the superdiagonals are padded on the right + and subdiagonals are padded on the left. This is the alignment cuSPARSE + uses. See `tf.linalg.set_diag` for more details. + rhs: A `Tensor` of shape [..., M] or [..., M, N] and with the same dtype as + `diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known + statically, `rhs` will be treated as a matrix rather than a vector. + lower: An optional `bool`. Defaults to `True`. 
Boolean indicating whether + `bands` represents a lower or upper triangular matrix. + adjoint: An optional `bool`. Defaults to `False`. Boolean indicating whether + to solve with the matrix's block-wise adjoint. + name: A name to give this `Op` (optional). + + Returns: + A `Tensor` of shape [..., M] or [..., M, N] containing the solutions. + """ + with ops.name_scope(name, 'banded_triangular_solve', [bands, rhs]): + return gen_linalg_ops.banded_triangular_solve( + bands, rhs, lower=lower, adjoint=adjoint) + + +@tf_export('linalg.tridiagonal_solve') +@dispatch.add_dispatch_support +def tridiagonal_solve(diagonals, + rhs, + diagonals_format='compact', + transpose_rhs=False, + conjugate_rhs=False, + name=None, + partial_pivoting=True, + perturb_singular=False): + r"""Solves tridiagonal systems of equations. + + The input can be supplied in various formats: `matrix`, `sequence` and + `compact`, specified by the `diagonals_format` arg. + + In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with + two inner-most dimensions representing the square tridiagonal matrices. + Elements outside of the three diagonals will be ignored. + + In `sequence` format, `diagonals` are supplied as a tuple or list of three + tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing + superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either + `M-1` or `M`; in the latter case, the last element of superdiagonal and the + first element of subdiagonal will be ignored. + + In `compact` format the three diagonals are brought together into one tensor + of shape `[..., 3, M]`, with last two dimensions containing superdiagonals, + diagonals, and subdiagonals, in order. Similarly to `sequence` format, + elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored. + + The `compact` format is recommended as the one with best performance. In case + you need to cast a tensor into a compact format manually, use `tf.gather_nd`. 
+ An example for a tensor of shape [m, m]: + + ```python + rhs = tf.constant([...]) + matrix = tf.constant([[...]]) + m = matrix.shape[0] + dummy_idx = [0, 0] # An arbitrary element to use as a dummy + indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx], # Superdiagonal + [[i, i] for i in range(m)], # Diagonal + [dummy_idx] + [[i + 1, i] for i in range(m - 1)]] # Subdiagonal + diagonals=tf.gather_nd(matrix, indices) + x = tf.linalg.tridiagonal_solve(diagonals, rhs) + ``` + + Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or + `[..., M, K]`. The latter allows to simultaneously solve K systems with the + same left-hand sides and K different right-hand sides. If `transpose_rhs` + is set to `True` the expected shape is `[..., M]` or `[..., K, M]`. + + The batch dimensions, denoted as `...`, must be the same in `diagonals` and + `rhs`. + + The output is a tensor of the same shape as `rhs`: either `[..., M]` or + `[..., M, K]`. + + The op isn't guaranteed to raise an error if the input matrix is not + invertible. `tf.debugging.check_numerics` can be applied to the output to + detect invertibility problems. + + **Note**: with large batch sizes, the computation on the GPU may be slow, if + either `partial_pivoting=True` or there are multiple right-hand sides + (`K > 1`). If this issue arises, consider if it's possible to disable pivoting + and have `K = 1`, or, alternatively, consider using CPU. + + On CPU, solution is computed via Gaussian elimination with or without partial + pivoting, depending on `partial_pivoting` parameter. On GPU, Nvidia's cuSPARSE + library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv + + Args: + diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The + shape depends of `diagonals_format`, see description above. Must be + `float32`, `float64`, `complex64`, or `complex128`. + rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as + `diagonals`. 
Note that if the shape of `rhs` and/or `diags` isn't known + statically, `rhs` will be treated as a matrix rather than a vector. + diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is + `compact`. + transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect + if the shape of rhs is [..., M]). + conjugate_rhs: If `True`, `rhs` is conjugated before solving. + name: A name to give this `Op` (optional). + partial_pivoting: whether to perform partial pivoting. `True` by default. + Partial pivoting makes the procedure more stable, but slower. Partial + pivoting is unnecessary in some cases, including diagonally dominant and + symmetric positive definite matrices (see e.g. theorem 9.12 in [1]). + perturb_singular: whether to perturb singular matrices to return a finite + result. `False` by default. If true, solutions to systems involving + a singular matrix will be computed by perturbing near-zero pivots in + the partially pivoted LU decomposition. Specifically, tiny pivots are + perturbed by an amount of order `eps * max_{ij} |U(i,j)|` to avoid + overflow. Here `U` is the upper triangular part of the LU decomposition, + and `eps` is the machine precision. This is useful for solving + numerically singular systems when computing eigenvectors by inverse + iteration. + If `partial_pivoting` is `False`, `perturb_singular` must be `False` as + well. + + Returns: + A `Tensor` of shape [..., M] or [..., M, K] containing the solutions. + If the input matrix is singular, the result is undefined. + + Raises: + ValueError: Is raised if any of the following conditions hold: + 1. An unsupported type is provided as input, + 2. the input tensors have incorrect shapes, + 3. `perturb_singular` is `True` but `partial_pivoting` is not. + UnimplementedError: Whenever `partial_pivoting` is true and the backend is + XLA, or whenever `perturb_singular` is true and the backend is + XLA or GPU. + + [1] Nicholas J. Higham (2002). 
Accuracy and Stability of Numerical Algorithms: + Second Edition. SIAM. p. 175. ISBN 978-0-89871-802-7. + + """ + if perturb_singular and not partial_pivoting: + raise ValueError('partial_pivoting must be True if perturb_singular is.') + + if diagonals_format == 'compact': + return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs, + conjugate_rhs, partial_pivoting, + perturb_singular, name) + + if diagonals_format == 'sequence': + if not isinstance(diagonals, (tuple, list)) or len(diagonals) != 3: + raise ValueError('Expected diagonals to be a sequence of length 3.') + + superdiag, maindiag, subdiag = diagonals + if (not subdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1]) or + not superdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1])): + raise ValueError( + 'Tensors representing the three diagonals must have the same shape,' + 'except for the last dimension, got {}, {}, {}'.format( + subdiag.shape, maindiag.shape, superdiag.shape)) + + m = tensor_shape.dimension_value(maindiag.shape[-1]) + + def pad_if_necessary(t, name, last_dim_padding): + n = tensor_shape.dimension_value(t.shape[-1]) + if not n or n == m: + return t + if n == m - 1: + paddings = ([[0, 0] for _ in range(len(t.shape) - 1)] + + [last_dim_padding]) + return array_ops.pad(t, paddings) + raise ValueError('Expected {} to be have length {} or {}, got {}.'.format( + name, m, m - 1, n)) + + subdiag = pad_if_necessary(subdiag, 'subdiagonal', [1, 0]) + superdiag = pad_if_necessary(superdiag, 'superdiagonal', [0, 1]) + + diagonals = array_ops_stack.stack((superdiag, maindiag, subdiag), axis=-2) + return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs, + conjugate_rhs, partial_pivoting, + perturb_singular, name) + + if diagonals_format == 'matrix': + m1 = tensor_shape.dimension_value(diagonals.shape[-1]) + m2 = tensor_shape.dimension_value(diagonals.shape[-2]) + if m1 and m2 and m1 != m2: + raise ValueError( + 'Expected last two dimensions of diagonals to be same, 
got {} and {}' + .format(m1, m2)) + m = m1 or m2 + diagonals = array_ops.matrix_diag_part( + diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT') + return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs, + conjugate_rhs, partial_pivoting, + perturb_singular, name) + + raise ValueError('Unrecognized diagonals_format: {}'.format(diagonals_format)) + + +def _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs, + conjugate_rhs, partial_pivoting, + perturb_singular, name): + """Helper function used after the input has been cast to compact form.""" + diags_rank, rhs_rank = diagonals.shape.rank, rhs.shape.rank + + # If we know the rank of the diagonal tensor, do some static checking. + if diags_rank: + if diags_rank < 2: + raise ValueError( + 'Expected diagonals to have rank at least 2, got {}'.format( + diags_rank)) + if rhs_rank and rhs_rank != diags_rank and rhs_rank != diags_rank - 1: + raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format( + diags_rank - 1, diags_rank, rhs_rank)) + if (rhs_rank and not diagonals.shape[:-2].is_compatible_with( + rhs.shape[:diags_rank - 2])): + raise ValueError('Batch shapes {} and {} are incompatible'.format( + diagonals.shape[:-2], rhs.shape[:diags_rank - 2])) + + if diagonals.shape[-2] and diagonals.shape[-2] != 3: + raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2])) + + def check_num_lhs_matches_num_rhs(): + if (diagonals.shape[-1] and rhs.shape[-2] and + diagonals.shape[-1] != rhs.shape[-2]): + raise ValueError('Expected number of left-hand sided and right-hand ' + 'sides to be equal, got {} and {}'.format( + diagonals.shape[-1], rhs.shape[-2])) + + if rhs_rank and diags_rank and rhs_rank == diags_rank - 1: + # Rhs provided as a vector, ignoring transpose_rhs + if conjugate_rhs: + rhs = math_ops.conj(rhs) + rhs = array_ops.expand_dims(rhs, -1) + check_num_lhs_matches_num_rhs() + return array_ops.squeeze( + linalg_ops.tridiagonal_solve(diagonals, rhs, 
partial_pivoting, + perturb_singular, name), -1) + + if transpose_rhs: + rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs) + elif conjugate_rhs: + rhs = math_ops.conj(rhs) + + check_num_lhs_matches_num_rhs() + return linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting, + perturb_singular, name) + + +@tf_export('linalg.tridiagonal_matmul') +@dispatch.add_dispatch_support +def tridiagonal_matmul(diagonals, rhs, diagonals_format='compact', name=None): + r"""Multiplies tridiagonal matrix by matrix. + + `diagonals` is representation of 3-diagonal NxN matrix, which depends on + `diagonals_format`. + + In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with + two inner-most dimensions representing the square tridiagonal matrices. + Elements outside of the three diagonals will be ignored. + + If `sequence` format, `diagonals` is list or tuple of three tensors: + `[superdiag, maindiag, subdiag]`, each having shape [..., M]. Last element + of `superdiag` first element of `subdiag` are ignored. + + In `compact` format the three diagonals are brought together into one tensor + of shape `[..., 3, M]`, with last two dimensions containing superdiagonals, + diagonals, and subdiagonals, in order. Similarly to `sequence` format, + elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored. + + The `sequence` format is recommended as the one with the best performance. + + `rhs` is matrix to the right of multiplication. It has shape `[..., M, N]`. + + Example: + + ```python + superdiag = tf.constant([-1, -1, 0], dtype=tf.float64) + maindiag = tf.constant([2, 2, 2], dtype=tf.float64) + subdiag = tf.constant([0, -1, -1], dtype=tf.float64) + diagonals = [superdiag, maindiag, subdiag] + rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64) + x = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence') + ``` + + Args: + diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. 
The + shape depends of `diagonals_format`, see description above. Must be + `float32`, `float64`, `complex64`, or `complex128`. + rhs: A `Tensor` of shape [..., M, N] and with the same dtype as `diagonals`. + diagonals_format: one of `sequence`, or `compact`. Default is `compact`. + name: A name to give this `Op` (optional). + + Returns: + A `Tensor` of shape [..., M, N] containing the result of multiplication. + + Raises: + ValueError: An unsupported type is provided as input, or when the input + tensors have incorrect shapes. + """ + if diagonals_format == 'compact': + superdiag = diagonals[..., 0, :] + maindiag = diagonals[..., 1, :] + subdiag = diagonals[..., 2, :] + elif diagonals_format == 'sequence': + superdiag, maindiag, subdiag = diagonals + elif diagonals_format == 'matrix': + m1 = tensor_shape.dimension_value(diagonals.shape[-1]) + m2 = tensor_shape.dimension_value(diagonals.shape[-2]) + if m1 and m2 and m1 != m2: + raise ValueError( + 'Expected last two dimensions of diagonals to be same, got {} and {}' + .format(m1, m2)) + diags = array_ops.matrix_diag_part( + diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT') + superdiag = diags[..., 0, :] + maindiag = diags[..., 1, :] + subdiag = diags[..., 2, :] + else: + raise ValueError('Unrecognized diagonals_format: %s' % diagonals_format) + + # C++ backend requires matrices. + # Converting 1-dimensional vectors to matrices with 1 row. 
+ superdiag = array_ops.expand_dims(superdiag, -2) + maindiag = array_ops.expand_dims(maindiag, -2) + subdiag = array_ops.expand_dims(subdiag, -2) + + return linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name) + + +def _maybe_validate_matrix(a, validate_args): + """Checks that input is a `float` matrix.""" + assertions = [] + if not a.dtype.is_floating: + raise TypeError('Input `a` must have `float`-like `dtype` ' + '(saw {}).'.format(a.dtype.name)) + if a.shape is not None and a.shape.rank is not None: + if a.shape.rank < 2: + raise ValueError('Input `a` must have at least 2 dimensions ' + '(saw: {}).'.format(a.shape.rank)) + elif validate_args: + assertions.append( + check_ops.assert_rank_at_least( + a, rank=2, message='Input `a` must have at least 2 dimensions.')) + return assertions + + +@tf_export('linalg.matrix_rank') +@dispatch.add_dispatch_support +def matrix_rank(a, tol=None, validate_args=False, name=None): + """Compute the matrix rank of one or more matrices. + + Args: + a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be + pseudo-inverted. + tol: Threshold below which the singular value is counted as 'zero'. + Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`). + validate_args: When `True`, additional assertions might be embedded in the + graph. + Default value: `False` (i.e., no graph assertions are added). + name: Python `str` prefixed to ops created by this function. + Default value: 'matrix_rank'. + + Returns: + matrix_rank: (Batch of) `int32` scalars representing the number of non-zero + singular values. 
+ """ + with ops.name_scope(name or 'matrix_rank'): + a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a') + assertions = _maybe_validate_matrix(a, validate_args) + if assertions: + with ops.control_dependencies(assertions): + a = array_ops.identity(a) + s = svd(a, compute_uv=False) + if tol is None: + if (a.shape[-2:]).is_fully_defined(): + m = np.max(a.shape[-2:].as_list()) + else: + m = math_ops.reduce_max(array_ops.shape(a)[-2:]) + eps = np.finfo(a.dtype.as_numpy_dtype).eps + tol = ( + eps * math_ops.cast(m, a.dtype) * + math_ops.reduce_max(s, axis=-1, keepdims=True)) + return math_ops.reduce_sum(math_ops.cast(s > tol, dtypes.int32), axis=-1) + + +@tf_export('linalg.pinv') +@dispatch.add_dispatch_support +def pinv(a, rcond=None, validate_args=False, name=None): + """Compute the Moore-Penrose pseudo-inverse of one or more matrices. + + Calculate the [generalized inverse of a matrix]( + https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its + singular-value decomposition (SVD) and including all large singular values. + + The pseudo-inverse of a matrix `A`, is defined as: 'the matrix that 'solves' + [the least-squares problem] `A @ x = b`,' i.e., if `x_hat` is a solution, then + `A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if + `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then + `A_pinv = V @ inv(Sigma) U^T`. [(Strang, 1980)][1] + + This function is analogous to [`numpy.linalg.pinv`]( + https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html). + It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the + default `rcond` is `1e-15`. Here the default is + `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`. + + Args: + a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be + pseudo-inverted. + rcond: `Tensor` of small singular value cutoffs. 
Singular values smaller + (in modulus) than `rcond` * largest_singular_value (again, in modulus) are + set to zero. Must broadcast against `tf.shape(a)[:-2]`. + Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`. + validate_args: When `True`, additional assertions might be embedded in the + graph. + Default value: `False` (i.e., no graph assertions are added). + name: Python `str` prefixed to ops created by this function. + Default value: 'pinv'. + + Returns: + a_pinv: (Batch of) pseudo-inverse of input `a`. Has same shape as `a` except + rightmost two dimensions are transposed. + + Raises: + TypeError: if input `a` does not have `float`-like `dtype`. + ValueError: if input `a` has fewer than 2 dimensions. + + #### Examples + + ```python + import tensorflow as tf + import tensorflow_probability as tfp + + a = tf.constant([[1., 0.4, 0.5], + [0.4, 0.2, 0.25], + [0.5, 0.25, 0.35]]) + tf.matmul(tf.linalg.pinv(a), a) + # ==> array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]], dtype=float32) + + a = tf.constant([[1., 0.4, 0.5, 1.], + [0.4, 0.2, 0.25, 2.], + [0.5, 0.25, 0.35, 3.]]) + tf.matmul(tf.linalg.pinv(a), a) + # ==> array([[ 0.76, 0.37, 0.21, -0.02], + [ 0.37, 0.43, -0.33, 0.02], + [ 0.21, -0.33, 0.81, 0.01], + [-0.02, 0.02, 0.01, 1. ]], dtype=float32) + ``` + + #### References + + [1]: G. Strang. 'Linear Algebra and Its Applications, 2nd Ed.' Academic Press, + Inc., 1980, pp. 139-142. 
+ """ + with ops.name_scope(name or 'pinv'): + a = ops.convert_to_tensor(a, name='a') + + assertions = _maybe_validate_matrix(a, validate_args) + if assertions: + with ops.control_dependencies(assertions): + a = array_ops.identity(a) + + dtype = a.dtype.as_numpy_dtype + + if rcond is None: + + def get_dim_size(dim): + dim_val = tensor_shape.dimension_value(a.shape[dim]) + if dim_val is not None: + return dim_val + return array_ops.shape(a)[dim] + + num_rows = get_dim_size(-2) + num_cols = get_dim_size(-1) + if isinstance(num_rows, int) and isinstance(num_cols, int): + max_rows_cols = float(max(num_rows, num_cols)) + else: + max_rows_cols = math_ops.cast( + math_ops.maximum(num_rows, num_cols), dtype) + rcond = 10. * max_rows_cols * np.finfo(dtype).eps + + rcond = ops.convert_to_tensor(rcond, dtype=dtype, name='rcond') + + # Calculate pseudo inverse via SVD. + # Note: if a is Hermitian then u == v. (We might observe additional + # performance by explicitly setting `v = u` in such cases.) + [ + singular_values, # Sigma + left_singular_vectors, # U + right_singular_vectors, # V + ] = svd( + a, full_matrices=False, compute_uv=True) + + # Saturate small singular values to inf. This has the effect of make + # `1. / s = 0.` while not resulting in `NaN` gradients. + cutoff = rcond * math_ops.reduce_max(singular_values, axis=-1) + singular_values = array_ops.where_v2( + singular_values > array_ops.expand_dims_v2(cutoff, -1), singular_values, + np.array(np.inf, dtype)) + + # By the definition of the SVD, `a == u @ s @ v^H`, and the pseudo-inverse + # is defined as `pinv(a) == v @ inv(s) @ u^H`. 
+ a_pinv = math_ops.matmul( + right_singular_vectors / array_ops.expand_dims_v2(singular_values, -2), + left_singular_vectors, + adjoint_b=True) + + if a.shape is not None and a.shape.rank is not None: + a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]])) + + return a_pinv + + +@tf_export('linalg.lu_solve') +@dispatch.add_dispatch_support +def lu_solve(lower_upper, perm, rhs, validate_args=False, name=None): + """Solves systems of linear eqns `A X = RHS`, given LU factorizations. + + Note: this function does not verify the implied matrix is actually invertible + nor is this condition checked even when `validate_args=True`. + + Args: + lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, + matmul(L, U)) = X` then `lower_upper = L + U - eye`. + perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = + X` then `perm = argmax(P)`. + rhs: Matrix-shaped float `Tensor` representing targets for which to solve; + `A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[..., + tf.newaxis])[..., 0]`. + validate_args: Python `bool` indicating whether arguments should be checked + for correctness. Note: this function does not verify the implied matrix is + actually invertible, even when `validate_args=True`. + Default value: `False` (i.e., don't validate arguments). + name: Python `str` name given to ops managed by this object. + Default value: `None` (i.e., 'lu_solve'). + + Returns: + x: The `X` in `A @ X = RHS`. 
+ + #### Examples + + ```python + import numpy as np + import tensorflow as tf + import tensorflow_probability as tfp + + x = [[[1., 2], + [3, 4]], + [[7, 8], + [3, 4]]] + inv_x = tf.linalg.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2)) + tf.assert_near(tf.matrix_inverse(x), inv_x) + # ==> True + ``` + + """ + + with ops.name_scope(name or 'lu_solve'): + lower_upper = ops.convert_to_tensor( + lower_upper, dtype_hint=dtypes.float32, name='lower_upper') + perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm') + rhs = ops.convert_to_tensor(rhs, dtype_hint=lower_upper.dtype, name='rhs') + + assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args) + if assertions: + with ops.control_dependencies(assertions): + lower_upper = array_ops.identity(lower_upper) + perm = array_ops.identity(perm) + rhs = array_ops.identity(rhs) + + if (rhs.shape.rank == 2 and perm.shape.rank == 1): + # Both rhs and perm have scalar batch_shape. + permuted_rhs = array_ops.gather(rhs, perm, axis=-2) + else: + # Either rhs or perm have non-scalar batch_shape or we can't determine + # this information statically. + rhs_shape = array_ops.shape(rhs) + broadcast_batch_shape = array_ops.broadcast_dynamic_shape( + rhs_shape[:-2], + array_ops.shape(perm)[:-1]) + d, m = rhs_shape[-2], rhs_shape[-1] + rhs_broadcast_shape = array_ops.concat([broadcast_batch_shape, [d, m]], + axis=0) + + # Tile out rhs. + broadcast_rhs = array_ops.broadcast_to(rhs, rhs_broadcast_shape) + broadcast_rhs = array_ops.reshape(broadcast_rhs, [-1, d, m]) + + # Tile out perm and add batch indices. 
+ broadcast_perm = array_ops.broadcast_to(perm, rhs_broadcast_shape[:-1]) + broadcast_perm = array_ops.reshape(broadcast_perm, [-1, d]) + broadcast_batch_size = math_ops.reduce_prod(broadcast_batch_shape) + broadcast_batch_indices = array_ops.broadcast_to( + math_ops.range(broadcast_batch_size)[:, array_ops.newaxis], + [broadcast_batch_size, d]) + broadcast_perm = array_ops_stack.stack( + [broadcast_batch_indices, broadcast_perm], axis=-1) + + permuted_rhs = array_ops.gather_nd(broadcast_rhs, broadcast_perm) + permuted_rhs = array_ops.reshape(permuted_rhs, rhs_broadcast_shape) + + lower = set_diag( + band_part(lower_upper, num_lower=-1, num_upper=0), + array_ops.ones( + array_ops.shape(lower_upper)[:-1], dtype=lower_upper.dtype)) + return triangular_solve( + lower_upper, # Only upper is accessed. + triangular_solve(lower, permuted_rhs), + lower=False) + + +@tf_export('linalg.lu_matrix_inverse') +@dispatch.add_dispatch_support +def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None): + """Computes the inverse given the LU decomposition(s) of one or more matrices. + + This op is conceptually identical to, + + ```python + inv_X = tf.lu_matrix_inverse(*tf.linalg.lu(X)) + tf.assert_near(tf.matrix_inverse(X), inv_X) + # ==> True + ``` + + Note: this function does not verify the implied matrix is actually invertible + nor is this condition checked even when `validate_args=True`. + + Args: + lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, + matmul(L, U)) = X` then `lower_upper = L + U - eye`. + perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = + X` then `perm = argmax(P)`. + validate_args: Python `bool` indicating whether arguments should be checked + for correctness. Note: this function does not verify the implied matrix is + actually invertible, even when `validate_args=True`. + Default value: `False` (i.e., don't validate arguments). + name: Python `str` name given to ops managed by this object. 
+ Default value: `None` (i.e., 'lu_matrix_inverse'). + + Returns: + inv_x: The matrix_inv, i.e., + `tf.matrix_inverse(tf.linalg.lu_reconstruct(lu, perm))`. + + #### Examples + + ```python + import numpy as np + import tensorflow as tf + import tensorflow_probability as tfp + + x = [[[3., 4], [1, 2]], + [[7., 8], [3, 4]]] + inv_x = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(x)) + tf.assert_near(tf.matrix_inverse(x), inv_x) + # ==> True + ``` + + """ + + with ops.name_scope(name or 'lu_matrix_inverse'): + lower_upper = ops.convert_to_tensor( + lower_upper, dtype_hint=dtypes.float32, name='lower_upper') + perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm') + assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args) + if assertions: + with ops.control_dependencies(assertions): + lower_upper = array_ops.identity(lower_upper) + perm = array_ops.identity(perm) + shape = array_ops.shape(lower_upper) + return lu_solve( + lower_upper, + perm, + rhs=eye(shape[-1], batch_shape=shape[:-2], dtype=lower_upper.dtype), + validate_args=False) + + +@tf_export('linalg.lu_reconstruct') +@dispatch.add_dispatch_support +def lu_reconstruct(lower_upper, perm, validate_args=False, name=None): + """The reconstruct one or more matrices from their LU decomposition(s). + + Args: + lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, + matmul(L, U)) = X` then `lower_upper = L + U - eye`. + perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) = + X` then `perm = argmax(P)`. + validate_args: Python `bool` indicating whether arguments should be checked + for correctness. + Default value: `False` (i.e., don't validate arguments). + name: Python `str` name given to ops managed by this object. + Default value: `None` (i.e., 'lu_reconstruct'). + + Returns: + x: The original input to `tf.linalg.lu`, i.e., `x` as in, + `lu_reconstruct(*tf.linalg.lu(x))`. 
+ + #### Examples + + ```python + import numpy as np + import tensorflow as tf + import tensorflow_probability as tfp + + x = [[[3., 4], [1, 2]], + [[7., 8], [3, 4]]] + x_reconstructed = tf.linalg.lu_reconstruct(*tf.linalg.lu(x)) + tf.assert_near(x, x_reconstructed) + # ==> True + ``` + + """ + with ops.name_scope(name or 'lu_reconstruct'): + lower_upper = ops.convert_to_tensor( + lower_upper, dtype_hint=dtypes.float32, name='lower_upper') + perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm') + + assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args) + if assertions: + with ops.control_dependencies(assertions): + lower_upper = array_ops.identity(lower_upper) + perm = array_ops.identity(perm) + + shape = array_ops.shape(lower_upper) + + lower = set_diag( + band_part(lower_upper, num_lower=-1, num_upper=0), + array_ops.ones(shape[:-1], dtype=lower_upper.dtype)) + upper = band_part(lower_upper, num_lower=0, num_upper=-1) + x = math_ops.matmul(lower, upper) + + if (lower_upper.shape is None or lower_upper.shape.rank is None or + lower_upper.shape.rank != 2): + # We either don't know the batch rank or there are >0 batch dims. + batch_size = math_ops.reduce_prod(shape[:-2]) + d = shape[-1] + x = array_ops.reshape(x, [batch_size, d, d]) + perm = array_ops.reshape(perm, [batch_size, d]) + perm = map_fn.map_fn(array_ops.invert_permutation, perm) + batch_indices = array_ops.broadcast_to( + math_ops.range(batch_size)[:, array_ops.newaxis], [batch_size, d]) + x = array_ops.gather_nd( + x, array_ops_stack.stack([batch_indices, perm], axis=-1)) + x = array_ops.reshape(x, shape) + else: + x = array_ops.gather(x, array_ops.invert_permutation(perm)) + + x.set_shape(lower_upper.shape) + return x + + +def lu_reconstruct_assertions(lower_upper, perm, validate_args): + """Returns list of assertions related to `lu_reconstruct` assumptions.""" + assertions = [] + + message = 'Input `lower_upper` must have at least 2 dimensions.' 
+ if lower_upper.shape.rank is not None and lower_upper.shape.rank < 2: + raise ValueError(message) + elif validate_args: + assertions.append( + check_ops.assert_rank_at_least_v2(lower_upper, rank=2, message=message)) + + message = '`rank(lower_upper)` must equal `rank(perm) + 1`' + if lower_upper.shape.rank is not None and perm.shape.rank is not None: + if lower_upper.shape.rank != perm.shape.rank + 1: + raise ValueError(message) + elif validate_args: + assertions.append( + check_ops.assert_rank( + lower_upper, rank=array_ops.rank(perm) + 1, message=message)) + + message = '`lower_upper` must be square.' + if lower_upper.shape[:-2].is_fully_defined(): + if lower_upper.shape[-2] != lower_upper.shape[-1]: + raise ValueError(message) + elif validate_args: + m, n = array_ops.split( + array_ops.shape(lower_upper)[-2:], num_or_size_splits=2) + assertions.append(check_ops.assert_equal(m, n, message=message)) + + return assertions + + +def _lu_solve_assertions(lower_upper, perm, rhs, validate_args): + """Returns list of assertions related to `lu_solve` assumptions.""" + assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args) + + message = 'Input `rhs` must have at least 2 dimensions.' + if rhs.shape.ndims is not None: + if rhs.shape.ndims < 2: + raise ValueError(message) + elif validate_args: + assertions.append( + check_ops.assert_rank_at_least(rhs, rank=2, message=message)) + + message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.' 
+ if (lower_upper.shape[-1] is not None and rhs.shape[-2] is not None): + if lower_upper.shape[-1] != rhs.shape[-2]: + raise ValueError(message) + elif validate_args: + assertions.append( + check_ops.assert_equal( + array_ops.shape(lower_upper)[-1], + array_ops.shape(rhs)[-2], + message=message)) + + return assertions + + +@tf_export('linalg.eigh_tridiagonal') +@dispatch.add_dispatch_support +def eigh_tridiagonal(alpha, + beta, + eigvals_only=True, + select='a', + select_range=None, + tol=None, + name=None): + """Computes the eigenvalues of a Hermitian tridiagonal matrix. + + Args: + alpha: A real or complex tensor of shape (n), the diagonal elements of the + matrix. NOTE: If alpha is complex, the imaginary part is ignored (assumed + zero) to satisfy the requirement that the matrix be Hermitian. + beta: A real or complex tensor of shape (n-1), containing the elements of + the first super-diagonal of the matrix. If beta is complex, the first + sub-diagonal of the matrix is assumed to be the conjugate of beta to + satisfy the requirement that the matrix be Hermitian + eigvals_only: If False, both eigenvalues and corresponding eigenvectors are + computed. If True, only eigenvalues are computed. Default is True. + select: Optional string with values in {‘a’, ‘v’, ‘i’} (default is 'a') that + determines which eigenvalues to calculate: + 'a': all eigenvalues. + ‘v’: eigenvalues in the interval (min, max] given by `select_range`. + 'i’: eigenvalues with indices min <= i <= max. + select_range: Size 2 tuple or list or tensor specifying the range of + eigenvalues to compute together with select. If select is 'a', + select_range is ignored. + tol: Optional scalar. The absolute tolerance to which each eigenvalue is + required. An eigenvalue (or cluster) is considered to have converged if it + lies in an interval of this width. If tol is None (default), the value + eps*|T|_2 is used where eps is the machine precision, and |T|_2 is the + 2-norm of the matrix T. 
+ name: Optional name of the op. + + Returns: + eig_vals: The eigenvalues of the matrix in non-decreasing order. + eig_vectors: If `eigvals_only` is False the eigenvectors are returned in + the second output argument. + + Raises: + ValueError: If input values are invalid. + NotImplemented: Computing eigenvectors for `eigvals_only` = False is + not implemented yet. + + This op implements a subset of the functionality of + scipy.linalg.eigh_tridiagonal. + + Note: The result is undefined if the input contains +/-inf or NaN, or if + any value in beta has a magnitude greater than + `numpy.sqrt(numpy.finfo(beta.dtype.as_numpy_dtype).max)`. + + + TODO(b/187527398): + Add support for outer batch dimensions. + + #### Examples + + ```python + import numpy + eigvals = tf.linalg.eigh_tridiagonal([0.0, 0.0, 0.0], [1.0, 1.0]) + eigvals_expected = [-numpy.sqrt(2.0), 0.0, numpy.sqrt(2.0)] + tf.assert_near(eigvals_expected, eigvals) + # ==> True + ``` + + """ + with ops.name_scope(name or 'eigh_tridiagonal'): + + def _compute_eigenvalues(alpha, beta): + """Computes all eigenvalues of a Hermitian tridiagonal matrix.""" + + def _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, x): + """Implements the Sturm sequence recurrence.""" + with ops.name_scope('sturm'): + n = alpha.shape[0] + zeros = array_ops.zeros(array_ops.shape(x), dtype=dtypes.int32) + ones = array_ops.ones(array_ops.shape(x), dtype=dtypes.int32) + + # The first step in the Sturm sequence recurrence + # requires special care if x is equal to alpha[0]. 
+ def sturm_step0(): + q = alpha[0] - x + count = array_ops.where(q < 0, ones, zeros) + q = array_ops.where( + math_ops.equal(alpha[0], x), alpha0_perturbation, q) + return q, count + + # Subsequent steps all take this form: + def sturm_step(i, q, count): + q = alpha[i] - beta_sq[i - 1] / q - x + count = array_ops.where(q <= pivmin, count + 1, count) + q = array_ops.where(q <= pivmin, math_ops.minimum(q, -pivmin), q) + return q, count + + # The first step initializes q and count. + q, count = sturm_step0() + + # Peel off ((n-1) % blocksize) steps from the main loop, so we can run + # the bulk of the iterations unrolled by a factor of blocksize. + blocksize = 16 + i = 1 + peel = (n - 1) % blocksize + unroll_cnt = peel + + def unrolled_steps(start, q, count): + for j in range(unroll_cnt): + q, count = sturm_step(start + j, q, count) + return start + unroll_cnt, q, count + + i, q, count = unrolled_steps(i, q, count) + + # Run the remaining steps of the Sturm sequence using a partially + # unrolled while loop. + unroll_cnt = blocksize + cond = lambda i, q, count: math_ops.less(i, n) + _, _, count = while_loop.while_loop( + cond, unrolled_steps, [i, q, count], back_prop=False) + return count + + with ops.name_scope('compute_eigenvalues'): + if alpha.dtype.is_complex: + alpha = math_ops.real(alpha) + beta_sq = math_ops.real(math_ops.conj(beta) * beta) + beta_abs = math_ops.sqrt(beta_sq) + else: + beta_sq = math_ops.square(beta) + beta_abs = math_ops.abs(beta) + + # Estimate the largest and smallest eigenvalues of T using the + # Gershgorin circle theorem. + finfo = np.finfo(alpha.dtype.as_numpy_dtype) + off_diag_abs_row_sum = array_ops.concat( + [beta_abs[:1], beta_abs[:-1] + beta_abs[1:], beta_abs[-1:]], axis=0) + lambda_est_max = math_ops.minimum( + finfo.max, math_ops.reduce_max(alpha + off_diag_abs_row_sum)) + lambda_est_min = math_ops.maximum( + finfo.min, math_ops.reduce_min(alpha - off_diag_abs_row_sum)) + # Upper bound on 2-norm of T. 
+ t_norm = math_ops.maximum( + math_ops.abs(lambda_est_min), math_ops.abs(lambda_est_max)) + + # Compute the smallest allowed pivot in the Sturm sequence to avoid + # overflow. + one = np.ones([], dtype=alpha.dtype.as_numpy_dtype) + safemin = np.maximum(one / finfo.max, (one + finfo.eps) * finfo.tiny) + pivmin = safemin * math_ops.maximum(one, math_ops.reduce_max(beta_sq)) + alpha0_perturbation = math_ops.square(finfo.eps * beta_abs[0]) + abs_tol = finfo.eps * t_norm + if tol: + abs_tol = math_ops.maximum(tol, abs_tol) + # In the worst case, when the absolute tolerance is eps*lambda_est_max + # and lambda_est_max = -lambda_est_min, we have to take as many + # bisection steps as there are bits in the mantissa plus 1. + max_it = finfo.nmant + 1 + + # Determine the indices of the desired eigenvalues, based on select + # and select_range. + asserts = None + if select == 'a': + target_counts = math_ops.range(n) + elif select == 'i': + asserts = check_ops.assert_less_equal( + select_range[0], + select_range[1], + message='Got empty index range in select_range.') + target_counts = math_ops.range(select_range[0], select_range[1] + 1) + elif select == 'v': + asserts = check_ops.assert_less( + select_range[0], + select_range[1], + message='Got empty interval in select_range.') + else: + raise ValueError("'select must have a value in {'a', 'i', 'v'}.") + + if asserts: + with ops.control_dependencies([asserts]): + alpha = array_ops.identity(alpha) + + # Run binary search for all desired eigenvalues in parallel, starting + # from an interval slightly wider than the estimated + # [lambda_est_min, lambda_est_max]. + fudge = 2.1 # We widen starting interval the Gershgorin interval a bit. + norm_slack = math_ops.cast(n, alpha.dtype) * fudge * finfo.eps * t_norm + if select in {'a', 'i'}: + lower = lambda_est_min - norm_slack - 2 * fudge * pivmin + upper = lambda_est_max + norm_slack + fudge * pivmin + else: + # Count the number of eigenvalues in the given range. 
+ lower = select_range[0] - norm_slack - 2 * fudge * pivmin + upper = select_range[1] + norm_slack + fudge * pivmin + first = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, lower) + last = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, upper) + target_counts = math_ops.range(first, last) + + # Pre-broadcast the scalars used in the Sturm sequence for improved + # performance. + upper = math_ops.minimum(upper, finfo.max) + lower = math_ops.maximum(lower, finfo.min) + target_shape = array_ops.shape(target_counts) + lower = array_ops.broadcast_to(lower, shape=target_shape) + upper = array_ops.broadcast_to(upper, shape=target_shape) + pivmin = array_ops.broadcast_to(pivmin, target_shape) + alpha0_perturbation = array_ops.broadcast_to(alpha0_perturbation, + target_shape) + + # We compute the midpoint as 0.5*lower + 0.5*upper to avoid overflow in + # (lower + upper) or (upper - lower) when the matrix has eigenvalues + # with magnitude greater than finfo.max / 2. + def midpoint(lower, upper): + return (0.5 * lower) + (0.5 * upper) + + def continue_binary_search(i, lower, upper): + return math_ops.logical_and( + math_ops.less(i, max_it), + math_ops.less(abs_tol, math_ops.reduce_max(upper - lower))) + + def binary_search_step(i, lower, upper): + mid = midpoint(lower, upper) + counts = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, mid) + lower = array_ops.where(counts <= target_counts, mid, lower) + upper = array_ops.where(counts > target_counts, mid, upper) + return i + 1, lower, upper + + # Start parallel binary searches. 
+ _, lower, upper = while_loop.while_loop(continue_binary_search, + binary_search_step, + [0, lower, upper]) + return midpoint(lower, upper) + + def _compute_eigenvectors(alpha, beta, eigvals): + """Implements inverse iteration to compute eigenvectors.""" + with ops.name_scope('compute_eigenvectors'): + k = array_ops.size(eigvals) + n = array_ops.size(alpha) + alpha = math_ops.cast(alpha, dtype=beta.dtype) + + # Eigenvectors corresponding to cluster of close eigenvalues are + # not unique and need to be explicitly orthogonalized. Here we + # identify such clusters. Note: This function assumes that + # eigenvalues are sorted in non-decreasing order. + gap = eigvals[1:] - eigvals[:-1] + eps = np.finfo(eigvals.dtype.as_numpy_dtype).eps + t_norm = math_ops.maximum( + math_ops.abs(eigvals[0]), math_ops.abs(eigvals[-1])) + gaptol = np.sqrt(eps) * t_norm + # Find the beginning and end of runs of eigenvectors corresponding + # to eigenvalues closer than "gaptol", which will need to be + # orthogonalized against each other. + close = math_ops.less(gap, gaptol) + left_neighbor_close = array_ops.concat([[False], close], axis=0) + right_neighbor_close = array_ops.concat([close, [False]], axis=0) + ortho_interval_start = math_ops.logical_and( + math_ops.logical_not(left_neighbor_close), right_neighbor_close) + ortho_interval_start = array_ops.squeeze( + array_ops.where_v2(ortho_interval_start), axis=-1) + ortho_interval_end = math_ops.logical_and( + left_neighbor_close, math_ops.logical_not(right_neighbor_close)) + ortho_interval_end = array_ops.squeeze( + array_ops.where_v2(ortho_interval_end), axis=-1) + 1 + num_clusters = array_ops.size(ortho_interval_end) + + # We perform inverse iteration for all eigenvectors in parallel, + # starting from a random set of vectors, until all have converged. 
+ v0 = math_ops.cast( + stateless_random_ops.stateless_random_normal( + shape=(k, n), seed=[7, 42]), + dtype=beta.dtype) + nrm_v = norm(v0, axis=1) + v0 = v0 / nrm_v[:, array_ops.newaxis] + zero_nrm = constant_op.constant(0, shape=nrm_v.shape, dtype=nrm_v.dtype) + + # Replicate alpha-eigvals(ik) and beta across the k eigenvectors so we + # can solve the k systems + # [T - eigvals(i)*eye(n)] x_i = r_i + # simultaneously using the batching mechanism. + eigvals_cast = math_ops.cast(eigvals, dtype=beta.dtype) + alpha_shifted = ( + alpha[array_ops.newaxis, :] - eigvals_cast[:, array_ops.newaxis]) + beta = array_ops.tile(beta[array_ops.newaxis, :], [k, 1]) + diags = [beta, alpha_shifted, math_ops.conj(beta)] + + def orthogonalize_close_eigenvectors(eigenvectors): + # Eigenvectors corresponding to a cluster of close eigenvalues are not + # uniquely defined, but the subspace they span is. To avoid numerical + # instability, we explicitly mutually orthogonalize such eigenvectors + # after each step of inverse iteration. It is customary to use + # modified Gram-Schmidt for this, but this is not very efficient + # on some platforms, so here we defer to the QR decomposition in + # TensorFlow. + def orthogonalize_cluster(cluster_idx, eigenvectors): + start = ortho_interval_start[cluster_idx] + end = ortho_interval_end[cluster_idx] + update_indices = array_ops.expand_dims( + math_ops.range(start, end), -1) + vectors_in_cluster = eigenvectors[start:end, :] + # We use the builtin QR factorization to orthonormalize the + # vectors in the cluster. 
+ q, _ = qr(transpose(vectors_in_cluster)) + vectors_to_update = transpose(q) + eigenvectors = array_ops.tensor_scatter_nd_update( + eigenvectors, update_indices, vectors_to_update) + return cluster_idx + 1, eigenvectors + + _, eigenvectors = while_loop.while_loop( + lambda i, ev: math_ops.less(i, num_clusters), + orthogonalize_cluster, [0, eigenvectors]) + return eigenvectors + + def continue_iteration(i, _, nrm_v, nrm_v_old): + max_it = 5 # Taken from LAPACK xSTEIN. + min_norm_growth = 0.1 + norm_growth_factor = constant_op.constant( + 1 + min_norm_growth, dtype=nrm_v.dtype) + # We stop the inverse iteration when we reach the maximum number of + # iterations or the norm growths is less than 10%. + return math_ops.logical_and( + math_ops.less(i, max_it), + math_ops.reduce_any( + math_ops.greater_equal( + math_ops.real(nrm_v), + math_ops.real(norm_growth_factor * nrm_v_old)))) + + def inverse_iteration_step(i, v, nrm_v, nrm_v_old): + v = tridiagonal_solve( + diags, + v, + diagonals_format='sequence', + partial_pivoting=True, + perturb_singular=True) + nrm_v_old = nrm_v + nrm_v = norm(v, axis=1) + v = v / nrm_v[:, array_ops.newaxis] + v = orthogonalize_close_eigenvectors(v) + return i + 1, v, nrm_v, nrm_v_old + + _, v, nrm_v, _ = while_loop.while_loop(continue_iteration, + inverse_iteration_step, + [0, v0, nrm_v, zero_nrm]) + return transpose(v) + + alpha = ops.convert_to_tensor(alpha, name='alpha') + n = alpha.shape[0] + if n <= 1: + return math_ops.real(alpha) + beta = ops.convert_to_tensor(beta, name='beta') + + if alpha.dtype != beta.dtype: + raise ValueError("'alpha' and 'beta' must have the same type.") + + eigvals = _compute_eigenvalues(alpha, beta) + if eigvals_only: + return eigvals + + eigvectors = _compute_eigenvectors(alpha, beta, eigvals) + return eigvals, eigvectors diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator.py 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..b9a4a32e425481fe926ee5cccb0e60af02496786 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator.py @@ -0,0 +1,1693 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base class for linear operators.""" + +import abc +import contextlib + +import numpy as np + +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import composite_tensor_gradient +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_spec +from tensorflow.python.framework import tensor_util +from tensorflow.python.framework import type_spec +from tensorflow.python.framework import type_spec_registry +from tensorflow.python.module import module +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.ops import variables 
+from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.ops.linalg import property_hint_util +from tensorflow.python.ops.linalg import slicing +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.trackable import data_structures +from tensorflow.python.util import deprecation +from tensorflow.python.util import dispatch +from tensorflow.python.util import nest +from tensorflow.python.util import variable_utils +from tensorflow.python.util.tf_export import tf_export + + +__all__ = ["LinearOperator"] + + +# pylint: disable=protected-access +class _LinearOperatorGradient( + composite_tensor_gradient.CompositeTensorGradient): + """Composite tensor gradient for `LinearOperator`.""" + + def get_gradient_components(self, value): + return value._type_spec._to_components(value) + + def replace_gradient_components(self, value, components): + flat_components = nest.flatten(components) + + # If all component gradients are disconnected, return None. + if all(c is None for c in flat_components): + return None + + # TODO(b/286565628): Update this once `CompositeTensorGradient` fully + # supports `tf.UnconnectedGradients.ZERO`. + # Replace individual disconnected component gradients with zeros. + value_components = value._type_spec._to_components(value) + flat_grad_components = [] + for gc, vc in zip(flat_components, nest.flatten(value_components)): + if gc is None: + flat_grad_components.append( + nest.map_structure( + lambda x: array_ops.zeros_like(x, dtype=value.dtype), + vc, + expand_composites=True)) + else: + flat_grad_components.append(gc) + grad_components = nest.pack_sequence_as( + value_components, flat_grad_components) + return value._type_spec._from_components(grad_components) +# pylint: enable=protected-access + + +# TODO(langmore) Use matrix_solve_ls for singular or non-square matrices. 
+@tf_export("linalg.LinearOperator") +class LinearOperator( + module.Module, composite_tensor.CompositeTensor, metaclass=abc.ABCMeta): + """Base class defining a [batch of] linear operator[s]. + + Subclasses of `LinearOperator` provide access to common methods on a + (batch) matrix, without the need to materialize the matrix. This allows: + + * Matrix free computations + * Operators that take advantage of special structure, while providing a + consistent API to users. + + #### Subclassing + + To enable a public method, subclasses should implement the leading-underscore + version of the method. The argument signature should be identical except for + the omission of `name="..."`. For example, to enable + `matmul(x, adjoint=False, name="matmul")` a subclass should implement + `_matmul(x, adjoint=False)`. + + #### Performance contract + + Subclasses should only implement the assert methods + (e.g. `assert_non_singular`) if they can be done in less than `O(N^3)` + time. + + Class docstrings should contain an explanation of computational complexity. + Since this is a high-performance library, attention should be paid to detail, + and explanations can include constants as well as Big-O notation. + + #### Shape compatibility + + `LinearOperator` subclasses should operate on a [batch] matrix with + compatible shape. Class docstrings should define what is meant by compatible + shape. Some subclasses may not support batching. + + Examples: + + `x` is a batch matrix with compatible shape for `matmul` if + + ``` + operator.shape = [B1,...,Bb] + [M, N], b >= 0, + x.shape = [B1,...,Bb] + [N, R] + ``` + + `rhs` is a batch matrix with compatible shape for `solve` if + + ``` + operator.shape = [B1,...,Bb] + [M, N], b >= 0, + rhs.shape = [B1,...,Bb] + [M, R] + ``` + + #### Example docstring for subclasses. + + This operator acts like a (batch) matrix `A` with shape + `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a + batch member. 
For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `m x n` matrix. Again, this matrix `A` may not be materialized, but for + purposes of identifying and working with compatible arguments the shape is + relevant. + + Examples: + + ```python + some_tensor = ... shape = ???? + operator = MyLinOp(some_tensor) + + operator.shape() + ==> [2, 4, 4] + + operator.log_abs_determinant() + ==> Shape [2] Tensor + + x = ... Shape [2, 4, 5] Tensor + + operator.matmul(x) + ==> Shape [2, 4, 5] Tensor + ``` + + #### Shape compatibility + + This operator acts on batch matrices with compatible shape. + FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE + + #### Performance + + FILL THIS IN + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + + #### Initialization parameters + + All subclasses of `LinearOperator` are expected to pass a `parameters` + argument to `super().__init__()`. This should be a `dict` containing + the unadulterated arguments passed to the subclass `__init__`. For example, + `MyLinearOperator` with an initializer should look like: + + ```python + def __init__(self, operator, is_square=False, name=None): + parameters = dict( + operator=operator, + is_square=is_square, + name=name + ) + ... + super().__init__(..., parameters=parameters) + ``` + + Users can then access `my_linear_operator.parameters` to see all arguments + passed to its initializer. 
+ """ + + # TODO(b/143910018) Remove graph_parents in V3. + @deprecation.deprecated_args(None, "Do not pass `graph_parents`. They will " + " no longer be used.", "graph_parents") + def __init__(self, + dtype, + graph_parents=None, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name=None, + parameters=None): + """Initialize the `LinearOperator`. + + **This is a private method for subclass use.** + **Subclasses should copy-paste this `__init__` documentation.** + + Args: + dtype: The type of the this `LinearOperator`. Arguments to `matmul` and + `solve` will have to be this type. + graph_parents: (Deprecated) Python list of graph prerequisites of this + `LinearOperator` Typically tensors that are passed during initialization + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. If `dtype` is real, this is equivalent to being symmetric. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. + parameters: Python `dict` of parameters used to instantiate this + `LinearOperator`. + + Raises: + ValueError: If any member of graph_parents is `None` or not a `Tensor`. + ValueError: If hints are set incorrectly. + """ + # Check and auto-set flags. 
+ if is_positive_definite: + if is_non_singular is False: + raise ValueError("A positive definite matrix is always non-singular.") + is_non_singular = True + + if is_non_singular: + if is_square is False: + raise ValueError("A non-singular matrix is always square.") + is_square = True + + if is_self_adjoint: + if is_square is False: + raise ValueError("A self-adjoint matrix is always square.") + is_square = True + + self._is_square_set_or_implied_by_hints = is_square + + if graph_parents is not None: + self._set_graph_parents(graph_parents) + else: + self._graph_parents = [] + self._dtype = dtypes.as_dtype(dtype).base_dtype if dtype else dtype + self._is_non_singular = is_non_singular + self._is_self_adjoint = is_self_adjoint + self._is_positive_definite = is_positive_definite + self._parameters = self._no_dependency(parameters) + self._parameters_sanitized = False + self._name = name or type(self).__name__ + + @contextlib.contextmanager + def _name_scope(self, name=None): # pylint: disable=method-hidden + """Helper function to standardize op scope.""" + full_name = self.name + if name is not None: + full_name += "/" + name + with ops.name_scope(full_name) as scope: + yield scope + + @property + def parameters(self): + """Dictionary of parameters used to instantiate this `LinearOperator`.""" + return dict(self._parameters) + + @property + def dtype(self): + """The `DType` of `Tensor`s handled by this `LinearOperator`.""" + return self._dtype + + @property + def name(self): + """Name prepended to all ops created by this `LinearOperator`.""" + return self._name + + @property + @deprecation.deprecated(None, "Do not call `graph_parents`.") + def graph_parents(self): + """List of graph dependencies of this `LinearOperator`.""" + return self._graph_parents + + @property + def is_non_singular(self): + return self._is_non_singular + + @property + def is_self_adjoint(self): + return self._is_self_adjoint + + @property + def is_positive_definite(self): + return 
self._is_positive_definite + + @property + def is_square(self): + """Return `True/False` depending on if this operator is square.""" + # Static checks done after __init__. Why? Because domain/range dimension + # sometimes requires lots of work done in the derived class after init. + auto_square_check = self.domain_dimension == self.range_dimension + if self._is_square_set_or_implied_by_hints is False and auto_square_check: + raise ValueError( + "User set is_square hint to False, but the operator was square.") + if self._is_square_set_or_implied_by_hints is None: + return auto_square_check + + return self._is_square_set_or_implied_by_hints + + @abc.abstractmethod + def _shape(self): + # Write this in derived class to enable all static shape methods. + raise NotImplementedError("_shape is not implemented.") + + @property + def shape(self): + """`TensorShape` of this `LinearOperator`. + + If this operator acts like the batch matrix `A` with + `A.shape = [B1,...,Bb, M, N]`, then this returns + `TensorShape([B1,...,Bb, M, N])`, equivalent to `A.shape`. + + Returns: + `TensorShape`, statically determined, may be undefined. + """ + return self._shape() + + def _shape_tensor(self): + # This is not an abstractmethod, since we want derived classes to be able to + # override this with optional kwargs, which can reduce the number of + # `convert_to_tensor` calls. See derived classes for examples. + raise NotImplementedError("_shape_tensor is not implemented.") + + def shape_tensor(self, name="shape_tensor"): + """Shape of this `LinearOperator`, determined at runtime. + + If this operator acts like the batch matrix `A` with + `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding + `[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`. + + Args: + name: A name for this `Op`. + + Returns: + `int32` `Tensor` + """ + with self._name_scope(name): # pylint: disable=not-callable + # Prefer to use statically defined shape if available. 
+ if self.shape.is_fully_defined(): + return linear_operator_util.shape_tensor(self.shape.as_list()) + else: + return self._shape_tensor() + + @property + def batch_shape(self): + """`TensorShape` of batch dimensions of this `LinearOperator`. + + If this operator acts like the batch matrix `A` with + `A.shape = [B1,...,Bb, M, N]`, then this returns + `TensorShape([B1,...,Bb])`, equivalent to `A.shape[:-2]` + + Returns: + `TensorShape`, statically determined, may be undefined. + """ + # Derived classes get this "for free" once .shape is implemented. + return self.shape[:-2] + + def batch_shape_tensor(self, name="batch_shape_tensor"): + """Shape of batch dimensions of this operator, determined at runtime. + + If this operator acts like the batch matrix `A` with + `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding + `[B1,...,Bb]`. + + Args: + name: A name for this `Op`. + + Returns: + `int32` `Tensor` + """ + # Derived classes get this "for free" once .shape() is implemented. + with self._name_scope(name): # pylint: disable=not-callable + return self._batch_shape_tensor() + + def _batch_shape_tensor(self, shape=None): + # `shape` may be passed in if this can be pre-computed in a + # more efficient manner, e.g. without excessive Tensor conversions. + if self.batch_shape.is_fully_defined(): + return linear_operator_util.shape_tensor( + self.batch_shape.as_list(), name="batch_shape") + else: + shape = self.shape_tensor() if shape is None else shape + return shape[:-2] + + @property + def tensor_rank(self, name="tensor_rank"): + """Rank (in the sense of tensors) of matrix corresponding to this operator. + + If this operator acts like the batch matrix `A` with + `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`. + + Args: + name: A name for this `Op`. + + Returns: + Python integer, or None if the tensor rank is undefined. + """ + # Derived classes get this "for free" once .shape() is implemented. 
+ with self._name_scope(name): # pylint: disable=not-callable + return self.shape.ndims + + def tensor_rank_tensor(self, name="tensor_rank_tensor"): + """Rank (in the sense of tensors) of matrix corresponding to this operator. + + If this operator acts like the batch matrix `A` with + `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`. + + Args: + name: A name for this `Op`. + + Returns: + `int32` `Tensor`, determined at runtime. + """ + # Derived classes get this "for free" once .shape() is implemented. + with self._name_scope(name): # pylint: disable=not-callable + return self._tensor_rank_tensor() + + def _tensor_rank_tensor(self, shape=None): + # `shape` may be passed in if this can be pre-computed in a + # more efficient manner, e.g. without excessive Tensor conversions. + if self.tensor_rank is not None: + return tensor_conversion.convert_to_tensor_v2_with_dispatch( + self.tensor_rank + ) + else: + shape = self.shape_tensor() if shape is None else shape + return array_ops.size(shape) + + @property + def domain_dimension(self): + """Dimension (in the sense of vector spaces) of the domain of this operator. + + If this operator acts like the batch matrix `A` with + `A.shape = [B1,...,Bb, M, N]`, then this returns `N`. + + Returns: + `Dimension` object. + """ + # Derived classes get this "for free" once .shape is implemented. + if self.shape.rank is None: + return tensor_shape.Dimension(None) + else: + return self.shape.dims[-1] + + def domain_dimension_tensor(self, name="domain_dimension_tensor"): + """Dimension (in the sense of vector spaces) of the domain of this operator. + + Determined at runtime. + + If this operator acts like the batch matrix `A` with + `A.shape = [B1,...,Bb, M, N]`, then this returns `N`. + + Args: + name: A name for this `Op`. + + Returns: + `int32` `Tensor` + """ + # Derived classes get this "for free" once .shape() is implemented. 
+ with self._name_scope(name): # pylint: disable=not-callable + return self._domain_dimension_tensor() + + def _domain_dimension_tensor(self, shape=None): + # `shape` may be passed in if this can be pre-computed in a + # more efficient manner, e.g. without excessive Tensor conversions. + dim_value = tensor_shape.dimension_value(self.domain_dimension) + if dim_value is not None: + return tensor_conversion.convert_to_tensor_v2_with_dispatch(dim_value) + else: + shape = self.shape_tensor() if shape is None else shape + return shape[-1] + + @property + def range_dimension(self): + """Dimension (in the sense of vector spaces) of the range of this operator. + + If this operator acts like the batch matrix `A` with + `A.shape = [B1,...,Bb, M, N]`, then this returns `M`. + + Returns: + `Dimension` object. + """ + # Derived classes get this "for free" once .shape is implemented. + if self.shape.dims: + return self.shape.dims[-2] + else: + return tensor_shape.Dimension(None) + + def range_dimension_tensor(self, name="range_dimension_tensor"): + """Dimension (in the sense of vector spaces) of the range of this operator. + + Determined at runtime. + + If this operator acts like the batch matrix `A` with + `A.shape = [B1,...,Bb, M, N]`, then this returns `M`. + + Args: + name: A name for this `Op`. + + Returns: + `int32` `Tensor` + """ + # Derived classes get this "for free" once .shape() is implemented. + with self._name_scope(name): # pylint: disable=not-callable + return self._range_dimension_tensor() + + def _range_dimension_tensor(self, shape=None): + # `shape` may be passed in if this can be pre-computed in a + # more efficient manner, e.g. without excessive Tensor conversions. 
+ dim_value = tensor_shape.dimension_value(self.range_dimension) + if dim_value is not None: + return tensor_conversion.convert_to_tensor_v2_with_dispatch(dim_value) + else: + shape = self.shape_tensor() if shape is None else shape + return shape[-2] + + def _assert_non_singular(self): + """Private default implementation of _assert_non_singular.""" + logging.warn( + "Using (possibly slow) default implementation of assert_non_singular." + " Requires conversion to a dense matrix and O(N^3) operations.") + if self._can_use_cholesky(): + return self.assert_positive_definite() + else: + singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False) + # TODO(langmore) Add .eig and .cond as methods. + cond = (math_ops.reduce_max(singular_values, axis=-1) / + math_ops.reduce_min(singular_values, axis=-1)) + return check_ops.assert_less( + cond, + self._max_condition_number_to_be_non_singular(), + message="Singular matrix up to precision epsilon.") + + def _max_condition_number_to_be_non_singular(self): + """Return the maximum condition number that we consider nonsingular.""" + with ops.name_scope("max_nonsingular_condition_number"): + dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps + eps = math_ops.cast( + math_ops.reduce_max([ + 100., + math_ops.cast(self.range_dimension_tensor(), self.dtype), + math_ops.cast(self.domain_dimension_tensor(), self.dtype) + ]), self.dtype) * dtype_eps + return 1. / eps + + def assert_non_singular(self, name="assert_non_singular"): + """Returns an `Op` that asserts this operator is non singular. + + This operator is considered non-singular if + + ``` + ConditionNumber < max{100, range_dimension, domain_dimension} * eps, + eps := np.finfo(self.dtype.as_numpy_dtype).eps + ``` + + Args: + name: A string name to prepend to created ops. + + Returns: + An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if + the operator is singular. 
+ """ + with self._name_scope(name): # pylint: disable=not-callable + return self._assert_non_singular() + + def _assert_positive_definite(self): + """Default implementation of _assert_positive_definite.""" + logging.warn( + "Using (possibly slow) default implementation of " + "assert_positive_definite." + " Requires conversion to a dense matrix and O(N^3) operations.") + # If the operator is self-adjoint, then checking that + # Cholesky decomposition succeeds + results in positive diag is necessary + # and sufficient. + if self.is_self_adjoint: + return check_ops.assert_positive( + array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())), + message="Matrix was not positive definite.") + # We have no generic check for positive definite. + raise NotImplementedError("assert_positive_definite is not implemented.") + + def assert_positive_definite(self, name="assert_positive_definite"): + """Returns an `Op` that asserts this operator is positive definite. + + Here, positive definite means that the quadratic form `x^H A x` has positive + real part for all nonzero `x`. Note that we do not require the operator to + be self-adjoint to be positive definite. + + Args: + name: A name to give this `Op`. + + Returns: + An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if + the operator is not positive definite. + """ + with self._name_scope(name): # pylint: disable=not-callable + return self._assert_positive_definite() + + def _assert_self_adjoint(self): + dense = self.to_dense() + logging.warn( + "Using (possibly slow) default implementation of assert_self_adjoint." + " Requires conversion to a dense matrix.") + return check_ops.assert_equal( + dense, + linalg.adjoint(dense), + message="Matrix was not equal to its adjoint.") + + def assert_self_adjoint(self, name="assert_self_adjoint"): + """Returns an `Op` that asserts this operator is self-adjoint. + + Here we check that this operator is *exactly* equal to its hermitian + transpose. 
+ + Args: + name: A string name to prepend to created ops. + + Returns: + An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if + the operator is not self-adjoint. + """ + with self._name_scope(name): # pylint: disable=not-callable + return self._assert_self_adjoint() + + def _check_input_dtype(self, arg): + """Check that arg.dtype == self.dtype.""" + if arg.dtype.base_dtype != self.dtype: + raise TypeError( + "Expected argument to have dtype %s. Found: %s in tensor %s" % + (self.dtype, arg.dtype, arg)) + + @abc.abstractmethod + def _matmul(self, x, adjoint=False, adjoint_arg=False): + raise NotImplementedError("_matmul is not implemented.") + + def matmul( + self, + x, + adjoint=False, + adjoint_arg=False, + name="matmul", + ): + """Transform [batch] matrix `x` with left multiplication: `x --> Ax`. + + ```python + # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] + operator = LinearOperator(...) + operator.shape = [..., M, N] + + X = ... # shape [..., N, R], batch matrix, R > 0. + + Y = operator.matmul(X) + Y.shape + ==> [..., M, R] + + Y[..., :, r] = sum_j A[..., :, j] X[j, r] + ``` + + Args: + x: `LinearOperator` or `Tensor` with compatible shape and same `dtype` as + `self`. See class docstring for definition of compatibility. + adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. + adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is + the hermitian transpose (transposition and complex conjugation). + name: A name for this `Op`. + + Returns: + A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype` + as `self`. 
+ """ + if isinstance(x, LinearOperator): + left_operator = self.adjoint() if adjoint else self + right_operator = x.adjoint() if adjoint_arg else x + + if (right_operator.range_dimension is not None and + left_operator.domain_dimension is not None and + right_operator.range_dimension != left_operator.domain_dimension): + raise ValueError( + "Operators are incompatible. Expected `x` to have dimension" + " {} but got {}.".format( + left_operator.domain_dimension, right_operator.range_dimension)) + + with self._name_scope(name): # pylint: disable=not-callable + return self._linop_matmul(left_operator, right_operator) + + with self._name_scope(name): # pylint: disable=not-callable + x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x") + self._check_input_dtype(x) + + self_dim = -2 if adjoint else -1 + arg_dim = -1 if adjoint_arg else -2 + tensor_shape.dimension_at_index( + self.shape, self_dim).assert_is_compatible_with( + x.shape[arg_dim]) + + return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg) + + def _linop_matmul( + self, left_operator: "LinearOperator", right_operator: "LinearOperator" + ) -> "LinearOperator": + # instance of linear_operator_identity.LinearOperatorIdentity + if hasattr(right_operator, "_ones_diag") and not hasattr( + right_operator, "multiplier" + ): + return left_operator + + # instance of linear_operator_zeros.LinearOperatorZeros + elif hasattr(right_operator, "_zeros_diag"): + if not right_operator.is_square or not left_operator.is_square: + raise ValueError( + "Matmul with non-square `LinearOperator`s or " + "non-square `LinearOperatorZeros` not supported at this time." + ) + return right_operator + + else: + # Generic matmul of two `LinearOperator`s. 
+ is_square = property_hint_util.is_square(left_operator, right_operator) + is_non_singular = None + is_self_adjoint = None + is_positive_definite = None + + if is_square: + is_non_singular = property_hint_util.combined_non_singular_hint( + left_operator, right_operator + ) + # is_square can be None, so the explicit check for False is needed. + elif is_square is False: # pylint:disable=g-bool-id-comparison + is_non_singular = False + is_self_adjoint = False + is_positive_definite = False + + # LinearOperator outputs a LinearOperatorComposition instance, which + # inherits from LinearOperator. The inline import is necessary to avoid + # errors due to this cyclic dependency. + from tensorflow.python.ops.linalg import linear_operator_composition # pylint: disable=g-import-not-at-top + + return linear_operator_composition.LinearOperatorComposition( + operators=[left_operator, right_operator], + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + ) + + def __matmul__(self, other): + return self.matmul(other) + + def _matvec(self, x, adjoint=False): + x_mat = array_ops.expand_dims(x, axis=-1) + y_mat = self.matmul(x_mat, adjoint=adjoint) + return array_ops.squeeze(y_mat, axis=-1) + + def matvec(self, x, adjoint=False, name="matvec"): + """Transform [batch] vector `x` with left multiplication: `x --> Ax`. + + ```python + # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] + operator = LinearOperator(...) + + X = ... # shape [..., N], batch vector + + Y = operator.matvec(X) + Y.shape + ==> [..., M] + + Y[..., :] = sum_j A[..., :, j] X[..., j] + ``` + + Args: + x: `Tensor` with compatible shape and same `dtype` as `self`. + `x` is treated as a [batch] vector meaning for every set of leading + dimensions, the last dimension defines a vector. + See class docstring for definition of compatibility. + adjoint: Python `bool`. 
        If `True`, left multiply by the adjoint: `A^H x`.
      name: A name for this `Op`.

    Returns:
      A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x")
      self._check_input_dtype(x)
      self_dim = -2 if adjoint else -1
      tensor_shape.dimension_at_index(
          self.shape, self_dim).assert_is_compatible_with(x.shape[-1])
      return self._matvec(x, adjoint=adjoint)

  def _determinant(self):
    logging.warn(
        "Using (possibly slow) default implementation of determinant."
        " Requires conversion to a dense matrix and O(N^3) operations.")
    # For (hinted) self-adjoint positive-definite operators the determinant is
    # positive, so it can be recovered from the log-abs-determinant.
    if self._can_use_cholesky():
      return math_ops.exp(self.log_abs_determinant())
    return linalg_ops.matrix_determinant(self.to_dense())

  def determinant(self, name="det"):
    """Determinant for every batch member.

    Args:
      name: A name for this `Op`.

    Returns:
      `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.

    Raises:
      NotImplementedError: If `self.is_square` is `False`.
    """
    if self.is_square is False:
      raise NotImplementedError(
          "Determinant not implemented for an operator that is expected to "
          "not be square.")
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._determinant()

  def _log_abs_determinant(self):
    logging.warn(
        "Using (possibly slow) default implementation of determinant."
        " Requires conversion to a dense matrix and O(N^3) operations.")
    if self._can_use_cholesky():
      # log|det(A)| = 2 * sum(log(diag(chol(A)))) when A admits a Cholesky
      # factorization.
      diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense()))
      return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])
    _, log_abs_det = linalg.slogdet(self.to_dense())
    return log_abs_det

  def log_abs_determinant(self, name="log_abs_det"):
    """Log absolute value of determinant for every batch member.

    Args:
      name: A name for this `Op`.

    Returns:
      `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.

    Raises:
      NotImplementedError: If `self.is_square` is `False`.
    """
    if self.is_square is False:
      raise NotImplementedError(
          "Determinant not implemented for an operator that is expected to "
          "not be square.")
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._log_abs_determinant()

  def _dense_solve(self, rhs, adjoint=False, adjoint_arg=False):
    """Solve by conversion to a dense matrix."""
    if self.is_square is False:  # pylint: disable=g-bool-id-comparison
      raise NotImplementedError(
          "Solve is not yet implemented for non-square operators.")
    rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
    if self._can_use_cholesky():
      # When Cholesky is usable the operator is self-adjoint (see
      # `_can_use_cholesky`), so A^H == A and `adjoint` needs no special
      # handling on this branch.
      return linalg_ops.cholesky_solve(
          linalg_ops.cholesky(self.to_dense()), rhs)
    return linear_operator_util.matrix_solve_with_broadcast(
        self.to_dense(), rhs, adjoint=adjoint)

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    """Default implementation of _solve."""
    logging.warn(
        "Using (possibly slow) default implementation of solve."
        " Requires conversion to a dense matrix and O(N^3) operations.")
    return self._dense_solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)

  def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
    """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is well
    conditioned. Otherwise closeness will vary. See class docstring for details.

    Examples:

    ```python
    # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    # Solve R > 0 linear systems for every member of the batch.
    RHS = ...
    # shape [..., M, R]

    X = operator.solve(RHS)
    # X[..., :, r] is the solution to the r'th linear system
    # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]

    operator.matmul(X)
    ==> RHS
    ```

    Args:
      rhs: `Tensor` with same `dtype` as this operator and compatible shape.
        `rhs` is treated like a [batch] matrix meaning for every set of leading
        dimensions, the last two dimensions defines a matrix.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`. If `True`, solve the system involving the adjoint
        of this `LinearOperator`: `A^H X = rhs`.
      adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`
        is the hermitian transpose (transposition and complex conjugation).
      name: A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.

    Raises:
      NotImplementedError: If `self.is_non_singular` or `is_square` is False.
    """
    if self.is_non_singular is False:
      raise NotImplementedError(
          "Exact solve not implemented for an operator that is expected to "
          "be singular.")
    if self.is_square is False:
      raise NotImplementedError(
          "Exact solve not implemented for an operator that is expected to "
          "not be square.")
    if isinstance(rhs, LinearOperator):
      # Operator-operator path, mirroring `matmul`.
      left_operator = self.adjoint() if adjoint else self
      right_operator = rhs.adjoint() if adjoint_arg else rhs

      # Static compatibility check on the inner dimensions, when both known.
      if (right_operator.range_dimension is not None and
          left_operator.domain_dimension is not None and
          right_operator.range_dimension != left_operator.domain_dimension):
        raise ValueError(
            "Operators are incompatible. Expected `rhs` to have dimension"
            " {} but got {}.".format(
                left_operator.domain_dimension, right_operator.range_dimension))
      with self._name_scope(name):  # pylint: disable=not-callable
        return self._linop_solve(left_operator, right_operator)

    with self._name_scope(name):  # pylint: disable=not-callable
      rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(
          rhs, name="rhs"
      )
      self._check_input_dtype(rhs)

      self_dim = -1 if adjoint else -2
      arg_dim = -1 if adjoint_arg else -2
      tensor_shape.dimension_at_index(
          self.shape, self_dim).assert_is_compatible_with(
              rhs.shape[arg_dim])

      return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)

  def _linop_solve(
      self, left_operator: "LinearOperator", right_operator: "LinearOperator"
  ) -> "LinearOperator":
    """Solve between two `LinearOperator`s, producing a `LinearOperator`."""
    # instance of linear_operator_identity.LinearOperatorIdentity
    # (duck-typed via private attributes to avoid a cyclic import).
    if hasattr(right_operator, "_ones_diag") and not hasattr(
        right_operator, "multiplier"
    ):
      return left_operator.inverse()

    # Generic solve of two `LinearOperator`s.
    is_square = property_hint_util.is_square(left_operator, right_operator)
    is_non_singular = None
    is_self_adjoint = None
    is_positive_definite = None

    if is_square:
      is_non_singular = property_hint_util.combined_non_singular_hint(
          left_operator, right_operator
      )
    # is_square can be None, so the explicit check for False is needed.
    elif is_square is False:  # pylint:disable=g-bool-id-comparison
      is_non_singular = False
      is_self_adjoint = False
      is_positive_definite = False

    # LinearOperator outputs a LinearOperatorComposition instance that contains
    # a LinearOperatorInversion instance, both of which
    # inherit from LinearOperator. The inline import is necessary to avoid
    # errors due to this cyclic dependency.
    from tensorflow.python.ops.linalg import linear_operator_composition  # pylint: disable=g-import-not-at-top
    from tensorflow.python.ops.linalg import linear_operator_inversion  # pylint: disable=g-import-not-at-top

    # A^-1 B, represented lazily as composition(inverse(A), B).
    return linear_operator_composition.LinearOperatorComposition(
        operators=[
            linear_operator_inversion.LinearOperatorInversion(left_operator),
            right_operator,
        ],
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
    )

  def _solvevec(self, rhs, adjoint=False):
    """Default implementation of _solvevec."""
    # Promote the vector to a single-column matrix, solve, then squeeze.
    rhs_mat = array_ops.expand_dims(rhs, axis=-1)
    solution_mat = self.solve(rhs_mat, adjoint=adjoint)
    return array_ops.squeeze(solution_mat, axis=-1)

  # NOTE(review): the default name scope is "solve", not "solvevec" —
  # presumably historical; changing it would rename existing graph scopes.
  def solvevec(self, rhs, adjoint=False, name="solve"):
    """Solve single equation with best effort: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is well
    conditioned. Otherwise closeness will vary. See class docstring for details.

    Examples:

    ```python
    # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    # Solve one linear system for every member of the batch.
    RHS = ... # shape [..., M]

    X = operator.solvevec(RHS)
    # X is the solution to the linear system
    # sum_j A[..., :, j] X[..., j] = RHS[..., :]

    operator.matvec(X)
    ==> RHS
    ```

    Args:
      rhs: `Tensor` with same `dtype` as this operator.
        `rhs` is treated like a [batch] vector meaning for every set of leading
        dimensions, the last dimension defines a vector. See class docstring
        for definition of compatibility regarding batch dimensions.
      adjoint: Python `bool`. If `True`, solve the system involving the adjoint
        of this `LinearOperator`: `A^H X = rhs`.
      name: A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[...,N]` and same `dtype` as `rhs`.

    Raises:
      NotImplementedError: If `self.is_non_singular` or `is_square` is False.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(
          rhs, name="rhs"
      )
      self._check_input_dtype(rhs)
      self_dim = -1 if adjoint else -2
      tensor_shape.dimension_at_index(
          self.shape, self_dim).assert_is_compatible_with(rhs.shape[-1])

      return self._solvevec(rhs, adjoint=adjoint)

  def adjoint(self, name: str = "adjoint") -> "LinearOperator":
    """Returns the adjoint of the current `LinearOperator`.

    Given `A` representing this `LinearOperator`, return `A*`.
    Note that calling `self.adjoint()` and `self.H` are equivalent.

    Args:
      name: A name for this `Op`.

    Returns:
      `LinearOperator` which represents the adjoint of this `LinearOperator`.
    """
    # A self-adjoint operator is its own adjoint, so return `self` directly.
    if self.is_self_adjoint is True:  # pylint: disable=g-bool-id-comparison
      return self
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._linop_adjoint()

  # self.H is equivalent to self.adjoint().
  H = property(adjoint, None)

  def _linop_adjoint(self) -> "LinearOperator":
    # Inline import avoids the cyclic dependency with linear_operator_adjoint.
    from tensorflow.python.ops.linalg import linear_operator_adjoint  # pylint: disable=g-import-not-at-top
    return linear_operator_adjoint.LinearOperatorAdjoint(
        self,
        is_non_singular=self.is_non_singular,
        is_self_adjoint=self.is_self_adjoint,
        is_positive_definite=self.is_positive_definite,
        is_square=self.is_square)

  def inverse(self, name: str = "inverse") -> "LinearOperator":
    """Returns the Inverse of this `LinearOperator`.

    Given `A` representing this `LinearOperator`, return a `LinearOperator`
    representing `A^-1`.

    Args:
      name: A name scope to use for ops added by this method.

    Returns:
      `LinearOperator` representing inverse of this matrix.

    Raises:
      ValueError: When the `LinearOperator` is not hinted to be `non_singular`.
+ """ + if self.is_square is False: # pylint: disable=g-bool-id-comparison + raise ValueError("Cannot take the Inverse: This operator represents " + "a non square matrix.") + if self.is_non_singular is False: # pylint: disable=g-bool-id-comparison + raise ValueError("Cannot take the Inverse: This operator represents " + "a singular matrix.") + + with self._name_scope(name): # pylint: disable=not-callable + return self._linop_inverse() + + def _linop_inverse(self) -> "LinearOperator": + # The in-line import is necessary because linear_operator_inversion.py + # depends on linear_operator.py. The in-line import works because the two + # files are now in the same build target, but if the import were at the top + # of the file there would be a partially-initialized module error caused by + # the code cycle. + from tensorflow.python.ops.linalg import linear_operator_inversion # pylint: disable=g-import-not-at-top + return linear_operator_inversion.LinearOperatorInversion( + self, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=self.is_square) + + def cholesky(self, name: str = "cholesky") -> "LinearOperator": + """Returns a Cholesky factor as a `LinearOperator`. + + Given `A` representing this `LinearOperator`, if `A` is positive definite + self-adjoint, return `L`, where `A = L L^T`, i.e. the cholesky + decomposition. + + Args: + name: A name for this `Op`. + + Returns: + `LinearOperator` which represents the lower triangular matrix + in the Cholesky decomposition. + + Raises: + ValueError: When the `LinearOperator` is not hinted to be positive + definite and self adjoint. 
+ """ + + if not self._can_use_cholesky(): + raise ValueError("Cannot take the Cholesky decomposition: " + "Not a positive definite self adjoint matrix.") + with self._name_scope(name): # pylint: disable=not-callable + return self._linop_cholesky() + + def _linop_cholesky(self) -> "LinearOperator": + from tensorflow.python.ops.linalg import linear_operator_lower_triangular # pylint: disable=g-import-not-at-top + return linear_operator_lower_triangular.LinearOperatorLowerTriangular( + linalg_ops.cholesky(self.to_dense()), + is_non_singular=True, + is_self_adjoint=False, + is_square=True) + + def _to_dense(self): + """Generic and often inefficient implementation. Override often.""" + if self.batch_shape.is_fully_defined(): + batch_shape = self.batch_shape + else: + batch_shape = self.batch_shape_tensor() + + dim_value = tensor_shape.dimension_value(self.domain_dimension) + if dim_value is not None: + n = dim_value + else: + n = self.domain_dimension_tensor() + + eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype) + return self.matmul(eye) + + def to_dense(self, name="to_dense"): + """Return a dense (batch) matrix representing this operator.""" + with self._name_scope(name): # pylint: disable=not-callable + return self._to_dense() + + def _diag_part(self): + """Generic and often inefficient implementation. Override often.""" + return array_ops.matrix_diag_part(self.to_dense()) + + def diag_part(self, name="diag_part"): + """Efficiently get the [batch] diagonal part of this operator. + + If this operator has shape `[B1,...,Bb, M, N]`, this returns a + `Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where + `diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`. + + ``` + my_operator = LinearOperatorDiag([1., 2.]) + + # Efficiently get the diagonal + my_operator.diag_part() + ==> [1., 2.] + + # Equivalent, but inefficient method + tf.linalg.diag_part(my_operator.to_dense()) + ==> [1., 2.] + ``` + + Args: + name: A name for this `Op`. 
    Returns:
      diag_part: A `Tensor` of same `dtype` as self.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._diag_part()

  def _trace(self):
    # Trace = sum of the diagonal entries.
    return math_ops.reduce_sum(self.diag_part(), axis=-1)

  def trace(self, name="trace"):
    """Trace of the linear operator, equal to sum of `self.diag_part()`.

    If the operator is square, this is also the sum of the eigenvalues.

    Args:
      name: A name for this `Op`.

    Returns:
      Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._trace()

  def _add_to_tensor(self, x):
    # Override if a more efficient implementation is available.
    return self.to_dense() + x

  def add_to_tensor(self, x, name="add_to_tensor"):
    """Add matrix represented by this operator to `x`. Equivalent to `A + x`.

    Args:
      x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.
      name: A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x")
      self._check_input_dtype(x)
      return self._add_to_tensor(x)

  def _eigvals(self):
    # Default: densify and use the self-adjoint eigensolver. Callers guard on
    # `is_self_adjoint` (see `eigvals`).
    return linalg_ops.self_adjoint_eigvals(self.to_dense())

  def eigvals(self, name="eigvals"):
    """Returns the eigenvalues of this linear operator.

    If the operator is marked as self-adjoint (via `is_self_adjoint`)
    this computation can be more efficient.

    Note: This currently only supports self-adjoint operators.

    Args:
      name: A name for this `Op`.

    Returns:
      Shape `[B1,...,Bb, N]` `Tensor` of same `dtype` as `self`.
    """
    if not self.is_self_adjoint:
      raise NotImplementedError("Only self-adjoint matrices are supported.")
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._eigvals()

  def _cond(self):
    if not self.is_self_adjoint:
      # In general the condition number is the ratio of the
      # absolute value of the largest and smallest singular values.
      vals = linalg_ops.svd(self.to_dense(), compute_uv=False)
    else:
      # For self-adjoint matrices, and in general normal matrices,
      # we can use eigenvalues.
      vals = math_ops.abs(self._eigvals())

    return (math_ops.reduce_max(vals, axis=-1) /
            math_ops.reduce_min(vals, axis=-1))

  def cond(self, name="cond"):
    """Returns the condition number of this linear operator.

    Args:
      name: A name for this `Op`.

    Returns:
      Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.
    """
    with self._name_scope(name):  # pylint: disable=not-callable
      return self._cond()

  def _can_use_cholesky(self):
    # Cholesky requires the operator to be hinted self-adjoint and positive
    # definite.
    return self.is_self_adjoint and self.is_positive_definite

  def _set_graph_parents(self, graph_parents):
    """Set self._graph_parents. Called during derived class init.

    This method allows derived classes to set graph_parents, without triggering
    a deprecation warning (which is invoked if `graph_parents` is passed during
    `__init__`.

    Args:
      graph_parents: Iterable over Tensors.
    """
    # TODO(b/143910018) Remove this function in V3.
    graph_parents = [] if graph_parents is None else graph_parents
    for i, t in enumerate(graph_parents):
      if t is None or not (linear_operator_util.is_ref(t) or
                           tensor_util.is_tf_type(t)):
        raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
    self._graph_parents = graph_parents

  @property
  def _composite_tensor_fields(self):
    """A tuple of parameter names to rebuild the `LinearOperator`.
    The tuple contains the names of kwargs to the `LinearOperator`'s constructor
    that the `TypeSpec` needs to rebuild the `LinearOperator` instance.

    "is_non_singular", "is_self_adjoint", "is_positive_definite", and
    "is_square" are common to all `LinearOperator` subclasses and may be
    omitted.
    """
    return ()

  @property
  def _composite_tensor_prefer_static_fields(self):
    """A tuple of names referring to parameters that may be treated statically.

    This is a subset of `_composite_tensor_fields`, and contains the names
    of `Tensor`-like args to the `LinearOperator`s constructor that may be
    stored as static values, if they are statically known. These are typically
    shapes or axis values.
    """
    return ()

  @property
  def _type_spec(self):
    # This property will be overwritten by the `@make_composite_tensor`
    # decorator. However, we need it so that a valid subclass of the `ABCMeta`
    # class `CompositeTensor` can be constructed and passed to the
    # `@make_composite_tensor` decorator.
    pass

  def _convert_variables_to_tensors(self):
    """Recursively converts ResourceVariables in the LinearOperator to Tensors.

    The usage of `self._type_spec._from_components` violates the contract of
    `CompositeTensor`, since it is called on a different nested structure
    (one containing only `Tensor`s) than `self.type_spec` specifies (one that
    may contain `ResourceVariable`s). Since `LinearOperator`'s
    `_from_components` method just passes the contents of the nested structure
    to `__init__` to rebuild the operator, and any `LinearOperator` that may be
    instantiated with `ResourceVariables` may also be instantiated with
    `Tensor`s, this usage is valid.

    Returns:
      tensor_operator: `self` with all internal Variables converted to Tensors.
    """
    # pylint: disable=protected-access
    components = self._type_spec._to_components(self)
    tensor_components = variable_utils.convert_variables_to_tensors(
        components)
    return self._type_spec._from_components(tensor_components)
    # pylint: enable=protected-access

  def __getitem__(self, slices):
    # Slicing support: delegates to the shared batch-slicing helper.
    return slicing.batch_slice(self, params_overrides={}, slices=slices)

  @property
  def _experimental_parameter_ndims_to_matrix_ndims(self):
    """A dict of names to number of dimensions contributing to an operator.

    This is a dictionary of parameter names to `int`s specifying the
    number of right-most dimensions contributing to the **matrix** shape of the
    densified operator.
    If the parameter is a `Tensor`, this is mapped to an `int`.
    If the parameter is a `LinearOperator` (called `A`), this specifies the
    number of batch dimensions of `A` contributing to this `LinearOperator`s
    matrix shape.
    If the parameter is a structure, this is a structure of the same type of
    `int`s.

    NOTE(review): documented as a dict, but this base implementation returns
    an empty tuple — presumably subclasses override it with a dict; confirm.
    """
    return ()

  __composite_gradient__ = _LinearOperatorGradient()


class _LinearOperatorSpec(type_spec.BatchableTypeSpec):
  """A tf.TypeSpec for `LinearOperator` objects."""

  __slots__ = ("_param_specs", "_non_tensor_params", "_prefer_static_fields")

  def __init__(self, param_specs, non_tensor_params, prefer_static_fields):
    """Initializes a new `_LinearOperatorSpec`.

    Args:
      param_specs: Python `dict` of `tf.TypeSpec` instances that describe
        kwargs to the `LinearOperator`'s constructor that are `Tensor`-like or
        `CompositeTensor` subclasses.
      non_tensor_params: Python `dict` containing non-`Tensor` and non-
        `CompositeTensor` kwargs to the `LinearOperator`'s constructor.
      prefer_static_fields: Python `tuple` of strings corresponding to the names
        of `Tensor`-like args to the `LinearOperator`s constructor that may be
        stored as static values, if known. These are typically shapes, indices,
        or axis values.
+ """ + self._param_specs = param_specs + self._non_tensor_params = non_tensor_params + self._prefer_static_fields = prefer_static_fields + + @classmethod + def from_operator(cls, operator): + """Builds a `_LinearOperatorSpec` from a `LinearOperator` instance. + + Args: + operator: An instance of `LinearOperator`. + + Returns: + linear_operator_spec: An instance of `_LinearOperatorSpec` to be used as + the `TypeSpec` of `operator`. + """ + validation_fields = ("is_non_singular", "is_self_adjoint", + "is_positive_definite", "is_square") + kwargs = _extract_attrs( + operator, + keys=set(operator._composite_tensor_fields + validation_fields)) # pylint: disable=protected-access + + non_tensor_params = {} + param_specs = {} + for k, v in list(kwargs.items()): + type_spec_or_v = _extract_type_spec_recursively(v) + is_tensor = [isinstance(x, type_spec.TypeSpec) + for x in nest.flatten(type_spec_or_v)] + if all(is_tensor): + param_specs[k] = type_spec_or_v + elif not any(is_tensor): + non_tensor_params[k] = v + else: + raise NotImplementedError(f"Field {k} contains a mix of `Tensor` and " + f" non-`Tensor` values.") + + return cls( + param_specs=param_specs, + non_tensor_params=non_tensor_params, + prefer_static_fields=operator._composite_tensor_prefer_static_fields) # pylint: disable=protected-access + + def _to_components(self, obj): + return _extract_attrs(obj, keys=list(self._param_specs)) + + def _from_components(self, components): + kwargs = dict(self._non_tensor_params, **components) + return self.value_type(**kwargs) + + @property + def _component_specs(self): + return self._param_specs + + def _serialize(self): + return (self._param_specs, + self._non_tensor_params, + self._prefer_static_fields) + + def _copy(self, **overrides): + kwargs = { + "param_specs": self._param_specs, + "non_tensor_params": self._non_tensor_params, + "prefer_static_fields": self._prefer_static_fields + } + kwargs.update(overrides) + return type(self)(**kwargs) + + def _batch(self, 
batch_size): + """Returns a TypeSpec representing a batch of objects with this TypeSpec.""" + return self._copy( + param_specs=nest.map_structure( + lambda spec: spec._batch(batch_size), # pylint: disable=protected-access + self._param_specs)) + + def _unbatch(self, batch_size): + """Returns a TypeSpec representing a single element of this TypeSpec.""" + return self._copy( + param_specs=nest.map_structure( + lambda spec: spec._unbatch(), # pylint: disable=protected-access + self._param_specs)) + + +def make_composite_tensor(cls, module_name="tf.linalg"): + """Class decorator to convert `LinearOperator`s to `CompositeTensor`.""" + + spec_name = "{}Spec".format(cls.__name__) + spec_type = type(spec_name, (_LinearOperatorSpec,), {"value_type": cls}) + type_spec_registry.register("{}.{}".format(module_name, spec_name))(spec_type) + cls._type_spec = property(spec_type.from_operator) # pylint: disable=protected-access + return cls + + +def _extract_attrs(op, keys): + """Extract constructor kwargs to reconstruct `op`. + + Args: + op: A `LinearOperator` instance. + keys: A Python `tuple` of strings indicating the names of the constructor + kwargs to extract from `op`. + + Returns: + kwargs: A Python `dict` of kwargs to `op`'s constructor, keyed by `keys`. + """ + + kwargs = {} + not_found = object() + for k in keys: + srcs = [ + getattr(op, k, not_found), getattr(op, "_" + k, not_found), + getattr(op, "parameters", {}).get(k, not_found), + ] + if any(v is not not_found for v in srcs): + kwargs[k] = [v for v in srcs if v is not not_found][0] + else: + raise ValueError( + f"Could not determine an appropriate value for field `{k}` in object " + f" `{op}`. Looked for \n" + f" 1. an attr called `{k}`,\n" + f" 2. an attr called `_{k}`,\n" + f" 3. 
an entry in `op.parameters` with key '{k}'.")
    if k in op._composite_tensor_prefer_static_fields and kwargs[k] is not None:  # pylint: disable=protected-access
      if tensor_util.is_tensor(kwargs[k]):
        # Replace the Tensor with its static value when that is known.
        static_val = tensor_util.constant_value(kwargs[k])
        if static_val is not None:
          kwargs[k] = static_val
      # Normalize numpy values to plain Python so the spec is serializable.
      if isinstance(kwargs[k], (np.ndarray, np.generic)):
        kwargs[k] = kwargs[k].tolist()
  return kwargs


def _extract_type_spec_recursively(value):
  """Return (collection of) `TypeSpec`(s) for `value` if it includes `Tensor`s.

  If `value` is a `Tensor` or `CompositeTensor`, return its `TypeSpec`. If
  `value` is a collection containing `Tensor` values, recursively supplant them
  with their respective `TypeSpec`s in a collection of parallel structure.

  If `value` is none of the above, return it unchanged.

  Args:
    value: a Python `object` to (possibly) turn into a (collection of)
      `tf.TypeSpec`(s).

  Returns:
    spec: the `TypeSpec` or collection of `TypeSpec`s corresponding to `value`
      or `value`, if no `Tensor`s are found.
  """
  if isinstance(value, composite_tensor.CompositeTensor):
    return value._type_spec  # pylint: disable=protected-access
  if isinstance(value, variables.Variable):
    return resource_variable_ops.VariableSpec(
        value.shape, dtype=value.dtype, trainable=value.trainable)
  if tensor_util.is_tensor(value):
    return tensor_spec.TensorSpec(value.shape, value.dtype)
  # Unwrap trackable data structures to comply with `Type_Spec._serialize`
  # requirements. `ListWrapper`s are converted to `list`s, and for other
  # trackable data structures, the `__wrapped__` attribute is used.
  if isinstance(value, list):
    return list(_extract_type_spec_recursively(v) for v in value)
  if isinstance(value, data_structures.TrackableDataStructure):
    return _extract_type_spec_recursively(value.__wrapped__)
  if isinstance(value, tuple):
    return type(value)(_extract_type_spec_recursively(x) for x in value)
  if isinstance(value, dict):
    return type(value)((k, _extract_type_spec_recursively(v))
                       for k, v in value.items())
  return value


# Overrides for tf.linalg functions. This allows a LinearOperator to be used in
# place of a Tensor.
# For instance tf.trace(linop) and linop.trace() both work.


@dispatch.dispatch_for_types(linalg.adjoint, LinearOperator)
def _adjoint(matrix, name=None):
  return matrix.adjoint(name)


@dispatch.dispatch_for_types(linalg.cholesky, LinearOperator)
def _cholesky(input, name=None):  # pylint:disable=redefined-builtin
  return input.cholesky(name)


# The signature has to match with the one in python/op/array_ops.py,
# so we have k, padding_value, and align even though we don't use them here.
# pylint:disable=unused-argument
@dispatch.dispatch_for_types(linalg.diag_part, LinearOperator)
def _diag_part(
    input,  # pylint:disable=redefined-builtin
    name="diag_part",
    k=0,
    padding_value=0,
    align="RIGHT_LEFT"):
  return input.diag_part(name)
# pylint:enable=unused-argument


@dispatch.dispatch_for_types(linalg.det, LinearOperator)
def _det(input, name=None):  # pylint:disable=redefined-builtin
  return input.determinant(name)


@dispatch.dispatch_for_types(linalg.inv, LinearOperator)
def _inverse(input, adjoint=False, name=None):  # pylint:disable=redefined-builtin
  inv = input.inverse(name)
  if adjoint:
    inv = inv.adjoint()
  return inv


@dispatch.dispatch_for_types(linalg.logdet, LinearOperator)
def _logdet(matrix, name=None):
  # For self-adjoint positive definite matrices det > 0, so
  # log(det) == log|det| and log_abs_determinant is exact here.
  if matrix.is_positive_definite and matrix.is_self_adjoint:
    return matrix.log_abs_determinant(name)
  raise ValueError("Expected matrix to be self-adjoint positive definite.")


@dispatch.dispatch_for_types(math_ops.matmul, LinearOperator)
def _matmul(  # pylint:disable=missing-docstring
    a,
    b,
    transpose_a=False,
    transpose_b=False,
    adjoint_a=False,
    adjoint_b=False,
    a_is_sparse=False,
    b_is_sparse=False,
    output_type=None,  # pylint: disable=unused-argument
    grad_a=False,  # pylint: disable=unused-argument
    grad_b=False,  # pylint: disable=unused-argument
    name=None,
):
  if transpose_a or transpose_b:
    raise ValueError("Transposing not supported at this time.")
  if a_is_sparse or b_is_sparse:
    raise ValueError("Sparse methods not supported at this time.")
  if not isinstance(a, LinearOperator):
    # `b` is the operator here, so compute via the operator and adjoint back.
    # We use the identity (B^HA^H)^H = AB
    adjoint_matmul = b.matmul(
        a,
        adjoint=(not adjoint_b),
        adjoint_arg=(not adjoint_a),
        name=name)
    return linalg.adjoint(adjoint_matmul)
  return a.matmul(
      b, adjoint=adjoint_a, adjoint_arg=adjoint_b, name=name)


@dispatch.dispatch_for_types(linalg.solve, LinearOperator)
def _solve(
    matrix,
    rhs,
    adjoint=False,
    name=None):
  if not
isinstance(matrix, LinearOperator): + raise ValueError("Passing in `matrix` as a Tensor and `rhs` as a " + "LinearOperator is not supported.") + return matrix.solve(rhs, adjoint=adjoint, name=name) + + +@dispatch.dispatch_for_types(linalg.trace, LinearOperator) +def _trace(x, name=None): + return x.trace(name) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_addition.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_addition.py new file mode 100644 index 0000000000000000000000000000000000000000..4c4061362c541414774415c34d195cfef16f3c9e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_addition.py @@ -0,0 +1,437 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Add one or more `LinearOperators` efficiently.""" + +import abc + +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_diag +from tensorflow.python.ops.linalg import linear_operator_full_matrix +from tensorflow.python.ops.linalg import linear_operator_identity +from tensorflow.python.ops.linalg import linear_operator_lower_triangular + +__all__ = [] + + +def add_operators(operators, + operator_name=None, + addition_tiers=None, + name=None): + """Efficiently add one or more linear operators. + + Given operators `[A1, A2,...]`, this `Op` returns a possibly shorter list of + operators `[B1, B2,...]` such that + + ```sum_k Ak.matmul(x) = sum_k Bk.matmul(x).``` + + The operators `Bk` result by adding some of the `Ak`, as allowed by + `addition_tiers`. + + Example of efficient adding of diagonal operators. + + ```python + A1 = LinearOperatorDiag(diag=[1., 1.], name="A1") + A2 = LinearOperatorDiag(diag=[2., 2.], name="A2") + + # Use two tiers, the first contains an Adder that returns Diag. Since both + # A1 and A2 are Diag, they can use this Adder. The second tier will not be + # used. + addition_tiers = [ + [_AddAndReturnDiag()], + [_AddAndReturnMatrix()]] + B_list = add_operators([A1, A2], addition_tiers=addition_tiers) + + len(B_list) + ==> 1 + + B_list[0].__class__.__name__ + ==> 'LinearOperatorDiag' + + B_list[0].to_dense() + ==> [[3., 0.], + [0., 3.]] + + B_list[0].name + ==> 'Add/A1__A2/' + ``` + + Args: + operators: Iterable of `LinearOperator` objects with same `dtype`, domain + and range dimensions, and broadcastable batch shapes. + operator_name: String name for returned `LinearOperator`. 
Defaults to + concatenation of "Add/A__B/" that indicates the order of addition steps. + addition_tiers: List tiers, like `[tier_0, tier_1, ...]`, where `tier_i` + is a list of `Adder` objects. This function attempts to do all additions + in tier `i` before trying tier `i + 1`. + name: A name for this `Op`. Defaults to `add_operators`. + + Returns: + Subclass of `LinearOperator`. Class and order of addition may change as new + (and better) addition strategies emerge. + + Raises: + ValueError: If `operators` argument is empty. + ValueError: If shapes are incompatible. + """ + # Default setting + if addition_tiers is None: + addition_tiers = _DEFAULT_ADDITION_TIERS + + # Argument checking. + check_ops.assert_proper_iterable(operators) + operators = list(reversed(operators)) + if len(operators) < 1: + raise ValueError( + f"Argument `operators` must contain at least one operator. " + f"Received: {operators}.") + if not all( + isinstance(op, linear_operator.LinearOperator) for op in operators): + raise TypeError( + f"Argument `operators` must contain only LinearOperator instances. " + f"Received: {operators}.") + _static_check_for_same_dimensions(operators) + _static_check_for_broadcastable_batch_shape(operators) + + with ops.name_scope(name or "add_operators"): + + # Additions done in one of the tiers. Try tier 0, 1,... + ops_to_try_at_next_tier = list(operators) + for tier in addition_tiers: + ops_to_try_at_this_tier = ops_to_try_at_next_tier + ops_to_try_at_next_tier = [] + while ops_to_try_at_this_tier: + op1 = ops_to_try_at_this_tier.pop() + op2, adder = _pop_a_match_at_tier(op1, ops_to_try_at_this_tier, tier) + if op2 is not None: + # Will try to add the result of this again at this same tier. 
+ new_operator = adder.add(op1, op2, operator_name) + ops_to_try_at_this_tier.append(new_operator) + else: + ops_to_try_at_next_tier.append(op1) + + return ops_to_try_at_next_tier + + +def _pop_a_match_at_tier(op1, operator_list, tier): + # Search from the back of list to the front in order to create nice default + # order of operations. + for i in range(1, len(operator_list) + 1): + op2 = operator_list[-i] + for adder in tier: + if adder.can_add(op1, op2): + return operator_list.pop(-i), adder + return None, None + + +def _infer_hints_allowing_override(op1, op2, hints): + """Infer hints from op1 and op2. hints argument is an override. + + Args: + op1: LinearOperator + op2: LinearOperator + hints: _Hints object holding "is_X" boolean hints to use for returned + operator. + If some hint is None, try to set using op1 and op2. If the + hint is provided, ignore op1 and op2 hints. This allows an override + of previous hints, but does not allow forbidden hints (e.g. you still + cannot say a real diagonal operator is not self-adjoint. + + Returns: + _Hints object. + """ + hints = hints or _Hints() + # If A, B are self-adjoint, then so is A + B. + if hints.is_self_adjoint is None: + is_self_adjoint = op1.is_self_adjoint and op2.is_self_adjoint + else: + is_self_adjoint = hints.is_self_adjoint + + # If A, B are positive definite, then so is A + B. + if hints.is_positive_definite is None: + is_positive_definite = op1.is_positive_definite and op2.is_positive_definite + else: + is_positive_definite = hints.is_positive_definite + + # A positive definite operator is always non-singular. 
+ if is_positive_definite and hints.is_positive_definite is None: + is_non_singular = True + else: + is_non_singular = hints.is_non_singular + + return _Hints( + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite) + + +def _static_check_for_same_dimensions(operators): + """ValueError if operators determined to have different dimensions.""" + if len(operators) < 2: + return + + domain_dimensions = [ + (op.name, tensor_shape.dimension_value(op.domain_dimension)) + for op in operators + if tensor_shape.dimension_value(op.domain_dimension) is not None] + if len(set(value for name, value in domain_dimensions)) > 1: + raise ValueError(f"All `operators` must have the same `domain_dimension`. " + f"Received: {domain_dimensions}.") + + range_dimensions = [ + (op.name, tensor_shape.dimension_value(op.range_dimension)) + for op in operators + if tensor_shape.dimension_value(op.range_dimension) is not None] + if len(set(value for name, value in range_dimensions)) > 1: + raise ValueError(f"All operators must have the same `range_dimension`. " + f"Received: {range_dimensions}.") + + +def _static_check_for_broadcastable_batch_shape(operators): + """ValueError if operators determined to have non-broadcastable shapes.""" + if len(operators) < 2: + return + + # This will fail if they cannot be broadcast together. + batch_shape = operators[0].batch_shape + for op in operators[1:]: + batch_shape = array_ops.broadcast_static_shape(batch_shape, op.batch_shape) + + +class _Hints: + """Holds 'is_X' flags that every LinearOperator is initialized with.""" + + def __init__(self, + is_non_singular=None, + is_positive_definite=None, + is_self_adjoint=None): + self.is_non_singular = is_non_singular + self.is_positive_definite = is_positive_definite + self.is_self_adjoint = is_self_adjoint + + +################################################################################ +# Classes to add two linear operators. 
+################################################################################ + + +class _Adder(metaclass=abc.ABCMeta): + """Abstract base class to add two operators. + + Each `Adder` acts independently, adding everything it can, paying no attention + as to whether another `Adder` could have done the addition more efficiently. + """ + + @property + def name(self): + return self.__class__.__name__ + + @abc.abstractmethod + def can_add(self, op1, op2): + """Returns `True` if this `Adder` can add `op1` and `op2`. Else `False`.""" + pass + + @abc.abstractmethod + def _add(self, op1, op2, operator_name, hints): + # Derived classes can assume op1 and op2 have been validated, e.g. they have + # the same dtype, and their domain/range dimensions match. + pass + + def add(self, op1, op2, operator_name, hints=None): + """Return new `LinearOperator` acting like `op1 + op2`. + + Args: + op1: `LinearOperator` + op2: `LinearOperator`, with `shape` and `dtype` such that adding to + `op1` is allowed. + operator_name: `String` name to give to returned `LinearOperator` + hints: `_Hints` object. Returned `LinearOperator` will be created with + these hints. + + Returns: + `LinearOperator` + """ + updated_hints = _infer_hints_allowing_override(op1, op2, hints) + + if operator_name is None: + operator_name = "Add/" + op1.name + "__" + op2.name + "/" + + scope_name = self.name + if scope_name.startswith("_"): + scope_name = scope_name[1:] + with ops.name_scope(scope_name): + return self._add(op1, op2, operator_name, updated_hints) + + +class _AddAndReturnScaledIdentity(_Adder): + """Handles additions resulting in an Identity family member. + + The Identity (`LinearOperatorScaledIdentity`, `LinearOperatorIdentity`) family + is closed under addition. 
This `Adder` respects that, and returns an Identity + """ + + def can_add(self, op1, op2): + types = {_type(op1), _type(op2)} + return not types.difference(_IDENTITY_FAMILY) + + def _add(self, op1, op2, operator_name, hints): + # Will build a LinearOperatorScaledIdentity. + + if _type(op1) == _SCALED_IDENTITY: + multiplier_1 = op1.multiplier + else: + multiplier_1 = array_ops.ones(op1.batch_shape_tensor(), dtype=op1.dtype) + + if _type(op2) == _SCALED_IDENTITY: + multiplier_2 = op2.multiplier + else: + multiplier_2 = array_ops.ones(op2.batch_shape_tensor(), dtype=op2.dtype) + + return linear_operator_identity.LinearOperatorScaledIdentity( + num_rows=op1.range_dimension_tensor(), + multiplier=multiplier_1 + multiplier_2, + is_non_singular=hints.is_non_singular, + is_self_adjoint=hints.is_self_adjoint, + is_positive_definite=hints.is_positive_definite, + name=operator_name) + + +class _AddAndReturnDiag(_Adder): + """Handles additions resulting in a Diag operator.""" + + def can_add(self, op1, op2): + types = {_type(op1), _type(op2)} + return not types.difference(_DIAG_LIKE) + + def _add(self, op1, op2, operator_name, hints): + return linear_operator_diag.LinearOperatorDiag( + diag=op1.diag_part() + op2.diag_part(), + is_non_singular=hints.is_non_singular, + is_self_adjoint=hints.is_self_adjoint, + is_positive_definite=hints.is_positive_definite, + name=operator_name) + + +class _AddAndReturnTriL(_Adder): + """Handles additions resulting in a TriL operator.""" + + def can_add(self, op1, op2): + types = {_type(op1), _type(op2)} + return not types.difference(_DIAG_LIKE.union({_TRIL})) + + def _add(self, op1, op2, operator_name, hints): + if _type(op1) in _EFFICIENT_ADD_TO_TENSOR: + op_add_to_tensor, op_other = op1, op2 + else: + op_add_to_tensor, op_other = op2, op1 + + return linear_operator_lower_triangular.LinearOperatorLowerTriangular( + tril=op_add_to_tensor.add_to_tensor(op_other.to_dense()), + is_non_singular=hints.is_non_singular, + 
is_self_adjoint=hints.is_self_adjoint, + is_positive_definite=hints.is_positive_definite, + name=operator_name) + + +class _AddAndReturnMatrix(_Adder): + """"Handles additions resulting in a `LinearOperatorFullMatrix`.""" + + def can_add(self, op1, op2): # pylint: disable=unused-argument + return isinstance(op1, linear_operator.LinearOperator) and isinstance( + op2, linear_operator.LinearOperator) + + def _add(self, op1, op2, operator_name, hints): + if _type(op1) in _EFFICIENT_ADD_TO_TENSOR: + op_add_to_tensor, op_other = op1, op2 + else: + op_add_to_tensor, op_other = op2, op1 + return linear_operator_full_matrix.LinearOperatorFullMatrix( + matrix=op_add_to_tensor.add_to_tensor(op_other.to_dense()), + is_non_singular=hints.is_non_singular, + is_self_adjoint=hints.is_self_adjoint, + is_positive_definite=hints.is_positive_definite, + name=operator_name) + + +################################################################################ +# Constants designating types of LinearOperators +################################################################################ + +# Type name constants for LinearOperator classes. +_IDENTITY = "identity" +_SCALED_IDENTITY = "scaled_identity" +_DIAG = "diag" +_TRIL = "tril" +_MATRIX = "matrix" + +# Groups of operators. +_DIAG_LIKE = {_DIAG, _IDENTITY, _SCALED_IDENTITY} +_IDENTITY_FAMILY = {_IDENTITY, _SCALED_IDENTITY} +# operators with an efficient .add_to_tensor() method. +_EFFICIENT_ADD_TO_TENSOR = _DIAG_LIKE + +# Supported LinearOperator classes. +SUPPORTED_OPERATORS = [ + linear_operator_diag.LinearOperatorDiag, + linear_operator_lower_triangular.LinearOperatorLowerTriangular, + linear_operator_full_matrix.LinearOperatorFullMatrix, + linear_operator_identity.LinearOperatorIdentity, + linear_operator_identity.LinearOperatorScaledIdentity +] + + +def _type(operator): + """Returns the type name constant (e.g. 
_TRIL) for operator.""" + if isinstance(operator, linear_operator_diag.LinearOperatorDiag): + return _DIAG + if isinstance(operator, + linear_operator_lower_triangular.LinearOperatorLowerTriangular): + return _TRIL + if isinstance(operator, linear_operator_full_matrix.LinearOperatorFullMatrix): + return _MATRIX + if isinstance(operator, linear_operator_identity.LinearOperatorIdentity): + return _IDENTITY + if isinstance(operator, + linear_operator_identity.LinearOperatorScaledIdentity): + return _SCALED_IDENTITY + raise TypeError(f"Expected operator to be one of [LinearOperatorDiag, " + f"LinearOperatorLowerTriangular, LinearOperatorFullMatrix, " + f"LinearOperatorIdentity, LinearOperatorScaledIdentity]. " + f"Received: {operator}") + + +################################################################################ +# Addition tiers: +# We attempt to use Adders in tier K before K+1. +# +# Organize tiers to +# (i) reduce O(..) complexity of forming final operator, and +# (ii) produce the "most efficient" final operator. +# Dev notes: +# * Results of addition at tier K will be added at tier K or higher. +# * Tiers may change, and we warn the user that it may change. +################################################################################ + +# Note that the final tier, _AddAndReturnMatrix, will convert everything to a +# dense matrix. So it is sometimes very inefficient. 
+_DEFAULT_ADDITION_TIERS = [ + [_AddAndReturnScaledIdentity()], + [_AddAndReturnDiag()], + [_AddAndReturnTriL()], + [_AddAndReturnMatrix()], +] diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_adjoint.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_adjoint.py new file mode 100644 index 0000000000000000000000000000000000000000..e63f9d7d3af384535b237025f00dac3449f8168f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_adjoint.py @@ -0,0 +1,238 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Takes the adjoint of a `LinearOperator`.""" + +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.util.tf_export import tf_export + +__all__ = ["LinearOperatorAdjoint"] + + +@tf_export("linalg.LinearOperatorAdjoint") +@linear_operator.make_composite_tensor +class LinearOperatorAdjoint(linear_operator.LinearOperator): + """`LinearOperator` representing the adjoint of another operator. 
+ + This operator represents the adjoint of another operator. + + ```python + # Create a 2 x 2 linear operator. + operator = LinearOperatorFullMatrix([[1 - i., 3.], [0., 1. + i]]) + operator_adjoint = LinearOperatorAdjoint(operator) + + operator_adjoint.to_dense() + ==> [[1. + i, 0.] + [3., 1 - i]] + + operator_adjoint.shape + ==> [2, 2] + + operator_adjoint.log_abs_determinant() + ==> - log(2) + + x = ... Shape [2, 4] Tensor + operator_adjoint.matmul(x) + ==> Shape [2, 4] Tensor, equal to operator.matmul(x, adjoint=True) + ``` + + #### Performance + + The performance of `LinearOperatorAdjoint` depends on the underlying + operators performance. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + operator, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name=None): + r"""Initialize a `LinearOperatorAdjoint`. + + `LinearOperatorAdjoint` is initialized with an operator `A`. The `solve` + and `matmul` methods effectively flip the `adjoint` argument. E.g. + + ``` + A = MyLinearOperator(...) + B = LinearOperatorAdjoint(A) + x = [....] # a vector + + assert A.matvec(x, adjoint=True) == B.matvec(x, adjoint=False) + ``` + + Args: + operator: `LinearOperator` object. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. 
+ is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. Default is `operator.name + + "_adjoint"`. + + Raises: + ValueError: If `operator.is_non_singular` is False. + """ + parameters = dict( + operator=operator, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name, + ) + + self._operator = operator + + # The congruency of is_non_singular and is_self_adjoint was checked in the + # base operator. + combine_hint = ( + linear_operator_util.use_operator_or_provided_hint_unless_contradicting) + + is_square = combine_hint( + operator, "is_square", is_square, + "An operator is square if and only if its adjoint is square.") + + is_non_singular = combine_hint( + operator, "is_non_singular", is_non_singular, + "An operator is non-singular if and only if its adjoint is " + "non-singular.") + + is_self_adjoint = combine_hint( + operator, "is_self_adjoint", is_self_adjoint, + "An operator is self-adjoint if and only if its adjoint is " + "self-adjoint.") + + is_positive_definite = combine_hint( + operator, "is_positive_definite", is_positive_definite, + "An operator is positive-definite if and only if its adjoint is " + "positive-definite.") + + # Initialization. 
+ if name is None: + name = operator.name + "_adjoint" + with ops.name_scope(name): + super(LinearOperatorAdjoint, self).__init__( + dtype=operator.dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + @property + def operator(self): + """The operator before taking the adjoint.""" + return self._operator + + def _linop_adjoint(self) -> linear_operator.LinearOperator: + return self.operator + + def _assert_non_singular(self): + return self.operator.assert_non_singular() + + def _assert_positive_definite(self): + return self.operator.assert_positive_definite() + + def _assert_self_adjoint(self): + return self.operator.assert_self_adjoint() + + def _shape(self): + # Rotate last dimension + shape = self.operator.shape + return shape[:-2].concatenate([shape[-1], shape[-2]]) + + def _shape_tensor(self): + # Rotate last dimension + shape = self.operator.shape_tensor() + return array_ops.concat([ + shape[:-2], [shape[-1], shape[-2]]], axis=-1) + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + return self.operator.matmul( + x, adjoint=(not adjoint), adjoint_arg=adjoint_arg) + + def _matvec(self, x, adjoint=False): + return self.operator.matvec(x, adjoint=(not adjoint)) + + def _determinant(self): + if self.is_self_adjoint: + return self.operator.determinant() + return math_ops.conj(self.operator.determinant()) + + def _log_abs_determinant(self): + return self.operator.log_abs_determinant() + + def _trace(self): + if self.is_self_adjoint: + return self.operator.trace() + return math_ops.conj(self.operator.trace()) + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + return self.operator.solve( + rhs, adjoint=(not adjoint), adjoint_arg=adjoint_arg) + + def _solvevec(self, rhs, adjoint=False): + return self.operator.solvevec(rhs, adjoint=(not adjoint)) + + def _to_dense(self): + if self.is_self_adjoint: + return 
self.operator.to_dense() + return linalg.adjoint(self.operator.to_dense()) + + def _add_to_tensor(self, x): + return self.to_dense() + x + + def _eigvals(self): + eigvals = self.operator.eigvals() + if not self.operator.is_self_adjoint: + eigvals = math_ops.conj(eigvals) + return eigvals + + def _cond(self): + return self.operator.cond() + + @property + def _composite_tensor_fields(self): + return ("operator",) + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"operator": 0} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_block_diag.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_block_diag.py new file mode 100644 index 0000000000000000000000000000000000000000..0cf53df3173d3cc81fea7d3d815f471baf641489 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_block_diag.py @@ -0,0 +1,818 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Create a Block Diagonal operator from one or more `LinearOperators`.""" + +from tensorflow.python.framework import common_shapes +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.ops.linalg import property_hint_util +from tensorflow.python.util.tf_export import tf_export + +__all__ = ["LinearOperatorBlockDiag"] + + +@tf_export("linalg.LinearOperatorBlockDiag") +@linear_operator.make_composite_tensor +class LinearOperatorBlockDiag(linear_operator.LinearOperator): + """Combines one or more `LinearOperators` in to a Block Diagonal matrix. + + This operator combines one or more linear operators `[op1,...,opJ]`, + building a new `LinearOperator`, whose underlying matrix representation + has each operator `opi` on the main diagonal, and zero's elsewhere. + + #### Shape compatibility + + If `opj` acts like a [batch] matrix `Aj`, then `op_combined` acts like + the [batch] matrix formed by having each matrix `Aj` on the main + diagonal. + + Each `opj` is required to represent a matrix, and hence will have + shape `batch_shape_j + [M_j, N_j]`. + + If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the combined operator + has shape `broadcast_batch_shape + [sum M_j, sum N_j]`, where + `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`, + `j = 1,...,J`, assuming the intermediate batch shapes broadcast. 
+ + Arguments to `matmul`, `matvec`, `solve`, and `solvevec` may either be single + `Tensor`s or lists of `Tensor`s that are interpreted as blocks. The `j`th + element of a blockwise list of `Tensor`s must have dimensions that match + `opj` for the given method. If a list of blocks is input, then a list of + blocks is returned as well. + + When the `opj` are not guaranteed to be square, this operator's methods might + fail due to the combined operator not being square and/or lack of efficient + methods. + + ```python + # Create a 4 x 4 linear operator combined of two 2 x 2 operators. + operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) + operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]]) + operator = LinearOperatorBlockDiag([operator_1, operator_2]) + + operator.to_dense() + ==> [[1., 2., 0., 0.], + [3., 4., 0., 0.], + [0., 0., 1., 0.], + [0., 0., 0., 1.]] + + operator.shape + ==> [4, 4] + + operator.log_abs_determinant() + ==> scalar Tensor + + x1 = ... # Shape [2, 2] Tensor + x2 = ... # Shape [2, 2] Tensor + x = tf.concat([x1, x2], 0) # Shape [2, 4] Tensor + operator.matmul(x) + ==> tf.concat([operator_1.matmul(x1), operator_2.matmul(x2)]) + + # Create a 5 x 4 linear operator combining three blocks. + operator_1 = LinearOperatorFullMatrix([[1.], [3.]]) + operator_2 = LinearOperatorFullMatrix([[1., 6.]]) + operator_3 = LinearOperatorFullMatrix([[2.], [7.]]) + operator = LinearOperatorBlockDiag([operator_1, operator_2, operator_3]) + + operator.to_dense() + ==> [[1., 0., 0., 0.], + [3., 0., 0., 0.], + [0., 1., 6., 0.], + [0., 0., 0., 2.]] + [0., 0., 0., 7.]] + + operator.shape + ==> [5, 4] + + + # Create a [2, 3] batch of 4 x 4 linear operators. + matrix_44 = tf.random.normal(shape=[2, 3, 4, 4]) + operator_44 = LinearOperatorFullMatrix(matrix) + + # Create a [1, 3] batch of 5 x 5 linear operators. 
+ matrix_55 = tf.random.normal(shape=[1, 3, 5, 5]) + operator_55 = LinearOperatorFullMatrix(matrix_55) + + # Combine to create a [2, 3] batch of 9 x 9 operators. + operator_99 = LinearOperatorBlockDiag([operator_44, operator_55]) + + # Create a shape [2, 3, 9] vector. + x = tf.random.normal(shape=[2, 3, 9]) + operator_99.matmul(x) + ==> Shape [2, 3, 9] Tensor + + # Create a blockwise list of vectors. + x = [tf.random.normal(shape=[2, 3, 4]), tf.random.normal(shape=[2, 3, 5])] + operator_99.matmul(x) + ==> [Shape [2, 3, 4] Tensor, Shape [2, 3, 5] Tensor] + ``` + + #### Performance + + The performance of `LinearOperatorBlockDiag` on any operation is equal to + the sum of the individual operators' operations. + + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + operators, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=True, + name=None): + r"""Initialize a `LinearOperatorBlockDiag`. + + `LinearOperatorBlockDiag` is initialized with a list of operators + `[op_1,...,op_J]`. + + Args: + operators: Iterable of `LinearOperator` objects, each with + the same `dtype` and composable shape. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. 
+ is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + This is true by default, and will raise a `ValueError` otherwise. + name: A name for this `LinearOperator`. Default is the individual + operators names joined with `_o_`. + + Raises: + TypeError: If all operators do not have the same `dtype`. + ValueError: If `operators` is empty or are non-square. + """ + parameters = dict( + operators=operators, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + + # Validate operators. + check_ops.assert_proper_iterable(operators) + operators = list(operators) + if not operators: + raise ValueError( + "Expected a non-empty list of operators. Found: %s" % operators) + self._operators = operators + + # Define diagonal operators, for functions that are shared across blockwise + # `LinearOperator` types. + self._diagonal_operators = operators + + # Validate dtype. + dtype = operators[0].dtype + for operator in operators: + if operator.dtype != dtype: + name_type = (str((o.name, o.dtype)) for o in operators) + raise TypeError( + "Expected all operators to have the same dtype. Found %s" + % " ".join(name_type)) + + # Auto-set and check hints. 
+ if all(operator.is_non_singular for operator in operators): + if is_non_singular is False: + raise ValueError( + "The direct sum of non-singular operators is always non-singular.") + is_non_singular = True + + if all(operator.is_self_adjoint for operator in operators): + if is_self_adjoint is False: + raise ValueError( + "The direct sum of self-adjoint operators is always self-adjoint.") + is_self_adjoint = True + + if all(operator.is_positive_definite for operator in operators): + if is_positive_definite is False: + raise ValueError( + "The direct sum of positive definite operators is always " + "positive definite.") + is_positive_definite = True + + if name is None: + # Using ds to mean direct sum. + name = "_ds_".join(operator.name for operator in operators) + with ops.name_scope(name): + super(LinearOperatorBlockDiag, self).__init__( + dtype=dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + @property + def operators(self): + return self._operators + + def _block_range_dimensions(self): + return [op.range_dimension for op in self._diagonal_operators] + + def _block_domain_dimensions(self): + return [op.domain_dimension for op in self._diagonal_operators] + + def _block_range_dimension_tensors(self): + return [op.range_dimension_tensor() for op in self._diagonal_operators] + + def _block_domain_dimension_tensors(self): + return [op.domain_dimension_tensor() for op in self._diagonal_operators] + + def _shape(self): + # Get final matrix shape. + domain_dimension = sum(self._block_domain_dimensions()) + range_dimension = sum(self._block_range_dimensions()) + matrix_shape = tensor_shape.TensorShape([range_dimension, domain_dimension]) + + # Get broadcast batch shape. + # broadcast_shape checks for compatibility. 
+ batch_shape = self.operators[0].batch_shape + for operator in self.operators[1:]: + batch_shape = common_shapes.broadcast_shape( + batch_shape, operator.batch_shape) + + return batch_shape.concatenate(matrix_shape) + + def _shape_tensor(self): + # Avoid messy broadcasting if possible. + if self.shape.is_fully_defined(): + return tensor_conversion.convert_to_tensor_v2_with_dispatch( + self.shape.as_list(), dtype=dtypes.int32, name="shape" + ) + + domain_dimension = sum(self._block_domain_dimension_tensors()) + range_dimension = sum(self._block_range_dimension_tensors()) + matrix_shape = array_ops_stack.stack([range_dimension, domain_dimension]) + + # Dummy Tensor of zeros. Will never be materialized. + zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor()) + for operator in self.operators[1:]: + zeros += array_ops.zeros(shape=operator.batch_shape_tensor()) + batch_shape = array_ops.shape(zeros) + + return array_ops.concat((batch_shape, matrix_shape), 0) + + def _linop_adjoint(self) -> "LinearOperatorBlockDiag": + # We take the adjoint of each block on the diagonal. + return LinearOperatorBlockDiag( + operators=[operator.adjoint() for operator in self.operators], + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _linop_cholesky(self) -> "LinearOperatorBlockDiag": + # We take the cholesky of each block on the diagonal. + return LinearOperatorBlockDiag( + operators=[operator.cholesky() for operator in self.operators], + is_non_singular=True, + is_self_adjoint=None, # Let the operators passed in decide. + is_square=True) + + def _linop_inverse(self) -> "LinearOperatorBlockDiag": + # We take the inverse of each block on the diagonal. 
+ return LinearOperatorBlockDiag( + operators=[ + operator.inverse() for operator in self.operators], + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _linop_matmul( + self, + left_operator: "LinearOperatorBlockDiag", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + if isinstance(right_operator, LinearOperatorBlockDiag): + return LinearOperatorBlockDiag( + operators=[ + o1.matmul(o2) for o1, o2 in zip( + left_operator.operators, right_operator.operators)], + is_non_singular=property_hint_util.combined_non_singular_hint( + left_operator, right_operator), + # In general, a product of self-adjoint positive-definite + # block diagonal matrices is not self-adjoint. + is_self_adjoint=None, + # In general, a product of positive-definite block diagonal + # matrices is not positive-definite. + is_positive_definite=None, + is_square=True) + return super()._linop_matmul(left_operator, right_operator) + + def _linop_solve( + self, + left_operator: "LinearOperatorBlockDiag", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + if isinstance(right_operator, LinearOperatorBlockDiag): + return LinearOperatorBlockDiag( + operators=[ + o1.solve(o2) for o1, o2 in zip( + left_operator.operators, right_operator.operators)], + is_non_singular=property_hint_util.combined_non_singular_hint( + left_operator, right_operator), + # In general, a solve of self-adjoint positive-definite block diagonal + # matrices is not self-=adjoint. + is_self_adjoint=None, + # In general, a solve of positive-definite block diagonal matrices is + # not positive-definite. + is_positive_definite=None, + is_square=True) + return super()._linop_solve(left_operator, right_operator) + + # TODO(b/188080761): Add a more efficient implementation of `cond` that + # constructs the condition number from the blockwise singular values. 
+ + def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"): + """Transform [batch] matrix `x` with left multiplication: `x --> Ax`. + + ```python + # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] + operator = LinearOperator(...) + operator.shape = [..., M, N] + + X = ... # shape [..., N, R], batch matrix, R > 0. + + Y = operator.matmul(X) + Y.shape + ==> [..., M, R] + + Y[..., :, r] = sum_j A[..., :, j] X[j, r] + ``` + + Args: + x: `LinearOperator`, `Tensor` with compatible shape and same `dtype` as + `self`, or a blockwise iterable of `LinearOperator`s or `Tensor`s. See + class docstring for definition of shape compatibility. + adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. + adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is + the hermitian transpose (transposition and complex conjugation). + name: A name for this `Op`. + + Returns: + A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype` + as `self`, or if `x` is blockwise, a list of `Tensor`s with shapes that + concatenate to `[..., M, R]`. + """ + def _check_operators_agree(r, l, message): + if (r.range_dimension is not None and + l.domain_dimension is not None and + r.range_dimension != l.domain_dimension): + raise ValueError(message) + + if isinstance(x, linear_operator.LinearOperator): + left_operator = self.adjoint() if adjoint else self + right_operator = x.adjoint() if adjoint_arg else x + + _check_operators_agree( + right_operator, left_operator, + "Operators are incompatible. Expected `x` to have dimension" + " {} but got {}.".format( + left_operator.domain_dimension, right_operator.range_dimension)) + + # We can efficiently multiply BlockDiag LinearOperators if the number of + # blocks agree. 
+ if isinstance(x, LinearOperatorBlockDiag): + if len(left_operator.operators) != len(right_operator.operators): + raise ValueError( + "Can not efficiently multiply two `LinearOperatorBlockDiag`s " + "together when number of blocks differ.") + + for o1, o2 in zip(left_operator.operators, right_operator.operators): + _check_operators_agree( + o2, o1, + "Blocks are incompatible. Expected `x` to have dimension" + " {} but got {}.".format( + o1.domain_dimension, o2.range_dimension)) + + with self._name_scope(name): # pylint: disable=not-callable + return self._linop_matmul(left_operator, right_operator) + + with self._name_scope(name): # pylint: disable=not-callable + arg_dim = -1 if adjoint_arg else -2 + block_dimensions = (self._block_range_dimensions() if adjoint + else self._block_domain_dimensions()) + if linear_operator_util.arg_is_blockwise(block_dimensions, x, arg_dim): + for i, block in enumerate(x): + if not isinstance(block, linear_operator.LinearOperator): + block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) + self._check_input_dtype(block) + block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim]) + x[i] = block + else: + x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x") + self._check_input_dtype(x) + op_dimension = (self.range_dimension if adjoint + else self.domain_dimension) + op_dimension.assert_is_compatible_with(x.shape[arg_dim]) + return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg) + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + arg_dim = -1 if adjoint_arg else -2 + block_dimensions = (self._block_range_dimensions() if adjoint + else self._block_domain_dimensions()) + block_dimensions_fn = ( + self._block_range_dimension_tensors if adjoint + else self._block_domain_dimension_tensors) + blockwise_arg = linear_operator_util.arg_is_blockwise( + block_dimensions, x, arg_dim) + if blockwise_arg: + split_x = x + + else: + split_dim = -1 if adjoint_arg else -2 + # Split input by 
rows normally, and otherwise columns. + split_x = linear_operator_util.split_arg_into_blocks( + block_dimensions, block_dimensions_fn, x, axis=split_dim) + + result_list = [] + for index, operator in enumerate(self.operators): + result_list += [operator.matmul( + split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg)] + + if blockwise_arg: + return result_list + + result_list = linear_operator_util.broadcast_matrix_batch_dims( + result_list) + return array_ops.concat(result_list, axis=-2) + + def matvec(self, x, adjoint=False, name="matvec"): + """Transform [batch] vector `x` with left multiplication: `x --> Ax`. + + ```python + # Make an operator acting like batch matric A. Assume A.shape = [..., M, N] + operator = LinearOperator(...) + + X = ... # shape [..., N], batch vector + + Y = operator.matvec(X) + Y.shape + ==> [..., M] + + Y[..., :] = sum_j A[..., :, j] X[..., j] + ``` + + Args: + x: `Tensor` with compatible shape and same `dtype` as `self`, or an + iterable of `Tensor`s (for blockwise operators). `Tensor`s are treated + a [batch] vectors, meaning for every set of leading dimensions, the last + dimension defines a vector. + See class docstring for definition of compatibility. + adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. + name: A name for this `Op`. + + Returns: + A `Tensor` with shape `[..., M]` and same `dtype` as `self`. 
+ """ + with self._name_scope(name): # pylint: disable=not-callable + block_dimensions = (self._block_range_dimensions() if adjoint + else self._block_domain_dimensions()) + if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1): + for i, block in enumerate(x): + if not isinstance(block, linear_operator.LinearOperator): + block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) + self._check_input_dtype(block) + block_dimensions[i].assert_is_compatible_with(block.shape[-1]) + x[i] = block + x_mat = [block[..., array_ops.newaxis] for block in x] + y_mat = self.matmul(x_mat, adjoint=adjoint) + return [array_ops.squeeze(y, axis=-1) for y in y_mat] + + x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x") + self._check_input_dtype(x) + op_dimension = (self.range_dimension if adjoint + else self.domain_dimension) + op_dimension.assert_is_compatible_with(x.shape[-1]) + x_mat = x[..., array_ops.newaxis] + y_mat = self.matmul(x_mat, adjoint=adjoint) + return array_ops.squeeze(y_mat, axis=-1) + + def _determinant(self): + result = self.operators[0].determinant() + for operator in self.operators[1:]: + result *= operator.determinant() + return result + + def _log_abs_determinant(self): + result = self.operators[0].log_abs_determinant() + for operator in self.operators[1:]: + result += operator.log_abs_determinant() + return result + + def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"): + """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`. + + The returned `Tensor` will be close to an exact solution if `A` is well + conditioned. Otherwise closeness will vary. See class docstring for details. + + Examples: + + ```python + # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] + operator = LinearOperator(...) + operator.shape = [..., M, N] + + # Solve R > 0 linear systems for every member of the batch. + RHS = ... 
# shape [..., M, R] + + X = operator.solve(RHS) + # X[..., :, r] is the solution to the r'th linear system + # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r] + + operator.matmul(X) + ==> RHS + ``` + + Args: + rhs: `Tensor` with same `dtype` as this operator and compatible shape, + or a list of `Tensor`s (for blockwise operators). `Tensor`s are treated + like a [batch] matrices meaning for every set of leading dimensions, the + last two dimensions defines a matrix. + See class docstring for definition of compatibility. + adjoint: Python `bool`. If `True`, solve the system involving the adjoint + of this `LinearOperator`: `A^H X = rhs`. + adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H` + is the hermitian transpose (transposition and complex conjugation). + name: A name scope to use for ops added by this method. + + Returns: + `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`. + + Raises: + NotImplementedError: If `self.is_non_singular` or `is_square` is False. + """ + if self.is_non_singular is False: + raise NotImplementedError( + "Exact solve not implemented for an operator that is expected to " + "be singular.") + if self.is_square is False: + raise NotImplementedError( + "Exact solve not implemented for an operator that is expected to " + "not be square.") + + def _check_operators_agree(r, l, message): + if (r.range_dimension is not None and + l.domain_dimension is not None and + r.range_dimension != l.domain_dimension): + raise ValueError(message) + + if isinstance(rhs, linear_operator.LinearOperator): + left_operator = self.adjoint() if adjoint else self + right_operator = rhs.adjoint() if adjoint_arg else rhs + + _check_operators_agree( + right_operator, left_operator, + "Operators are incompatible. Expected `x` to have dimension" + " {} but got {}.".format( + left_operator.domain_dimension, right_operator.range_dimension)) + + # We can efficiently solve BlockDiag LinearOperators if the number of + # blocks agree. 
+ if isinstance(right_operator, LinearOperatorBlockDiag): + if len(left_operator.operators) != len(right_operator.operators): + raise ValueError( + "Can not efficiently solve `LinearOperatorBlockDiag` when " + "number of blocks differ.") + + for o1, o2 in zip(left_operator.operators, right_operator.operators): + _check_operators_agree( + o2, o1, + "Blocks are incompatible. Expected `x` to have dimension" + " {} but got {}.".format( + o1.domain_dimension, o2.range_dimension)) + + with self._name_scope(name): # pylint: disable=not-callable + return self._linop_solve(left_operator, right_operator) + + with self._name_scope(name): # pylint: disable=not-callable + block_dimensions = (self._block_domain_dimensions() if adjoint + else self._block_range_dimensions()) + arg_dim = -1 if adjoint_arg else -2 + blockwise_arg = linear_operator_util.arg_is_blockwise( + block_dimensions, rhs, arg_dim) + + if blockwise_arg: + split_rhs = rhs + for i, block in enumerate(split_rhs): + if not isinstance(block, linear_operator.LinearOperator): + block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) + self._check_input_dtype(block) + block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim]) + split_rhs[i] = block + else: + rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch( + rhs, name="rhs" + ) + self._check_input_dtype(rhs) + op_dimension = (self.domain_dimension if adjoint + else self.range_dimension) + op_dimension.assert_is_compatible_with(rhs.shape[arg_dim]) + split_dim = -1 if adjoint_arg else -2 + # Split input by rows normally, and otherwise columns. 
+ split_rhs = linear_operator_util.split_arg_into_blocks( + self._block_domain_dimensions(), + self._block_domain_dimension_tensors, + rhs, axis=split_dim) + + solution_list = [] + for index, operator in enumerate(self.operators): + solution_list += [operator.solve( + split_rhs[index], adjoint=adjoint, adjoint_arg=adjoint_arg)] + + if blockwise_arg: + return solution_list + + solution_list = linear_operator_util.broadcast_matrix_batch_dims( + solution_list) + return array_ops.concat(solution_list, axis=-2) + + def solvevec(self, rhs, adjoint=False, name="solve"): + """Solve single equation with best effort: `A X = rhs`. + + The returned `Tensor` will be close to an exact solution if `A` is well + conditioned. Otherwise closeness will vary. See class docstring for details. + + Examples: + + ```python + # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] + operator = LinearOperator(...) + operator.shape = [..., M, N] + + # Solve one linear system for every member of the batch. + RHS = ... # shape [..., M] + + X = operator.solvevec(RHS) + # X is the solution to the linear system + # sum_j A[..., :, j] X[..., j] = RHS[..., :] + + operator.matvec(X) + ==> RHS + ``` + + Args: + rhs: `Tensor` with same `dtype` as this operator, or list of `Tensor`s + (for blockwise operators). `Tensor`s are treated as [batch] vectors, + meaning for every set of leading dimensions, the last dimension defines + a vector. See class docstring for definition of compatibility regarding + batch dimensions. + adjoint: Python `bool`. If `True`, solve the system involving the adjoint + of this `LinearOperator`: `A^H X = rhs`. + name: A name scope to use for ops added by this method. + + Returns: + `Tensor` with shape `[...,N]` and same `dtype` as `rhs`. + + Raises: + NotImplementedError: If `self.is_non_singular` or `is_square` is False. 
+ """ + with self._name_scope(name): # pylint: disable=not-callable + block_dimensions = (self._block_domain_dimensions() if adjoint + else self._block_range_dimensions()) + if linear_operator_util.arg_is_blockwise(block_dimensions, rhs, -1): + for i, block in enumerate(rhs): + if not isinstance(block, linear_operator.LinearOperator): + block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) + self._check_input_dtype(block) + block_dimensions[i].assert_is_compatible_with(block.shape[-1]) + rhs[i] = block + rhs_mat = [array_ops.expand_dims(block, axis=-1) for block in rhs] + solution_mat = self.solve(rhs_mat, adjoint=adjoint) + return [array_ops.squeeze(x, axis=-1) for x in solution_mat] + + rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch( + rhs, name="rhs" + ) + self._check_input_dtype(rhs) + op_dimension = (self.domain_dimension if adjoint + else self.range_dimension) + op_dimension.assert_is_compatible_with(rhs.shape[-1]) + rhs_mat = array_ops.expand_dims(rhs, axis=-1) + solution_mat = self.solve(rhs_mat, adjoint=adjoint) + return array_ops.squeeze(solution_mat, axis=-1) + + def _diag_part(self): + if not all(operator.is_square for operator in self.operators): + raise NotImplementedError( + "`diag_part` not implemented for an operator whose blocks are not " + "square.") + diag_list = [] + for operator in self.operators: + # Extend the axis for broadcasting. 
+ diag_list += [operator.diag_part()[..., array_ops.newaxis]] + diag_list = linear_operator_util.broadcast_matrix_batch_dims(diag_list) + diagonal = array_ops.concat(diag_list, axis=-2) + return array_ops.squeeze(diagonal, axis=-1) + + def _trace(self): + if not all(operator.is_square for operator in self.operators): + raise NotImplementedError( + "`trace` not implemented for an operator whose blocks are not " + "square.") + result = self.operators[0].trace() + for operator in self.operators[1:]: + result += operator.trace() + return result + + def _to_dense(self): + num_cols = 0 + rows = [] + broadcasted_blocks = [operator.to_dense() for operator in self.operators] + broadcasted_blocks = linear_operator_util.broadcast_matrix_batch_dims( + broadcasted_blocks) + for block in broadcasted_blocks: + batch_row_shape = array_ops.shape(block)[:-1] + + zeros_to_pad_before_shape = array_ops.concat( + [batch_row_shape, [num_cols]], axis=-1) + zeros_to_pad_before = array_ops.zeros( + shape=zeros_to_pad_before_shape, dtype=block.dtype) + num_cols += array_ops.shape(block)[-1] + zeros_to_pad_after_shape = array_ops.concat( + [batch_row_shape, + [self.domain_dimension_tensor() - num_cols]], axis=-1) + zeros_to_pad_after = array_ops.zeros( + shape=zeros_to_pad_after_shape, dtype=block.dtype) + + rows.append(array_ops.concat( + [zeros_to_pad_before, block, zeros_to_pad_after], axis=-1)) + + mat = array_ops.concat(rows, axis=-2) + mat.set_shape(self.shape) + return mat + + def _assert_non_singular(self): + return control_flow_ops.group([ + operator.assert_non_singular() for operator in self.operators]) + + def _assert_self_adjoint(self): + return control_flow_ops.group([ + operator.assert_self_adjoint() for operator in self.operators]) + + def _assert_positive_definite(self): + return control_flow_ops.group([ + operator.assert_positive_definite() for operator in self.operators]) + + def _eigvals(self): + if not all(operator.is_square for operator in self.operators): + raise 
NotImplementedError( + "`eigvals` not implemented for an operator whose blocks are not " + "square.") + eig_list = [] + for operator in self.operators: + # Extend the axis for broadcasting. + eig_list += [operator.eigvals()[..., array_ops.newaxis]] + eig_list = linear_operator_util.broadcast_matrix_batch_dims(eig_list) + eigs = array_ops.concat(eig_list, axis=-2) + return array_ops.squeeze(eigs, axis=-1) + + @property + def _composite_tensor_fields(self): + return ("operators",) + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"operators": [0] * len(self.operators)} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_block_lower_triangular.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_block_lower_triangular.py new file mode 100644 index 0000000000000000000000000000000000000000..bd9caf67d3f5dd3c1e5b6c91f92d1e9ce575ccfb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_block_lower_triangular.py @@ -0,0 +1,986 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Create a blockwise lower-triangular operator from `LinearOperators`.""" + +from tensorflow.python.framework import common_shapes +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_addition +from tensorflow.python.ops.linalg import linear_operator_full_matrix +from tensorflow.python.ops.linalg import linear_operator_identity +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.util import nest +from tensorflow.python.util.tf_export import tf_export + +__all__ = ["LinearOperatorBlockLowerTriangular"] + + +@tf_export("linalg.LinearOperatorBlockLowerTriangular") +@linear_operator.make_composite_tensor +class LinearOperatorBlockLowerTriangular(linear_operator.LinearOperator): + """Combines `LinearOperators` into a blockwise lower-triangular matrix. + + This operator is initialized with a nested list of linear operators, which + are combined into a new `LinearOperator` whose underlying matrix + representation is square and has each operator on or below the main diagonal, + and zero's elsewhere. Each element of the outer list is a list of + `LinearOperators` corresponding to a row-partition of the blockwise structure. + The number of `LinearOperator`s in row-partion `i` must be equal to `i`. 
+ + For example, a blockwise `3 x 3` `LinearOperatorBlockLowerTriangular` is + initialized with the list `[[op_00], [op_10, op_11], [op_20, op_21, op_22]]`, + where the `op_ij`, `i < 3, j <= i`, are `LinearOperator` instances. The + `LinearOperatorBlockLowerTriangular` behaves as the following blockwise + matrix, where `0` represents appropriately-sized [batch] matrices of zeros: + + ```none + [[op_00, 0, 0], + [op_10, op_11, 0], + [op_20, op_21, op_22]] + ``` + + Each `op_jj` on the diagonal is required to represent a square matrix, and + hence will have shape `batch_shape_j + [M_j, M_j]`. `LinearOperator`s in row + `j` of the blockwise structure must have `range_dimension` equal to that of + `op_jj`, and `LinearOperators` in column `j` must have `domain_dimension` + equal to that of `op_jj`. + + If each `op_jj` on the diagonal has shape `batch_shape_j + [M_j, M_j]`, then + the combined operator has shape `broadcast_batch_shape + [sum M_j, sum M_j]`, + where `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`, + `j = 0, 1, ..., J`, assuming the intermediate batch shapes broadcast. + Even if the combined shape is well defined, the combined operator's + methods may fail due to lack of broadcasting ability in the defining + operators' methods. + + For example, to create a 4 x 4 linear operator combined of three 2 x 2 + operators: + >>> operator_0 = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) + >>> operator_1 = tf.linalg.LinearOperatorFullMatrix([[1., 0.], [0., 1.]]) + >>> operator_2 = tf.linalg.LinearOperatorLowerTriangular([[5., 6.], [7., 8]]) + >>> operator = LinearOperatorBlockLowerTriangular( + ... 
[[operator_0], [operator_1, operator_2]]) + + >>> operator.to_dense() + + + >>> operator.shape + TensorShape([4, 4]) + + >>> operator.log_abs_determinant() + + + >>> x0 = [[1., 6.], [-3., 4.]] + >>> x1 = [[0., 2.], [4., 0.]] + >>> x = tf.concat([x0, x1], 0) # Shape [2, 4] Tensor + >>> operator.matmul(x) + + + The above `matmul` is equivalent to: + >>> tf.concat([operator_0.matmul(x0), + ... operator_1.matmul(x0) + operator_2.matmul(x1)], axis=0) + + + #### Shape compatibility + + This operator acts on [batch] matrix with compatible shape. + `x` is a batch matrix with compatible shape for `matmul` and `solve` if + + ``` + operator.shape = [B1,...,Bb] + [M, N], with b >= 0 + x.shape = [B1,...,Bb] + [N, R], with R >= 0. + ``` + + For example: + + Create a [2, 3] batch of 4 x 4 linear operators: + >>> matrix_44 = tf.random.normal(shape=[2, 3, 4, 4]) + >>> operator_44 = tf.linalg.LinearOperatorFullMatrix(matrix_44) + + Create a [1, 3] batch of 5 x 4 linear operators: + >>> matrix_54 = tf.random.normal(shape=[1, 3, 5, 4]) + >>> operator_54 = tf.linalg.LinearOperatorFullMatrix(matrix_54) + + Create a [1, 3] batch of 5 x 5 linear operators: + >>> matrix_55 = tf.random.normal(shape=[1, 3, 5, 5]) + >>> operator_55 = tf.linalg.LinearOperatorFullMatrix(matrix_55) + + Combine to create a [2, 3] batch of 9 x 9 operators: + >>> operator_99 = LinearOperatorBlockLowerTriangular( + ... [[operator_44], [operator_54, operator_55]]) + >>> operator_99.shape + TensorShape([2, 3, 9, 9]) + + Create a shape [2, 1, 9] batch of vectors and apply the operator to it. + >>> x = tf.random.normal(shape=[2, 1, 9]) + >>> y = operator_99.matvec(x) + >>> y.shape + TensorShape([2, 3, 9]) + + Create a blockwise list of vectors and apply the operator to it. A blockwise + list is returned. 
+ >>> x4 = tf.random.normal(shape=[2, 1, 4]) + >>> x5 = tf.random.normal(shape=[2, 3, 5]) + >>> y_blockwise = operator_99.matvec([x4, x5]) + >>> y_blockwise[0].shape + TensorShape([2, 3, 4]) + >>> y_blockwise[1].shape + TensorShape([2, 3, 5]) + + #### Performance + + Suppose `operator` is a `LinearOperatorBlockLowerTriangular` consisting of `D` + row-partitions and `D` column-partitions, such that the total number of + operators is `N = D * (D + 1) // 2`. + + * `operator.matmul` has complexity equal to the sum of the `matmul` + complexities of the individual operators. + * `operator.solve` has complexity equal to the sum of the `solve` complexities + of the operators on the diagonal and the `matmul` complexities of the + operators off the diagonal. + * `operator.determinant` has complexity equal to the sum of the `determinant` + complexities of the operators on the diagonal. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + operators, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name="LinearOperatorBlockLowerTriangular"): + r"""Initialize a `LinearOperatorBlockLowerTriangular`. + + `LinearOperatorBlockLowerTriangular` is initialized with a list of lists of + operators `[[op_0], [op_1, op_2], [op_3, op_4, op_5],...]`. 
+ + Args: + operators: Iterable of iterables of `LinearOperator` objects, each with + the same `dtype`. Each element of `operators` corresponds to a row- + partition, in top-to-bottom order. The operators in each row-partition + are filled in left-to-right. For example, + `operators = [[op_0], [op_1, op_2], [op_3, op_4, op_5]]` creates a + `LinearOperatorBlockLowerTriangular` with full block structure + `[[op_0, 0, 0], [op_1, op_2, 0], [op_3, op_4, op_5]]`. The number of + operators in the `i`th row must be equal to `i`, such that each operator + falls on or below the diagonal of the blockwise structure. + `LinearOperator`s that fall on the diagonal (the last elements of each + row) must be square. The other `LinearOperator`s must have domain + dimension equal to the domain dimension of the `LinearOperator`s in the + same column-partition, and range dimension equal to the range dimension + of the `LinearOperator`s in the same row-partition. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + This will raise a `ValueError` if set to `False`. + name: A name for this `LinearOperator`. + + Raises: + TypeError: If all operators do not have the same `dtype`. + ValueError: If `operators` is empty, contains an erroneous number of + elements, or contains operators with incompatible shapes. 
+ """ + parameters = dict( + operators=operators, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + + # Validate operators. + check_ops.assert_proper_iterable(operators) + for row in operators: + check_ops.assert_proper_iterable(row) + operators = [list(row) for row in operators] + + if not operators: + raise ValueError(f"Argument `operators` must be a list of >=1 operators. " + f"Received: {operators}.") + self._operators = operators + self._diagonal_operators = [row[-1] for row in operators] + + dtype = operators[0][0].dtype + self._validate_dtype(dtype) + is_non_singular = self._validate_non_singular(is_non_singular) + self._validate_num_operators() + self._validate_operator_dimensions() + is_square = self._validate_square(is_square) + with ops.name_scope(name): + super(LinearOperatorBlockLowerTriangular, self).__init__( + dtype=dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + def _validate_num_operators(self): + for i, row in enumerate(self.operators): + if len(row) != i + 1: + raise ValueError( + f"Argument `operators[{i}]` must contain `{i + 1}` blocks. " + f"Received: {len(row)} blocks.") + + def _validate_operator_dimensions(self): + """Check that `operators` have compatible dimensions.""" + for i in range(1, len(self.operators)): + for j in range(i): + op = self.operators[i][j] + + # `above_op` is the operator directly above `op` in the blockwise + # structure, in row partition `i-1`, column partition `j`. `op` should + # have the same `domain_dimension` as `above_op`. + above_op = self.operators[i - 1][j] + + # `right_op` is the operator to the right of `op` in the blockwise + # structure, in row partition `i`, column partition `j+1`. `op` should + # have the same `range_dimension` as `right_op`. 
+ right_op = self.operators[i][j + 1] + + if (op.domain_dimension is not None and + above_op.domain_dimension is not None): + if op.domain_dimension != above_op.domain_dimension: + raise ValueError(f"Argument `operators[{i}][{j}].domain_dimension` " + f"({op.domain_dimension}) must be the same as " + f"`operators[{i-1}][{j}].domain_dimension` " + f"({above_op.domain_dimension}).") + if (op.range_dimension is not None and + right_op.range_dimension is not None): + if op.range_dimension != right_op.range_dimension: + raise ValueError(f"Argument `operators[{i}][{j}].range_dimension` " + f"({op.range_dimension}) must be the same as " + f"`operators[{i}][{j + 1}].range_dimension` " + f"({right_op.range_dimension}).") + + # pylint: disable=g-bool-id-comparison + def _validate_non_singular(self, is_non_singular): + if all(op.is_non_singular for op in self._diagonal_operators): + if is_non_singular is False: + raise ValueError( + f"A blockwise lower-triangular operator with non-singular " + f"operators on the main diagonal is always non-singular. " + f"Expected argument `is_non_singular` to be True. " + f"Received: {is_non_singular}.") + return True + if any(op.is_non_singular is False for op in self._diagonal_operators): + if is_non_singular is True: + raise ValueError( + f"A blockwise lower-triangular operator with a singular operator " + f"on the main diagonal is always singular. Expected argument " + f"`is_non_singular` to be True. Received: {is_non_singular}.") + return False + + def _validate_square(self, is_square): + if is_square is False: + raise ValueError(f"`LinearOperatorBlockLowerTriangular` must be square. " + f"Expected argument `is_square` to be True. " + f"Received: {is_square}.") + for i, op in enumerate(self._diagonal_operators): + if op.is_square is False: + raise ValueError( + f"Matrices on the diagonal (the final elements of each " + f"row-partition in the `operators` list) must be square. 
Expected " + f"argument `operators[{i}][-1].is_square` to be True. " + f"Received: {op.is_square}.") + return True + # pylint: enable=g-bool-id-comparison + + def _validate_dtype(self, dtype): + for i, row in enumerate(self.operators): + for operator in row: + if operator.dtype != dtype: + name_type = (str((o.name, o.dtype)) for o in row) + raise TypeError( + "Expected all operators to have the same dtype. Found {} in row " + "{} and {} in row 0.".format(name_type, i, str(dtype))) + + @property + def operators(self): + return self._operators + + def _block_range_dimensions(self): + return [op.range_dimension for op in self._diagonal_operators] + + def _block_domain_dimensions(self): + return [op.domain_dimension for op in self._diagonal_operators] + + def _block_range_dimension_tensors(self): + return [op.range_dimension_tensor() for op in self._diagonal_operators] + + def _block_domain_dimension_tensors(self): + return [op.domain_dimension_tensor() for op in self._diagonal_operators] + + def _shape(self): + # Get final matrix shape. + domain_dimension = sum(self._block_domain_dimensions()) + range_dimension = sum(self._block_range_dimensions()) + matrix_shape = tensor_shape.TensorShape([domain_dimension, range_dimension]) + + # Get broadcast batch shape. + # broadcast_shape checks for compatibility. + batch_shape = self.operators[0][0].batch_shape + for row in self.operators[1:]: + for operator in row: + batch_shape = common_shapes.broadcast_shape( + batch_shape, operator.batch_shape) + + return batch_shape.concatenate(matrix_shape) + + def _shape_tensor(self): + # Avoid messy broadcasting if possible. 
+ if self.shape.is_fully_defined(): + return tensor_conversion.convert_to_tensor_v2_with_dispatch( + self.shape.as_list(), dtype=dtypes.int32, name="shape" + ) + + domain_dimension = sum(self._block_domain_dimension_tensors()) + range_dimension = sum(self._block_range_dimension_tensors()) + matrix_shape = array_ops_stack.stack([domain_dimension, range_dimension]) + + batch_shape = self.operators[0][0].batch_shape_tensor() + for row in self.operators[1:]: + for operator in row: + batch_shape = array_ops.broadcast_dynamic_shape( + batch_shape, operator.batch_shape_tensor()) + + return array_ops.concat((batch_shape, matrix_shape), 0) + + def _linop_inverse(self) -> "LinearOperatorBlockLowerTriangular": + """Inverse of LinearOperatorBlockLowerTriangular. + + We recursively apply the identity: + + ```none + |A 0|' = | A' 0| + |B C| |-C'BA' C'| + ``` + + where `A` is n-by-n, `B` is m-by-n, + `C` is m-by-m, and `'` denotes inverse. + + This identity can be verified through multiplication: + + ```none + |A 0|| A' 0| + |B C||-C'BA' C'| + + = | AA' 0| + |BA'-CC'BA' CC'| + + = |I 0| + |0 I| + ``` + Returns: + A 'LinearOperatorBlockLowerTriangular'. + """ + if len(self.operators) == 1: + return (LinearOperatorBlockLowerTriangular( + [[self.operators[0][0].inverse()]], + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=(self. + is_positive_definite), + is_square=True)) + + blockwise_dim = len(self.operators) + + # Calculate the inverse of the `LinearOperatorBlockLowerTriangular` + # representing all but the last row of `self` with + # a recursive call (the matrix `A'` in the docstring definition). 
+ upper_left_inverse = ( + LinearOperatorBlockLowerTriangular(self.operators[:-1]).inverse()) + + bottom_row = self.operators[-1] + bottom_right_inverse = bottom_row[-1].inverse() + + # Find the bottom row of the inverse (equal to `[-C'BA', C']` + # in the docstring definition, where `C` is the bottom-right operator of + # `self` and `B` is the set of operators in the + # bottom row excluding `C`). To find `-C'BA'`, we first iterate over the + # column partitions of `A'`. + inverse_bottom_row = [] + for i in range(blockwise_dim - 1): + # Find the `i`-th block of `BA'`. + blocks = [] + for j in range(i, blockwise_dim - 1): + result = bottom_row[j].matmul(upper_left_inverse.operators[j][i]) + if not any( + isinstance(result, op_type) + for op_type in linear_operator_addition.SUPPORTED_OPERATORS + ): + result = linear_operator_full_matrix.LinearOperatorFullMatrix( + result.to_dense()) + blocks.append(result) + + summed_blocks = linear_operator_addition.add_operators(blocks) + assert len(summed_blocks) == 1 + block = summed_blocks[0] + + # Find the `i`-th block of `-C'BA'`. + block = bottom_right_inverse.matmul(block) + block = linear_operator_identity.LinearOperatorScaledIdentity( + num_rows=bottom_right_inverse.domain_dimension_tensor(), + multiplier=math_ops.cast(-1, dtype=block.dtype)).matmul(block) + inverse_bottom_row.append(block) + + # `C'` is the last block of the inverted linear operator. + inverse_bottom_row.append(bottom_right_inverse) + + return (LinearOperatorBlockLowerTriangular( + upper_left_inverse.operators + [inverse_bottom_row], + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=(self.is_positive_definite), + is_square=True)) + + def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"): + """Transform [batch] matrix `x` with left multiplication: `x --> Ax`. + + ```python + # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] + operator = LinearOperator(...) 
+ operator.shape = [..., M, N] + + X = ... # shape [..., N, R], batch matrix, R > 0. + + Y = operator.matmul(X) + Y.shape + ==> [..., M, R] + + Y[..., :, r] = sum_j A[..., :, j] X[j, r] + ``` + + Args: + x: `LinearOperator`, `Tensor` with compatible shape and same `dtype` as + `self`, or a blockwise iterable of `LinearOperator`s or `Tensor`s. See + class docstring for definition of shape compatibility. + adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. + adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is + the hermitian transpose (transposition and complex conjugation). + name: A name for this `Op`. + + Returns: + A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype` + as `self`, or if `x` is blockwise, a list of `Tensor`s with shapes that + concatenate to `[..., M, R]`. + """ + if isinstance(x, linear_operator.LinearOperator): + left_operator = self.adjoint() if adjoint else self + right_operator = x.adjoint() if adjoint_arg else x + + if (right_operator.range_dimension is not None and + left_operator.domain_dimension is not None and + right_operator.range_dimension != left_operator.domain_dimension): + raise ValueError( + "Operators are incompatible. 
Expected `x` to have dimension" + " {} but got {}.".format( + left_operator.domain_dimension, right_operator.range_dimension)) + with self._name_scope(name): # pylint: disable=not-callable + return self._linop_matmul(left_operator, right_operator) + + with self._name_scope(name): # pylint: disable=not-callable + arg_dim = -1 if adjoint_arg else -2 + block_dimensions = (self._block_range_dimensions() if adjoint + else self._block_domain_dimensions()) + if linear_operator_util.arg_is_blockwise(block_dimensions, x, arg_dim): + for i, block in enumerate(x): + if not isinstance(block, linear_operator.LinearOperator): + block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) + self._check_input_dtype(block) + block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim]) + x[i] = block + else: + x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x") + self._check_input_dtype(x) + op_dimension = (self.range_dimension if adjoint + else self.domain_dimension) + op_dimension.assert_is_compatible_with(x.shape[arg_dim]) + return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg) + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + arg_dim = -1 if adjoint_arg else -2 + block_dimensions = (self._block_range_dimensions() if adjoint + else self._block_domain_dimensions()) + blockwise_arg = linear_operator_util.arg_is_blockwise( + block_dimensions, x, arg_dim) + if blockwise_arg: + split_x = x + else: + split_dim = -1 if adjoint_arg else -2 + # Split input by columns if adjoint_arg is True, else rows + split_x = linear_operator_util.split_arg_into_blocks( + self._block_domain_dimensions(), + self._block_domain_dimension_tensors, + x, axis=split_dim) + + result_list = [] + # Iterate over row-partitions (i.e. column-partitions of the adjoint). + if adjoint: + for index in range(len(self.operators)): + # Begin with the operator on the diagonal and apply it to the + # respective `rhs` block. 
+ result = self.operators[index][index].matmul( + split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg) + + # Iterate top to bottom over the operators in the remainder of the + # column-partition (i.e. left to right over the row-partition of the + # adjoint), apply the operator to the respective `rhs` block and + # accumulate the sum. For example, given the + # `LinearOperatorBlockLowerTriangular`: + # + # op = [[A, 0, 0], + # [B, C, 0], + # [D, E, F]] + # + # if `index = 1`, the following loop calculates: + # `y_1 = (C.matmul(x_1, adjoint=adjoint) + + # E.matmul(x_2, adjoint=adjoint)`, + # where `x_1` and `x_2` are splits of `x`. + for j in range(index + 1, len(self.operators)): + result += self.operators[j][index].matmul( + split_x[j], adjoint=adjoint, adjoint_arg=adjoint_arg) + result_list.append(result) + else: + for row in self.operators: + # Begin with the left-most operator in the row-partition and apply it + # to the first `rhs` block. + result = row[0].matmul( + split_x[0], adjoint=adjoint, adjoint_arg=adjoint_arg) + # Iterate left to right over the operators in the remainder of the row + # partition, apply the operator to the respective `rhs` block, and + # accumulate the sum. + for j, operator in enumerate(row[1:]): + result += operator.matmul( + split_x[j + 1], adjoint=adjoint, adjoint_arg=adjoint_arg) + result_list.append(result) + + if blockwise_arg: + return result_list + + result_list = linear_operator_util.broadcast_matrix_batch_dims( + result_list) + return array_ops.concat(result_list, axis=-2) + + def matvec(self, x, adjoint=False, name="matvec"): + """Transform [batch] vector `x` with left multiplication: `x --> Ax`. + + ```python + # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] + operator = LinearOperator(...) + + X = ... 
# shape [..., N], batch vector + + Y = operator.matvec(X) + Y.shape + ==> [..., M] + + Y[..., :] = sum_j A[..., :, j] X[..., j] + ``` + + Args: + x: `Tensor` with compatible shape and same `dtype` as `self`, or an + iterable of `Tensor`s. `Tensor`s are treated a [batch] vectors, meaning + for every set of leading dimensions, the last dimension defines a + vector. + See class docstring for definition of compatibility. + adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. + name: A name for this `Op`. + + Returns: + A `Tensor` with shape `[..., M]` and same `dtype` as `self`. + """ + with self._name_scope(name): # pylint: disable=not-callable + block_dimensions = (self._block_range_dimensions() if adjoint + else self._block_domain_dimensions()) + if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1): + for i, block in enumerate(x): + if not isinstance(block, linear_operator.LinearOperator): + block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) + self._check_input_dtype(block) + block_dimensions[i].assert_is_compatible_with(block.shape[-1]) + x[i] = block + x_mat = [block[..., array_ops.newaxis] for block in x] + y_mat = self.matmul(x_mat, adjoint=adjoint) + return [array_ops.squeeze(y, axis=-1) for y in y_mat] + + x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x") + self._check_input_dtype(x) + op_dimension = (self.range_dimension if adjoint + else self.domain_dimension) + op_dimension.assert_is_compatible_with(x.shape[-1]) + x_mat = x[..., array_ops.newaxis] + y_mat = self.matmul(x_mat, adjoint=adjoint) + return array_ops.squeeze(y_mat, axis=-1) + + def _determinant(self): + if all(op.is_positive_definite for op in self._diagonal_operators): + return math_ops.exp(self._log_abs_determinant()) + result = self._diagonal_operators[0].determinant() + for op in self._diagonal_operators[1:]: + result *= op.determinant() + return result + + def _log_abs_determinant(self): + result = 
self._diagonal_operators[0].log_abs_determinant() + for op in self._diagonal_operators[1:]: + result += op.log_abs_determinant() + return result + + def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"): + """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`. + + The returned `Tensor` will be close to an exact solution if `A` is well + conditioned. Otherwise closeness will vary. See class docstring for details. + + Given the blockwise `n + 1`-by-`n + 1` linear operator: + + op = [[A_00 0 ... 0 ... 0], + [A_10 A_11 ... 0 ... 0], + ... + [A_k0 A_k1 ... A_kk ... 0], + ... + [A_n0 A_n1 ... A_nk ... A_nn]] + + we find `x = op.solve(y)` by observing that + + `y_k = A_k0.matmul(x_0) + A_k1.matmul(x_1) + ... + A_kk.matmul(x_k)` + + and therefore + + `x_k = A_kk.solve(y_k - + A_k0.matmul(x_0) - ... - A_k(k-1).matmul(x_(k-1)))` + + where `x_k` and `y_k` are the `k`th blocks obtained by decomposing `x` + and `y` along their appropriate axes. + + We first solve `x_0 = A_00.solve(y_0)`. Proceeding inductively, we solve + for `x_k`, `k = 1..n`, given `x_0..x_(k-1)`. + + The adjoint case is solved similarly, beginning with + `x_n = A_nn.solve(y_n, adjoint=True)` and proceeding backwards. + + Examples: + + ```python + # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] + operator = LinearOperator(...) + operator.shape = [..., M, N] + + # Solve R > 0 linear systems for every member of the batch. + RHS = ... # shape [..., M, R] + + X = operator.solve(RHS) + # X[..., :, r] is the solution to the r'th linear system + # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r] + + operator.matmul(X) + ==> RHS + ``` + + Args: + rhs: `Tensor` with same `dtype` as this operator and compatible shape, + or a list of `Tensor`s. `Tensor`s are treated like a [batch] matrices + meaning for every set of leading dimensions, the last two dimensions + defines a matrix. + See class docstring for definition of compatibility. + adjoint: Python `bool`. 
If `True`, solve the system involving the adjoint + of this `LinearOperator`: `A^H X = rhs`. + adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H` + is the hermitian transpose (transposition and complex conjugation). + name: A name scope to use for ops added by this method. + + Returns: + `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`. + + Raises: + NotImplementedError: If `self.is_non_singular` or `is_square` is False. + """ + if self.is_non_singular is False: + raise NotImplementedError( + "Exact solve not implemented for an operator that is expected to " + "be singular.") + if self.is_square is False: + raise NotImplementedError( + "Exact solve not implemented for an operator that is expected to " + "not be square.") + if isinstance(rhs, linear_operator.LinearOperator): + left_operator = self.adjoint() if adjoint else self + right_operator = rhs.adjoint() if adjoint_arg else rhs + + if (right_operator.range_dimension is not None and + left_operator.domain_dimension is not None and + right_operator.range_dimension != left_operator.domain_dimension): + raise ValueError( + "Operators are incompatible. 
Expected `rhs` to have dimension" + " {} but got {}.".format( + left_operator.domain_dimension, right_operator.range_dimension)) + with self._name_scope(name): # pylint: disable=not-callable + return self._linop_solve(left_operator, right_operator) + + with self._name_scope(name): # pylint: disable=not-callable + block_dimensions = (self._block_domain_dimensions() if adjoint + else self._block_range_dimensions()) + arg_dim = -1 if adjoint_arg else -2 + blockwise_arg = linear_operator_util.arg_is_blockwise( + block_dimensions, rhs, arg_dim) + if blockwise_arg: + for i, block in enumerate(rhs): + if not isinstance(block, linear_operator.LinearOperator): + block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) + self._check_input_dtype(block) + block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim]) + rhs[i] = block + if adjoint_arg: + split_rhs = [linalg.adjoint(y) for y in rhs] + else: + split_rhs = rhs + + else: + rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch( + rhs, name="rhs" + ) + self._check_input_dtype(rhs) + op_dimension = (self.domain_dimension if adjoint + else self.range_dimension) + op_dimension.assert_is_compatible_with(rhs.shape[arg_dim]) + + rhs = linalg.adjoint(rhs) if adjoint_arg else rhs + split_rhs = linear_operator_util.split_arg_into_blocks( + self._block_domain_dimensions(), + self._block_domain_dimension_tensors, + rhs, axis=-2) + + solution_list = [] + if adjoint: + # For an adjoint blockwise lower-triangular linear operator, the system + # must be solved bottom to top. Iterate backwards over rows of the + # adjoint (i.e. columns of the non-adjoint operator). + for index in reversed(range(len(self.operators))): + y = split_rhs[index] + # Iterate top to bottom over the operators in the off-diagonal portion + # of the column-partition (i.e. 
row-partition of the adjoint), apply + # the operator to the respective block of the solution found in + # previous iterations, and subtract the result from the `rhs` block. + # For example,let `A`, `B`, and `D` be the linear operators in the top + # row-partition of the adjoint of + # `LinearOperatorBlockLowerTriangular([[A], [B, C], [D, E, F]])`, + # and `x_1` and `x_2` be blocks of the solution found in previous + # iterations of the outer loop. The following loop (when `index == 0`) + # expresses + # `Ax_0 + Bx_1 + Dx_2 = y_0` as `Ax_0 = y_0*`, where + # `y_0* = y_0 - Bx_1 - Dx_2`. + for j in reversed(range(index + 1, len(self.operators))): + y = y - self.operators[j][index].matmul( + solution_list[len(self.operators) - 1 - j], + adjoint=adjoint) + # Continuing the example above, solve `Ax_0 = y_0*` for `x_0`. + solution_list.append( + self._diagonal_operators[index].solve(y, adjoint=adjoint)) + solution_list.reverse() + else: + # Iterate top to bottom over the row-partitions. + for row, y in zip(self.operators, split_rhs): + # Iterate left to right over the operators in the off-diagonal portion + # of the row-partition, apply the operator to the block of the + # solution found in previous iterations, and subtract the result from + # the `rhs` block. For example, let `D`, `E`, and `F` be the linear + # operators in the bottom row-partition of + # `LinearOperatorBlockLowerTriangular([[A], [B, C], [D, E, F]])` and + # `x_0` and `x_1` be blocks of the solution found in previous + # iterations of the outer loop. The following loop + # (when `index == 2`), expresses + # `Dx_0 + Ex_1 + Fx_2 = y_2` as `Fx_2 = y_2*`, where + # `y_2* = y_2 - D_x0 - Ex_1`. + for i, operator in enumerate(row[:-1]): + y = y - operator.matmul(solution_list[i], adjoint=adjoint) + # Continuing the example above, solve `Fx_2 = y_2*` for `x_2`. 
+ solution_list.append(row[-1].solve(y, adjoint=adjoint)) + + if blockwise_arg: + return solution_list + + solution_list = linear_operator_util.broadcast_matrix_batch_dims( + solution_list) + return array_ops.concat(solution_list, axis=-2) + + def solvevec(self, rhs, adjoint=False, name="solve"): + """Solve single equation with best effort: `A X = rhs`. + + The returned `Tensor` will be close to an exact solution if `A` is well + conditioned. Otherwise closeness will vary. See class docstring for details. + + Examples: + + ```python + # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] + operator = LinearOperator(...) + operator.shape = [..., M, N] + + # Solve one linear system for every member of the batch. + RHS = ... # shape [..., M] + + X = operator.solvevec(RHS) + # X is the solution to the linear system + # sum_j A[..., :, j] X[..., j] = RHS[..., :] + + operator.matvec(X) + ==> RHS + ``` + + Args: + rhs: `Tensor` with same `dtype` as this operator, or list of `Tensor`s + (for blockwise operators). `Tensor`s are treated as [batch] vectors, + meaning for every set of leading dimensions, the last dimension defines + a vector. See class docstring for definition of compatibility regarding + batch dimensions. + adjoint: Python `bool`. If `True`, solve the system involving the adjoint + of this `LinearOperator`: `A^H X = rhs`. + name: A name scope to use for ops added by this method. + + Returns: + `Tensor` with shape `[...,N]` and same `dtype` as `rhs`. + + Raises: + NotImplementedError: If `self.is_non_singular` or `is_square` is False. 
+ """ + with self._name_scope(name): # pylint: disable=not-callable + block_dimensions = (self._block_domain_dimensions() if adjoint + else self._block_range_dimensions()) + if linear_operator_util.arg_is_blockwise(block_dimensions, rhs, -1): + for i, block in enumerate(rhs): + if not isinstance(block, linear_operator.LinearOperator): + block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block) + self._check_input_dtype(block) + block_dimensions[i].assert_is_compatible_with(block.shape[-1]) + rhs[i] = block + rhs_mat = [array_ops.expand_dims(block, axis=-1) for block in rhs] + solution_mat = self.solve(rhs_mat, adjoint=adjoint) + return [array_ops.squeeze(x, axis=-1) for x in solution_mat] + rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch( + rhs, name="rhs" + ) + self._check_input_dtype(rhs) + op_dimension = (self.domain_dimension if adjoint + else self.range_dimension) + op_dimension.assert_is_compatible_with(rhs.shape[-1]) + rhs_mat = array_ops.expand_dims(rhs, axis=-1) + solution_mat = self.solve(rhs_mat, adjoint=adjoint) + return array_ops.squeeze(solution_mat, axis=-1) + + def _diag_part(self): + diag_list = [] + for op in self._diagonal_operators: + # Extend the axis, since `broadcast_matrix_batch_dims` treats all but the + # final two dimensions as batch dimensions. 
+ diag_list.append(op.diag_part()[..., array_ops.newaxis]) + diag_list = linear_operator_util.broadcast_matrix_batch_dims(diag_list) + diagonal = array_ops.concat(diag_list, axis=-2) + return array_ops.squeeze(diagonal, axis=-1) + + def _trace(self): + result = self._diagonal_operators[0].trace() + for op in self._diagonal_operators[1:]: + result += op.trace() + return result + + def _to_dense(self): + num_cols = 0 + dense_rows = [] + flat_broadcast_operators = linear_operator_util.broadcast_matrix_batch_dims( + [op.to_dense() for row in self.operators for op in row]) # pylint: disable=g-complex-comprehension + broadcast_operators = [ + flat_broadcast_operators[i * (i + 1) // 2:(i + 1) * (i + 2) // 2] + for i in range(len(self.operators))] + for row_blocks in broadcast_operators: + batch_row_shape = array_ops.shape(row_blocks[0])[:-1] + num_cols += array_ops.shape(row_blocks[-1])[-1] + zeros_to_pad_after_shape = array_ops.concat( + [batch_row_shape, + [self.domain_dimension_tensor() - num_cols]], axis=-1) + zeros_to_pad_after = array_ops.zeros( + shape=zeros_to_pad_after_shape, dtype=self.dtype) + + row_blocks.append(zeros_to_pad_after) + dense_rows.append(array_ops.concat(row_blocks, axis=-1)) + + mat = array_ops.concat(dense_rows, axis=-2) + mat.set_shape(self.shape) + return mat + + def _assert_non_singular(self): + return control_flow_ops.group([ + op.assert_non_singular() for op in self._diagonal_operators]) + + def _eigvals(self): + eig_list = [] + for op in self._diagonal_operators: + # Extend the axis for broadcasting. + eig_list.append(op.eigvals()[..., array_ops.newaxis]) + eig_list = linear_operator_util.broadcast_matrix_batch_dims(eig_list) + eigs = array_ops.concat(eig_list, axis=-2) + return array_ops.squeeze(eigs, axis=-1) + + @property + def _composite_tensor_fields(self): + return ("operators",) + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + # None of the operators contribute to the matrix shape. 
+ return {"operators": nest.map_structure(lambda _: 0, self.operators)} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_circulant.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_circulant.py new file mode 100644 index 0000000000000000000000000000000000000000..969ac6507af9ac04cc5fdaca02d27c5628adadd7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_circulant.py @@ -0,0 +1,1551 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""`LinearOperator` coming from a [[nested] block] circulant matrix.""" + +import numpy as np + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.distributions import util as distribution_util +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.ops.linalg import property_hint_util +from tensorflow.python.ops.signal import fft_ops +from tensorflow.python.util.tf_export import tf_export + +__all__ = [ + "LinearOperatorCirculant", + "LinearOperatorCirculant2D", + "LinearOperatorCirculant3D", +] + +# Different FFT Ops will be used for different block depths. +_FFT_OP = {1: fft_ops.fft, 2: fft_ops.fft2d, 3: fft_ops.fft3d} +_IFFT_OP = {1: fft_ops.ifft, 2: fft_ops.ifft2d, 3: fft_ops.ifft3d} + + +def exponential_power_convolution_kernel( + grid_shape, + length_scale, + power=None, + divisor=None, + zero_inflation=None, +): + """Make an exponentiated convolution kernel. + + In signal processing, a [kernel] + (https://en.wikipedia.org/wiki/Kernel_(image_processing)) `h` can be convolved + with a signal `x` to filter its spectral content. + + This function makes a `d-dimensional` convolution kernel `h` of shape + `grid_shape = [N0, N1, ...]`. For `n` a multi-index with `n[i] < Ni / 2`, + + ```h[n] = exp{sum(|n / (length_scale * grid_shape)|**power) / divisor}.``` + + For other `n`, `h` is extended to be circularly symmetric. That is + + ```h[n0 % N0, ...] 
= h[(-n0) % N0, ...]``` + + Since `h` is circularly symmetric and real valued, `H = FFTd[h]` is the + spectrum of a symmetric (real) circulant operator `A`. + + #### Example uses + + ``` + # Matern one-half kernel, d=1. + # Will be positive definite without zero_inflation. + h = exponential_power_convolution_kernel( + grid_shape=[10], length_scale=[0.1], power=1) + A = LinearOperatorCirculant( + tf.signal.fft(tf.cast(h, tf.complex64)), + is_self_adjoint=True, is_positive_definite=True) + + # Gaussian RBF kernel, d=3. + # Needs zero_inflation since `length_scale` is long enough to cause aliasing. + h = exponential_power_convolution_kernel( + grid_shape=[10, 10, 10], length_scale=[0.1, 0.2, 0.2], power=2, + zero_inflation=0.15) + A = LinearOperatorCirculant3D( + tf.signal.fft3d(tf.cast(h, tf.complex64)), + is_self_adjoint=True, is_positive_definite=True) + ``` + + Args: + grid_shape: Length `d` (`d` in {1, 2, 3}) list-like of Python integers. The + shape of the grid on which the convolution kernel is defined. + length_scale: Length `d` `float` `Tensor`. The scale at which the kernel + decays in each direction, as a fraction of `grid_shape`. + power: Scalar `Tensor` of same `dtype` as `length_scale`, default `2`. + Higher (lower) `power` results in nearby points being more (less) + correlated, and far away points being less (more) correlated. + divisor: Scalar `Tensor` of same `dtype` as `length_scale`. The slope of + decay of `log(kernel)` in terms of fractional grid points, along each + axis, at `length_scale`, is `power/divisor`. By default, `divisor` is set + to `power`. This means, by default, `power=2` results in an exponentiated + quadratic (Gaussian) kernel, and `power=1` is a Matern one-half. + zero_inflation: Scalar `Tensor` of same `dtype` as `length_scale`, in + `[0, 1]`. Let `delta` be the Kronecker delta. That is, + `delta[0, ..., 0] = 1` and all other entries are `0`. 
Then + `zero_inflation` modifies the return value via + `h --> (1 - zero_inflation) * h + zero_inflation * delta`. This may be + needed to ensure a positive definite kernel, especially if `length_scale` + is large enough for aliasing and `power > 1`. + + Returns: + `Tensor` of shape `grid_shape` with same `dtype` as `length_scale`. + """ + nd = len(grid_shape) + + length_scale = tensor_conversion.convert_to_tensor_v2_with_dispatch( + length_scale, name="length_scale" + ) + dtype = length_scale.dtype + + power = 2. if power is None else power + power = tensor_conversion.convert_to_tensor_v2_with_dispatch( + power, name="power", dtype=dtype + ) + divisor = power if divisor is None else divisor + divisor = tensor_conversion.convert_to_tensor_v2_with_dispatch( + divisor, name="divisor", dtype=dtype + ) + + # With K = grid_shape[i], we implicitly assume the grid vertices along the + # ith dimension are at: + # 0 = 0 / (K - 1), 1 / (K - 1), 2 / (K - 1), ..., (K - 1) / (K - 1) = 1. + zero = math_ops.cast(0., dtype) + one = math_ops.cast(1., dtype) + ts = [math_ops.linspace(zero, one, num=n) for n in grid_shape] + + log_vals = [] + for i, x in enumerate(array_ops.meshgrid(*ts, indexing="ij")): + # midpoint[i] is the vertex just to the left of 1 / 2. + # ifftshift will shift this vertex to position 0. + midpoint = ts[i][math_ops.cast( + math_ops.floor(one / 2. * grid_shape[i]), dtypes.int32)] + log_vals.append(-(math_ops.abs( + (x - midpoint) / length_scale[i]))**power / divisor) + kernel = math_ops.exp( + fft_ops.ifftshift(sum(log_vals), axes=[-i for i in range(1, nd + 1)])) + + if zero_inflation: + # delta.shape = grid_shape, delta[0, 0, 0] = 1., all other entries are 0. + zero_inflation = tensor_conversion.convert_to_tensor_v2_with_dispatch( + zero_inflation, name="zero_inflation", dtype=dtype + ) + delta = array_ops.pad( + array_ops.reshape(one, [1] * nd), [[0, dim - 1] for dim in grid_shape]) + kernel = (1. 
- zero_inflation) * kernel + zero_inflation * delta + + return kernel + + +# TODO(langmore) Add transformations that create common spectrums, e.g. +# starting with the convolution kernel +# start with half a spectrum, and create a Hermitian one. +# common filters. +# TODO(langmore) Support rectangular Toeplitz matrices. +class _BaseLinearOperatorCirculant(linear_operator.LinearOperator): + """Base class for circulant operators. Not user facing. + + `LinearOperator` acting like a [batch] [[nested] block] circulant matrix. + """ + + def __init__(self, + spectrum: tensor.Tensor, + block_depth: int, + input_output_dtype=dtypes.complex64, + is_non_singular: bool = None, + is_self_adjoint: bool = None, + is_positive_definite: bool = None, + is_square: bool = True, + parameters=None, + name="LinearOperatorCirculant"): + r"""Initialize an `_BaseLinearOperatorCirculant`. + + Args: + spectrum: Shape `[B1,...,Bb] + N` `Tensor`, where `rank(N) in {1, 2, 3}`. + Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, + `complex128`. Type can be different than `input_output_dtype` + block_depth: Python integer, either 1, 2, or 3. Will be 1 for circulant, + 2 for block circulant, and 3 for nested block circulant. + input_output_dtype: `dtype` for input/output. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. If `spectrum` is real, this will always be true. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix\ + #Extension_for_non_symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + parameters: Python `dict` of parameters used to instantiate this + `LinearOperator`. 
+ name: A name to prepend to all ops created by this class. + + Raises: + ValueError: If `block_depth` is not an allowed value. + TypeError: If `spectrum` is not an allowed type. + """ + + allowed_block_depths = [1, 2, 3] + + self._name = name + + if block_depth not in allowed_block_depths: + raise ValueError( + f"Argument `block_depth` must be one of {allowed_block_depths}. " + f"Received: {block_depth}.") + self._block_depth = block_depth + + with ops.name_scope(name, values=[spectrum]): + self._spectrum = self._check_spectrum_and_return_tensor(spectrum) + + # Check and auto-set hints. + if not self.spectrum.dtype.is_complex: + if is_self_adjoint is False: + raise ValueError( + f"A real spectrum always corresponds to a self-adjoint operator. " + f"Expected argument `is_self_adjoint` to be True when " + f"`spectrum.dtype.is_complex` = True. " + f"Received: {is_self_adjoint}.") + is_self_adjoint = True + + if is_square is False: + raise ValueError( + f"A [[nested] block] circulant operator is always square. " + f"Expected argument `is_square` to be True. Received: {is_square}.") + is_square = True + + super(_BaseLinearOperatorCirculant, self).__init__( + dtype=dtypes.as_dtype(input_output_dtype), + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + def _check_spectrum_and_return_tensor(self, spectrum): + """Static check of spectrum. Then return `Tensor` version.""" + spectrum = linear_operator_util.convert_nonref_to_tensor(spectrum, + name="spectrum") + + if spectrum.shape.ndims is not None: + if spectrum.shape.ndims < self.block_depth: + raise ValueError( + f"Argument `spectrum` must have at least {self.block_depth} " + f"dimensions. Received: {spectrum}.") + return spectrum + + @property + def block_depth(self): + """Depth of recursively defined circulant blocks defining this `Operator`. 
+ + With `A` the dense representation of this `Operator`, + + `block_depth = 1` means `A` is symmetric circulant. For example, + + ``` + A = |w z y x| + |x w z y| + |y x w z| + |z y x w| + ``` + + `block_depth = 2` means `A` is block symmetric circulant with symmetric + circulant blocks. For example, with `W`, `X`, `Y`, `Z` symmetric circulant, + + ``` + A = |W Z Y X| + |X W Z Y| + |Y X W Z| + |Z Y X W| + ``` + + `block_depth = 3` means `A` is block symmetric circulant with block + symmetric circulant blocks. + + Returns: + Python `integer`. + """ + return self._block_depth + + def block_shape_tensor(self): + """Shape of the block dimensions of `self.spectrum`.""" + # If spectrum.shape = [s0, s1, s2], and block_depth = 2, + # block_shape = [s1, s2] + return self._block_shape_tensor() + + def _block_shape_tensor(self, spectrum_shape=None): + if self.block_shape.is_fully_defined(): + return linear_operator_util.shape_tensor( + self.block_shape.as_list(), name="block_shape") + spectrum_shape = ( + array_ops.shape(self.spectrum) + if spectrum_shape is None else spectrum_shape) + return spectrum_shape[-self.block_depth:] + + def _linop_adjoint(self) -> "_BaseLinearOperatorCirculant": + spectrum = self.spectrum + if spectrum.dtype.is_complex: + spectrum = math_ops.conj(spectrum) + + # Conjugating the spectrum is sufficient to get the adjoint. + return _BaseLinearOperatorCirculant( + spectrum=spectrum, + block_depth=self.block_depth, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _linop_inverse(self) -> "_BaseLinearOperatorCirculant": + return _BaseLinearOperatorCirculant( + spectrum=1. 
/ self.spectrum, + block_depth=self.block_depth, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True, + input_output_dtype=self.dtype) + + def _linop_matmul( + self, + left_operator: "_BaseLinearOperatorCirculant", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + if (not isinstance(right_operator, _BaseLinearOperatorCirculant) + or not isinstance(left_operator, type(right_operator))): + return super()._linop_matmul(left_operator, right_operator) + + return _BaseLinearOperatorCirculant( + spectrum=left_operator.spectrum * right_operator.spectrum, + block_depth=left_operator.block_depth, + is_non_singular=property_hint_util.combined_non_singular_hint( + left_operator, right_operator), + is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator), + is_positive_definite=( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator)), + is_square=True) + + def _linop_solve( + self, + left_operator: "_BaseLinearOperatorCirculant", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + if (not isinstance(right_operator, _BaseLinearOperatorCirculant) + or not isinstance(left_operator, type(right_operator))): + return super()._linop_solve(left_operator, right_operator) + + return _BaseLinearOperatorCirculant( + spectrum=right_operator.spectrum / left_operator.spectrum, + block_depth=left_operator.block_depth, + is_non_singular=property_hint_util.combined_non_singular_hint( + left_operator, right_operator), + is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator), + is_positive_definite=( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator)), + is_square=True) + + @property + def block_shape(self): + return 
self.spectrum.shape[-self.block_depth:] + + @property + def spectrum(self) -> tensor.Tensor: + return self._spectrum + + def _vectorize_then_blockify(self, matrix): + """Shape batch matrix to batch vector, then blockify trailing dimensions.""" + # Suppose + # matrix.shape = [m0, m1, m2, m3], + # and matrix is a matrix because the final two dimensions are matrix dims. + # self.block_depth = 2, + # self.block_shape = [b0, b1] (note b0 * b1 = m2). + # We will reshape matrix to + # [m3, m0, m1, b0, b1]. + + # Vectorize: Reshape to batch vector. + # [m0, m1, m2, m3] --> [m3, m0, m1, m2] + # This is called "vectorize" because we have taken the final two matrix dims + # and turned this into a size m3 batch of vectors. + vec = distribution_util.rotate_transpose(matrix, shift=1) + + # Blockify: Blockfy trailing dimensions. + # [m3, m0, m1, m2] --> [m3, m0, m1, b0, b1] + if (vec.shape.is_fully_defined() and + self.block_shape.is_fully_defined()): + # vec_leading_shape = [m3, m0, m1], + # the parts of vec that will not be blockified. + vec_leading_shape = vec.shape[:-1] + final_shape = vec_leading_shape.concatenate(self.block_shape) + else: + vec_leading_shape = array_ops.shape(vec)[:-1] + final_shape = array_ops.concat( + (vec_leading_shape, self.block_shape_tensor()), 0) + return array_ops.reshape(vec, final_shape) + + def _unblockify(self, x): + """Flatten the trailing block dimensions.""" + # Suppose + # x.shape = [v0, v1, v2, v3], + # self.block_depth = 2. + # Then + # leading shape = [v0, v1] + # block shape = [v2, v3]. + # We will reshape x to + # [v0, v1, v2*v3]. 
+ if x.shape.is_fully_defined(): + # x_shape = [v0, v1, v2, v3] + x_shape = x.shape.as_list() + # x_leading_shape = [v0, v1] + x_leading_shape = x_shape[:-self.block_depth] + # x_block_shape = [v2, v3] + x_block_shape = x_shape[-self.block_depth:] + # flat_shape = [v0, v1, v2*v3] + flat_shape = x_leading_shape + [np.prod(x_block_shape)] + else: + x_shape = array_ops.shape(x) + x_leading_shape = x_shape[:-self.block_depth] + x_block_shape = x_shape[-self.block_depth:] + flat_shape = array_ops.concat( + (x_leading_shape, [math_ops.reduce_prod(x_block_shape)]), 0) + return array_ops.reshape(x, flat_shape) + + def _unblockify_then_matricize(self, vec): + """Flatten the block dimensions then reshape to a batch matrix.""" + # Suppose + # vec.shape = [v0, v1, v2, v3], + # self.block_depth = 2. + # Then + # leading shape = [v0, v1] + # block shape = [v2, v3]. + # We will reshape vec to + # [v1, v2*v3, v0]. + + # Un-blockify: Flatten block dimensions. Reshape + # [v0, v1, v2, v3] --> [v0, v1, v2*v3]. + vec_flat = self._unblockify(vec) + + # Matricize: Reshape to batch matrix. + # [v0, v1, v2*v3] --> [v1, v2*v3, v0], + # representing a shape [v1] batch of [v2*v3, v0] matrices. + matrix = distribution_util.rotate_transpose(vec_flat, shift=-1) + return matrix + + def _fft(self, x): + """FFT along the last self.block_depth dimensions of x. + + Args: + x: `Tensor` with floating or complex `dtype`. + Should be in the form returned by self._vectorize_then_blockify. + + Returns: + `Tensor` with `dtype` `complex64`. + """ + x_complex = _to_complex(x) + return _FFT_OP[self.block_depth](x_complex) + + def _ifft(self, x): + """IFFT along the last self.block_depth dimensions of x. + + Args: + x: `Tensor` with floating or complex dtype. Should be in the form + returned by self._vectorize_then_blockify. + + Returns: + `Tensor` with `dtype` `complex64`. 
+ """ + x_complex = _to_complex(x) + return _IFFT_OP[self.block_depth](x_complex) + + def convolution_kernel(self, name="convolution_kernel"): + """Convolution kernel corresponding to `self.spectrum`. + + The `D` dimensional DFT of this kernel is the frequency domain spectrum of + this operator. + + Args: + name: A name to give this `Op`. + + Returns: + `Tensor` with `dtype` `self.dtype`. + """ + with self._name_scope(name): # pylint: disable=not-callable + h = self._ifft(_to_complex(self.spectrum)) + return math_ops.cast(h, self.dtype) + + def _shape(self): + s_shape = self._spectrum.shape + # Suppose spectrum.shape = [a, b, c, d] + # block_depth = 2 + # Then: + # batch_shape = [a, b] + # N = c*d + # and we want to return + # [a, b, c*d, c*d] + batch_shape = s_shape[:-self.block_depth] + # trailing_dims = [c, d] + trailing_dims = s_shape[-self.block_depth:] + if trailing_dims.is_fully_defined(): + n = np.prod(trailing_dims.as_list()) + else: + n = None + n_x_n = tensor_shape.TensorShape([n, n]) + return batch_shape.concatenate(n_x_n) + + def _shape_tensor(self, spectrum=None): + spectrum = self.spectrum if spectrum is None else spectrum + # See self.shape for explanation of steps + s_shape = array_ops.shape(spectrum) + batch_shape = s_shape[:-self.block_depth] + trailing_dims = s_shape[-self.block_depth:] + n = math_ops.reduce_prod(trailing_dims) + n_x_n = [n, n] + return array_ops.concat((batch_shape, n_x_n), 0) + + def assert_hermitian_spectrum(self, name="assert_hermitian_spectrum"): + """Returns an `Op` that asserts this operator has Hermitian spectrum. + + This operator corresponds to a real-valued matrix if and only if its + spectrum is Hermitian. + + Args: + name: A name to give this `Op`. + + Returns: + An `Op` that asserts this operator has Hermitian spectrum. + """ + eps = np.finfo(self.dtype.real_dtype.as_numpy_dtype).eps + with self._name_scope(name): # pylint: disable=not-callable + # Assume linear accumulation of error. 
+ max_err = eps * self.domain_dimension_tensor() + imag_convolution_kernel = math_ops.imag(self.convolution_kernel()) + return check_ops.assert_less( + math_ops.abs(imag_convolution_kernel), + max_err, + message="Spectrum was not Hermitian") + + def _assert_non_singular(self): + return linear_operator_util.assert_no_entries_with_modulus_zero( + self.spectrum, + message="Singular operator: Spectrum contained zero values.") + + def _assert_positive_definite(self): + # This operator has the action Ax = F^H D F x, + # where D is the diagonal matrix with self.spectrum on the diag. Therefore, + # = , + # Since F is bijective, the condition for positive definite is the same as + # for a diagonal matrix, i.e. real part of spectrum is positive. + message = ( + "Not positive definite: Real part of spectrum was not all positive.") + return check_ops.assert_positive( + math_ops.real(self.spectrum), message=message) + + def _assert_self_adjoint(self): + # Recall correspondence between symmetry and real transforms. See docstring + return linear_operator_util.assert_zero_imag_part( + self.spectrum, + message=( + "Not self-adjoint: The spectrum contained non-zero imaginary part." + )) + + def _broadcast_batch_dims(self, x, spectrum): + """Broadcast batch dims of batch matrix `x` and spectrum.""" + spectrum = tensor_conversion.convert_to_tensor_v2_with_dispatch( + spectrum, name="spectrum" + ) + # spectrum.shape = batch_shape + block_shape + # First make spectrum a batch matrix with + # spectrum.shape = batch_shape + [prod(block_shape), 1] + batch_shape = self._batch_shape_tensor( + shape=self._shape_tensor(spectrum=spectrum)) + spec_mat = array_ops.reshape( + spectrum, array_ops.concat((batch_shape, [-1, 1]), axis=0)) + # Second, broadcast, possibly requiring an addition of array of zeros. + x, spec_mat = linear_operator_util.broadcast_matrix_batch_dims((x, + spec_mat)) + # Third, put the block shape back into spectrum. 
+ x_batch_shape = array_ops.shape(x)[:-2] + spectrum_shape = array_ops.shape(spectrum) + spectrum = array_ops.reshape( + spec_mat, + array_ops.concat( + (x_batch_shape, + self._block_shape_tensor(spectrum_shape=spectrum_shape)), + axis=0)) + + return x, spectrum + + def _cond(self): + # Regardless of whether the operator is real, it is always diagonalizable by + # the Fourier basis F. I.e. A = F S F^H, with S a diagonal matrix + # containing the spectrum. We then have: + # A A^H = F SS^H F^H = F K F^H, + # where K = diag with squared absolute values of the spectrum. + # So in all cases, + abs_singular_values = math_ops.abs(self._unblockify(self.spectrum)) + return (math_ops.reduce_max(abs_singular_values, axis=-1) / + math_ops.reduce_min(abs_singular_values, axis=-1)) + + def _eigvals(self): + return tensor_conversion.convert_to_tensor_v2_with_dispatch( + self._unblockify(self.spectrum) + ) + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + x = linalg.adjoint(x) if adjoint_arg else x + # With F the matrix of a DFT, and F^{-1}, F^H the inverse and Hermitian + # transpose, one can show that F^{-1} = F^{H} is the IDFT matrix. Therefore + # matmul(x) = F^{-1} diag(spectrum) F x, + # = F^{H} diag(spectrum) F x, + # so that + # matmul(x, adjoint=True) = F^{H} diag(conj(spectrum)) F x. 
+ spectrum = _to_complex(self.spectrum) + if adjoint: + spectrum = math_ops.conj(spectrum) + + x = math_ops.cast(x, spectrum.dtype) + + x, spectrum = self._broadcast_batch_dims(x, spectrum) + + x_vb = self._vectorize_then_blockify(x) + fft_x_vb = self._fft(x_vb) + block_vector_result = self._ifft(spectrum * fft_x_vb) + y = self._unblockify_then_matricize(block_vector_result) + + return math_ops.cast(y, self.dtype) + + def _determinant(self): + axis = [-(i + 1) for i in range(self.block_depth)] + det = math_ops.reduce_prod(self.spectrum, axis=axis) + return math_ops.cast(det, self.dtype) + + def _log_abs_determinant(self): + axis = [-(i + 1) for i in range(self.block_depth)] + lad = math_ops.reduce_sum( + math_ops.log(math_ops.abs(self.spectrum)), axis=axis) + return math_ops.cast(lad, self.dtype) + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + rhs = linalg.adjoint(rhs) if adjoint_arg else rhs + spectrum = _to_complex(self.spectrum) + if adjoint: + spectrum = math_ops.conj(spectrum) + + rhs, spectrum = self._broadcast_batch_dims(rhs, spectrum) + + rhs_vb = self._vectorize_then_blockify(rhs) + fft_rhs_vb = self._fft(rhs_vb) + solution_vb = self._ifft(fft_rhs_vb / spectrum) + x = self._unblockify_then_matricize(solution_vb) + return math_ops.cast(x, self.dtype) + + def _diag_part(self): + # Get ones in shape of diag, which is [B1,...,Bb, N] + # Also get the size of the diag, "N". + if self.shape.is_fully_defined(): + diag_shape = self.shape[:-1] + diag_size = self.domain_dimension.value + else: + diag_shape = self.shape_tensor()[:-1] + diag_size = self.domain_dimension_tensor() + ones_diag = array_ops.ones(diag_shape, dtype=self.dtype) + + # As proved in comments in self._trace, the value on the diag is constant, + # repeated N times. This value is the trace divided by N. 
+ + # The handling of self.shape = (0, 0) is tricky, and is the reason we choose + # to compute trace and use that to compute diag_part, rather than computing + # the value on the diagonal ("diag_value") directly. Both result in a 0/0, + # but in different places, and the current method gives the right result in + # the end. + + # Here, if self.shape = (0, 0), then self.trace() = 0., and then + # diag_value = 0. / 0. = NaN. + diag_value = self.trace() / math_ops.cast(diag_size, self.dtype) + + # If self.shape = (0, 0), then ones_diag = [] (empty tensor), and then + # the following line is NaN * [] = [], as needed. + return diag_value[..., array_ops.newaxis] * ones_diag + + def _trace(self): + # The diagonal of the [[nested] block] circulant operator is the mean of + # the spectrum. + # Proof: For the [0,...,0] element, this follows from the IDFT formula. + # Then the result follows since all diagonal elements are the same. + + # Therefore, the trace is the sum of the spectrum. + + # Get shape of diag along with the axis over which to reduce the spectrum. + # We will reduce the spectrum over all block indices. + if self.spectrum.shape.is_fully_defined(): + spec_rank = self.spectrum.shape.ndims + axis = np.arange(spec_rank - self.block_depth, spec_rank, dtype=np.int32) + else: + spec_rank = array_ops.rank(self.spectrum) + axis = math_ops.range(spec_rank - self.block_depth, spec_rank) + + # Real diag part "re_d". + # Suppose spectrum.shape = [B1,...,Bb, N1, N2] + # self.shape = [B1,...,Bb, N, N], with N1 * N2 = N. + # re_d_value.shape = [B1,...,Bb] + re_d_value = math_ops.reduce_sum(math_ops.real(self.spectrum), axis=axis) + + if not self.dtype.is_complex: + return math_ops.cast(re_d_value, self.dtype) + + # Imaginary part, "im_d". 
+ if self.is_self_adjoint: + im_d_value = array_ops.zeros_like(re_d_value) + else: + im_d_value = math_ops.reduce_sum(math_ops.imag(self.spectrum), axis=axis) + + return math_ops.cast(math_ops.complex(re_d_value, im_d_value), self.dtype) + + @property + def _composite_tensor_fields(self): + return ("spectrum", "input_output_dtype") + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"spectrum": self.block_depth} + + +@tf_export("linalg.LinearOperatorCirculant") +@linear_operator.make_composite_tensor +class LinearOperatorCirculant(_BaseLinearOperatorCirculant): + """`LinearOperator` acting like a circulant matrix. + + This operator acts like a circulant matrix `A` with + shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `N x N` matrix. This matrix `A` is not materialized, but for + purposes of broadcasting this shape will be relevant. + + #### Description in terms of circulant matrices + + Circulant means the entries of `A` are generated by a single vector, the + convolution kernel `h`: `A_{mn} := h_{m-n mod N}`. With `h = [w, x, y, z]`, + + ``` + A = |w z y x| + |x w z y| + |y x w z| + |z y x w| + ``` + + This means that the result of matrix multiplication `v = Au` has `Lth` column + given circular convolution between `h` with the `Lth` column of `u`. + + #### Description in terms of the frequency spectrum + + There is an equivalent description in terms of the [batch] spectrum `H` and + Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch + dimensions. 
Define the discrete Fourier transform (DFT) and its inverse by + + ``` + DFT[ h[n] ] = H[k] := sum_{n = 0}^{N - 1} h_n e^{-i 2pi k n / N} + IDFT[ H[k] ] = h[n] = N^{-1} sum_{k = 0}^{N - 1} H_k e^{i 2pi k n / N} + ``` + + From these definitions, we see that + + ``` + H[0] = sum_{n = 0}^{N - 1} h_n + H[1] = "the first positive frequency" + H[N - 1] = "the first negative frequency" + ``` + + Loosely speaking, with `*` element-wise multiplication, matrix multiplication + is equal to the action of a Fourier multiplier: `A u = IDFT[ H * DFT[u] ]`. + Precisely speaking, given `[N, R]` matrix `u`, let `DFT[u]` be the `[N, R]` + matrix with `rth` column equal to the DFT of the `rth` column of `u`. + Define the `IDFT` similarly. + Matrix multiplication may be expressed columnwise: + + ```(A u)_r = IDFT[ H * (DFT[u])_r ]``` + + #### Operator properties deduced from the spectrum. + + Letting `U` be the `kth` Euclidean basis vector, and `U = IDFT[u]`. + The above formulas show that`A U = H_k * U`. We conclude that the elements + of `H` are the eigenvalues of this operator. Therefore + + * This operator is positive definite if and only if `Real{H} > 0`. + + A general property of Fourier transforms is the correspondence between + Hermitian functions and real valued transforms. + + Suppose `H.shape = [B1,...,Bb, N]`. We say that `H` is a Hermitian spectrum + if, with `%` meaning modulus division, + + ```H[..., n % N] = ComplexConjugate[ H[..., (-n) % N] ]``` + + * This operator corresponds to a real matrix if and only if `H` is Hermitian. + * This operator is self-adjoint if and only if `H` is real. + + See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer. 
+ + #### Example of a self-adjoint positive definite operator + + ```python + # spectrum is real ==> operator is self-adjoint + # spectrum is positive ==> operator is positive definite + spectrum = [6., 4, 2] + + operator = LinearOperatorCirculant(spectrum) + + # IFFT[spectrum] + operator.convolution_kernel() + ==> [4 + 0j, 1 + 0.58j, 1 - 0.58j] + + operator.to_dense() + ==> [[4 + 0.0j, 1 - 0.6j, 1 + 0.6j], + [1 + 0.6j, 4 + 0.0j, 1 - 0.6j], + [1 - 0.6j, 1 + 0.6j, 4 + 0.0j]] + ``` + + #### Example of defining in terms of a real convolution kernel + + ```python + # convolution_kernel is real ==> spectrum is Hermitian. + convolution_kernel = [1., 2., 1.]] + spectrum = tf.signal.fft(tf.cast(convolution_kernel, tf.complex64)) + + # spectrum is Hermitian ==> operator is real. + # spectrum is shape [3] ==> operator is shape [3, 3] + # We force the input/output type to be real, which allows this to operate + # like a real matrix. + operator = LinearOperatorCirculant(spectrum, input_output_dtype=tf.float32) + + operator.to_dense() + ==> [[ 1, 1, 2], + [ 2, 1, 1], + [ 1, 2, 1]] + ``` + + #### Example of Hermitian spectrum + + ```python + # spectrum is shape [3] ==> operator is shape [3, 3] + # spectrum is Hermitian ==> operator is real. + spectrum = [1, 1j, -1j] + + operator = LinearOperatorCirculant(spectrum) + + operator.to_dense() + ==> [[ 0.33 + 0j, 0.91 + 0j, -0.24 + 0j], + [-0.24 + 0j, 0.33 + 0j, 0.91 + 0j], + [ 0.91 + 0j, -0.24 + 0j, 0.33 + 0j] + ``` + + #### Example of forcing real `dtype` when spectrum is Hermitian + + ```python + # spectrum is shape [4] ==> operator is shape [4, 4] + # spectrum is real ==> operator is self-adjoint + # spectrum is Hermitian ==> operator is real + # spectrum has positive real part ==> operator is positive-definite. + spectrum = [6., 4, 2, 4] + + # Force the input dtype to be float32. + # Cast the output to float32. This is fine because the operator will be + # real due to Hermitian spectrum. 
+ operator = LinearOperatorCirculant(spectrum, input_output_dtype=tf.float32) + + operator.shape + ==> [4, 4] + + operator.to_dense() + ==> [[4, 1, 0, 1], + [1, 4, 1, 0], + [0, 1, 4, 1], + [1, 0, 1, 4]] + + # convolution_kernel = tf.signal.ifft(spectrum) + operator.convolution_kernel() + ==> [4, 1, 0, 1] + ``` + + #### Performance + + Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`, + and `x.shape = [N, R]`. Then + + * `operator.matmul(x)` is `O(R*N*Log[N])` + * `operator.solve(x)` is `O(R*N*Log[N])` + * `operator.determinant()` involves a size `N` `reduce_prod`. + + If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and + `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + + References: + Toeplitz and Circulant Matrices - A Review: + [Gray, 2006](https://www.nowpublishers.com/article/Details/CIT-006) + ([pdf](https://ee.stanford.edu/~gray/toeplitz.pdf)) + """ + + def __init__(self, + spectrum: tensor.Tensor, + input_output_dtype=dtypes.complex64, + is_non_singular: bool = None, + is_self_adjoint: bool = None, + is_positive_definite: bool = None, + is_square: bool = True, + name="LinearOperatorCirculant"): + r"""Initialize an `LinearOperatorCirculant`. + + This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]` + by providing `spectrum`, a `[B1,...,Bb, N]` `Tensor`. 
+ + If `input_output_dtype = DTYPE`: + + * Arguments to methods such as `matmul` or `solve` must be `DTYPE`. + * Values returned by all methods, such as `matmul` or `determinant` will be + cast to `DTYPE`. + + Note that if the spectrum is not Hermitian, then this operator corresponds + to a complex matrix with non-zero imaginary part. In this case, setting + `input_output_dtype` to a real type will forcibly cast the output to be + real, resulting in incorrect results! + + If on the other hand the spectrum is Hermitian, then this operator + corresponds to a real-valued matrix, and setting `input_output_dtype` to + a real type is fine. + + Args: + spectrum: Shape `[B1,...,Bb, N]` `Tensor`. Allowed dtypes: `float16`, + `float32`, `float64`, `complex64`, `complex128`. Type can be different + than `input_output_dtype` + input_output_dtype: `dtype` for input/output. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. If `spectrum` is real, this will always be true. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix\ + #Extension_for_non_symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name to prepend to all ops created by this class. 
+ """ + parameters = dict( + spectrum=spectrum, + input_output_dtype=input_output_dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + super(LinearOperatorCirculant, self).__init__( + spectrum, + block_depth=1, + input_output_dtype=input_output_dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + def _linop_adjoint(self) -> "LinearOperatorCirculant": + spectrum = self.spectrum + if spectrum.dtype.is_complex: + spectrum = math_ops.conj(spectrum) + + # Conjugating the spectrum is sufficient to get the adjoint. + return LinearOperatorCirculant( + spectrum=spectrum, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _linop_inverse(self) -> "LinearOperatorCirculant": + return LinearOperatorCirculant( + spectrum=1. 
/ self.spectrum, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True, + input_output_dtype=self.dtype) + + def _linop_matmul( + self, + left_operator: "LinearOperatorCirculant", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + if not isinstance( + right_operator, LinearOperatorCirculant + ) or not isinstance(left_operator, type(right_operator)): + return super()._linop_matmul(left_operator, right_operator) + + return LinearOperatorCirculant( + spectrum=left_operator.spectrum * right_operator.spectrum, + is_non_singular=property_hint_util.combined_non_singular_hint( + left_operator, right_operator + ), + is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator + ), + is_positive_definite=( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator + ) + ), + is_square=True, + ) + + def _linop_solve( + self, + left_operator: "LinearOperatorCirculant", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + if not isinstance(right_operator, LinearOperatorCirculant): + return super()._linop_solve(left_operator, right_operator) + + return LinearOperatorCirculant( + spectrum=right_operator.spectrum / left_operator.spectrum, + is_non_singular=property_hint_util.combined_non_singular_hint( + left_operator, right_operator), + is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator), + is_positive_definite=( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator)), + is_square=True) + + +@tf_export("linalg.LinearOperatorCirculant2D") +@linear_operator.make_composite_tensor +class LinearOperatorCirculant2D(_BaseLinearOperatorCirculant): + """`LinearOperator` acting like a block circulant matrix. 
+ + This operator acts like a block circulant matrix `A` with + shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `N x N` matrix. This matrix `A` is not materialized, but for + purposes of broadcasting this shape will be relevant. + + #### Description in terms of block circulant matrices + + If `A` is block circulant, with block sizes `N0, N1` (`N0 * N1 = N`): + `A` has a block circulant structure, composed of `N0 x N0` blocks, with each + block an `N1 x N1` circulant matrix. + + For example, with `W`, `X`, `Y`, `Z` each circulant, + + ``` + A = |W Z Y X| + |X W Z Y| + |Y X W Z| + |Z Y X W| + ``` + + Note that `A` itself will not in general be circulant. + + #### Description in terms of the frequency spectrum + + There is an equivalent description in terms of the [batch] spectrum `H` and + Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch + dimensions. + + If `H.shape = [N0, N1]`, (`N0 * N1 = N`): + Loosely speaking, matrix multiplication is equal to the action of a + Fourier multiplier: `A u = IDFT2[ H DFT2[u] ]`. + Precisely speaking, given `[N, R]` matrix `u`, let `DFT2[u]` be the + `[N0, N1, R]` `Tensor` defined by re-shaping `u` to `[N0, N1, R]` and taking + a two dimensional DFT across the first two dimensions. Let `IDFT2` be the + inverse of `DFT2`. Matrix multiplication may be expressed columnwise: + + ```(A u)_r = IDFT2[ H * (DFT2[u])_r ]``` + + #### Operator properties deduced from the spectrum. + + * This operator is positive definite if and only if `Real{H} > 0`. + + A general property of Fourier transforms is the correspondence between + Hermitian functions and real valued transforms. + + Suppose `H.shape = [B1,...,Bb, N0, N1]`, we say that `H` is a Hermitian + spectrum if, with `%` indicating modulus division, + + ``` + H[..., n0 % N0, n1 % N1] = ComplexConjugate[ H[..., (-n0) % N0, (-n1) % N1 ]. 
+ ``` + + * This operator corresponds to a real matrix if and only if `H` is Hermitian. + * This operator is self-adjoint if and only if `H` is real. + + See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer. + + ### Example of a self-adjoint positive definite operator + + ```python + # spectrum is real ==> operator is self-adjoint + # spectrum is positive ==> operator is positive definite + spectrum = [[1., 2., 3.], + [4., 5., 6.], + [7., 8., 9.]] + + operator = LinearOperatorCirculant2D(spectrum) + + # IFFT[spectrum] + operator.convolution_kernel() + ==> [[5.0+0.0j, -0.5-.3j, -0.5+.3j], + [-1.5-.9j, 0, 0], + [-1.5+.9j, 0, 0]] + + operator.to_dense() + ==> Complex self adjoint 9 x 9 matrix. + ``` + + #### Example of defining in terms of a real convolution kernel, + + ```python + # convolution_kernel is real ==> spectrum is Hermitian. + convolution_kernel = [[1., 2., 1.], [5., -1., 1.]] + spectrum = tf.signal.fft2d(tf.cast(convolution_kernel, tf.complex64)) + + # spectrum is shape [2, 3] ==> operator is shape [6, 6] + # spectrum is Hermitian ==> operator is real. + operator = LinearOperatorCirculant2D(spectrum, input_output_dtype=tf.float32) + ``` + + #### Performance + + Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`, + and `x.shape = [N, R]`. Then + + * `operator.matmul(x)` is `O(R*N*Log[N])` + * `operator.solve(x)` is `O(R*N*Log[N])` + * `operator.determinant()` involves a size `N` `reduce_prod`. + + If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and + `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. 
For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + spectrum: tensor.Tensor, + input_output_dtype=dtypes.complex64, + is_non_singular: bool = None, + is_self_adjoint: bool = None, + is_positive_definite: bool = None, + is_square: bool = True, + name="LinearOperatorCirculant2D"): + r"""Initialize an `LinearOperatorCirculant2D`. + + This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]` + by providing `spectrum`, a `[B1,...,Bb, N0, N1]` `Tensor` with `N0*N1 = N`. + + If `input_output_dtype = DTYPE`: + + * Arguments to methods such as `matmul` or `solve` must be `DTYPE`. + * Values returned by all methods, such as `matmul` or `determinant` will be + cast to `DTYPE`. + + Note that if the spectrum is not Hermitian, then this operator corresponds + to a complex matrix with non-zero imaginary part. In this case, setting + `input_output_dtype` to a real type will forcibly cast the output to be + real, resulting in incorrect results! + + If on the other hand the spectrum is Hermitian, then this operator + corresponds to a real-valued matrix, and setting `input_output_dtype` to + a real type is fine. + + Args: + spectrum: Shape `[B1,...,Bb, N0, N1]` `Tensor`. Allowed dtypes: + `float16`, `float32`, `float64`, `complex64`, `complex128`. + Type can be different than `input_output_dtype` + input_output_dtype: `dtype` for input/output. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. If `spectrum` is real, this will always be true. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. 
Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix\ + #Extension_for_non_symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name to prepend to all ops created by this class. + """ + parameters = dict( + spectrum=spectrum, + input_output_dtype=input_output_dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + super(LinearOperatorCirculant2D, self).__init__( + spectrum, + block_depth=2, + input_output_dtype=input_output_dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + def _linop_adjoint(self) -> "LinearOperatorCirculant2D": + spectrum = self.spectrum + if spectrum.dtype.is_complex: + spectrum = math_ops.conj(spectrum) + + # Conjugating the spectrum is sufficient to get the adjoint. + return LinearOperatorCirculant2D( + spectrum=spectrum, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _linop_inverse(self) -> "LinearOperatorCirculant2D": + return LinearOperatorCirculant2D( + spectrum=1. 
/ self.spectrum, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True, + input_output_dtype=self.dtype) + + def _linop_matmul( + self, + left_operator: "LinearOperatorCirculant2D", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + if not isinstance( + right_operator, LinearOperatorCirculant2D + ) or not isinstance(left_operator, type(right_operator)): + return super()._linop_matmul(left_operator, right_operator) + + return LinearOperatorCirculant2D( + spectrum=left_operator.spectrum * right_operator.spectrum, + is_non_singular=property_hint_util.combined_non_singular_hint( + left_operator, right_operator + ), + is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator + ), + is_positive_definite=( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator + ) + ), + is_square=True, + ) + + def _linop_solve( + self, + left_operator: "LinearOperatorCirculant2D", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + if not isinstance(right_operator, LinearOperatorCirculant2D): + return super()._linop_solve(left_operator, right_operator) + + return LinearOperatorCirculant2D( + spectrum=right_operator.spectrum / left_operator.spectrum, + is_non_singular=property_hint_util.combined_non_singular_hint( + left_operator, right_operator), + is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator), + is_positive_definite=( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator)), + is_square=True) + + +@tf_export("linalg.LinearOperatorCirculant3D") +@linear_operator.make_composite_tensor +class LinearOperatorCirculant3D(_BaseLinearOperatorCirculant): + """`LinearOperator` acting like a nested block circulant matrix. 
+ + This operator acts like a block circulant matrix `A` with + shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `N x N` matrix. This matrix `A` is not materialized, but for + purposes of broadcasting this shape will be relevant. + + #### Description in terms of block circulant matrices + + If `A` is nested block circulant, with block sizes `N0, N1, N2` + (`N0 * N1 * N2 = N`): + `A` has a block structure, composed of `N0 x N0` blocks, with each + block an `N1 x N1` block circulant matrix. + + For example, with `W`, `X`, `Y`, `Z` each block circulant, + + ``` + A = |W Z Y X| + |X W Z Y| + |Y X W Z| + |Z Y X W| + ``` + + Note that `A` itself will not in general be circulant. + + #### Description in terms of the frequency spectrum + + There is an equivalent description in terms of the [batch] spectrum `H` and + Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch + dimensions. + + If `H.shape = [N0, N1, N2]`, (`N0 * N1 * N2 = N`): + Loosely speaking, matrix multiplication is equal to the action of a + Fourier multiplier: `A u = IDFT3[ H DFT3[u] ]`. + Precisely speaking, given `[N, R]` matrix `u`, let `DFT3[u]` be the + `[N0, N1, N2, R]` `Tensor` defined by re-shaping `u` to `[N0, N1, N2, R]` and + taking a three dimensional DFT across the first three dimensions. Let `IDFT3` + be the inverse of `DFT3`. Matrix multiplication may be expressed columnwise: + + ```(A u)_r = IDFT3[ H * (DFT3[u])_r ]``` + + #### Operator properties deduced from the spectrum. + + * This operator is positive definite if and only if `Real{H} > 0`. + + A general property of Fourier transforms is the correspondence between + Hermitian functions and real valued transforms. 
+ + Suppose `H.shape = [B1,...,Bb, N0, N1, N2]`, we say that `H` is a Hermitian + spectrum if, with `%` meaning modulus division, + + ``` + H[..., n0 % N0, n1 % N1, n2 % N2] + = ComplexConjugate[ H[..., (-n0) % N0, (-n1) % N1, (-n2) % N2] ]. + ``` + + * This operator corresponds to a real matrix if and only if `H` is Hermitian. + * This operator is self-adjoint if and only if `H` is real. + + See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer. + + ### Examples + + See `LinearOperatorCirculant` and `LinearOperatorCirculant2D` for examples. + + #### Performance + + Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`, + and `x.shape = [N, R]`. Then + + * `operator.matmul(x)` is `O(R*N*Log[N])` + * `operator.solve(x)` is `O(R*N*Log[N])` + * `operator.determinant()` involves a size `N` `reduce_prod`. + + If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and + `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + spectrum: tensor.Tensor, + input_output_dtype=dtypes.complex64, + is_non_singular: bool = None, + is_self_adjoint: bool = None, + is_positive_definite: bool = None, + is_square: bool = True, + name="LinearOperatorCirculant3D"): + """Initialize an `LinearOperatorCirculant`. 
+ + This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]` + by providing `spectrum`, a `[B1,...,Bb, N0, N1, N2]` `Tensor` + with `N0*N1*N2 = N`. + + If `input_output_dtype = DTYPE`: + + * Arguments to methods such as `matmul` or `solve` must be `DTYPE`. + * Values returned by all methods, such as `matmul` or `determinant` will be + cast to `DTYPE`. + + Note that if the spectrum is not Hermitian, then this operator corresponds + to a complex matrix with non-zero imaginary part. In this case, setting + `input_output_dtype` to a real type will forcibly cast the output to be + real, resulting in incorrect results! + + If on the other hand the spectrum is Hermitian, then this operator + corresponds to a real-valued matrix, and setting `input_output_dtype` to + a real type is fine. + + Args: + spectrum: Shape `[B1,...,Bb, N0, N1, N2]` `Tensor`. Allowed dtypes: + `float16`, `float32`, `float64`, `complex64`, `complex128`. + Type can be different than `input_output_dtype` + input_output_dtype: `dtype` for input/output. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. If `spectrum` is real, this will always be true. + is_positive_definite: Expect that this operator is positive definite, + meaning the real part of all eigenvalues is positive. We do not require + the operator to be self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix + #Extension_for_non_symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name to prepend to all ops created by this class. 
+ """ + parameters = dict( + spectrum=spectrum, + input_output_dtype=input_output_dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + super(LinearOperatorCirculant3D, self).__init__( + spectrum, + block_depth=3, + input_output_dtype=input_output_dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + def _linop_adjoint(self) -> "LinearOperatorCirculant3D": + spectrum = self.spectrum + if spectrum.dtype.is_complex: + spectrum = math_ops.conj(spectrum) + + # Conjugating the spectrum is sufficient to get the adjoint. + return LinearOperatorCirculant3D( + spectrum=spectrum, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _linop_inverse(self) -> "LinearOperatorCirculant3D": + return LinearOperatorCirculant3D( + spectrum=1. 
/ self.spectrum, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True, + input_output_dtype=self.dtype) + + def _linop_matmul( + self, + left_operator: "LinearOperatorCirculant3D", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + if not isinstance( + right_operator, LinearOperatorCirculant3D + ) or not isinstance(left_operator, type(right_operator)): + return super()._linop_matmul(left_operator, right_operator) + + return LinearOperatorCirculant3D( + spectrum=left_operator.spectrum * right_operator.spectrum, + is_non_singular=property_hint_util.combined_non_singular_hint( + left_operator, right_operator + ), + is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator + ), + is_positive_definite=( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator + ) + ), + is_square=True, + ) + + def _linop_solve( + self, + left_operator: "LinearOperatorCirculant3D", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + if not isinstance(right_operator, LinearOperatorCirculant3D): + return super()._linop_solve(left_operator, right_operator) + + return LinearOperatorCirculant3D( + spectrum=right_operator.spectrum / left_operator.spectrum, + is_non_singular=property_hint_util.combined_non_singular_hint( + left_operator, right_operator), + is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator), + is_positive_definite=( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator)), + is_square=True) + + +def _to_complex(x): + if x.dtype.is_complex: + return x + dtype = dtypes.complex64 + + if x.dtype == dtypes.float64: + dtype = dtypes.complex128 + return math_ops.cast(x, dtype) diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_composition.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_composition.py new file mode 100644 index 0000000000000000000000000000000000000000..f81a17ae4d5717f995c69a272e405360a8025863 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_composition.py @@ -0,0 +1,404 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Composes one or more `LinearOperators`.""" + +from tensorflow.python.framework import common_shapes +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_lower_triangular +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.util.tf_export import tf_export + +__all__ = ["LinearOperatorComposition"] + + +@tf_export("linalg.LinearOperatorComposition") +@linear_operator.make_composite_tensor +class LinearOperatorComposition(linear_operator.LinearOperator): + """Composes one or more `LinearOperators`. + + This operator composes one or more linear operators `[op1,...,opJ]`, + building a new `LinearOperator` with action defined by: + + ``` + op_composed(x) := op1(op2(...(opJ(x)...)) + ``` + + If `opj` acts like [batch] matrix `Aj`, then `op_composed` acts like the + [batch] matrix formed with the multiplication `A1 A2...AJ`. + + If `opj` has shape `batch_shape_j + [M_j, N_j]`, then we must have + `N_j = M_{j+1}`, in which case the composed operator has shape equal to + `broadcast_batch_shape + [M_1, N_J]`, where `broadcast_batch_shape` is the + mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate + batch shapes broadcast. Even if the composed shape is well defined, the + composed operator's methods may fail due to lack of broadcasting ability in + the defining operators' methods. 
+ + ```python + # Create a 2 x 2 linear operator composed of two 2 x 2 operators. + operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) + operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]]) + operator = LinearOperatorComposition([operator_1, operator_2]) + + operator.to_dense() + ==> [[1., 2.] + [3., 4.]] + + operator.shape + ==> [2, 2] + + operator.log_abs_determinant() + ==> scalar Tensor + + x = ... Shape [2, 4] Tensor + operator.matmul(x) + ==> Shape [2, 4] Tensor + + # Create a [2, 3] batch of 4 x 5 linear operators. + matrix_45 = tf.random.normal(shape=[2, 3, 4, 5]) + operator_45 = LinearOperatorFullMatrix(matrix) + + # Create a [2, 3] batch of 5 x 6 linear operators. + matrix_56 = tf.random.normal(shape=[2, 3, 5, 6]) + operator_56 = LinearOperatorFullMatrix(matrix_56) + + # Compose to create a [2, 3] batch of 4 x 6 operators. + operator_46 = LinearOperatorComposition([operator_45, operator_56]) + + # Create a shape [2, 3, 6, 2] vector. + x = tf.random.normal(shape=[2, 3, 6, 2]) + operator.matmul(x) + ==> Shape [2, 3, 4, 2] Tensor + ``` + + #### Performance + + The performance of `LinearOperatorComposition` on any operation is equal to + the sum of the individual operators' operations. + + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. 
+ """ + + def __init__(self, + operators, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name=None): + r"""Initialize a `LinearOperatorComposition`. + + `LinearOperatorComposition` is initialized with a list of operators + `[op_1,...,op_J]`. For the `matmul` method to be well defined, the + composition `op_i.matmul(op_{i+1}(x))` must be defined. Other methods have + similar constraints. + + Args: + operators: Iterable of `LinearOperator` objects, each with + the same `dtype` and composable shape. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. Default is the individual + operators names joined with `_o_`. + + Raises: + TypeError: If all operators do not have the same `dtype`. + ValueError: If `operators` is empty. + """ + parameters = dict( + operators=operators, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name) + + # Validate operators. + check_ops.assert_proper_iterable(operators) + operators = list(operators) + if not operators: + raise ValueError( + "Expected a non-empty list of operators. Found: %s" % operators) + self._operators = operators + + # Validate dtype. 
+ dtype = operators[0].dtype + for operator in operators: + if operator.dtype != dtype: + name_type = (str((o.name, o.dtype)) for o in operators) + raise TypeError( + "Expected all operators to have the same dtype. Found %s" + % " ".join(name_type)) + + # Auto-set and check hints. + if all(operator.is_non_singular for operator in operators): + if is_non_singular is False: # pylint:disable=g-bool-id-comparison + raise ValueError( + "The composition of non-singular operators is always non-singular.") + is_non_singular = True + + if _composition_must_be_self_adjoint(operators): + if is_self_adjoint is False: # pylint:disable=g-bool-id-comparison + raise ValueError( + "The composition was determined to be self-adjoint but user " + "provided incorrect `False` hint.") + is_self_adjoint = True + + if linear_operator_util.is_aat_form(operators): + if is_square is False: # pylint:disable=g-bool-id-comparison + raise ValueError( + "The composition was determined have the form " + "A @ A.H, hence it must be square. The user " + "provided an incorrect `False` hint.") + is_square = True + + if linear_operator_util.is_aat_form(operators) and is_non_singular: + if is_positive_definite is False: # pylint:disable=g-bool-id-comparison + raise ValueError( + "The composition was determined to be non-singular and have the " + "form A @ A.H, hence it must be positive-definite. The user " + "provided an incorrect `False` hint.") + is_positive_definite = True + + # Initialization. + + if name is None: + name = "_o_".join(operator.name for operator in operators) + with ops.name_scope(name): + super(LinearOperatorComposition, self).__init__( + dtype=dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + @property + def operators(self): + return self._operators + + def _shape(self): + # Get final matrix shape. 
+ domain_dimension = self.operators[0].domain_dimension + for operator in self.operators[1:]: + domain_dimension.assert_is_compatible_with(operator.range_dimension) + domain_dimension = operator.domain_dimension + + matrix_shape = tensor_shape.TensorShape( + [self.operators[0].range_dimension, + self.operators[-1].domain_dimension]) + + # Get broadcast batch shape. + # broadcast_shape checks for compatibility. + batch_shape = self.operators[0].batch_shape + for operator in self.operators[1:]: + batch_shape = common_shapes.broadcast_shape( + batch_shape, operator.batch_shape) + + return batch_shape.concatenate(matrix_shape) + + def _shape_tensor(self): + # Avoid messy broadcasting if possible. + if self.shape.is_fully_defined(): + return ops.convert_to_tensor( + self.shape.as_list(), dtype=dtypes.int32, name="shape") + + # Don't check the matrix dimensions. That would add unnecessary Asserts to + # the graph. Things will fail at runtime naturally if shapes are + # incompatible. + matrix_shape = array_ops_stack.stack([ + self.operators[0].range_dimension_tensor(), + self.operators[-1].domain_dimension_tensor() + ]) + + # Dummy Tensor of zeros. Will never be materialized. + zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor()) + for operator in self.operators[1:]: + zeros += array_ops.zeros(shape=operator.batch_shape_tensor()) + batch_shape = array_ops.shape(zeros) + + return array_ops.concat((batch_shape, matrix_shape), 0) + + def _linop_cholesky(self) -> linear_operator.LinearOperator: + """Computes Cholesky(LinearOperatorComposition).""" + # L @ L.H will be handled with special code below. Why is L @ L.H the most + # important special case? + # Note that Diag @ Diag.H and Diag @ TriL and TriL @ Diag are already + # compressed to Diag or TriL by diag matmul + # registration. Similarly for Identity and ScaledIdentity. + # So these would not appear in a LinearOperatorComposition unless explicitly + # constructed as such. 
So the most important thing to check is L @ L.H. + def _is_llt_product(self): + """Determines if linop = L @ L.H for L = LinearOperatorLowerTriangular.""" + if len(self.operators) != 2: + return False + if not linear_operator_util.is_aat_form(self.operators): + return False + return isinstance( + self.operators[0], + linear_operator_lower_triangular.LinearOperatorLowerTriangular) + + if not _is_llt_product(self): + return linear_operator_lower_triangular.LinearOperatorLowerTriangular( + linalg_ops.cholesky(self.to_dense()), + is_non_singular=True, + is_self_adjoint=False, + is_square=True) + + left_op = self.operators[0] + + # left_op.is_positive_definite ==> op already has positive diag,return it. + if left_op.is_positive_definite: + return left_op + + # Recall that the base class has already verified + # linop.is_positive_definite, else linop.cholesky() would have raised. + # So in particular, we know the diagonal has nonzero entries. + # In the generic case, we make op have positive diag by dividing each row + # by the sign of the diag. This is equivalent to setting A = L @ D where + # D is diag(sign(1 / L.diag_part())). Then A is lower triangular with + # positive diag and A @ A^H = L @ D @ D^H @ L^H = L @ L^H = linop. + # This also works for complex L, + # since sign(x + iy) = exp(i * angle(x + iy)). + diag_sign = array_ops.expand_dims( + math_ops.sign(left_op.diag_part()), axis=-2) + return linear_operator_lower_triangular.LinearOperatorLowerTriangular( + tril=left_op.tril / diag_sign, + is_non_singular=left_op.is_non_singular, + # L.is_self_adjoint ==> L is diagonal ==> L @ D is diagonal ==> SA + # L.is_self_adjoint is False ==> L not diagonal ==> L @ D not diag ... + is_self_adjoint=left_op.is_self_adjoint, + # L.is_positive_definite ==> L has positive diag ==> L = L @ D + # ==> (L @ D).is_positive_definite. + # L.is_positive_definite is False could result + # in L @ D being PD or not. + # Consider L = [[1, 0], [-2, 1]] and quadratic form with x = [1, 1]. 
+ # Note we will already return left_op if left_op.is_positive_definite + # above, but to be explicit write this below. + is_positive_definite=True if left_op.is_positive_definite else None, + is_square=True, + ) + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + # If self.operators = [A, B], and not adjoint, then + # matmul_order_list = [B, A]. + # As a result, we return A.matmul(B.matmul(x)) + if adjoint: + matmul_order_list = self.operators + else: + matmul_order_list = list(reversed(self.operators)) + + result = matmul_order_list[0].matmul( + x, adjoint=adjoint, adjoint_arg=adjoint_arg) + for operator in matmul_order_list[1:]: + result = operator.matmul(result, adjoint=adjoint) + return result + + def _determinant(self): + result = self.operators[0].determinant() + for operator in self.operators[1:]: + result *= operator.determinant() + return result + + def _log_abs_determinant(self): + result = self.operators[0].log_abs_determinant() + for operator in self.operators[1:]: + result += operator.log_abs_determinant() + return result + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + # TODO(langmore) Implement solve using solve_ls if some intermediate + # operator maps to a high dimensional space. + # In that case, an exact solve may still be possible. + + # If self.operators = [A, B], and not adjoint, then + # solve_order_list = [A, B]. 
+ # As a result, we return B.solve(A.solve(x)) + if adjoint: + solve_order_list = list(reversed(self.operators)) + else: + solve_order_list = self.operators + + solution = solve_order_list[0].solve( + rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) + for operator in solve_order_list[1:]: + solution = operator.solve(solution, adjoint=adjoint) + return solution + + def _assert_non_singular(self): + if all(operator.is_square for operator in self.operators): + asserts = [operator.assert_non_singular() for operator in self.operators] + return control_flow_ops.group(asserts) + return super(LinearOperatorComposition, self)._assert_non_singular() + + @property + def _composite_tensor_fields(self): + return ("operators",) + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"operators": [0] * len(self.operators)} + + +def _composition_must_be_self_adjoint(operators): + """Runs some checks to see if composition operators must be SA. + + Args: + operators: List of LinearOperators. + + Returns: + True if the composition must be SA. False if it is not SA OR if we did not + determine whether the composition is SA. + """ + if len(operators) == 1 and operators[0].is_self_adjoint: + return True + + # Check for forms like A @ A.H or (A1 @ A2) @ (A2.H @ A1.H) or ... + if linear_operator_util.is_aat_form(operators): + return True + + # Done checking...could still be SA. + # We may not catch some cases. E.g. (A @ I) @ A.H is SA, but is not AAT form. + return False diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_diag.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_diag.py new file mode 100644 index 0000000000000000000000000000000000000000..37bfe78d9f46c77c0b658237506520cf2a1c97e3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_diag.py @@ -0,0 +1,388 @@ +# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""`LinearOperator` acting like a diagonal matrix.""" + +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_lower_triangular +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.ops.linalg import property_hint_util +from tensorflow.python.util.tf_export import tf_export + +__all__ = ["LinearOperatorDiag",] + + +@tf_export("linalg.LinearOperatorDiag") +@linear_operator.make_composite_tensor +class LinearOperatorDiag(linear_operator.LinearOperator): + """`LinearOperator` acting like a [batch] square diagonal matrix. + + This operator acts like a [batch] diagonal matrix `A` with shape + `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `N x N` matrix. This matrix `A` is not materialized, but for + purposes of broadcasting this shape will be relevant. + + `LinearOperatorDiag` is initialized with a (batch) vector. 
+ + ```python + # Create a 2 x 2 diagonal linear operator. + diag = [1., -1.] + operator = LinearOperatorDiag(diag) + + operator.to_dense() + ==> [[1., 0.] + [0., -1.]] + + operator.shape + ==> [2, 2] + + operator.log_abs_determinant() + ==> scalar Tensor + + x = ... Shape [2, 4] Tensor + operator.matmul(x) + ==> Shape [2, 4] Tensor + + # Create a [2, 3] batch of 4 x 4 linear operators. + diag = tf.random.normal(shape=[2, 3, 4]) + operator = LinearOperatorDiag(diag) + + # Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible + # since the batch dimensions, [2, 1], are broadcast to + # operator.batch_shape = [2, 3]. + y = tf.random.normal(shape=[2, 1, 4, 2]) + x = operator.solve(y) + ==> operator.matmul(x) = y + ``` + + #### Shape compatibility + + This operator acts on [batch] matrix with compatible shape. + `x` is a batch matrix with compatible shape for `matmul` and `solve` if + + ``` + operator.shape = [B1,...,Bb] + [N, N], with b >= 0 + x.shape = [C1,...,Cc] + [N, R], + and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] + ``` + + #### Performance + + Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`, + and `x.shape = [N, R]`. Then + + * `operator.matmul(x)` involves `N * R` multiplications. + * `operator.solve(x)` involves `N` divisions and `N * R` multiplications. + * `operator.determinant()` involves a size `N` `reduce_prod`. + + If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and + `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. 
For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + diag, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name="LinearOperatorDiag"): + r"""Initialize a `LinearOperatorDiag`. + + Args: + diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`. + The diagonal of the operator. Allowed dtypes: `float16`, `float32`, + `float64`, `complex64`, `complex128`. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. If `diag.dtype` is real, this is auto-set to `True`. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. + + Raises: + TypeError: If `diag.dtype` is not an allowed type. + ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`. + """ + parameters = dict( + diag=diag, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + + with ops.name_scope(name, values=[diag]): + self._diag = linear_operator_util.convert_nonref_to_tensor( + diag, name="diag") + self._check_diag(self._diag) + + # Check and auto-set hints. 
+ if not self._diag.dtype.is_complex: + if is_self_adjoint is False: + raise ValueError("A real diagonal operator is always self adjoint.") + else: + is_self_adjoint = True + + if is_square is False: + raise ValueError("Only square diagonal operators currently supported.") + is_square = True + + super(LinearOperatorDiag, self).__init__( + dtype=self._diag.dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + def _check_diag(self, diag): + """Static check of diag.""" + if diag.shape.ndims is not None and diag.shape.ndims < 1: + raise ValueError("Argument diag must have at least 1 dimension. " + "Found: %s" % diag) + + def _shape(self): + # If d_shape = [5, 3], we return [5, 3, 3]. + d_shape = self._diag.shape + return d_shape.concatenate(d_shape[-1:]) + + def _shape_tensor(self): + d_shape = array_ops.shape(self._diag) + k = d_shape[-1] + return array_ops.concat((d_shape, [k]), 0) + + @property + def diag(self): + return self._diag + + def _linop_inverse(self) -> "LinearOperatorDiag": + return LinearOperatorDiag( + 1. 
/ self.diag, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _linop_matmul( + self, + left_operator: "LinearOperatorDiag", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + is_non_singular = property_hint_util.combined_non_singular_hint( + left_operator, right_operator) + is_self_adjoint = property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator) + is_positive_definite = ( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator)) + if isinstance(right_operator, LinearOperatorDiag): + return LinearOperatorDiag( + diag=left_operator.diag * right_operator.diag, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=True, + ) + # instance of linear_operator_identity.LinearOperatorScaledIdentity + elif hasattr(right_operator, "_ones_diag") and hasattr( + right_operator, "multiplier" + ): + return LinearOperatorDiag( + diag=left_operator.diag * right_operator.multiplier, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=True) + elif isinstance( + right_operator, + linear_operator_lower_triangular.LinearOperatorLowerTriangular, + ): + return linear_operator_lower_triangular.LinearOperatorLowerTriangular( + tril=left_operator.diag[..., None] * right_operator.to_dense(), + is_non_singular=is_non_singular, + # This is safe to do since the Triangular matrix is only self-adjoint + # when it is a diagonal matrix, and hence commutes. 
+ is_self_adjoint=is_self_adjoint, + is_positive_definite=None, + is_square=True) + else: + return super()._linop_matmul(left_operator, right_operator) + + def _linop_solve( + self, + left_operator: "LinearOperatorDiag", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + is_non_singular = property_hint_util.combined_non_singular_hint( + left_operator, right_operator) + is_self_adjoint = property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator) + is_positive_definite = ( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator)) + if isinstance(right_operator, LinearOperatorDiag): + return LinearOperatorDiag( + diag=right_operator.diag / left_operator.diag, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=True) + # instance of linear_operator_identity.LinearOperatorScaledIdentity + elif (hasattr(right_operator, "_ones_diag") + and hasattr(right_operator, "multiplier")): + return LinearOperatorDiag( + diag=right_operator.multiplier / left_operator.diag, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=True) + elif isinstance( + right_operator, + linear_operator_lower_triangular.LinearOperatorLowerTriangular): + return linear_operator_lower_triangular.LinearOperatorLowerTriangular( + tril=right_operator.to_dense() / left_operator.diag[..., None], + is_non_singular=is_non_singular, + # This is safe to do since the Triangular matrix is only self-adjoint + # when it is a diagonal matrix, and hence commutes. 
+ is_self_adjoint=is_self_adjoint, + is_positive_definite=None, + is_square=True) + else: + return super()._linop_solve(left_operator, right_operator) + + def _assert_non_singular(self): + return linear_operator_util.assert_no_entries_with_modulus_zero( + self._diag, + message="Singular operator: Diagonal contained zero values.") + + def _assert_positive_definite(self): + if self.dtype.is_complex: + message = ( + "Diagonal operator had diagonal entries with non-positive real part, " + "thus was not positive definite.") + else: + message = ( + "Real diagonal operator had non-positive diagonal entries, " + "thus was not positive definite.") + + return check_ops.assert_positive( + math_ops.real(self._diag), + message=message) + + def _assert_self_adjoint(self): + return linear_operator_util.assert_zero_imag_part( + self._diag, + message=( + "This diagonal operator contained non-zero imaginary values. " + " Thus it was not self-adjoint.")) + + def _linop_adjoint(self) -> "LinearOperatorDiag": + diag = self.diag + if diag.dtype.is_complex: + diag = math_ops.conj(diag) + + return LinearOperatorDiag( + diag=diag, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _linop_cholesky(self) -> "LinearOperatorDiag": + return LinearOperatorDiag( + math_ops.sqrt(self.diag), + is_non_singular=True, + is_self_adjoint=True, + is_positive_definite=True, + is_square=True) + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + diag_term = math_ops.conj(self._diag) if adjoint else self._diag + x = linalg.adjoint(x) if adjoint_arg else x + diag_mat = array_ops.expand_dims(diag_term, -1) + return diag_mat * x + + def _matvec(self, x, adjoint=False): + diag_term = math_ops.conj(self._diag) if adjoint else self._diag + return diag_term * x + + def _determinant(self): + return math_ops.reduce_prod(self._diag, axis=[-1]) + + def _log_abs_determinant(self): + log_det = 
math_ops.reduce_sum( + math_ops.log(math_ops.abs(self._diag)), axis=[-1]) + if self.dtype.is_complex: + log_det = math_ops.cast(log_det, dtype=self.dtype) + return log_det + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + diag_term = math_ops.conj(self._diag) if adjoint else self._diag + rhs = linalg.adjoint(rhs) if adjoint_arg else rhs + inv_diag_mat = array_ops.expand_dims(1. / diag_term, -1) + return rhs * inv_diag_mat + + def _to_dense(self): + return array_ops.matrix_diag(self._diag) + + def _diag_part(self): + return self.diag + + def _add_to_tensor(self, x): + x_diag = array_ops.matrix_diag_part(x) + new_diag = self._diag + x_diag + return array_ops.matrix_set_diag(x, new_diag) + + def _eigvals(self): + return tensor_conversion.convert_to_tensor_v2_with_dispatch(self.diag) + + def _cond(self): + abs_diag = math_ops.abs(self.diag) + return (math_ops.reduce_max(abs_diag, axis=-1) / + math_ops.reduce_min(abs_diag, axis=-1)) + + @property + def _composite_tensor_fields(self): + return ("diag",) + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"diag": 1} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_full_matrix.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_full_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..579f2d7c74db4555fa283ec0666d9faea361503e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_full_matrix.py @@ -0,0 +1,207 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""`LinearOperator` that wraps a [batch] matrix.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.util.tf_export import tf_export + +__all__ = ["LinearOperatorFullMatrix"] + + +@tf_export("linalg.LinearOperatorFullMatrix") +@linear_operator.make_composite_tensor +class LinearOperatorFullMatrix(linear_operator.LinearOperator): + """`LinearOperator` that wraps a [batch] matrix. + + This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape + `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `M x N` matrix. + + ```python + # Create a 2 x 2 linear operator. + matrix = [[1., 2.], [3., 4.]] + operator = LinearOperatorFullMatrix(matrix) + + operator.to_dense() + ==> [[1., 2.] + [3., 4.]] + + operator.shape + ==> [2, 2] + + operator.log_abs_determinant() + ==> scalar Tensor + + x = ... Shape [2, 4] Tensor + operator.matmul(x) + ==> Shape [2, 4] Tensor + + # Create a [2, 3] batch of 4 x 4 linear operators. 
+ matrix = tf.random.normal(shape=[2, 3, 4, 4]) + operator = LinearOperatorFullMatrix(matrix) + ``` + + #### Shape compatibility + + This operator acts on [batch] matrix with compatible shape. + `x` is a batch matrix with compatible shape for `matmul` and `solve` if + + ``` + operator.shape = [B1,...,Bb] + [M, N], with b >= 0 + x.shape = [B1,...,Bb] + [N, R], with R >= 0. + ``` + + #### Performance + + `LinearOperatorFullMatrix` has exactly the same performance as would be + achieved by using standard `TensorFlow` matrix ops. Intelligent choices are + made based on the following initialization hints. + + * If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a + Cholesky factorization is used for the determinant and solve. + + In all cases, suppose `operator` is a `LinearOperatorFullMatrix` of shape + `[M, N]`, and `x.shape = [N, R]`. Then + + * `operator.matmul(x)` is `O(M * N * R)`. + * If `M=N`, `operator.solve(x)` is `O(N^3 * R)`. + * If `M=N`, `operator.determinant()` is `O(N^3)`. + + If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and + `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. 
+ """ + + def __init__(self, + matrix, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name="LinearOperatorFullMatrix"): + r"""Initialize a `LinearOperatorFullMatrix`. + + Args: + matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`. + Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, + `complex128`. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. + + Raises: + TypeError: If `diag.dtype` is not an allowed type. 
+ """ + parameters = dict( + matrix=matrix, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + + with ops.name_scope(name, values=[matrix]): + self._matrix = linear_operator_util.convert_nonref_to_tensor( + matrix, name="matrix") + self._check_matrix(self._matrix) + + super(LinearOperatorFullMatrix, self).__init__( + dtype=self._matrix.dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + def _check_matrix(self, matrix): + """Static check of the `matrix` argument.""" + allowed_dtypes = [ + dtypes.float16, + dtypes.float32, + dtypes.float64, + dtypes.complex64, + dtypes.complex128, + ] + + matrix = tensor_conversion.convert_to_tensor_v2_with_dispatch( + matrix, name="matrix" + ) + + dtype = matrix.dtype + if dtype not in allowed_dtypes: + raise TypeError(f"Argument `matrix` must have dtype in {allowed_dtypes}. " + f"Received: {dtype}.") + + if matrix.shape.ndims is not None and matrix.shape.ndims < 2: + raise ValueError(f"Argument `matrix` must have at least 2 dimensions. 
" + f"Received: {matrix}.") + + @property + def matrix(self): + """The matrix defining this operator.""" + return self._matrix + + def _shape(self): + return self._matrix.shape + + def _shape_tensor(self): + return array_ops.shape(self._matrix) + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + return math_ops.matmul( + self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg) + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + return self._dense_solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) + + def _to_dense(self): + return self._matrix + + @property + def _composite_tensor_fields(self): + return ("matrix",) + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"matrix": 2} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_householder.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_householder.py new file mode 100644 index 0000000000000000000000000000000000000000..975c4d2fa162f5836f9c7aa8299adca33e471385 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_householder.py @@ -0,0 +1,285 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""`LinearOperator` acting like a Householder transformation.""" + +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.util.tf_export import tf_export + +__all__ = ["LinearOperatorHouseholder",] + + +@tf_export("linalg.LinearOperatorHouseholder") +@linear_operator.make_composite_tensor +class LinearOperatorHouseholder(linear_operator.LinearOperator): + """`LinearOperator` acting like a [batch] of Householder transformations. + + This operator acts like a [batch] of householder reflections with shape + `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `N x N` matrix. This matrix `A` is not materialized, but for + purposes of broadcasting this shape will be relevant. + + `LinearOperatorHouseholder` is initialized with a (batch) vector. + + A Householder reflection, defined via a vector `v`, which reflects points + in `R^n` about the hyperplane orthogonal to `v` and through the origin. + + ```python + # Create a 2 x 2 householder transform. + vec = [1 / np.sqrt(2), 1. / np.sqrt(2)] + operator = LinearOperatorHouseholder(vec) + + operator.to_dense() + ==> [[0., -1.] + [-1., -0.]] + + operator.shape + ==> [2, 2] + + operator.log_abs_determinant() + ==> scalar Tensor + + x = ... 
Shape [2, 4] Tensor + operator.matmul(x) + ==> Shape [2, 4] Tensor + ``` + + #### Shape compatibility + + This operator acts on [batch] matrix with compatible shape. + `x` is a batch matrix with compatible shape for `matmul` and `solve` if + + ``` + operator.shape = [B1,...,Bb] + [N, N], with b >= 0 + x.shape = [C1,...,Cc] + [N, R], + and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] + ``` + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + reflection_axis, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name="LinearOperatorHouseholder"): + r"""Initialize a `LinearOperatorHouseholder`. + + Args: + reflection_axis: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`. + The vector defining the hyperplane to reflect about. + Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, + `complex128`. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. This is autoset to true + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. 
See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + This is autoset to false. + is_square: Expect that this operator acts like square [batch] matrices. + This is autoset to true. + name: A name for this `LinearOperator`. + + Raises: + ValueError: `is_self_adjoint` is not `True`, `is_positive_definite` is + not `False` or `is_square` is not `True`. + """ + parameters = dict( + reflection_axis=reflection_axis, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + + with ops.name_scope(name, values=[reflection_axis]): + self._reflection_axis = linear_operator_util.convert_nonref_to_tensor( + reflection_axis, name="reflection_axis") + self._check_reflection_axis(self._reflection_axis) + + # Check and auto-set hints. + if is_self_adjoint is False: # pylint:disable=g-bool-id-comparison + raise ValueError("A Householder operator is always self adjoint.") + else: + is_self_adjoint = True + + if is_positive_definite is True: # pylint:disable=g-bool-id-comparison + raise ValueError( + "A Householder operator is always non-positive definite.") + else: + is_positive_definite = False + + if is_square is False: # pylint:disable=g-bool-id-comparison + raise ValueError("A Householder operator is always square.") + is_square = True + + super(LinearOperatorHouseholder, self).__init__( + dtype=self._reflection_axis.dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + def _check_reflection_axis(self, reflection_axis): + """Static check of reflection_axis.""" + if (reflection_axis.shape.ndims is not None and + reflection_axis.shape.ndims < 1): + raise ValueError( + "Argument reflection_axis must have at least 1 dimension. 
" + "Found: %s" % reflection_axis) + + def _shape(self): + # If d_shape = [5, 3], we return [5, 3, 3]. + d_shape = self._reflection_axis.shape + return d_shape.concatenate(d_shape[-1:]) + + def _shape_tensor(self): + d_shape = array_ops.shape(self._reflection_axis) + k = d_shape[-1] + return array_ops.concat((d_shape, [k]), 0) + + def _assert_non_singular(self): + return control_flow_ops.no_op("assert_non_singular") + + def _assert_positive_definite(self): + raise errors.InvalidArgumentError( + node_def=None, op=None, message="Householder operators are always " + "non-positive definite.") + + def _assert_self_adjoint(self): + return control_flow_ops.no_op("assert_self_adjoint") + + def _linop_adjoint(self) -> "LinearOperatorHouseholder": + return self + + def _linop_inverse(self) -> "LinearOperatorHouseholder": + return self + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + # Given a vector `v`, we would like to reflect `x` about the hyperplane + # orthogonal to `v` going through the origin. We first project `x` to `v` + # to get v * dot(v, x) / dot(v, v). After we project, we can reflect the + # projection about the hyperplane by flipping sign to get + # -v * dot(v, x) / dot(v, v). Finally, we can add back the component + # that is orthogonal to v. This is invariant under reflection, since the + # whole hyperplane is invariant. This component is equal to x - v * dot(v, + # x) / dot(v, v), giving the formula x - 2 * v * dot(v, x) / dot(v, v) + # for the reflection. + + # Note that because this is a reflection, it lies in O(n) (for real vector + # spaces) or U(n) (for complex vector spaces), and thus is its own adjoint. 
+ reflection_axis = tensor_conversion.convert_to_tensor_v2_with_dispatch( + self.reflection_axis + ) + x = linalg.adjoint(x) if adjoint_arg else x + normalized_axis = nn.l2_normalize(reflection_axis, axis=-1) + mat = normalized_axis[..., array_ops.newaxis] + x_dot_normalized_v = math_ops.matmul(mat, x, adjoint_a=True) + + return x - 2 * mat * x_dot_normalized_v + + def _trace(self): + # We have (n - 1) +1 eigenvalues and a single -1 eigenvalue. + shape = self.shape_tensor() + return math_ops.cast( + self._domain_dimension_tensor(shape=shape) - 2, + self.dtype) * array_ops.ones( + shape=self._batch_shape_tensor(shape=shape), dtype=self.dtype) + + def _determinant(self): + # For householder transformations, the determinant is -1. + return -array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype) # pylint: disable=invalid-unary-operand-type + + def _log_abs_determinant(self): + # Orthogonal matrix -> log|Q| = 0. + return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype) + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + # A householder reflection is a reflection, hence is idempotent. Thus we + # can just apply a matmul. + return self._matmul(rhs, adjoint, adjoint_arg) + + def _to_dense(self): + reflection_axis = tensor_conversion.convert_to_tensor_v2_with_dispatch( + self.reflection_axis + ) + normalized_axis = nn.l2_normalize(reflection_axis, axis=-1) + mat = normalized_axis[..., array_ops.newaxis] + matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True) + return array_ops.matrix_set_diag( + matrix, 1. + array_ops.matrix_diag_part(matrix)) + + def _diag_part(self): + reflection_axis = tensor_conversion.convert_to_tensor_v2_with_dispatch( + self.reflection_axis + ) + normalized_axis = nn.l2_normalize(reflection_axis, axis=-1) + return 1. - 2 * normalized_axis * math_ops.conj(normalized_axis) + + def _eigvals(self): + # We have (n - 1) +1 eigenvalues and a single -1 eigenvalue. 
+ result_shape = array_ops.shape(self.reflection_axis) + n = result_shape[-1] + ones_shape = array_ops.concat([result_shape[:-1], [n - 1]], axis=-1) + neg_shape = array_ops.concat([result_shape[:-1], [1]], axis=-1) + eigvals = array_ops.ones(shape=ones_shape, dtype=self.dtype) + eigvals = array_ops.concat( + [-array_ops.ones(shape=neg_shape, dtype=self.dtype), eigvals], axis=-1) # pylint: disable=invalid-unary-operand-type + return eigvals + + def _cond(self): + # Householder matrices are rotations which have condition number 1. + return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) + + @property + def reflection_axis(self): + return self._reflection_axis + + @property + def _composite_tensor_fields(self): + return ("reflection_axis",) + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"reflection_axis": 1} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_identity.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_identity.py new file mode 100644 index 0000000000000000000000000000000000000000..2d3fc2d5a85fff7f1576924381d6642d81ad1bf8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_identity.py @@ -0,0 +1,929 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""`LinearOperator` acting like the identity matrix.""" + +import numpy as np + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_diag +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.ops.linalg import property_hint_util +from tensorflow.python.util.tf_export import tf_export + +__all__ = [ + "LinearOperatorIdentity", + "LinearOperatorScaledIdentity", +] + + +class BaseLinearOperatorIdentity(linear_operator.LinearOperator): + """Base class for Identity operators.""" + + def _check_num_rows_possibly_add_asserts(self): + """Static check of init arg `num_rows`, possibly add asserts.""" + # Possibly add asserts. + if self._assert_proper_shapes: + self._num_rows = control_flow_ops.with_dependencies([ + check_ops.assert_rank( + self._num_rows, + 0, + message="Argument num_rows must be a 0-D Tensor."), + check_ops.assert_non_negative( + self._num_rows, + message="Argument num_rows must be non-negative."), + ], self._num_rows) + + # Static checks. + if not self._num_rows.dtype.is_integer: + raise TypeError("Argument num_rows must be integer type. Found:" + " %s" % self._num_rows) + + num_rows_static = self._num_rows_static + + if num_rows_static is None: + return # Cannot do any other static checks. 
+ + if num_rows_static.ndim != 0: + raise ValueError("Argument num_rows must be a 0-D Tensor. Found:" + " %s" % num_rows_static) + + if num_rows_static < 0: + raise ValueError("Argument num_rows must be non-negative. Found:" + " %s" % num_rows_static) + + def _min_matrix_dim(self): + """Minimum of domain/range dimension, if statically available, else None.""" + domain_dim = tensor_shape.dimension_value(self.domain_dimension) + range_dim = tensor_shape.dimension_value(self.range_dimension) + if domain_dim is None or range_dim is None: + return None + return min(domain_dim, range_dim) + + def _min_matrix_dim_tensor(self): + """Minimum of domain/range dimension, as a tensor.""" + return math_ops.reduce_min(self.shape_tensor()[-2:]) + + def _ones_diag(self): + """Returns the diagonal of this operator as all ones.""" + if self.shape.is_fully_defined(): + d_shape = self.batch_shape.concatenate([self._min_matrix_dim()]) + else: + d_shape = array_ops.concat( + [self.batch_shape_tensor(), + [self._min_matrix_dim_tensor()]], axis=0) + + return array_ops.ones(shape=d_shape, dtype=self.dtype) + + +@tf_export("linalg.LinearOperatorIdentity") +@linear_operator.make_composite_tensor +class LinearOperatorIdentity(BaseLinearOperatorIdentity): + """`LinearOperator` acting like a [batch] square identity matrix. + + This operator acts like a [batch] identity matrix `A` with shape + `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `N x N` matrix. This matrix `A` is not materialized, but for + purposes of broadcasting this shape will be relevant. + + `LinearOperatorIdentity` is initialized with `num_rows`, and optionally + `batch_shape`, and `dtype` arguments. If `batch_shape` is `None`, this + operator efficiently passes through all arguments. If `batch_shape` is + provided, broadcasting may occur, which will require making copies. + + ```python + # Create a 2 x 2 identity matrix. 
+ operator = LinearOperatorIdentity(num_rows=2, dtype=tf.float32) + + operator.to_dense() + ==> [[1., 0.] + [0., 1.]] + + operator.shape + ==> [2, 2] + + operator.log_abs_determinant() + ==> 0. + + x = ... Shape [2, 4] Tensor + operator.matmul(x) + ==> Shape [2, 4] Tensor, same as x. + + y = tf.random.normal(shape=[3, 2, 4]) + # Note that y.shape is compatible with operator.shape because operator.shape + # is broadcast to [3, 2, 2]. + # This broadcast does NOT require copying data, since we can infer that y + # will be passed through without changing shape. We are always able to infer + # this if the operator has no batch_shape. + x = operator.solve(y) + ==> Shape [3, 2, 4] Tensor, same as y. + + # Create a 2-batch of 2x2 identity matrices + operator = LinearOperatorIdentity(num_rows=2, batch_shape=[2]) + operator.to_dense() + ==> [[[1., 0.] + [0., 1.]], + [[1., 0.] + [0., 1.]]] + + # Here, even though the operator has a batch shape, the input is the same as + # the output, so x can be passed through without a copy. The operator is able + # to detect that no broadcast is necessary because both x and the operator + # have statically defined shape. + x = ... Shape [2, 2, 3] + operator.matmul(x) + ==> Shape [2, 2, 3] Tensor, same as x + + # Here the operator and x have different batch_shape, and are broadcast. + # This requires a copy, since the output is different size than the input. + x = ... Shape [1, 2, 3] + operator.matmul(x) + ==> Shape [2, 2, 3] Tensor, equal to [x, x] + ``` + + ### Shape compatibility + + This operator acts on [batch] matrix with compatible shape. 
+ `x` is a batch matrix with compatible shape for `matmul` and `solve` if + + ``` + operator.shape = [B1,...,Bb] + [N, N], with b >= 0 + x.shape = [C1,...,Cc] + [N, R], + and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] + ``` + + ### Performance + + If `batch_shape` initialization arg is `None`: + + * `operator.matmul(x)` is `O(1)` + * `operator.solve(x)` is `O(1)` + * `operator.determinant()` is `O(1)` + + If `batch_shape` initialization arg is provided, and static checks cannot + rule out the need to broadcast: + + * `operator.matmul(x)` is `O(D1*...*Dd*N*R)` + * `operator.solve(x)` is `O(D1*...*Dd*N*R)` + * `operator.determinant()` is `O(B1*...*Bb)` + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + num_rows, + batch_shape=None, + dtype=None, + is_non_singular=True, + is_self_adjoint=True, + is_positive_definite=True, + is_square=True, + assert_proper_shapes=False, + name="LinearOperatorIdentity"): + r"""Initialize a `LinearOperatorIdentity`. + + The `LinearOperatorIdentity` is initialized with arguments defining `dtype` + and shape. + + This operator is able to broadcast the leading (batch) dimensions, which + sometimes requires copying data. If `batch_shape` is `None`, the operator + can take arguments of any batch shape without copying. See examples. + + Args: + num_rows: Scalar non-negative integer `Tensor`. 
Number of rows in the + corresponding identity matrix. + batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading + dimensions. If `None`, this operator has no leading dimensions. + dtype: Data type of the matrix that this operator represents. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + assert_proper_shapes: Python `bool`. If `False`, only perform static + checks that initialization and method arguments have proper shape. + If `True`, and static checks are inconclusive, add asserts to the graph. + name: A name for this `LinearOperator` + + Raises: + ValueError: If `num_rows` is determined statically to be non-scalar, or + negative. + ValueError: If `batch_shape` is determined statically to not be 1-D, or + negative. + ValueError: If any of the following is not `True`: + `{is_self_adjoint, is_non_singular, is_positive_definite}`. + TypeError: If `num_rows` or `batch_shape` is ref-type (e.g. Variable). 
+ """ + parameters = dict( + num_rows=num_rows, + batch_shape=batch_shape, + dtype=dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + assert_proper_shapes=assert_proper_shapes, + name=name) + + dtype = dtype or dtypes.float32 + self._assert_proper_shapes = assert_proper_shapes + + with ops.name_scope(name): + dtype = dtypes.as_dtype(dtype) + if not is_self_adjoint: + raise ValueError("An identity operator is always self adjoint.") + if not is_non_singular: + raise ValueError("An identity operator is always non-singular.") + if not is_positive_definite: + raise ValueError("An identity operator is always positive-definite.") + if not is_square: + raise ValueError("An identity operator is always square.") + + super(LinearOperatorIdentity, self).__init__( + dtype=dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + linear_operator_util.assert_not_ref_type(num_rows, "num_rows") + linear_operator_util.assert_not_ref_type(batch_shape, "batch_shape") + + self._num_rows = linear_operator_util.shape_tensor( + num_rows, name="num_rows") + self._num_rows_static = tensor_util.constant_value(self._num_rows) + self._check_num_rows_possibly_add_asserts() + + if batch_shape is None: + self._batch_shape_arg = None + else: + self._batch_shape_arg = linear_operator_util.shape_tensor( + batch_shape, name="batch_shape_arg") + self._batch_shape_static = tensor_util.constant_value( + self._batch_shape_arg) + self._check_batch_shape_possibly_add_asserts() + + def _shape(self): + matrix_shape = tensor_shape.TensorShape((self._num_rows_static, + self._num_rows_static)) + if self._batch_shape_arg is None: + return matrix_shape + + batch_shape = tensor_shape.TensorShape(self._batch_shape_static) + return batch_shape.concatenate(matrix_shape) + + def 
_shape_tensor(self): + matrix_shape = array_ops_stack.stack( + (self._num_rows, self._num_rows), axis=0) + if self._batch_shape_arg is None: + return matrix_shape + + return array_ops.concat((self._batch_shape_arg, matrix_shape), 0) + + def _linop_adjoint(self) -> "LinearOperatorIdentity": + return self + + def _linop_cholesky(self) -> "LinearOperatorIdentity": + return LinearOperatorIdentity( + num_rows=self._num_rows, # pylint: disable=protected-access + batch_shape=self.batch_shape, + dtype=self.dtype, + is_non_singular=True, + is_self_adjoint=True, + is_positive_definite=True, + is_square=True) + + def _linop_inverse(self) -> "LinearOperatorIdentity": + return self + + def _linop_matmul( + self, + left_operator: "LinearOperatorIdentity", + right_operator: linear_operator.LinearOperator, + ) -> "LinearOperatorIdentity": + del left_operator + return right_operator + + def _linop_solve( + self, + left_operator: "LinearOperatorIdentity", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + del left_operator + return right_operator + + def _assert_non_singular(self): + return control_flow_ops.no_op("assert_non_singular") + + def _assert_positive_definite(self): + return control_flow_ops.no_op("assert_positive_definite") + + def _assert_self_adjoint(self): + return control_flow_ops.no_op("assert_self_adjoint") + + def _possibly_broadcast_batch_shape(self, x): + """Return 'x', possibly after broadcasting the leading dimensions.""" + # If we have no batch shape, our batch shape broadcasts with everything! + if self._batch_shape_arg is None: + return x + + # Static attempt: + # If we determine that no broadcast is necessary, pass x through + # If we need a broadcast, add to an array of zeros. + # + # special_shape is the shape that, when broadcast with x's shape, will give + # the correct broadcast_shape. 
Note that + # We have already verified the second to last dimension of self.shape + # matches x's shape in assert_compatible_matrix_dimensions. + # Also, the final dimension of 'x' can have any shape. + # Therefore, the final two dimensions of special_shape are 1's. + special_shape = self.batch_shape.concatenate([1, 1]) + bshape = array_ops.broadcast_static_shape(x.shape, special_shape) + if special_shape.is_fully_defined(): + # bshape.is_fully_defined iff special_shape.is_fully_defined. + if bshape == x.shape: + return x + # Use the built in broadcasting of addition. + zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype) + return x + zeros + + # Dynamic broadcast: + # Always add to an array of zeros, rather than using a "cond", since a + # cond would require copying data from GPU --> CPU. + special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0) + zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype) + return x + zeros + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + # Note that adjoint has no effect since this matrix is self-adjoint. + x = linalg.adjoint(x) if adjoint_arg else x + if self._assert_proper_shapes: + aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x) + x = control_flow_ops.with_dependencies([aps], x) + return self._possibly_broadcast_batch_shape(x) + + def _determinant(self): + return array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype) + + def _log_abs_determinant(self): + return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype) + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + return self._matmul(rhs, adjoint_arg=adjoint_arg) + + def _trace(self): + # Get Tensor of all ones of same shape as self.batch_shape. 
+ if self.batch_shape.is_fully_defined(): + batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype) + else: + batch_of_ones = array_ops.ones( + shape=self.batch_shape_tensor(), dtype=self.dtype) + + if self._min_matrix_dim() is not None: + return self._min_matrix_dim() * batch_of_ones + else: + return (math_ops.cast(self._min_matrix_dim_tensor(), self.dtype) * + batch_of_ones) + + def _diag_part(self): + return self._ones_diag() + + def add_to_tensor(self, mat, name="add_to_tensor"): + """Add matrix represented by this operator to `mat`. Equiv to `I + mat`. + + Args: + mat: `Tensor` with same `dtype` and shape broadcastable to `self`. + name: A name to give this `Op`. + + Returns: + A `Tensor` with broadcast shape and same `dtype` as `self`. + """ + with self._name_scope(name): # pylint: disable=not-callable + mat = tensor_conversion.convert_to_tensor_v2_with_dispatch( + mat, name="mat" + ) + mat_diag = array_ops.matrix_diag_part(mat) + new_diag = 1 + mat_diag + return array_ops.matrix_set_diag(mat, new_diag) + + def _eigvals(self): + return self._ones_diag() + + def _cond(self): + return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) + + def _check_num_rows_possibly_add_asserts(self): + """Static check of init arg `num_rows`, possibly add asserts.""" + # Possibly add asserts. + if self._assert_proper_shapes: + self._num_rows = control_flow_ops.with_dependencies([ + check_ops.assert_rank( + self._num_rows, + 0, + message="Argument num_rows must be a 0-D Tensor."), + check_ops.assert_non_negative( + self._num_rows, + message="Argument num_rows must be non-negative."), + ], self._num_rows) + + # Static checks. + if not self._num_rows.dtype.is_integer: + raise TypeError("Argument num_rows must be integer type. Found:" + " %s" % self._num_rows) + + num_rows_static = self._num_rows_static + + if num_rows_static is None: + return # Cannot do any other static checks. 
+ + if num_rows_static.ndim != 0: + raise ValueError("Argument num_rows must be a 0-D Tensor. Found:" + " %s" % num_rows_static) + + if num_rows_static < 0: + raise ValueError("Argument num_rows must be non-negative. Found:" + " %s" % num_rows_static) + + def _check_batch_shape_possibly_add_asserts(self): + """Static check of init arg `batch_shape`, possibly add asserts.""" + if self._batch_shape_arg is None: + return + + # Possibly add asserts + if self._assert_proper_shapes: + self._batch_shape_arg = control_flow_ops.with_dependencies([ + check_ops.assert_rank( + self._batch_shape_arg, + 1, + message="Argument batch_shape must be a 1-D Tensor."), + check_ops.assert_non_negative( + self._batch_shape_arg, + message="Argument batch_shape must be non-negative."), + ], self._batch_shape_arg) + + # Static checks + if not self._batch_shape_arg.dtype.is_integer: + raise TypeError("Argument batch_shape must be integer type. Found:" + " %s" % self._batch_shape_arg) + + if self._batch_shape_static is None: + return # Cannot do any other static checks. + + if self._batch_shape_static.ndim != 1: + raise ValueError("Argument batch_shape must be a 1-D Tensor. Found:" + " %s" % self._batch_shape_static) + + if np.any(self._batch_shape_static < 0): + raise ValueError("Argument batch_shape must be non-negative. Found:" + "%s" % self._batch_shape_static) + + @property + def _composite_tensor_prefer_static_fields(self): + return ("num_rows", "batch_shape") + + @property + def _composite_tensor_fields(self): + return ("num_rows", "batch_shape", "dtype", "assert_proper_shapes") + + def __getitem__(self, slices): + # Slice the batch shape and return a new LinearOperatorIdentity. + # Use a proxy shape and slice it. 
Use this as the new batch shape + new_batch_shape = array_ops.shape( + array_ops.ones(self._batch_shape_arg)[slices]) + parameters = dict(self.parameters, batch_shape=new_batch_shape) + return LinearOperatorIdentity(**parameters) + + +@tf_export("linalg.LinearOperatorScaledIdentity") +@linear_operator.make_composite_tensor +class LinearOperatorScaledIdentity(BaseLinearOperatorIdentity): + """`LinearOperator` acting like a scaled [batch] identity matrix `A = c I`. + + This operator acts like a scaled [batch] identity matrix `A` with shape + `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + a scaled version of the `N x N` identity matrix. + + `LinearOperatorIdentity` is initialized with `num_rows`, and a `multiplier` + (a `Tensor`) of shape `[B1,...,Bb]`. `N` is set to `num_rows`, and the + `multiplier` determines the scale for each batch member. + + ```python + # Create a 2 x 2 scaled identity matrix. + operator = LinearOperatorIdentity(num_rows=2, multiplier=3.) + + operator.to_dense() + ==> [[3., 0.] + [0., 3.]] + + operator.shape + ==> [2, 2] + + operator.log_abs_determinant() + ==> 2 * Log[3] + + x = ... Shape [2, 4] Tensor + operator.matmul(x) + ==> 3 * x + + y = tf.random.normal(shape=[3, 2, 4]) + # Note that y.shape is compatible with operator.shape because operator.shape + # is broadcast to [3, 2, 2]. + x = operator.solve(y) + ==> 3 * x + + # Create a 2-batch of 2x2 identity matrices + operator = LinearOperatorIdentity(num_rows=2, multiplier=5.) + operator.to_dense() + ==> [[[5., 0.] + [0., 5.]], + [[5., 0.] + [0., 5.]]] + + x = ... Shape [2, 2, 3] + operator.matmul(x) + ==> 5 * x + + # Here the operator and x have different batch_shape, and are broadcast. + x = ... Shape [1, 2, 3] + operator.matmul(x) + ==> 5 * x + ``` + + ### Shape compatibility + + This operator acts on [batch] matrix with compatible shape. 
+ `x` is a batch matrix with compatible shape for `matmul` and `solve` if + + ``` + operator.shape = [B1,...,Bb] + [N, N], with b >= 0 + x.shape = [C1,...,Cc] + [N, R], + and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] + ``` + + ### Performance + + * `operator.matmul(x)` is `O(D1*...*Dd*N*R)` + * `operator.solve(x)` is `O(D1*...*Dd*N*R)` + * `operator.determinant()` is `O(D1*...*Dd)` + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + num_rows, + multiplier, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=True, + assert_proper_shapes=False, + name="LinearOperatorScaledIdentity"): + r"""Initialize a `LinearOperatorScaledIdentity`. + + The `LinearOperatorScaledIdentity` is initialized with `num_rows`, which + determines the size of each identity matrix, and a `multiplier`, + which defines `dtype`, batch shape, and scale of each matrix. + + This operator is able to broadcast the leading (batch) dimensions. + + Args: + num_rows: Scalar non-negative integer `Tensor`. Number of rows in the + corresponding identity matrix. + multiplier: `Tensor` of shape `[B1,...,Bb]`, or `[]` (a scalar). + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. 
+ is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + assert_proper_shapes: Python `bool`. If `False`, only perform static + checks that initialization and method arguments have proper shape. + If `True`, and static checks are inconclusive, add asserts to the graph. + name: A name for this `LinearOperator` + + Raises: + ValueError: If `num_rows` is determined statically to be non-scalar, or + negative. + """ + parameters = dict( + num_rows=num_rows, + multiplier=multiplier, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + assert_proper_shapes=assert_proper_shapes, + name=name) + + self._assert_proper_shapes = assert_proper_shapes + + with ops.name_scope(name, values=[multiplier, num_rows]): + self._multiplier = linear_operator_util.convert_nonref_to_tensor( + multiplier, name="multiplier") + + # Check and auto-set hints. 
+ if not self._multiplier.dtype.is_complex: + if is_self_adjoint is False: # pylint: disable=g-bool-id-comparison + raise ValueError("A real diagonal operator is always self adjoint.") + else: + is_self_adjoint = True + + if not is_square: + raise ValueError("A ScaledIdentity operator is always square.") + + linear_operator_util.assert_not_ref_type(num_rows, "num_rows") + + super(LinearOperatorScaledIdentity, self).__init__( + dtype=self._multiplier.dtype.base_dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + self._num_rows = linear_operator_util.shape_tensor( + num_rows, name="num_rows") + self._num_rows_static = tensor_util.constant_value(self._num_rows) + self._check_num_rows_possibly_add_asserts() + self._num_rows_cast_to_dtype = math_ops.cast(self._num_rows, self.dtype) + self._num_rows_cast_to_real_dtype = math_ops.cast(self._num_rows, + self.dtype.real_dtype) + + def _shape(self): + matrix_shape = tensor_shape.TensorShape((self._num_rows_static, + self._num_rows_static)) + + batch_shape = self.multiplier.shape + return batch_shape.concatenate(matrix_shape) + + def _shape_tensor(self): + matrix_shape = array_ops_stack.stack( + (self._num_rows, self._num_rows), axis=0) + + batch_shape = array_ops.shape(self.multiplier) + return array_ops.concat((batch_shape, matrix_shape), 0) + + def _assert_non_singular(self): + return check_ops.assert_positive( + math_ops.abs(self.multiplier), message="LinearOperator was singular") + + def _assert_positive_definite(self): + return check_ops.assert_positive( + math_ops.real(self.multiplier), + message="LinearOperator was not positive definite.") + + def _assert_self_adjoint(self): + imag_multiplier = math_ops.imag(self.multiplier) + return check_ops.assert_equal( + array_ops.zeros_like(imag_multiplier), + imag_multiplier, + message="LinearOperator was not self-adjoint") + + def 
_make_multiplier_matrix(self, conjugate=False): + # Shape [B1,...Bb, 1, 1] + multiplier_matrix = array_ops.expand_dims( + array_ops.expand_dims(self.multiplier, -1), -1) + if conjugate: + multiplier_matrix = math_ops.conj(multiplier_matrix) + return multiplier_matrix + + def _linop_adjoint(self) -> "LinearOperatorScaledIdentity": + multiplier = self.multiplier + if multiplier.dtype.is_complex: + multiplier = math_ops.conj(multiplier) + + return LinearOperatorScaledIdentity( + num_rows=self._num_rows, + multiplier=multiplier, + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _linop_cholesky(self) -> "LinearOperatorScaledIdentity": + return LinearOperatorScaledIdentity( + num_rows=self._num_rows, + multiplier=math_ops.sqrt(self.multiplier), + is_non_singular=True, + is_self_adjoint=True, + is_positive_definite=True, + is_square=True) + + def _linop_inverse(self) -> "LinearOperatorScaledIdentity": + return LinearOperatorScaledIdentity( + num_rows=self._num_rows, + multiplier=1. 
/ self.multiplier, + is_non_singular=self.is_non_singular, + is_self_adjoint=True, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _linop_matmul( + self, + left_operator: "LinearOperatorScaledIdentity", + right_operator: linear_operator.LinearOperator, + ) -> "LinearOperatorScaledIdentity": + is_non_singular = property_hint_util.combined_non_singular_hint( + left_operator, right_operator) + is_self_adjoint = property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator) + is_positive_definite = ( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator)) + if isinstance(right_operator, LinearOperatorScaledIdentity): + return LinearOperatorScaledIdentity( + num_rows=left_operator.domain_dimension_tensor(), + multiplier=left_operator.multiplier * right_operator.multiplier, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=True) + elif isinstance(right_operator, linear_operator_diag.LinearOperatorDiag): + return linear_operator_diag.LinearOperatorDiag( + diag=right_operator.diag * left_operator.multiplier, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=True) + else: + return super()._linop_matmul(left_operator, right_operator) + + def _linop_solve( + self, + left_operator: "LinearOperatorScaledIdentity", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + is_non_singular = property_hint_util.combined_non_singular_hint( + left_operator, right_operator) + is_self_adjoint = property_hint_util.combined_commuting_self_adjoint_hint( + left_operator, right_operator) + is_positive_definite = ( + property_hint_util.combined_commuting_positive_definite_hint( + left_operator, right_operator)) + if isinstance(right_operator, LinearOperatorScaledIdentity): + return LinearOperatorScaledIdentity( + 
num_rows=left_operator.domain_dimension_tensor(), + multiplier=right_operator.multiplier / left_operator.multiplier, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=True) + elif isinstance(right_operator, linear_operator_diag.LinearOperatorDiag): + return linear_operator_diag.LinearOperatorDiag( + diag=right_operator.diag / left_operator.multiplier, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=True) + else: + return super()._linop_solve(left_operator, right_operator) + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + x = linalg.adjoint(x) if adjoint_arg else x + if self._assert_proper_shapes: + aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x) + x = control_flow_ops.with_dependencies([aps], x) + return x * self._make_multiplier_matrix(conjugate=adjoint) + + def _determinant(self): + return self.multiplier**self._num_rows_cast_to_dtype + + def _log_abs_determinant(self): + return self._num_rows_cast_to_real_dtype * math_ops.log( + math_ops.abs(self.multiplier)) + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + rhs = linalg.adjoint(rhs) if adjoint_arg else rhs + if self._assert_proper_shapes: + aps = linear_operator_util.assert_compatible_matrix_dimensions(self, rhs) + rhs = control_flow_ops.with_dependencies([aps], rhs) + return rhs / self._make_multiplier_matrix(conjugate=adjoint) + + def _trace(self): + # Get Tensor of all ones of same shape as self.batch_shape. 
+ if self.batch_shape.is_fully_defined(): + batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype) + else: + batch_of_ones = array_ops.ones( + shape=self.batch_shape_tensor(), dtype=self.dtype) + + if self._min_matrix_dim() is not None: + return self.multiplier * self._min_matrix_dim() * batch_of_ones + else: + return (self.multiplier * math_ops.cast(self._min_matrix_dim_tensor(), + self.dtype) * batch_of_ones) + + def _diag_part(self): + return self._ones_diag() * self.multiplier[..., array_ops.newaxis] + + def add_to_tensor(self, mat, name="add_to_tensor"): + """Add matrix represented by this operator to `mat`. Equiv to `I + mat`. + + Args: + mat: `Tensor` with same `dtype` and shape broadcastable to `self`. + name: A name to give this `Op`. + + Returns: + A `Tensor` with broadcast shape and same `dtype` as `self`. + """ + with self._name_scope(name): # pylint: disable=not-callable + # Shape [B1,...,Bb, 1] + multiplier_vector = array_ops.expand_dims(self.multiplier, -1) + + # Shape [C1,...,Cc, M, M] + mat = tensor_conversion.convert_to_tensor_v2_with_dispatch( + mat, name="mat" + ) + + # Shape [C1,...,Cc, M] + mat_diag = array_ops.matrix_diag_part(mat) + + # multiplier_vector broadcasts here. + new_diag = multiplier_vector + mat_diag + + return array_ops.matrix_set_diag(mat, new_diag) + + def _eigvals(self): + return self._ones_diag() * self.multiplier[..., array_ops.newaxis] + + def _cond(self): + # Condition number for a scalar time identity matrix is one, except when the + # scalar is zero. 
+ return array_ops.where_v2( + math_ops.equal(self._multiplier, 0.), + math_ops.cast(np.nan, dtype=self.dtype), + math_ops.cast(1., dtype=self.dtype)) + + @property + def multiplier(self): + """The [batch] scalar `Tensor`, `c` in `cI`.""" + return self._multiplier + + @property + def _composite_tensor_prefer_static_fields(self): + return ("num_rows",) + + @property + def _composite_tensor_fields(self): + return ("num_rows", "multiplier", "assert_proper_shapes") + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"multiplier": 0} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_inversion.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_inversion.py new file mode 100644 index 0000000000000000000000000000000000000000..f47a7d8eb0c360ca372eaf0925df157bae415605 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_inversion.py @@ -0,0 +1,231 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Inverts a non-singular `LinearOperator`.""" + +from tensorflow.python.framework import ops +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.util.tf_export import tf_export + +__all__ = ["LinearOperatorInversion"] + + +@tf_export("linalg.LinearOperatorInversion") +@linear_operator.make_composite_tensor +class LinearOperatorInversion(linear_operator.LinearOperator): + """`LinearOperator` representing the inverse of another operator. + + This operator represents the inverse of another operator. + + ```python + # Create a 2 x 2 linear operator. + operator = LinearOperatorFullMatrix([[1., 0.], [0., 2.]]) + operator_inv = LinearOperatorInversion(operator) + + operator_inv.to_dense() + ==> [[1., 0.] + [0., 0.5]] + + operator_inv.shape + ==> [2, 2] + + operator_inv.log_abs_determinant() + ==> - log(2) + + x = ... Shape [2, 4] Tensor + operator_inv.matmul(x) + ==> Shape [2, 4] Tensor, equal to operator.solve(x) + ``` + + #### Performance + + The performance of `LinearOperatorInversion` depends on the underlying + operators performance: `solve` and `matmul` are swapped, and determinant is + inverted. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. 
+ """ + + def __init__(self, + operator, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name=None): + r"""Initialize a `LinearOperatorInversion`. + + `LinearOperatorInversion` is initialized with an operator `A`. The `solve` + and `matmul` methods are effectively swapped. E.g. + + ``` + A = MyLinearOperator(...) + B = LinearOperatorInversion(A) + x = [....] # a vector + + assert A.matvec(x) == B.solvevec(x) + ``` + + Args: + operator: `LinearOperator` object. If `operator.is_non_singular == False`, + an exception is raised. We do allow `operator.is_non_singular == None`, + in which case this operator will have `is_non_singular == None`. + Similarly for `is_self_adjoint` and `is_positive_definite`. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. Default is `operator.name + + "_inv"`. + + Raises: + ValueError: If `operator.is_non_singular` is False. + """ + parameters = dict( + operator=operator, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + + self._operator = operator + + # Auto-set and check hints. + if operator.is_non_singular is False or is_non_singular is False: + raise ValueError( + f"Argument `is_non_singular` or argument `operator` must have " + f"supplied hint `is_non_singular` equal to `True` or `None`. 
" + f"Found `operator.is_non_singular`: {operator.is_non_singular}, " + f"`is_non_singular`: {is_non_singular}.") + if operator.is_square is False or is_square is False: + raise ValueError( + f"Argument `is_square` or argument `operator` must have supplied " + f"hint `is_square` equal to `True` or `None`. Found " + f"`operator.is_square`: {operator.is_square}, " + f"`is_square`: {is_square}.") + + # The congruency of is_non_singular and is_self_adjoint was checked in the + # base operator. Other hints are, in this special case of inversion, ones + # that must be the same for base/derived operator. + combine_hint = ( + linear_operator_util.use_operator_or_provided_hint_unless_contradicting) + + is_square = combine_hint( + operator, "is_square", is_square, + "An operator is square if and only if its inverse is square.") + + is_non_singular = combine_hint( + operator, "is_non_singular", is_non_singular, + "An operator is non-singular if and only if its inverse is " + "non-singular.") + + is_self_adjoint = combine_hint( + operator, "is_self_adjoint", is_self_adjoint, + "An operator is self-adjoint if and only if its inverse is " + "self-adjoint.") + + is_positive_definite = combine_hint( + operator, "is_positive_definite", is_positive_definite, + "An operator is positive-definite if and only if its inverse is " + "positive-definite.") + + # Initialization. 
+ if name is None: + name = operator.name + "_inv" + with ops.name_scope(name): + super(LinearOperatorInversion, self).__init__( + dtype=operator.dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + @property + def operator(self) -> "LinearOperatorInversion": + """The operator before inversion.""" + return self._operator + + def _linop_inverse(self) -> linear_operator.LinearOperator: + return self.operator + + def _linop_solve( + self, + left_operator: "LinearOperatorInversion", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + """Solve inverse of generic `LinearOperator`s.""" + return left_operator.operator.matmul(right_operator) + + def _assert_non_singular(self): + return self.operator.assert_non_singular() + + def _assert_positive_definite(self): + return self.operator.assert_positive_definite() + + def _assert_self_adjoint(self): + return self.operator.assert_self_adjoint() + + def _shape(self): + return self.operator.shape + + def _shape_tensor(self): + return self.operator.shape_tensor() + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + return self.operator.solve(x, adjoint=adjoint, adjoint_arg=adjoint_arg) + + def _determinant(self): + return 1. / self.operator.determinant() + + def _log_abs_determinant(self): + return -1. * self.operator.log_abs_determinant() + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + return self.operator.matmul(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) + + def _eigvals(self): + return 1. 
/ self.operator.eigvals() + + def _cond(self): + return self.operator.cond() + + @property + def _composite_tensor_fields(self): + return ("operator",) + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"operator": 0} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_kronecker.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_kronecker.py new file mode 100644 index 0000000000000000000000000000000000000000..ccbcad12c4a4fa52ae307b327003e4850b7cc5bb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_kronecker.py @@ -0,0 +1,538 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Construct the Kronecker product of one or more `LinearOperators`.""" + +from tensorflow.python.framework import common_shapes +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.util.tf_export import tf_export + +__all__ = ["LinearOperatorKronecker"] + + +def _prefer_static_shape(x): + if x.shape.is_fully_defined(): + return x.shape + return array_ops.shape(x) + + +def _prefer_static_concat_shape(first_shape, second_shape_int_list): + """Concatenate a shape with a list of integers as statically as possible. + + Args: + first_shape: `TensorShape` or `Tensor` instance. If a `TensorShape`, + `first_shape.is_fully_defined()` must return `True`. + second_shape_int_list: `list` of scalar integer `Tensor`s. + + Returns: + `Tensor` representing concatenating `first_shape` and + `second_shape_int_list` as statically as possible. 
+ """ + second_shape_int_list_static = [ + tensor_util.constant_value(s) for s in second_shape_int_list] + if (isinstance(first_shape, tensor_shape.TensorShape) and + all(s is not None for s in second_shape_int_list_static)): + return first_shape.concatenate(second_shape_int_list_static) + return array_ops.concat([first_shape, second_shape_int_list], axis=0) + + +@tf_export("linalg.LinearOperatorKronecker") +@linear_operator.make_composite_tensor +class LinearOperatorKronecker(linear_operator.LinearOperator): + """Kronecker product between two `LinearOperators`. + + This operator composes one or more linear operators `[op1,...,opJ]`, + building a new `LinearOperator` representing the Kronecker product: + `op1 x op2 x .. opJ` (we omit parentheses as the Kronecker product is + associative). + + If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the composed operator + will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`, + where the product is over all operators. + + ```python + # Create a 4 x 4 linear operator composed of two 2 x 2 operators. + operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) + operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]]) + operator = LinearOperatorKronecker([operator_1, operator_2]) + + operator.to_dense() + ==> [[1., 0., 2., 0.], + [2., 1., 4., 2.], + [3., 0., 4., 0.], + [6., 3., 8., 4.]] + + operator.shape + ==> [4, 4] + + operator.log_abs_determinant() + ==> scalar Tensor + + x = ... Shape [4, 2] Tensor + operator.matmul(x) + ==> Shape [4, 2] Tensor + + # Create a [2, 3] batch of 4 x 5 linear operators. + matrix_45 = tf.random.normal(shape=[2, 3, 4, 5]) + operator_45 = LinearOperatorFullMatrix(matrix) + + # Create a [2, 3] batch of 5 x 6 linear operators. + matrix_56 = tf.random.normal(shape=[2, 3, 5, 6]) + operator_56 = LinearOperatorFullMatrix(matrix_56) + + # Compose to create a [2, 3] batch of 20 x 30 operators. 
+ operator_large = LinearOperatorKronecker([operator_45, operator_56]) + + # Create a shape [2, 3, 20, 2] vector. + x = tf.random.normal(shape=[2, 3, 6, 2]) + operator_large.matmul(x) + ==> Shape [2, 3, 30, 2] Tensor + ``` + + #### Performance + + The performance of `LinearOperatorKronecker` on any operation is equal to + the sum of the individual operators' operations. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + operators, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name=None): + r"""Initialize a `LinearOperatorKronecker`. + + `LinearOperatorKronecker` is initialized with a list of operators + `[op_1,...,op_J]`. + + Args: + operators: Iterable of `LinearOperator` objects, each with + the same `dtype` and composable shape, representing the Kronecker + factors. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. 
See: + https://en.wikipedia.org/wiki/Positive-definite_matrix\ + #Extension_for_non_symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. Default is the individual + operators names joined with `_x_`. + + Raises: + TypeError: If all operators do not have the same `dtype`. + ValueError: If `operators` is empty. + """ + parameters = dict( + operators=operators, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + + # Validate operators. + check_ops.assert_proper_iterable(operators) + operators = list(operators) + if not operators: + raise ValueError(f"Argument `operators` must be a list of >=1 operators. " + f"Received: {operators}.") + self._operators = operators + + # Validate dtype. + dtype = operators[0].dtype + for operator in operators: + if operator.dtype != dtype: + name_type = (str((o.name, o.dtype)) for o in operators) + raise TypeError( + f"Expected every operation in argument `operators` to have the " + f"same dtype. Received {list(name_type)}.") + + # Auto-set and check hints. + # A Kronecker product is invertible, if and only if all factors are + # invertible. + if all(operator.is_non_singular for operator in operators): + if is_non_singular is False: + raise ValueError( + f"The Kronecker product of non-singular operators is always " + f"non-singular. Expected argument `is_non_singular` to be True. " + f"Received: {is_non_singular}.") + is_non_singular = True + + if all(operator.is_self_adjoint for operator in operators): + if is_self_adjoint is False: + raise ValueError( + f"The Kronecker product of self-adjoint operators is always " + f"self-adjoint. Expected argument `is_self_adjoint` to be True. 
" + f"Received: {is_self_adjoint}.") + is_self_adjoint = True + + # The eigenvalues of a Kronecker product are equal to the products of eigen + # values of the corresponding factors. + if all(operator.is_positive_definite for operator in operators): + if is_positive_definite is False: + raise ValueError( + f"The Kronecker product of positive-definite operators is always " + f"positive-definite. Expected argument `is_positive_definite` to " + f"be True. Received: {is_positive_definite}.") + is_positive_definite = True + + if name is None: + name = operators[0].name + for operator in operators[1:]: + name += "_x_" + operator.name + with ops.name_scope(name): + super(LinearOperatorKronecker, self).__init__( + dtype=dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + @property + def operators(self): + return self._operators + + def _shape(self): + # Get final matrix shape. + domain_dimension = self.operators[0].domain_dimension + for operator in self.operators[1:]: + domain_dimension = domain_dimension * operator.domain_dimension + + range_dimension = self.operators[0].range_dimension + for operator in self.operators[1:]: + range_dimension = range_dimension * operator.range_dimension + + matrix_shape = tensor_shape.TensorShape([ + range_dimension, domain_dimension]) + + # Get broadcast batch shape. + # broadcast_shape checks for compatibility. 
+ batch_shape = self.operators[0].batch_shape + for operator in self.operators[1:]: + batch_shape = common_shapes.broadcast_shape( + batch_shape, operator.batch_shape) + + return batch_shape.concatenate(matrix_shape) + + def _shape_tensor(self): + domain_dimension = self.operators[0].domain_dimension_tensor() + for operator in self.operators[1:]: + domain_dimension = domain_dimension * operator.domain_dimension_tensor() + + range_dimension = self.operators[0].range_dimension_tensor() + for operator in self.operators[1:]: + range_dimension = range_dimension * operator.range_dimension_tensor() + + matrix_shape = [range_dimension, domain_dimension] + + # Get broadcast batch shape. + # broadcast_shape checks for compatibility. + batch_shape = self.operators[0].batch_shape_tensor() + for operator in self.operators[1:]: + batch_shape = array_ops.broadcast_dynamic_shape( + batch_shape, operator.batch_shape_tensor()) + + return array_ops.concat((batch_shape, matrix_shape), 0) + + def _linop_adjoint(self) -> "LinearOperatorKronecker": + return LinearOperatorKronecker( + operators=[operator.adjoint() for operator in self.operators], + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _linop_cholesky(self) -> "LinearOperatorKronecker": + # Cholesky decomposition of a Kronecker product is the Kronecker product + # of cholesky decompositions. + return LinearOperatorKronecker( + operators=[operator.cholesky() for operator in self.operators], + is_non_singular=True, + is_self_adjoint=None, # Let the operators passed in decide. + is_square=True) + + def _linop_inverse(self) -> "LinearOperatorKronecker": + # Inverse decomposition of a Kronecker product is the Kronecker product + # of inverse decompositions. 
+ return LinearOperatorKronecker( + operators=[ + operator.inverse() for operator in self.operators], + is_non_singular=self.is_non_singular, + is_self_adjoint=self.is_self_adjoint, + is_positive_definite=self.is_positive_definite, + is_square=True) + + def _solve_matmul_internal( + self, + x, + solve_matmul_fn, + adjoint=False, + adjoint_arg=False): + # We heavily rely on Roth's column Lemma [1]: + # (A x B) * vec X = vec BXA^T + # where vec stacks all the columns of the matrix under each other. + # In our case, we use a variant of the lemma that is row-major + # friendly: (A x B) * vec' X = vec' AXB^T + # Where vec' reshapes a matrix into a vector. We can repeatedly apply this + # for a collection of kronecker products. + # Given that (A x B)^-1 = A^-1 x B^-1 and (A x B)^T = A^T x B^T, we can + # use the above to compute multiplications, solves with any composition of + # transposes. + output = x + + if adjoint_arg: + if self.dtype.is_complex: + output = math_ops.conj(output) + else: + output = linalg.transpose(output) + + for o in reversed(self.operators): + # Statically compute the reshape. + if adjoint: + operator_dimension = o.range_dimension_tensor() + else: + operator_dimension = o.domain_dimension_tensor() + output_shape = _prefer_static_shape(output) + + if tensor_util.constant_value(operator_dimension) is not None: + operator_dimension = tensor_util.constant_value(operator_dimension) + if output.shape[-2] is not None and output.shape[-1] is not None: + dim = int(output.shape[-2] * output_shape[-1] // operator_dimension) + else: + dim = math_ops.cast( + output_shape[-2] * output_shape[-1] // operator_dimension, + dtype=dtypes.int32) + + output_shape = _prefer_static_concat_shape( + output_shape[:-2], [dim, operator_dimension]) + output = array_ops.reshape(output, shape=output_shape) + + # Conjugate because we are trying to compute A @ B^T, but + # `LinearOperator` only supports `adjoint_arg`. 
+ if self.dtype.is_complex: + output = math_ops.conj(output) + + output = solve_matmul_fn( + o, output, adjoint=adjoint, adjoint_arg=True) + + if adjoint_arg: + col_dim = _prefer_static_shape(x)[-2] + else: + col_dim = _prefer_static_shape(x)[-1] + + if adjoint: + row_dim = self.domain_dimension_tensor() + else: + row_dim = self.range_dimension_tensor() + + matrix_shape = [row_dim, col_dim] + + output = array_ops.reshape( + output, + _prefer_static_concat_shape( + _prefer_static_shape(output)[:-2], matrix_shape)) + + if x.shape.is_fully_defined(): + if adjoint_arg: + column_dim = x.shape[-2] + else: + column_dim = x.shape[-1] + broadcast_batch_shape = common_shapes.broadcast_shape( + x.shape[:-2], self.batch_shape) + if adjoint: + matrix_dimensions = [self.domain_dimension, column_dim] + else: + matrix_dimensions = [self.range_dimension, column_dim] + + output.set_shape(broadcast_batch_shape.concatenate( + matrix_dimensions)) + + return output + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + def matmul_fn(o, x, adjoint, adjoint_arg): + return o.matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg) + return self._solve_matmul_internal( + x=x, + solve_matmul_fn=matmul_fn, + adjoint=adjoint, + adjoint_arg=adjoint_arg) + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + def solve_fn(o, rhs, adjoint, adjoint_arg): + return o.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) + return self._solve_matmul_internal( + x=rhs, + solve_matmul_fn=solve_fn, + adjoint=adjoint, + adjoint_arg=adjoint_arg) + + def _determinant(self): + # Note that we have |X1 x X2| = |X1| ** n * |X2| ** m, where X1 is an m x m + # matrix, and X2 is an n x n matrix. We can iteratively apply this property + # to get the determinant of |X1 x X2 x X3 ...|. If T is the product of the + # domain dimension of all operators, then we have: + # |X1 x X2 x X3 ...| = + # |X1| ** (T / m) * |X2 x X3 ... | ** m = + # |X1| ** (T / m) * |X2| ** (m * (T / m) / n) * ... 
= + # |X1| ** (T / m) * |X2| ** (T / n) * | X3 x X4... | ** (m * n) + # And by doing induction we have product(|X_i| ** (T / dim(X_i))). + total = self.domain_dimension_tensor() + determinant = 1. + for operator in self.operators: + determinant = determinant * operator.determinant() ** math_ops.cast( + total / operator.domain_dimension_tensor(), + dtype=operator.dtype) + return determinant + + def _log_abs_determinant(self): + # This will be sum((total / dim(x_i)) * log |X_i|) + total = self.domain_dimension_tensor() + log_abs_det = 0. + for operator in self.operators: + log_abs_det += operator.log_abs_determinant() * math_ops.cast( + total / operator.domain_dimension_tensor(), + dtype=operator.dtype) + return log_abs_det + + def _trace(self): + # tr(A x B) = tr(A) * tr(B) + trace = 1. + for operator in self.operators: + trace = trace * operator.trace() + return trace + + def _diag_part(self): + diag_part = self.operators[0].diag_part() + for operator in self.operators[1:]: + diag_part = diag_part[..., :, array_ops.newaxis] + op_diag_part = operator.diag_part()[..., array_ops.newaxis, :] + diag_part = diag_part * op_diag_part + diag_part = array_ops.reshape( + diag_part, + shape=array_ops.concat( + [array_ops.shape(diag_part)[:-2], [-1]], axis=0)) + if self.range_dimension > self.domain_dimension: + diag_dimension = self.domain_dimension + else: + diag_dimension = self.range_dimension + diag_part.set_shape( + self.batch_shape.concatenate(diag_dimension)) + return diag_part + + def _to_dense(self): + product = self.operators[0].to_dense() + for operator in self.operators[1:]: + # Product has shape [B, R1, 1, C1, 1]. + product = product[ + ..., :, array_ops.newaxis, :, array_ops.newaxis] + # Operator has shape [B, 1, R2, 1, C2]. + op_to_mul = operator.to_dense()[ + ..., array_ops.newaxis, :, array_ops.newaxis, :] + # This is now [B, R1, R2, C1, C2]. + product = product * op_to_mul + # Now merge together dimensions to get [B, R1 * R2, C1 * C2]. 
+ product_shape = _prefer_static_shape(product) + shape = _prefer_static_concat_shape( + product_shape[:-4], + [product_shape[-4] * product_shape[-3], + product_shape[-2] * product_shape[-1]]) + + product = array_ops.reshape(product, shape=shape) + product.set_shape(self.shape) + return product + + def _eigvals(self): + # This will be the kronecker product of all the eigenvalues. + # Note: It doesn't matter which kronecker product it is, since every + # kronecker product of the same matrices are similar. + eigvals = [operator.eigvals() for operator in self.operators] + # Now compute the kronecker product + product = eigvals[0] + for eigval in eigvals[1:]: + # Product has shape [B, R1, 1]. + product = product[..., array_ops.newaxis] + # Eigval has shape [B, 1, R2]. Produces shape [B, R1, R2]. + product = product * eigval[..., array_ops.newaxis, :] + # Reshape to [B, R1 * R2] + product = array_ops.reshape( + product, + shape=array_ops.concat([array_ops.shape(product)[:-2], [-1]], axis=0)) + product.set_shape(self.shape[:-1]) + return product + + def _assert_non_singular(self): + if all(operator.is_square for operator in self.operators): + asserts = [operator.assert_non_singular() for operator in self.operators] + return control_flow_ops.group(asserts) + else: + raise errors.InvalidArgumentError( + node_def=None, + op=None, + message="All Kronecker factors must be square for the product to be " + "invertible. Expected hint `is_square` to be True for every operator " + "in argument `operators`.") + + def _assert_self_adjoint(self): + if all(operator.is_square for operator in self.operators): + asserts = [operator.assert_self_adjoint() for operator in self.operators] + return control_flow_ops.group(asserts) + else: + raise errors.InvalidArgumentError( + node_def=None, + op=None, + message="All Kronecker factors must be square for the product to be " + "invertible. 
Expected hint `is_square` to be True for every operator " + "in argument `operators`.") + + @property + def _composite_tensor_fields(self): + return ("operators",) + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"operators": [0] * len(self.operators)} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_low_rank_update.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_low_rank_update.py new file mode 100644 index 0000000000000000000000000000000000000000..a326b9f99a62fd68e2dae34058f001875426ad0f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_low_rank_update.py @@ -0,0 +1,511 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Perturb a `LinearOperator` with a rank `K` update.""" + +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_diag +from tensorflow.python.ops.linalg import linear_operator_identity +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util.tf_export import tf_export + +__all__ = [ + "LinearOperatorLowRankUpdate", +] + + +@tf_export("linalg.LinearOperatorLowRankUpdate") +@linear_operator.make_composite_tensor +class LinearOperatorLowRankUpdate(linear_operator.LinearOperator): + """Perturb a `LinearOperator` with a rank `K` update. + + This operator acts like a [batch] matrix `A` with shape + `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `M x N` matrix. + + `LinearOperatorLowRankUpdate` represents `A = L + U D V^H`, where + + ``` + L, is a LinearOperator representing [batch] M x N matrices + U, is a [batch] M x K matrix. Typically K << M. + D, is a [batch] K x K matrix. + V, is a [batch] N x K matrix. Typically K << N. + V^H is the Hermitian transpose (adjoint) of V. + ``` + + If `M = N`, determinants and solves are done using the matrix determinant + lemma and Woodbury identities, and thus require L and D to be non-singular. + + Solves and determinants will be attempted unless the "is_non_singular" + property of L and D is False. 
+ + In the event that L and D are positive-definite, and U = V, solves and + determinants can be done using a Cholesky factorization. + + ```python + # Create a 3 x 3 diagonal linear operator. + diag_operator = LinearOperatorDiag( + diag_update=[1., 2., 3.], is_non_singular=True, is_self_adjoint=True, + is_positive_definite=True) + + # Perturb with a rank 2 perturbation + operator = LinearOperatorLowRankUpdate( + operator=diag_operator, + u=[[1., 2.], [-1., 3.], [0., 0.]], + diag_update=[11., 12.], + v=[[1., 2.], [-1., 3.], [10., 10.]]) + + operator.shape + ==> [3, 3] + + operator.log_abs_determinant() + ==> scalar Tensor + + x = ... Shape [3, 4] Tensor + operator.matmul(x) + ==> Shape [3, 4] Tensor + ``` + + ### Shape compatibility + + This operator acts on [batch] matrix with compatible shape. + `x` is a batch matrix with compatible shape for `matmul` and `solve` if + + ``` + operator.shape = [B1,...,Bb] + [M, N], with b >= 0 + x.shape = [B1,...,Bb] + [N, R], with R >= 0. + ``` + + ### Performance + + Suppose `operator` is a `LinearOperatorLowRankUpdate` of shape `[M, N]`, + made from a rank `K` update of `base_operator` which performs `.matmul(x)` on + `x` having `x.shape = [N, R]` with `O(L_matmul*N*R)` complexity (and similarly + for `solve`, `determinant`. Then, if `x.shape = [N, R]`, + + * `operator.matmul(x)` is `O(L_matmul*N*R + K*N*R)` + + and if `M = N`, + + * `operator.solve(x)` is `O(L_matmul*N*R + N*K*R + K^2*R + K^3)` + * `operator.determinant()` is `O(L_determinant + L_solve*N*K + K^2*N + K^3)` + + If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and + `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular`, `self_adjoint`, `positive_definite`, + `diag_update_positive` and `square`. 
These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + base_operator, + u, + diag_update=None, + v=None, + is_diag_update_positive=None, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name="LinearOperatorLowRankUpdate"): + """Initialize a `LinearOperatorLowRankUpdate`. + + This creates a `LinearOperator` of the form `A = L + U D V^H`, with + `L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch] + diagonal matrix. + + If `L` is non-singular, solves and determinants are available. + Solves/determinants both involve a solve/determinant of a `K x K` system. + In the event that L and D are self-adjoint positive-definite, and U = V, + this can be done using a Cholesky factorization. The user should set the + `is_X` matrix property hints, which will trigger the appropriate code path. + + Args: + base_operator: Shape `[B1,...,Bb, M, N]`. + u: Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`. + This is `U` above. + diag_update: Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype` + as `base_operator`. This is the diagonal of `D` above. + Defaults to `D` being the identity operator. + v: Optional `Tensor` of same `dtype` as `u` and shape `[B1,...,Bb, N, K]` + Defaults to `v = u`, in which case the perturbation is symmetric. + If `M != N`, then `v` must be set since the perturbation is not square. + is_diag_update_positive: Python `bool`. + If `True`, expect `diag_update > 0`. + is_non_singular: Expect that this operator is non-singular. 
+ Default is `None`, unless `is_positive_definite` is auto-set to be + `True` (see below). + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. Default is `None`, unless `base_operator` is self-adjoint + and `v = None` (meaning `u=v`), in which case this defaults to `True`. + is_positive_definite: Expect that this operator is positive definite. + Default is `None`, unless `base_operator` is positive-definite + `v = None` (meaning `u=v`), and `is_diag_update_positive`, in which case + this defaults to `True`. + Note that we say an operator is positive definite when the quadratic + form `x^H A x` has positive real part for all nonzero `x`. + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. + + Raises: + ValueError: If `is_X` flags are set in an inconsistent way. + """ + parameters = dict( + base_operator=base_operator, + u=u, + diag_update=diag_update, + v=v, + is_diag_update_positive=is_diag_update_positive, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + dtype = base_operator.dtype + + if diag_update is not None: + if is_diag_update_positive and dtype.is_complex: + logging.warn("Note: setting is_diag_update_positive with a complex " + "dtype means that diagonal is real and positive.") + + if diag_update is None: + if is_diag_update_positive is False: + raise ValueError( + "Default diagonal is the identity, which is positive. However, " + "user set 'is_diag_update_positive' to False.") + is_diag_update_positive = True + + # In this case, we can use a Cholesky decomposition to help us solve/det. + self._use_cholesky = ( + base_operator.is_positive_definite and base_operator.is_self_adjoint + and is_diag_update_positive + and v is None) + + # Possibly auto-set some characteristic flags from None to True. 
+ # If the Flags were set (by the user) incorrectly to False, then raise. + if base_operator.is_self_adjoint and v is None and not dtype.is_complex: + if is_self_adjoint is False: + raise ValueError( + "A = L + UDU^H, with L self-adjoint and D real diagonal. Since" + " UDU^H is self-adjoint, this must be a self-adjoint operator.") + is_self_adjoint = True + + # The condition for using a cholesky is sufficient for SPD, and + # we no weaker choice of these hints leads to SPD. Therefore, + # the following line reads "if hints indicate SPD..." + if self._use_cholesky: + if ( + is_positive_definite is False + or is_self_adjoint is False + or is_non_singular is False): + raise ValueError( + "Arguments imply this is self-adjoint positive-definite operator.") + is_positive_definite = True + is_self_adjoint = True + + with ops.name_scope(name): + + # Create U and V. + self._u = linear_operator_util.convert_nonref_to_tensor(u, name="u") + if v is None: + self._v = self._u + else: + self._v = linear_operator_util.convert_nonref_to_tensor(v, name="v") + + if diag_update is None: + self._diag_update = None + else: + self._diag_update = linear_operator_util.convert_nonref_to_tensor( + diag_update, name="diag_update") + + # Create base_operator L. + self._base_operator = base_operator + + super(LinearOperatorLowRankUpdate, self).__init__( + dtype=self._base_operator.dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + # Create the diagonal operator D. + self._set_diag_operators(diag_update, is_diag_update_positive) + self._is_diag_update_positive = is_diag_update_positive + + self._check_shapes() + + def _check_shapes(self): + """Static check that shapes are compatible.""" + # Broadcast shape also checks that u and v are compatible. 
+ uv_shape = array_ops.broadcast_static_shape( + self.u.shape, self.v.shape) + + batch_shape = array_ops.broadcast_static_shape( + self.base_operator.batch_shape, uv_shape[:-2]) + + tensor_shape.Dimension( + self.base_operator.domain_dimension).assert_is_compatible_with( + uv_shape[-2]) + + if self._diag_update is not None: + tensor_shape.dimension_at_index(uv_shape, -1).assert_is_compatible_with( + self._diag_update.shape[-1]) + array_ops.broadcast_static_shape( + batch_shape, self._diag_update.shape[:-1]) + + def _set_diag_operators(self, diag_update, is_diag_update_positive): + """Set attributes self._diag_update and self._diag_operator.""" + if diag_update is not None: + self._diag_operator = linear_operator_diag.LinearOperatorDiag( + self._diag_update, is_positive_definite=is_diag_update_positive) + else: + if tensor_shape.dimension_value(self.u.shape[-1]) is not None: + r = tensor_shape.dimension_value(self.u.shape[-1]) + else: + r = array_ops.shape(self.u)[-1] + self._diag_operator = linear_operator_identity.LinearOperatorIdentity( + num_rows=r, dtype=self.dtype) + + @property + def u(self): + """If this operator is `A = L + U D V^H`, this is the `U`.""" + return self._u + + @property + def v(self): + """If this operator is `A = L + U D V^H`, this is the `V`.""" + return self._v + + @property + def is_diag_update_positive(self): + """If this operator is `A = L + U D V^H`, this hints `D > 0` elementwise.""" + return self._is_diag_update_positive + + @property + def diag_update(self): + """If this operator is `A = L + U D V^H`, this is the diagonal of `D`.""" + return self._diag_update + + @property + def diag_operator(self): + """If this operator is `A = L + U D V^H`, this is `D`.""" + return self._diag_operator + + @property + def base_operator(self): + """If this operator is `A = L + U D V^H`, this is the `L`.""" + return self._base_operator + + def _assert_self_adjoint(self): + # Recall this operator is: + # A = L + UDV^H. 
+ # So in one case self-adjoint depends only on L + if self.u is self.v and self.diag_update is None: + return self.base_operator.assert_self_adjoint() + # In all other cases, sufficient conditions for self-adjoint can be found + # efficiently. However, those conditions are not necessary conditions. + return super(LinearOperatorLowRankUpdate, self).assert_self_adjoint() + + def _shape(self): + batch_shape = array_ops.broadcast_static_shape( + self.base_operator.batch_shape, + self.diag_operator.batch_shape) + batch_shape = array_ops.broadcast_static_shape( + batch_shape, + self.u.shape[:-2]) + batch_shape = array_ops.broadcast_static_shape( + batch_shape, + self.v.shape[:-2]) + return batch_shape.concatenate(self.base_operator.shape[-2:]) + + def _shape_tensor(self): + batch_shape = array_ops.broadcast_dynamic_shape( + self.base_operator.batch_shape_tensor(), + self.diag_operator.batch_shape_tensor()) + batch_shape = array_ops.broadcast_dynamic_shape( + batch_shape, + array_ops.shape(self.u)[:-2]) + batch_shape = array_ops.broadcast_dynamic_shape( + batch_shape, + array_ops.shape(self.v)[:-2]) + return array_ops.concat( + [batch_shape, self.base_operator.shape_tensor()[-2:]], axis=0) + + def _get_uv_as_tensors(self): + """Get (self.u, self.v) as tensors (in case they were refs).""" + u = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.u) + if self.v is self.u: + v = u + else: + v = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.v) + return u, v + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + u, v = self._get_uv_as_tensors() + l = self.base_operator + d = self.diag_operator + + leading_term = l.matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg) + + if adjoint: + uh_x = math_ops.matmul(u, x, adjoint_a=True, adjoint_b=adjoint_arg) + d_uh_x = d.matmul(uh_x, adjoint=adjoint) + v_d_uh_x = math_ops.matmul(v, d_uh_x) + return leading_term + v_d_uh_x + else: + vh_x = math_ops.matmul(v, x, adjoint_a=True, adjoint_b=adjoint_arg) + d_vh_x = 
d.matmul(vh_x, adjoint=adjoint) + u_d_vh_x = math_ops.matmul(u, d_vh_x) + return leading_term + u_d_vh_x + + def _determinant(self): + if self.is_positive_definite: + return math_ops.exp(self.log_abs_determinant()) + # The matrix determinant lemma gives + # https://en.wikipedia.org/wiki/Matrix_determinant_lemma + # det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L) + # = det(C) det(D) det(L) + # where C is sometimes known as the capacitance matrix, + # C := D^{-1} + V^H L^{-1} U + u, v = self._get_uv_as_tensors() + det_c = linalg_ops.matrix_determinant(self._make_capacitance(u=u, v=v)) + det_d = self.diag_operator.determinant() + det_l = self.base_operator.determinant() + return det_c * det_d * det_l + + def _diag_part(self): + # [U D V^T]_{ii} = sum_{jk} U_{ij} D_{jk} V_{ik} + # = sum_{j} U_{ij} D_{jj} V_{ij} + u, v = self._get_uv_as_tensors() + product = u * math_ops.conj(v) + if self.diag_update is not None: + product *= array_ops.expand_dims(self.diag_update, axis=-2) + return ( + math_ops.reduce_sum(product, axis=-1) + self.base_operator.diag_part()) + + def _log_abs_determinant(self): + u, v = self._get_uv_as_tensors() + # Recall + # det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L) + # = det(C) det(D) det(L) + log_abs_det_d = self.diag_operator.log_abs_determinant() + log_abs_det_l = self.base_operator.log_abs_determinant() + + if self._use_cholesky: + chol_cap_diag = array_ops.matrix_diag_part( + linalg_ops.cholesky(self._make_capacitance(u=u, v=v))) + log_abs_det_c = 2 * math_ops.reduce_sum( + math_ops.log(chol_cap_diag), axis=[-1]) + else: + det_c = linalg_ops.matrix_determinant(self._make_capacitance(u=u, v=v)) + log_abs_det_c = math_ops.log(math_ops.abs(det_c)) + if self.dtype.is_complex: + log_abs_det_c = math_ops.cast(log_abs_det_c, dtype=self.dtype) + + return log_abs_det_c + log_abs_det_d + log_abs_det_l + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + if self.base_operator.is_non_singular is False: + raise ValueError( + 
"Solve not implemented unless this is a perturbation of a " + "non-singular LinearOperator.") + # The Woodbury formula gives: + # https://en.wikipedia.org/wiki/Woodbury_matrix_identity + # (L + UDV^H)^{-1} + # = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1} + # = L^{-1} - L^{-1} U C^{-1} V^H L^{-1} + # where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U + # Note also that, with ^{-H} being the inverse of the adjoint, + # (L + UDV^H)^{-H} + # = L^{-H} - L^{-H} V C^{-H} U^H L^{-H} + l = self.base_operator + if adjoint: + # If adjoint, U and V have flipped roles in the operator. + v, u = self._get_uv_as_tensors() + # Capacitance should still be computed with u=self.u and v=self.v, which + # after the "flip" on the line above means u=v, v=u. I.e. no need to + # "flip" in the capacitance call, since the call to + # matrix_solve_with_broadcast below is done with the `adjoint` argument, + # and this takes care of things. + capacitance = self._make_capacitance(u=v, v=u) + else: + u, v = self._get_uv_as_tensors() + capacitance = self._make_capacitance(u=u, v=v) + + # L^{-1} rhs + linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) + # V^H L^{-1} rhs + vh_linv_rhs = math_ops.matmul(v, linv_rhs, adjoint_a=True) + # C^{-1} V^H L^{-1} rhs + if self._use_cholesky: + capinv_vh_linv_rhs = linalg_ops.cholesky_solve( + linalg_ops.cholesky(capacitance), vh_linv_rhs) + else: + capinv_vh_linv_rhs = linear_operator_util.matrix_solve_with_broadcast( + capacitance, vh_linv_rhs, adjoint=adjoint) + # U C^{-1} V^H M^{-1} rhs + u_capinv_vh_linv_rhs = math_ops.matmul(u, capinv_vh_linv_rhs) + # L^{-1} U C^{-1} V^H L^{-1} rhs + linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs, adjoint=adjoint) + + # L^{-1} - L^{-1} U C^{-1} V^H L^{-1} + return linv_rhs - linv_u_capinv_vh_linv_rhs + + def _make_capacitance(self, u, v): + # C := D^{-1} + V^H L^{-1} U + # which is sometimes known as the "capacitance" matrix. 
+ + # L^{-1} U + linv_u = self.base_operator.solve(u) + # V^H L^{-1} U + vh_linv_u = math_ops.matmul(v, linv_u, adjoint_a=True) + + # D^{-1} + V^H L^{-1} V + capacitance = self._diag_operator.inverse().add_to_tensor(vh_linv_u) + return capacitance + + @property + def _composite_tensor_fields(self): + return ("base_operator", "u", "diag_update", "v", "is_diag_update_positive") + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return { + "base_operator": 0, + "u": 2, + "diag_update": 1, + "v": 2 + } diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_lower_triangular.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_lower_triangular.py new file mode 100644 index 0000000000000000000000000000000000000000..e3f54a622240f61656044927df5f434a120ce755 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_lower_triangular.py @@ -0,0 +1,245 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""`LinearOperator` acting like a lower triangular matrix.""" + +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.ops.linalg import property_hint_util +from tensorflow.python.util.tf_export import tf_export + +__all__ = [ + "LinearOperatorLowerTriangular", +] + + +@tf_export("linalg.LinearOperatorLowerTriangular") +@linear_operator.make_composite_tensor +class LinearOperatorLowerTriangular(linear_operator.LinearOperator): + """`LinearOperator` acting like a [batch] square lower triangular matrix. + + This operator acts like a [batch] lower triangular matrix `A` with shape + `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `N x N` matrix. + + `LinearOperatorLowerTriangular` is initialized with a `Tensor` having + dimensions `[B1,...,Bb, N, N]`. The upper triangle of the last two + dimensions is ignored. + + ```python + # Create a 2 x 2 lower-triangular linear operator. + tril = [[1., 2.], [3., 4.]] + operator = LinearOperatorLowerTriangular(tril) + + # The upper triangle is ignored. + operator.to_dense() + ==> [[1., 0.] + [3., 4.]] + + operator.shape + ==> [2, 2] + + operator.log_abs_determinant() + ==> scalar Tensor + + x = ... Shape [2, 4] Tensor + operator.matmul(x) + ==> Shape [2, 4] Tensor + + # Create a [2, 3] batch of 4 x 4 linear operators. + tril = tf.random.normal(shape=[2, 3, 4, 4]) + operator = LinearOperatorLowerTriangular(tril) + ``` + + #### Shape compatibility + + This operator acts on [batch] matrix with compatible shape. 
+ `x` is a batch matrix with compatible shape for `matmul` and `solve` if + + ``` + operator.shape = [B1,...,Bb] + [N, N], with b >= 0 + x.shape = [B1,...,Bb] + [N, R], with R >= 0. + ``` + + #### Performance + + Suppose `operator` is a `LinearOperatorLowerTriangular` of shape `[N, N]`, + and `x.shape = [N, R]`. Then + + * `operator.matmul(x)` involves `N^2 * R` multiplications. + * `operator.solve(x)` involves `N * R` size `N` back-substitutions. + * `operator.determinant()` involves a size `N` `reduce_prod`. + + If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and + `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + tril, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name="LinearOperatorLowerTriangular"): + r"""Initialize a `LinearOperatorLowerTriangular`. + + Args: + tril: Shape `[B1,...,Bb, N, N]` with `b >= 0`, `N >= 0`. + The lower triangular part of `tril` defines this operator. The strictly + upper triangle is ignored. + is_non_singular: Expect that this operator is non-singular. + This operator is non-singular if and only if its diagonal elements are + all non-zero. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. 
This operator is self-adjoint only if it is diagonal with + real-valued diagonal entries. In this case it is advised to use + `LinearOperatorDiag`. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. + + Raises: + ValueError: If `is_square` is `False`. + """ + parameters = dict( + tril=tril, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + + if is_square is False: + raise ValueError( + "Only square lower triangular operators supported at this time.") + is_square = True + + with ops.name_scope(name, values=[tril]): + self._tril = linear_operator_util.convert_nonref_to_tensor(tril, + name="tril") + self._check_tril(self._tril) + + super(LinearOperatorLowerTriangular, self).__init__( + dtype=self._tril.dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + @property + def tril(self): + """The lower triangular matrix defining this operator.""" + return self._tril + + def _check_tril(self, tril): + """Static check of the `tril` argument.""" + + if tril.shape.ndims is not None and tril.shape.ndims < 2: + raise ValueError( + "Argument tril must have at least 2 dimensions. 
Found: %s" + % tril) + + def _get_tril(self): + """Gets the `tril` kwarg, with upper part zero-d out.""" + return array_ops.matrix_band_part(self._tril, -1, 0) + + def _get_diag(self): + """Gets the diagonal part of `tril` kwarg.""" + return array_ops.matrix_diag_part(self._tril) + + def _shape(self): + return self._tril.shape + + def _shape_tensor(self): + return array_ops.shape(self._tril) + + def _assert_non_singular(self): + return linear_operator_util.assert_no_entries_with_modulus_zero( + self._get_diag(), + message="Singular operator: Diagonal contained zero values.") + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + return math_ops.matmul( + self._get_tril(), x, adjoint_a=adjoint, adjoint_b=adjoint_arg) + + def _linop_matmul( + self, + left_operator: "LinearOperatorLowerTriangular", + right_operator: linear_operator.LinearOperator, + ) -> linear_operator.LinearOperator: + # instance check of linear_operator_diag.LinearOperatorDiag + if hasattr(right_operator, "_check_diag"): + return LinearOperatorLowerTriangular( + tril=left_operator.to_dense() * right_operator.diag, + is_non_singular=property_hint_util.combined_non_singular_hint( + right_operator, left_operator), + # This is safe to do since the Triangular matrix is only self-adjoint + # when it is a diagonal matrix, and hence commutes. 
+ is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint( + right_operator, left_operator), + is_positive_definite=None, + is_square=True) + return super()._linop_matmul(left_operator, right_operator) + + def _determinant(self): + return math_ops.reduce_prod(self._get_diag(), axis=[-1]) + + def _log_abs_determinant(self): + return math_ops.reduce_sum( + math_ops.log(math_ops.abs(self._get_diag())), axis=[-1]) + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + rhs = linalg.adjoint(rhs) if adjoint_arg else rhs + return linalg.triangular_solve( + self._get_tril(), rhs, lower=True, adjoint=adjoint) + + def _to_dense(self): + return self._get_tril() + + def _eigvals(self): + return self._get_diag() + + @property + def _composite_tensor_fields(self): + return ("tril",) + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"tril": 2} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_permutation.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_permutation.py new file mode 100644 index 0000000000000000000000000000000000000000..31f07732ef151ada0d5f1bbd31d30c424fef5135 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_permutation.py @@ -0,0 +1,271 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""`LinearOperator` acting like a permutation matrix.""" + +import numpy as np + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import sort_ops +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.util.tf_export import tf_export + +__all__ = ["LinearOperatorPermutation",] + + +@tf_export("linalg.LinearOperatorPermutation") +@linear_operator.make_composite_tensor +class LinearOperatorPermutation(linear_operator.LinearOperator): + """`LinearOperator` acting like a [batch] of permutation matrices. + + This operator acts like a [batch] of permutations with shape + `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `N x N` matrix. This matrix `A` is not materialized, but for + purposes of broadcasting this shape will be relevant. + + `LinearOperatorPermutation` is initialized with a (batch) vector. + + A permutation, is defined by an integer vector `v` whose values are unique + and are in the range `[0, ... n]`. Applying the permutation on an input + matrix has the folllowing meaning: the value of `v` at index `i` + says to move the `v[i]`-th row of the input matrix to the `i`-th row. + Because all values are unique, this will result in a permutation of the + rows the input matrix. Note, that the permutation vector `v` has the same + semantics as `tf.transpose`. 
+ + ```python + # Create a 3 x 3 permutation matrix that swaps the last two columns. + vec = [0, 2, 1] + operator = LinearOperatorPermutation(vec) + + operator.to_dense() + ==> [[1., 0., 0.] + [0., 0., 1.] + [0., 1., 0.]] + + operator.shape + ==> [3, 3] + + # This will be zero. + operator.log_abs_determinant() + ==> scalar Tensor + + x = ... Shape [3, 4] Tensor + operator.matmul(x) + ==> Shape [3, 4] Tensor + ``` + + #### Shape compatibility + + This operator acts on [batch] matrix with compatible shape. + `x` is a batch matrix with compatible shape for `matmul` and `solve` if + + ``` + operator.shape = [B1,...,Bb] + [N, N], with b >= 0 + x.shape = [C1,...,Cc] + [N, R], + and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] + ``` + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + perm, + dtype=dtypes.float32, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name="LinearOperatorPermutation"): + r"""Initialize a `LinearOperatorPermutation`. + + Args: + perm: Shape `[B1,...,Bb, N]` Integer `Tensor` with `b >= 0` + `N >= 0`. An integer vector that represents the permutation to apply. + Note that this argument is same as `tf.transpose`. However, this + permutation is applied on the rows, while the permutation in + `tf.transpose` is applied on the dimensions of the `Tensor`. 
`perm` + is required to have unique entries from `{0, 1, ... N-1}`. + dtype: The `dtype` of arguments to this operator. Default: `float32`. + Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, + `complex128`. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. This is autoset to true + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + This is autoset to false. + is_square: Expect that this operator acts like square [batch] matrices. + This is autoset to true. + name: A name for this `LinearOperator`. + + Raises: + ValueError: `is_self_adjoint` is not `True`, `is_positive_definite` is + not `False` or `is_square` is not `True`. + """ + parameters = dict( + perm=perm, + dtype=dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + + with ops.name_scope(name, values=[perm]): + self._perm = linear_operator_util.convert_nonref_to_tensor( + perm, name="perm") + self._check_perm(self._perm) + + # Check and auto-set hints. + if is_non_singular is False: # pylint:disable=g-bool-id-comparison + raise ValueError(f"A Permutation operator is always non-singular. " + f"Expected argument `is_non_singular` to be True. " + f"Received: {is_non_singular}.") + + if is_square is False: # pylint:disable=g-bool-id-comparison + raise ValueError(f"A Permutation operator is always square. " + f"Expected argument `is_square` to be True. 
" + f"Received: {is_square}.") + is_square = True + + super(LinearOperatorPermutation, self).__init__( + dtype=dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + def _check_perm(self, perm): + """Static check of perm.""" + if (perm.shape.ndims is not None and perm.shape.ndims < 1): + raise ValueError(f"Argument `perm` must have at least 1 dimension. " + f"Received: {perm}.") + if not perm.dtype.is_integer: + raise TypeError(f"Argument `perm` must be integer dtype. " + f"Received: {perm}.") + # Check that the permutation satisfies the uniqueness constraint. + static_perm = tensor_util.constant_value(perm) + if static_perm is not None: + sorted_perm = np.sort(static_perm, axis=-1) + if np.any(sorted_perm != np.arange(0, static_perm.shape[-1])): + raise ValueError( + f"Argument `perm` must be a vector of unique integers from " + f"0 to {static_perm.shape[-1] - 1}.") + + def _shape(self): + perm_shape = self._perm.shape + return perm_shape.concatenate(perm_shape[-1:]) + + def _shape_tensor(self): + perm_shape = array_ops.shape(self._perm) + k = perm_shape[-1] + return array_ops.concat((perm_shape, [k]), 0) + + def _assert_non_singular(self): + return control_flow_ops.no_op("assert_non_singular") + + def _domain_dimension_tensor(self, perm=None): + perm = perm if perm is not None else self.perm + return array_ops.shape(perm)[-1] + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + perm = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.perm) + if adjoint and not self.is_self_adjoint: + # TODO(srvasude): invert_permutation doesn't work on batches so we use + # argsort. + perm = sort_ops.argsort(perm, axis=-1) + x = linalg.adjoint(x) if adjoint_arg else x + + # We need to broadcast x and the permutation since tf.gather doesn't + # broadcast. 
+ broadcast_shape = array_ops.broadcast_dynamic_shape( + array_ops.shape(x)[:-1], array_ops.shape(perm)) + k = array_ops.shape(x)[-1] + broadcast_x_shape = array_ops.concat([broadcast_shape, [k]], axis=-1) + x = array_ops.broadcast_to(x, broadcast_x_shape) + perm = array_ops.broadcast_to(perm, broadcast_shape) + + m = array_ops.shape(x)[-2] + x = array_ops.reshape(x, [-1, m, k]) + perm = array_ops.reshape(perm, [-1, m]) + + y = array_ops.gather(x, perm, axis=-2, batch_dims=1) + return array_ops.reshape(y, broadcast_x_shape) + + # TODO(srvasude): Permutation parity is equivalent to the determinant. + + def _log_abs_determinant(self): + # Permutation matrices have determinant +/- 1. + return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype) + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + # The inverse of a permutation matrix is the transpose matrix. + # Apply a matmul and flip the adjoint bit. + return self._matmul(rhs, adjoint=(not adjoint), adjoint_arg=adjoint_arg) + + def _to_dense(self): + perm = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.perm) + return math_ops.cast(math_ops.equal( + math_ops.range(0, self._domain_dimension_tensor(perm)), + perm[..., array_ops.newaxis]), self.dtype) + + def _diag_part(self): + perm = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.perm) + return math_ops.cast(math_ops.equal( + math_ops.range(0, self._domain_dimension_tensor(perm)), + perm), self.dtype) + + def _cond(self): + # Permutation matrices are rotations which have condition number 1. 
+ return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) + + @property + def perm(self): + return self._perm + + @property + def _composite_tensor_fields(self): + return ("perm", "dtype") + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"perm": 1} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_test_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..e96b2e2a17325cf1a87a22c456cecbefd07fe27e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_test_util.py @@ -0,0 +1,1436 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utilities for testing `LinearOperator` and sub-classes.""" + +import abc +import itertools + +import numpy as np + +from tensorflow.python.eager import backprop +from tensorflow.python.eager import context +from tensorflow.python.eager import def_function +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import random_seed +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.framework import test_util +from tensorflow.python.module import module +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gradients_impl +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops import sort_ops +from tensorflow.python.ops import variables +from tensorflow.python.ops import while_v2 +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.platform import test +from tensorflow.python.saved_model import load as load_model +from tensorflow.python.saved_model import nested_structure_coder +from tensorflow.python.saved_model import save as save_model +from tensorflow.python.util import nest + + +class OperatorShapesInfo: + """Object encoding expected shape for a test. + + Encodes the expected shape of a matrix for a test. Also + allows additional metadata for the test harness. + """ + + def __init__(self, shape, **kwargs): + self.shape = shape + self.__dict__.update(kwargs) + + +class CheckTapeSafeSkipOptions: + + # Skip checking this particular method. 
+ DETERMINANT = "determinant" + DIAG_PART = "diag_part" + LOG_ABS_DETERMINANT = "log_abs_determinant" + TRACE = "trace" + + +class LinearOperatorDerivedClassTest(test.TestCase, metaclass=abc.ABCMeta): + """Tests for derived classes. + + Subclasses should implement every abstractmethod, and this will enable all + test methods to work. + """ + + # Absolute/relative tolerance for tests. + _atol = { + dtypes.float16: 1e-3, + dtypes.float32: 1e-6, + dtypes.float64: 1e-12, + dtypes.complex64: 1e-6, + dtypes.complex128: 1e-12 + } + + _rtol = { + dtypes.float16: 1e-3, + dtypes.float32: 1e-6, + dtypes.float64: 1e-12, + dtypes.complex64: 1e-6, + dtypes.complex128: 1e-12 + } + + def assertAC(self, x, y, check_dtype=False): + """Derived classes can set _atol, _rtol to get different tolerance.""" + dtype = dtypes.as_dtype(x.dtype) + atol = self._atol[dtype] + rtol = self._rtol[dtype] + self.assertAllClose(x, y, atol=atol, rtol=rtol) + if check_dtype: + self.assertDTypeEqual(x, y.dtype) + + @staticmethod + def adjoint_options(): + return [False, True] + + @staticmethod + def adjoint_arg_options(): + return [False, True] + + @staticmethod + def dtypes_to_test(): + # TODO(langmore) Test tf.float16 once tf.linalg.solve works in 16bit. + return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128] + + @staticmethod + def use_placeholder_options(): + return [False, True] + + @staticmethod + def use_blockwise_arg(): + return False + + @staticmethod + def operator_shapes_infos(): + """Returns list of OperatorShapesInfo, encapsulating the shape to test.""" + raise NotImplementedError("operator_shapes_infos has not been implemented.") + + @abc.abstractmethod + def operator_and_matrix( + self, shapes_info, dtype, use_placeholder, + ensure_self_adjoint_and_pd=False): + """Build a batch matrix and an Operator that should have similar behavior. + + Every operator acts like a (batch) matrix. This method returns both + together, and is used by tests. 
+ + Args: + shapes_info: `OperatorShapesInfo`, encoding shape information about the + operator. + dtype: Numpy dtype. Data type of returned array/operator. + use_placeholder: Python bool. If True, initialize the operator with a + placeholder of undefined shape and correct dtype. + ensure_self_adjoint_and_pd: If `True`, + construct this operator to be Hermitian Positive Definite, as well + as ensuring the hints `is_positive_definite` and `is_self_adjoint` + are set. + This is useful for testing methods such as `cholesky`. + + Returns: + operator: `LinearOperator` subclass instance. + mat: `Tensor` representing operator. + """ + # Create a matrix as a numpy array with desired shape/dtype. + # Create a LinearOperator that should have the same behavior as the matrix. + raise NotImplementedError("Not implemented yet.") + + @abc.abstractmethod + def make_rhs(self, operator, adjoint, with_batch=True): + """Make a rhs appropriate for calling operator.solve(rhs). + + Args: + operator: A `LinearOperator` + adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the + adjoint operator. + with_batch: Python `bool`. If `True`, create `rhs` with the same batch + shape as operator, and otherwise create a matrix without any batch + shape. + + Returns: + A `Tensor` + """ + raise NotImplementedError("make_rhs is not defined.") + + @abc.abstractmethod + def make_x(self, operator, adjoint, with_batch=True): + """Make an 'x' appropriate for calling operator.matmul(x). + + Args: + operator: A `LinearOperator` + adjoint: Python `bool`. If `True`, we are making an 'x' value for the + adjoint operator. + with_batch: Python `bool`. If `True`, create `x` with the same batch shape + as operator, and otherwise create a matrix without any batch shape. + + Returns: + A `Tensor` + """ + raise NotImplementedError("make_x is not defined.") + + @staticmethod + def skip_these_tests(): + """List of test names to skip.""" + # Subclasses should over-ride if they want to skip some tests. 
+ # To skip "test_foo", add "foo" to this list. + return [] + + @staticmethod + def optional_tests(): + """List of optional test names to run.""" + # Subclasses should over-ride if they want to add optional tests. + # To add "test_foo", add "foo" to this list. + return [] + + def assertRaisesError(self, msg): + """assertRaisesRegexp or OpError, depending on context.executing_eagerly.""" + if context.executing_eagerly(): + return self.assertRaisesRegexp(Exception, msg) + return self.assertRaisesOpError(msg) + + def check_convert_variables_to_tensors(self, operator): + """Checks that internal Variables are correctly converted to Tensors.""" + self.assertIsInstance(operator, composite_tensor.CompositeTensor) + tensor_operator = composite_tensor.convert_variables_to_tensors(operator) + self.assertIs(type(operator), type(tensor_operator)) + self.assertEmpty(tensor_operator.variables) + self._check_tensors_equal_variables(operator, tensor_operator) + + def _check_tensors_equal_variables(self, obj, tensor_obj): + """Checks that Variables in `obj` have equivalent Tensors in `tensor_obj.""" + if isinstance(obj, variables.Variable): + self.assertAllClose(ops.convert_to_tensor(obj), + ops.convert_to_tensor(tensor_obj)) + elif isinstance(obj, composite_tensor.CompositeTensor): + params = getattr(obj, "parameters", {}) + tensor_params = getattr(tensor_obj, "parameters", {}) + self.assertAllEqual(params.keys(), tensor_params.keys()) + self._check_tensors_equal_variables(params, tensor_params) + elif nest.is_mapping(obj): + for k, v in obj.items(): + self._check_tensors_equal_variables(v, tensor_obj[k]) + elif nest.is_nested(obj): + for x, y in zip(obj, tensor_obj): + self._check_tensors_equal_variables(x, y) + else: + # We only check Tensor, CompositeTensor, and nested structure parameters. + pass + + def check_tape_safe(self, operator, skip_options=None): + """Check gradients are not None w.r.t. operator.variables. + + Meant to be called from the derived class. 
+ + This ensures grads are not w.r.t every variable in operator.variables. If + more fine-grained testing is needed, a custom test should be written. + + Args: + operator: LinearOperator. Exact checks done will depend on hints. + skip_options: Optional list of CheckTapeSafeSkipOptions. + Makes this test skip particular checks. + """ + skip_options = skip_options or [] + + if not operator.variables: + raise AssertionError("`operator.variables` was empty") + + def _assert_not_none(iterable): + for item in iterable: + self.assertIsNotNone(item) + + # Tape tests that can be run on every operator below. + with backprop.GradientTape() as tape: + grad = tape.gradient(operator.to_dense(), operator.variables) + _assert_not_none(grad) + + with backprop.GradientTape() as tape: + var_grad = tape.gradient(operator, operator.variables) + _assert_not_none(var_grad) + nest.assert_same_structure(var_grad, grad) + + with backprop.GradientTape() as tape: + _assert_not_none( + tape.gradient(operator.adjoint().to_dense(), operator.variables)) + + x = math_ops.cast( + array_ops.ones(shape=operator.H.shape_tensor()[:-1]), operator.dtype) + + with backprop.GradientTape() as tape: + _assert_not_none(tape.gradient(operator.matvec(x), operator.variables)) + + # Tests for square, but possibly non-singular operators below. + if not operator.is_square: + return + + for option in [ + CheckTapeSafeSkipOptions.DETERMINANT, + CheckTapeSafeSkipOptions.LOG_ABS_DETERMINANT, + CheckTapeSafeSkipOptions.DIAG_PART, + CheckTapeSafeSkipOptions.TRACE, + ]: + with backprop.GradientTape() as tape: + if option not in skip_options: + _assert_not_none( + tape.gradient(getattr(operator, option)(), operator.variables)) + + # Tests for non-singular operators below. 
+ if operator.is_non_singular is False: # pylint: disable=g-bool-id-comparison + return + + with backprop.GradientTape() as tape: + _assert_not_none( + tape.gradient(operator.inverse().to_dense(), operator.variables)) + + with backprop.GradientTape() as tape: + _assert_not_none(tape.gradient(operator.solvevec(x), operator.variables)) + + # Tests for SPD operators below. + if not (operator.is_self_adjoint and operator.is_positive_definite): + return + + with backprop.GradientTape() as tape: + _assert_not_none( + tape.gradient(operator.cholesky().to_dense(), operator.variables)) + + +# pylint:disable=missing-docstring + + +def _test_slicing(use_placeholder, shapes_info, dtype): + def test_slicing(self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + batch_shape = shapes_info.shape[:-2] + # Don't bother slicing for uninteresting batch shapes. + if not batch_shape or batch_shape[0] <= 1: + return + + slices = [slice(1, -1)] + if len(batch_shape) > 1: + # Slice out the last member. 
+ slices += [..., slice(0, 1)] + sliced_operator = operator[slices] + matrix_slices = slices + [slice(None), slice(None)] + sliced_matrix = mat[matrix_slices] + sliced_op_dense = sliced_operator.to_dense() + op_dense_v, mat_v = sess.run([sliced_op_dense, sliced_matrix]) + self.assertAC(op_dense_v, mat_v) + return test_slicing + + +def _test_to_dense(use_placeholder, shapes_info, dtype): + def test_to_dense(self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + op_dense = operator.to_dense() + if not use_placeholder: + self.assertAllEqual(shapes_info.shape, op_dense.shape) + op_dense_v, mat_v = sess.run([op_dense, mat]) + self.assertAC(op_dense_v, mat_v) + return test_to_dense + + +def _test_det(use_placeholder, shapes_info, dtype): + def test_det(self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + op_det = operator.determinant() + if not use_placeholder: + self.assertAllEqual(shapes_info.shape[:-2], op_det.shape) + op_det_v, mat_det_v = sess.run( + [op_det, linalg_ops.matrix_determinant(mat)]) + self.assertAC(op_det_v, mat_det_v) + return test_det + + +def _test_log_abs_det(use_placeholder, shapes_info, dtype): + def test_log_abs_det(self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + op_log_abs_det = operator.log_abs_determinant() + _, mat_log_abs_det = linalg.slogdet(mat) + if not use_placeholder: + self.assertAllEqual( + shapes_info.shape[:-2], op_log_abs_det.shape) + op_log_abs_det_v, mat_log_abs_det_v = 
sess.run( + [op_log_abs_det, mat_log_abs_det]) + self.assertAC(op_log_abs_det_v, mat_log_abs_det_v) + return test_log_abs_det + + +def _test_operator_matmul_with_same_type(use_placeholder, shapes_info, dtype): + """op_a.matmul(op_b), in the case where the same type is returned.""" + @test_util.run_without_tensor_float_32("Use FP32 in matmul") + def test_operator_matmul_with_same_type( + self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator_a, mat_a = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + operator_b, mat_b = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + + mat_matmul = math_ops.matmul(mat_a, mat_b) + op_matmul = operator_a.matmul(operator_b) + mat_matmul_v, op_matmul_v = sess.run([mat_matmul, op_matmul.to_dense()]) + + self.assertIsInstance(op_matmul, operator_a.__class__) + self.assertAC(mat_matmul_v, op_matmul_v) + return test_operator_matmul_with_same_type + + +def _test_operator_solve_with_same_type(use_placeholder, shapes_info, dtype): + """op_a.solve(op_b), in the case where the same type is returned.""" + def test_operator_solve_with_same_type( + self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator_a, mat_a = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + operator_b, mat_b = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + + mat_solve = linear_operator_util.matrix_solve_with_broadcast(mat_a, mat_b) + op_solve = operator_a.solve(operator_b) + mat_solve_v, op_solve_v = sess.run([mat_solve, op_solve.to_dense()]) + + self.assertIsInstance(op_solve, operator_a.__class__) + self.assertAC(mat_solve_v, op_solve_v) + return test_operator_solve_with_same_type + + +def _test_matmul_base( + self: "LinearOperatorDerivedClassTest", + 
use_placeholder, + shapes_info, + dtype, + adjoint, + adjoint_arg, + blockwise_arg, + with_batch): + # If batch dimensions are omitted, but there are + # no batch dimensions for the linear operator, then + # skip the test case. This is already checked with + # with_batch=True. + if not with_batch and len(shapes_info.shape) <= 2: + return + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + x = self.make_x( + operator, adjoint=adjoint, with_batch=with_batch) + # If adjoint_arg, compute A X^H^H = A X. + if adjoint_arg: + op_matmul = operator.matmul( + linalg.adjoint(x), + adjoint=adjoint, + adjoint_arg=adjoint_arg) + else: + op_matmul = operator.matmul(x, adjoint=adjoint) + mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint) + if not use_placeholder: + self.assertAllEqual(op_matmul.shape, + mat_matmul.shape) + + # If the operator is blockwise, test both blockwise `x` and `Tensor` `x`; + # else test only `Tensor` `x`. In both cases, evaluate all results in a + # single `sess.run` call to avoid re-sampling the random `x` in graph mode. 
+ if blockwise_arg and len(operator.operators) > 1: + # pylint: disable=protected-access + block_dimensions = ( + operator._block_range_dimensions() if adjoint else + operator._block_domain_dimensions()) + block_dimensions_fn = ( + operator._block_range_dimension_tensors if adjoint else + operator._block_domain_dimension_tensors) + # pylint: enable=protected-access + split_x = linear_operator_util.split_arg_into_blocks( + block_dimensions, + block_dimensions_fn, + x, axis=-2) + if adjoint_arg: + split_x = [linalg.adjoint(y) for y in split_x] + split_matmul = operator.matmul( + split_x, adjoint=adjoint, adjoint_arg=adjoint_arg) + + self.assertEqual(len(split_matmul), len(operator.operators)) + split_matmul = linear_operator_util.broadcast_matrix_batch_dims( + split_matmul) + fused_block_matmul = array_ops.concat(split_matmul, axis=-2) + op_matmul_v, mat_matmul_v, fused_block_matmul_v = sess.run([ + op_matmul, mat_matmul, fused_block_matmul]) + + # Check that the operator applied to blockwise input gives the same result + # as matrix multiplication. + self.assertAC(fused_block_matmul_v, mat_matmul_v) + else: + op_matmul_v, mat_matmul_v = sess.run([op_matmul, mat_matmul]) + + # Check that the operator applied to a `Tensor` gives the same result as + # matrix multiplication. 
+ self.assertAC(op_matmul_v, mat_matmul_v) + + +def _test_matmul( + use_placeholder, + shapes_info, + dtype, + adjoint, + adjoint_arg, + blockwise_arg): + @test_util.run_without_tensor_float_32("Use FP32 in matmul") + def test_matmul(self: "LinearOperatorDerivedClassTest"): + _test_matmul_base( + self, + use_placeholder, + shapes_info, + dtype, + adjoint, + adjoint_arg, + blockwise_arg, + with_batch=True) + return test_matmul + + +def _test_matmul_with_broadcast( + use_placeholder, + shapes_info, + dtype, + adjoint, + adjoint_arg, + blockwise_arg): + @test_util.run_without_tensor_float_32("Use FP32 in matmul") + def test_matmul_with_broadcast(self: "LinearOperatorDerivedClassTest"): + _test_matmul_base( + self, + use_placeholder, + shapes_info, + dtype, + adjoint, + adjoint_arg, + blockwise_arg, + with_batch=True) + return test_matmul_with_broadcast + + +def _test_adjoint(use_placeholder, shapes_info, dtype): + def test_adjoint(self: "LinearOperatorDerivedClassTest"): + with self.test_session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + op_adjoint = operator.adjoint().to_dense() + op_adjoint_h = operator.H.to_dense() + mat_adjoint = linalg.adjoint(mat) + op_adjoint_v, op_adjoint_h_v, mat_adjoint_v = sess.run( + [op_adjoint, op_adjoint_h, mat_adjoint]) + self.assertAC(mat_adjoint_v, op_adjoint_v) + self.assertAC(mat_adjoint_v, op_adjoint_h_v) + return test_adjoint + + +def _test_cholesky(use_placeholder, shapes_info, dtype): + def test_cholesky(self: "LinearOperatorDerivedClassTest"): + with self.test_session(graph=ops.Graph()) as sess: + # This test fails to pass for float32 type by a small margin if we use + # random_seed.DEFAULT_GRAPH_SEED. The correct fix would be relaxing the + # test tolerance but the tolerance in this test is configured universally + # depending on its type. 
So instead of lowering tolerance for all tests + # or special casing this, just use a seed, +2, that makes this test pass. + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + 2 + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder, + ensure_self_adjoint_and_pd=True) + op_chol = operator.cholesky().to_dense() + mat_chol = linalg_ops.cholesky(mat) + op_chol_v, mat_chol_v = sess.run([op_chol, mat_chol]) + self.assertAC(mat_chol_v, op_chol_v) + return test_cholesky + + +def _test_eigvalsh(use_placeholder, shapes_info, dtype): + def test_eigvalsh(self: "LinearOperatorDerivedClassTest"): + with self.test_session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder, + ensure_self_adjoint_and_pd=True) + # Eigenvalues are real, so we'll cast these to float64 and sort + # for comparison. + op_eigvals = sort_ops.sort( + math_ops.cast(operator.eigvals(), dtype=dtypes.float64), axis=-1) + if dtype.is_complex: + mat = math_ops.cast(mat, dtype=dtypes.complex128) + else: + mat = math_ops.cast(mat, dtype=dtypes.float64) + mat_eigvals = sort_ops.sort( + math_ops.cast( + linalg_ops.self_adjoint_eigvals(mat), dtype=dtypes.float64), + axis=-1) + op_eigvals_v, mat_eigvals_v = sess.run([op_eigvals, mat_eigvals]) + + atol = self._atol[dtype] # pylint: disable=protected-access + rtol = self._rtol[dtype] # pylint: disable=protected-access + if dtype == dtypes.float32 or dtype == dtypes.complex64: + atol = 2e-4 + rtol = 2e-4 + self.assertAllClose(op_eigvals_v, mat_eigvals_v, atol=atol, rtol=rtol) + return test_eigvalsh + + +def _test_cond(use_placeholder, shapes_info, dtype): + def test_cond(self: "LinearOperatorDerivedClassTest"): + with self.test_session(graph=ops.Graph()) as sess: + # svd does not work with zero dimensional matrices, so we'll + # skip + if 0 in shapes_info.shape[-2:]: + return + + # ROCm platform does not 
yet support complex types + if test.is_built_with_rocm() and \ + ((dtype == dtypes.complex64) or (dtype == dtypes.complex128)): + return + + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + # Ensure self-adjoint and PD so we get finite condition numbers. + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder, + ensure_self_adjoint_and_pd=True) + # Eigenvalues are real, so we'll cast these to float64 and sort + # for comparison. + op_cond = operator.cond() + s = math_ops.abs(linalg_ops.svd(mat, compute_uv=False)) + mat_cond = math_ops.reduce_max(s, axis=-1) / math_ops.reduce_min( + s, axis=-1) + op_cond_v, mat_cond_v = sess.run([op_cond, mat_cond]) + + atol_override = { + dtypes.float16: 1e-2, + dtypes.float32: 1e-3, + dtypes.float64: 1e-6, + dtypes.complex64: 1e-3, + dtypes.complex128: 1e-6, + } + rtol_override = { + dtypes.float16: 1e-2, + dtypes.float32: 1e-3, + dtypes.float64: 1e-4, + dtypes.complex64: 1e-3, + dtypes.complex128: 1e-6, + } + atol = atol_override[dtype] + rtol = rtol_override[dtype] + self.assertAllClose(op_cond_v, mat_cond_v, atol=atol, rtol=rtol) + return test_cond + + +def _test_solve_base( + self: "LinearOperatorDerivedClassTest", + use_placeholder, + shapes_info, + dtype, + adjoint, + adjoint_arg, + blockwise_arg, + with_batch): + # If batch dimensions are omitted, but there are + # no batch dimensions for the linear operator, then + # skip the test case. This is already checked with + # with_batch=True. + if not with_batch and len(shapes_info.shape) <= 2: + return + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + rhs = self.make_rhs( + operator, adjoint=adjoint, with_batch=with_batch) + # If adjoint_arg, solve A X = (rhs^H)^H = rhs. 
+ if adjoint_arg: + op_solve = operator.solve( + linalg.adjoint(rhs), + adjoint=adjoint, + adjoint_arg=adjoint_arg) + else: + op_solve = operator.solve( + rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) + mat_solve = linear_operator_util.matrix_solve_with_broadcast( + mat, rhs, adjoint=adjoint) + if not use_placeholder: + self.assertAllEqual(op_solve.shape, + mat_solve.shape) + + # If the operator is blockwise, test both blockwise rhs and `Tensor` rhs; + # else test only `Tensor` rhs. In both cases, evaluate all results in a + # single `sess.run` call to avoid re-sampling the random rhs in graph mode. + if blockwise_arg and len(operator.operators) > 1: + # pylint: disable=protected-access + block_dimensions = ( + operator._block_range_dimensions() if adjoint else + operator._block_domain_dimensions()) + block_dimensions_fn = ( + operator._block_range_dimension_tensors if adjoint else + operator._block_domain_dimension_tensors) + # pylint: enable=protected-access + split_rhs = linear_operator_util.split_arg_into_blocks( + block_dimensions, + block_dimensions_fn, + rhs, axis=-2) + if adjoint_arg: + split_rhs = [linalg.adjoint(y) for y in split_rhs] + split_solve = operator.solve( + split_rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) + self.assertEqual(len(split_solve), len(operator.operators)) + split_solve = linear_operator_util.broadcast_matrix_batch_dims( + split_solve) + fused_block_solve = array_ops.concat(split_solve, axis=-2) + op_solve_v, mat_solve_v, fused_block_solve_v = sess.run([ + op_solve, mat_solve, fused_block_solve]) + + # Check that the operator and matrix give the same solution when the rhs + # is blockwise. + self.assertAC(mat_solve_v, fused_block_solve_v) + else: + op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve]) + + # Check that the operator and matrix give the same solution when the rhs is + # a `Tensor`. 
+ self.assertAC(op_solve_v, mat_solve_v) + + +def _test_solve( + use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, blockwise_arg): + def test_solve(self: "LinearOperatorDerivedClassTest"): + _test_solve_base( + self, + use_placeholder, + shapes_info, + dtype, + adjoint, + adjoint_arg, + blockwise_arg, + with_batch=True) + return test_solve + + +def _test_solve_with_broadcast( + use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, blockwise_arg): + def test_solve_with_broadcast(self: "LinearOperatorDerivedClassTest"): + _test_solve_base( + self, + use_placeholder, + shapes_info, + dtype, + adjoint, + adjoint_arg, + blockwise_arg, + with_batch=False) + return test_solve_with_broadcast + + +def _test_inverse(use_placeholder, shapes_info, dtype): + def test_inverse(self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + op_inverse_v, mat_inverse_v = sess.run([ + operator.inverse().to_dense(), linalg.inv(mat)]) + self.assertAC(op_inverse_v, mat_inverse_v, check_dtype=True) + return test_inverse + + +def _test_trace(use_placeholder, shapes_info, dtype): + def test_trace(self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + op_trace = operator.trace() + mat_trace = math_ops.trace(mat) + if not use_placeholder: + self.assertAllEqual(op_trace.shape, mat_trace.shape) + op_trace_v, mat_trace_v = sess.run([op_trace, mat_trace]) + self.assertAC(op_trace_v, mat_trace_v) + return test_trace + + +def _test_add_to_tensor(use_placeholder, shapes_info, dtype): + def test_add_to_tensor(self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = 
random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + op_plus_2mat = operator.add_to_tensor(2 * mat) + + if not use_placeholder: + self.assertAllEqual(shapes_info.shape, op_plus_2mat.shape) + + op_plus_2mat_v, mat_v = sess.run([op_plus_2mat, mat]) + + self.assertAC(op_plus_2mat_v, 3 * mat_v) + return test_add_to_tensor + + +def _test_diag_part(use_placeholder, shapes_info, dtype): + def test_diag_part(self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + op_diag_part = operator.diag_part() + mat_diag_part = array_ops.matrix_diag_part(mat) + + if not use_placeholder: + self.assertAllEqual(mat_diag_part.shape, + op_diag_part.shape) + + op_diag_part_, mat_diag_part_ = sess.run( + [op_diag_part, mat_diag_part]) + + self.assertAC(op_diag_part_, mat_diag_part_) + return test_diag_part + + +def _test_composite_tensor(use_placeholder, shapes_info, dtype): + @test_util.run_without_tensor_float_32("Use FP32 in matmul") + def test_composite_tensor(self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + self.assertIsInstance(operator, composite_tensor.CompositeTensor) + + flat = nest.flatten(operator, expand_composites=True) + unflat = nest.pack_sequence_as(operator, flat, expand_composites=True) + self.assertIsInstance(unflat, type(operator)) + + # Input the operator to a `tf.function`. + x = self.make_x(operator, adjoint=False) + op_y = def_function.function(lambda op: op.matmul(x))(unflat) + mat_y = math_ops.matmul(mat, x) + + if not use_placeholder: + self.assertAllEqual(mat_y.shape, op_y.shape) + + # Test while_loop. 
+ def body(op): + return type(op)(**op.parameters), + op_out, = while_v2.while_loop( + cond=lambda _: True, + body=body, + loop_vars=(operator,), + maximum_iterations=3) + loop_y = op_out.matmul(x) + + op_y_, loop_y_, mat_y_ = sess.run([op_y, loop_y, mat_y]) + self.assertAC(op_y_, mat_y_) + self.assertAC(loop_y_, mat_y_) + + # Ensure that the `TypeSpec` can be encoded. + nested_structure_coder.encode_structure(operator._type_spec) # pylint: disable=protected-access + + return test_composite_tensor + + +def _test_saved_model(use_placeholder, shapes_info, dtype): + @test_util.run_without_tensor_float_32("Use FP32 in matmul") + def test_saved_model(self: "LinearOperatorDerivedClassTest"): + with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + + if test_util.is_xla_enabled() and np.prod(shapes_info.shape) == 0: + self.skipTest("Saving XLA model fails for empty model.") + + operator, mat = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + x = self.make_x(operator, adjoint=False) + + class Model(module.Module): + + def __init__(self, init_x): + self.x = nest.map_structure( + lambda x_: variables.Variable(x_, shape=None), + init_x) + + @def_function.function(input_signature=(operator._type_spec,)) # pylint: disable=protected-access + def do_matmul(self, op): + return op.matmul(self.x) + + saved_model_dir = self.get_temp_dir() + m1 = Model(x) + sess.run([v.initializer for v in m1.variables]) + sess.run(m1.x.assign(m1.x + 1.)) + + save_model.save(m1, saved_model_dir) + m2 = load_model.load(saved_model_dir) + sess.run(m2.x.initializer) + + sess.run(m2.x.assign(m2.x + 1.)) + y_op = m2.do_matmul(operator) + y_mat = math_ops.matmul(mat, m2.x) + + y_op_, y_mat_ = sess.run([y_op, y_mat]) + self.assertAC(y_op_, y_mat_) + + return test_saved_model + + +def _test_composite_tensor_gradient(use_placeholder, shapes_info, dtype): + def test_composite_tensor_gradient(self: "LinearOperatorDerivedClassTest"): + 
with self.session(graph=ops.Graph()) as sess: + sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + operator, _ = self.operator_and_matrix( + shapes_info, dtype, use_placeholder=use_placeholder) + x = self.make_x(operator, adjoint=False) + y = operator.matmul(x) + + op_g, = gradients_impl.gradients( + y, + operator, + grad_ys=array_ops.ones_like(y)) # Complex dtypes need grad_ys. + + def _unflatten_and_matmul(components): + unflat_op = nest.pack_sequence_as( + operator, components, expand_composites=True) + return unflat_op.matmul(x) + + flat_op = nest.flatten(operator, expand_composites=True) + y_ = _unflatten_and_matmul(flat_op) + flat_g = gradients_impl.gradients( + y_, + flat_op, + grad_ys=array_ops.ones_like(y_)) + + if all(g is None for g in flat_g): + self.assertIsNone(op_g) + else: + self.assertIsInstance(op_g, operator.__class__) + for g, ug in zip(nest.flatten(op_g, expand_composites=True), + nest.flatten(flat_g, expand_composites=True)): + self.assertAllClose(g, ug) + return test_composite_tensor_gradient + +# pylint:enable=missing-docstring + + +def add_tests(test_cls): + """Add tests for LinearOperator methods.""" + test_name_dict = { + # All test classes should be added here. 
+ "add_to_tensor": _test_add_to_tensor, + "adjoint": _test_adjoint, + "cholesky": _test_cholesky, + "cond": _test_cond, + "composite_tensor": _test_composite_tensor, + "composite_tensor_gradient": _test_composite_tensor_gradient, + "det": _test_det, + "diag_part": _test_diag_part, + "eigvalsh": _test_eigvalsh, + "inverse": _test_inverse, + "log_abs_det": _test_log_abs_det, + "operator_matmul_with_same_type": _test_operator_matmul_with_same_type, + "operator_solve_with_same_type": _test_operator_solve_with_same_type, + "matmul": _test_matmul, + "matmul_with_broadcast": _test_matmul_with_broadcast, + "saved_model": _test_saved_model, + "slicing": _test_slicing, + "solve": _test_solve, + "solve_with_broadcast": _test_solve_with_broadcast, + "to_dense": _test_to_dense, + "trace": _test_trace, + } + optional_tests = [ + # Test classes need to explicitly add these to cls.optional_tests. + "operator_matmul_with_same_type", + "operator_solve_with_same_type", + ] + tests_with_adjoint_args = [ + "matmul", + "matmul_with_broadcast", + "solve", + "solve_with_broadcast", + ] + if set(test_cls.skip_these_tests()).intersection(test_cls.optional_tests()): + raise ValueError( + "Test class {test_cls} had intersecting 'skip_these_tests' " + f"{test_cls.skip_these_tests()} and 'optional_tests' " + f"{test_cls.optional_tests()}.") + + for name, test_template_fn in test_name_dict.items(): + if name in test_cls.skip_these_tests(): + continue + if name in optional_tests and name not in test_cls.optional_tests(): + continue + + for dtype, use_placeholder, shape_info in itertools.product( + test_cls.dtypes_to_test(), + test_cls.use_placeholder_options(), + test_cls.operator_shapes_infos()): + base_test_name = "_".join([ + "test", name, "_shape={},dtype={},use_placeholder={}".format( + shape_info.shape, dtype, use_placeholder)]) + if name in tests_with_adjoint_args: + for adjoint in test_cls.adjoint_options(): + for adjoint_arg in test_cls.adjoint_arg_options(): + test_name = base_test_name 
+ ",adjoint={},adjoint_arg={}".format( + adjoint, adjoint_arg) + if hasattr(test_cls, test_name): + raise RuntimeError("Test %s defined more than once" % test_name) + setattr( + test_cls, + test_name, + test_util.run_deprecated_v1( + test_template_fn( # pylint: disable=too-many-function-args + use_placeholder, shape_info, dtype, adjoint, + adjoint_arg, test_cls.use_blockwise_arg()))) + else: + if hasattr(test_cls, base_test_name): + raise RuntimeError("Test %s defined more than once" % base_test_name) + setattr( + test_cls, + base_test_name, + test_util.run_deprecated_v1(test_template_fn( + use_placeholder, shape_info, dtype))) + + +class SquareLinearOperatorDerivedClassTest( + LinearOperatorDerivedClassTest, metaclass=abc.ABCMeta): + """Base test class appropriate for square operators. + + Sub-classes must still define all abstractmethods from + LinearOperatorDerivedClassTest that are not defined here. + """ + + @staticmethod + def operator_shapes_infos(): + shapes_info = OperatorShapesInfo + # non-batch operators (n, n) and batch operators. + return [ + shapes_info((0, 0)), + shapes_info((1, 1)), + shapes_info((1, 3, 3)), + shapes_info((3, 4, 4)), + shapes_info((2, 1, 4, 4))] + + def make_rhs(self, operator, adjoint, with_batch=True): + # This operator is square, so rhs and x will have same shape. + # adjoint value makes no difference because the operator shape doesn't + # change since it is square, but be pedantic. + return self.make_x(operator, adjoint=not adjoint, with_batch=with_batch) + + def make_x(self, operator, adjoint, with_batch=True): + # Value of adjoint makes no difference because the operator is square. + # Return the number of systems to solve, R, equal to 1 or 2. + r = self._get_num_systems(operator) + # If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of + # shape [B1,...,Bb, N, R], R = 1 or 2. 
+ if operator.shape.is_fully_defined(): + batch_shape = operator.batch_shape.as_list() + n = operator.domain_dimension.value + if with_batch: + x_shape = batch_shape + [n, r] + else: + x_shape = [n, r] + else: + batch_shape = operator.batch_shape_tensor() + n = operator.domain_dimension_tensor() + if with_batch: + x_shape = array_ops.concat((batch_shape, [n, r]), 0) + else: + x_shape = [n, r] + + return random_normal(x_shape, dtype=operator.dtype) + + def _get_num_systems(self, operator): + """Get some number, either 1 or 2, depending on operator.""" + if operator.tensor_rank is None or operator.tensor_rank % 2: + return 1 + else: + return 2 + + +class NonSquareLinearOperatorDerivedClassTest( + LinearOperatorDerivedClassTest, metaclass=abc.ABCMeta): + """Base test class appropriate for generic rectangular operators. + + Square shapes are never tested by this class, so if you want to test your + operator with a square shape, create two test classes, the other subclassing + SquareLinearOperatorFullMatrixTest. + + Sub-classes must still define all abstractmethods from + LinearOperatorDerivedClassTest that are not defined here. + """ + + @staticmethod + def skip_these_tests(): + """List of test names to skip.""" + return [ + "cholesky", + "eigvalsh", + "inverse", + "solve", + "solve_with_broadcast", + "det", + "log_abs_det", + ] + + @staticmethod + def operator_shapes_infos(): + shapes_info = OperatorShapesInfo + # non-batch operators (n, n) and batch operators. + return [ + shapes_info((2, 1)), + shapes_info((1, 2)), + shapes_info((1, 3, 2)), + shapes_info((3, 3, 4)), + shapes_info((2, 1, 2, 4))] + + def make_rhs(self, operator, adjoint, with_batch=True): + # TODO(langmore) Add once we're testing solve_ls. 
+ raise NotImplementedError( + "make_rhs not implemented because we don't test solve") + + def make_x(self, operator, adjoint, with_batch=True): + # Return the number of systems for the argument 'x' for .matmul(x) + r = self._get_num_systems(operator) + # If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of + # shape [B1,...,Bb, N, R], R = 1 or 2. + if operator.shape.is_fully_defined(): + batch_shape = operator.batch_shape.as_list() + if adjoint: + n = operator.range_dimension.value + else: + n = operator.domain_dimension.value + if with_batch: + x_shape = batch_shape + [n, r] + else: + x_shape = [n, r] + else: + batch_shape = operator.batch_shape_tensor() + if adjoint: + n = operator.range_dimension_tensor() + else: + n = operator.domain_dimension_tensor() + if with_batch: + x_shape = array_ops.concat((batch_shape, [n, r]), 0) + else: + x_shape = [n, r] + + return random_normal(x_shape, dtype=operator.dtype) + + def _get_num_systems(self, operator): + """Get some number, either 1 or 2, depending on operator.""" + if operator.tensor_rank is None or operator.tensor_rank % 2: + return 1 + else: + return 2 + + +def random_positive_definite_matrix(shape, + dtype, + oversampling_ratio=4, + force_well_conditioned=False): + """[batch] positive definite Wisart matrix. + + A Wishart(N, S) matrix is the S sample covariance matrix of an N-variate + (standard) Normal random variable. + + Args: + shape: `TensorShape` or Python list. Shape of the returned matrix. + dtype: `TensorFlow` `dtype` or Python dtype. + oversampling_ratio: S / N in the above. If S < N, the matrix will be + singular (unless `force_well_conditioned is True`). + force_well_conditioned: Python bool. If `True`, add `1` to the diagonal + of the Wishart matrix, then divide by 2, ensuring most eigenvalues are + close to 1. + + Returns: + `Tensor` with desired shape and dtype. 
+ """ + dtype = dtypes.as_dtype(dtype) + if not tensor_util.is_tf_type(shape): + shape = tensor_shape.TensorShape(shape) + # Matrix must be square. + shape.dims[-1].assert_is_compatible_with(shape.dims[-2]) + shape = shape.as_list() + n = shape[-2] + s = oversampling_ratio * shape[-1] + wigner_shape = shape[:-2] + [n, s] + + with ops.name_scope("random_positive_definite_matrix"): + wigner = random_normal( + wigner_shape, + dtype=dtype, + stddev=math_ops.cast(1 / np.sqrt(s), dtype.real_dtype)) + wishart = math_ops.matmul(wigner, wigner, adjoint_b=True) + if force_well_conditioned: + wishart += linalg_ops.eye(n, dtype=dtype) + wishart /= math_ops.cast(2, dtype) + return wishart + + +def random_tril_matrix(shape, + dtype, + force_well_conditioned=False, + remove_upper=True): + """[batch] lower triangular matrix. + + Args: + shape: `TensorShape` or Python `list`. Shape of the returned matrix. + dtype: `TensorFlow` `dtype` or Python dtype + force_well_conditioned: Python `bool`. If `True`, returned matrix will have + eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit + normal random variables. + remove_upper: Python `bool`. + If `True`, zero out the strictly upper triangle. + If `False`, the lower triangle of returned matrix will have desired + properties, but will not have the strictly upper triangle zero'd out. + + Returns: + `Tensor` with desired shape and dtype. + """ + with ops.name_scope("random_tril_matrix"): + # Totally random matrix. Has no nice properties. + tril = random_normal(shape, dtype=dtype) + if remove_upper: + tril = array_ops.matrix_band_part(tril, -1, 0) + + # Create a diagonal with entries having modulus in [1, 2]. 
+ if force_well_conditioned: + maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype) + diag = random_sign_uniform( + shape[:-1], dtype=dtype, minval=1., maxval=maxval) + tril = array_ops.matrix_set_diag(tril, diag) + + return tril + + +def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None): + """Tensor with (possibly complex) Gaussian entries. + + Samples are distributed like + + ``` + N(mean, stddev^2), if dtype is real, + X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex. + ``` + + Args: + shape: `TensorShape` or Python list. Shape of the returned tensor. + mean: `Tensor` giving mean of normal to sample from. + stddev: `Tensor` giving stdev of normal to sample from. + dtype: `TensorFlow` `dtype` or numpy dtype + seed: Python integer seed for the RNG. + + Returns: + `Tensor` with desired shape and dtype. + """ + dtype = dtypes.as_dtype(dtype) + + with ops.name_scope("random_normal"): + samples = random_ops.random_normal( + shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed) + if dtype.is_complex: + if seed is not None: + seed += 1234 + more_samples = random_ops.random_normal( + shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed) + samples = math_ops.complex(samples, more_samples) + return samples + + +def random_uniform(shape, + minval=None, + maxval=None, + dtype=dtypes.float32, + seed=None): + """Tensor with (possibly complex) Uniform entries. + + Samples are distributed like + + ``` + Uniform[minval, maxval], if dtype is real, + X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex. + ``` + + Args: + shape: `TensorShape` or Python list. Shape of the returned tensor. + minval: `0-D` `Tensor` giving the minimum values. + maxval: `0-D` `Tensor` giving the maximum values. + dtype: `TensorFlow` `dtype` or Python dtype + seed: Python integer seed for the RNG. + + Returns: + `Tensor` with desired shape and dtype. 
+ """ + dtype = dtypes.as_dtype(dtype) + + with ops.name_scope("random_uniform"): + samples = random_ops.random_uniform( + shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed) + if dtype.is_complex: + if seed is not None: + seed += 12345 + more_samples = random_ops.random_uniform( + shape, + dtype=dtype.real_dtype, + minval=minval, + maxval=maxval, + seed=seed) + samples = math_ops.complex(samples, more_samples) + return samples + + +def random_sign_uniform(shape, + minval=None, + maxval=None, + dtype=dtypes.float32, + seed=None): + """Tensor with (possibly complex) random entries from a "sign Uniform". + + Letting `Z` be a random variable equal to `-1` and `1` with equal probability, + Samples from this `Op` are distributed like + + ``` + Z * X, where X ~ Uniform[minval, maxval], if dtype is real, + Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex. + ``` + + Args: + shape: `TensorShape` or Python list. Shape of the returned tensor. + minval: `0-D` `Tensor` giving the minimum values. + maxval: `0-D` `Tensor` giving the maximum values. + dtype: `TensorFlow` `dtype` or Python dtype + seed: Python integer seed for the RNG. + + Returns: + `Tensor` with desired shape and dtype. + """ + dtype = dtypes.as_dtype(dtype) + + with ops.name_scope("random_sign_uniform"): + unsigned_samples = random_uniform( + shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed) + if seed is not None: + seed += 12 + signs = math_ops.sign( + random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed)) + return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype) + + +def random_normal_correlated_columns(shape, + mean=0.0, + stddev=1.0, + dtype=dtypes.float32, + eps=1e-4, + seed=None): + """Batch matrix with (possibly complex) Gaussian entries and correlated cols. + + Returns random batch matrix `A` with specified element-wise `mean`, `stddev`, + living close to an embedded hyperplane. + + Suppose `shape[-2:] = (M, N)`. 
+ + If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries. + + If `M >= N`, then the columns of `A` will be made almost dependent as follows: + + ``` + L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1) + B = random normal M x N-1 matrix, mean = 0, stddev = stddev. + + G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane + E = a random normal M x N matrix, mean = 0, stddev = eps + mu = a constant M x N matrix, equal to the argument "mean" + + A = G + E + mu + ``` + + Args: + shape: Python list of integers. + Shape of the returned tensor. Must be at least length two. + mean: `Tensor` giving mean of normal to sample from. + stddev: `Tensor` giving stdev of normal to sample from. + dtype: `TensorFlow` `dtype` or numpy dtype + eps: Distance each column is perturbed from the low-dimensional subspace. + seed: Python integer seed for the RNG. + + Returns: + `Tensor` with desired shape and dtype. + + Raises: + ValueError: If `shape` is not at least length 2. + """ + dtype = dtypes.as_dtype(dtype) + + if len(shape) < 2: + raise ValueError( + "Argument shape must be at least length 2. Found: %s" % shape) + + # Shape is the final shape, e.g. [..., M, N] + shape = list(shape) + batch_shape = shape[:-2] + m, n = shape[-2:] + + # If there is only one column, "they" are by definition correlated. + if n < 2 or n < m: + return random_normal( + shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed) + + # Shape of the matrix with only n - 1 columns that we will embed in higher + # dimensional space. + smaller_shape = batch_shape + [m, n - 1] + + # Shape of the embedding matrix, mapping batch matrices + # from [..., N-1, M] to [..., N, M] + embedding_mat_shape = batch_shape + [n, n - 1] + + # This stddev for the embedding_mat ensures final result has correct stddev. 
+ stddev_mat = 1 / np.sqrt(n - 1) + + with ops.name_scope("random_normal_correlated_columns"): + smaller_mat = random_normal( + smaller_shape, mean=0.0, stddev=stddev_mat, dtype=dtype, seed=seed) + + if seed is not None: + seed += 1287 + + embedding_mat = random_normal(embedding_mat_shape, dtype=dtype, seed=seed) + + embedded_t = math_ops.matmul(embedding_mat, smaller_mat, transpose_b=True) + embedded = array_ops.matrix_transpose(embedded_t) + + mean_mat = array_ops.ones_like(embedded) * mean + + return embedded + random_normal(shape, stddev=eps, dtype=dtype) + mean_mat diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_toeplitz.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_toeplitz.py new file mode 100644 index 0000000000000000000000000000000000000000..ea87ef6a61b55829ae0230782bf5b3c763d9c244 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_toeplitz.py @@ -0,0 +1,292 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""`LinearOperator` acting like a Toeplitz matrix.""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_circulant +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.ops.signal import fft_ops +from tensorflow.python.util.tf_export import tf_export + +__all__ = ["LinearOperatorToeplitz",] + + +@tf_export("linalg.LinearOperatorToeplitz") +@linear_operator.make_composite_tensor +class LinearOperatorToeplitz(linear_operator.LinearOperator): + """`LinearOperator` acting like a [batch] of toeplitz matrices. + + This operator acts like a [batch] Toeplitz matrix `A` with shape + `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `N x N` matrix. This matrix `A` is not materialized, but for + purposes of broadcasting this shape will be relevant. + + #### Description in terms of toeplitz matrices + + Toeplitz means that `A` has constant diagonals. Hence, `A` can be generated + with two vectors. One represents the first column of the matrix, and the + other represents the first row. + + Below is a 4 x 4 example: + + ``` + A = |a b c d| + |e a b c| + |f e a b| + |g f e a| + ``` + + #### Example of a Toeplitz operator. + + ```python + # Create a 3 x 3 Toeplitz operator. + col = [1., 2., 3.] + row = [1., 4., -9.] 
+ operator = LinearOperatorToeplitz(col, row) + + operator.to_dense() + ==> [[1., 4., -9.], + [2., 1., 4.], + [3., 2., 1.]] + + operator.shape + ==> [3, 3] + + operator.log_abs_determinant() + ==> scalar Tensor + + x = ... Shape [3, 4] Tensor + operator.matmul(x) + ==> Shape [3, 4] Tensor + ``` + + #### Shape compatibility + + This operator acts on [batch] matrix with compatible shape. + `x` is a batch matrix with compatible shape for `matmul` and `solve` if + + ``` + operator.shape = [B1,...,Bb] + [N, N], with b >= 0 + x.shape = [C1,...,Cc] + [N, R], + and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] + ``` + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. + """ + + def __init__(self, + col, + row, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name="LinearOperatorToeplitz"): + r"""Initialize a `LinearOperatorToeplitz`. + + Args: + col: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`. + The first column of the operator. Allowed dtypes: `float16`, `float32`, + `float64`, `complex64`, `complex128`. Note that the first entry of + `col` is assumed to be the same as the first entry of `row`. + row: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`. + The first row of the operator. Allowed dtypes: `float16`, `float32`, + `float64`, `complex64`, `complex128`. 
Note that the first entry of + `row` is assumed to be the same as the first entry of `col`. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. If `diag.dtype` is real, this is auto-set to `True`. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. + """ + parameters = dict( + col=col, + row=row, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + + with ops.name_scope(name, values=[row, col]): + self._row = linear_operator_util.convert_nonref_to_tensor(row, name="row") + self._col = linear_operator_util.convert_nonref_to_tensor(col, name="col") + self._check_row_col(self._row, self._col) + + if is_square is False: # pylint:disable=g-bool-id-comparison + raise ValueError("Only square Toeplitz operators currently supported.") + is_square = True + + super(LinearOperatorToeplitz, self).__init__( + dtype=self._row.dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + def _check_row_col(self, row, col): + """Static check of row and column.""" + for name, tensor in [["row", row], ["col", col]]: + if tensor.shape.ndims is not None and tensor.shape.ndims < 1: + raise ValueError("Argument {} must have at least 1 dimension. 
" + "Found: {}".format(name, tensor)) + + if row.shape[-1] is not None and col.shape[-1] is not None: + if row.shape[-1] != col.shape[-1]: + raise ValueError( + "Expected square matrix, got row and col with mismatched " + "dimensions.") + + def _shape(self): + # If d_shape = [5, 3], we return [5, 3, 3]. + v_shape = array_ops.broadcast_static_shape( + self.row.shape, self.col.shape) + return v_shape.concatenate(v_shape[-1:]) + + def _shape_tensor(self, row=None, col=None): + row = self.row if row is None else row + col = self.col if col is None else col + v_shape = array_ops.broadcast_dynamic_shape( + array_ops.shape(row), + array_ops.shape(col)) + k = v_shape[-1] + return array_ops.concat((v_shape, [k]), 0) + + def _assert_self_adjoint(self): + return check_ops.assert_equal( + self.row, + self.col, + message=("row and col are not the same, and " + "so this operator is not self-adjoint.")) + + # TODO(srvasude): Add efficient solver and determinant calculations to this + # class (based on Levinson recursion.) + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + # Given a Toeplitz matrix, we can embed it in a Circulant matrix to perform + # efficient matrix multiplications. Given a Toeplitz matrix with first row + # [t_0, t_1, ... t_{n-1}] and first column [t0, t_{-1}, ..., t_{-(n-1)}, + # let C by the circulant matrix with first column [t0, t_{-1}, ..., + # t_{-(n-1)}, 0, t_{n-1}, ..., t_1]. Also adjoin to our input vector `x` + # `n` zeros, to make it a vector of length `2n` (call it y). It can be shown + # that if we take the first n entries of `Cy`, this is equal to the Toeplitz + # multiplication. See: + # http://math.mit.edu/icg/resources/teaching/18.085-spring2015/toeplitz.pdf + # for more details. 
+ x = linalg.adjoint(x) if adjoint_arg else x + expanded_x = array_ops.concat([x, array_ops.zeros_like(x)], axis=-2) + col = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.col) + row = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.row) + circulant_col = array_ops.concat( + [col, + array_ops.zeros_like(col[..., 0:1]), + array_ops.reverse(row[..., 1:], axis=[-1])], axis=-1) + circulant = linear_operator_circulant.LinearOperatorCirculant( + fft_ops.fft(_to_complex(circulant_col)), + input_output_dtype=row.dtype) + result = circulant.matmul(expanded_x, adjoint=adjoint, adjoint_arg=False) + + shape = self._shape_tensor(row=row, col=col) + return math_ops.cast( + result[..., :self._domain_dimension_tensor(shape=shape), :], + self.dtype) + + def _trace(self): + return math_ops.cast( + self.domain_dimension_tensor(), + dtype=self.dtype) * self.col[..., 0] + + def _diag_part(self): + diag_entry = self.col[..., 0:1] + return diag_entry * array_ops.ones( + [self.domain_dimension_tensor()], self.dtype) + + def _to_dense(self): + row = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.row) + col = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.col) + total_shape = array_ops.broadcast_dynamic_shape( + array_ops.shape(row), array_ops.shape(col)) + n = array_ops.shape(row)[-1] + row = array_ops.broadcast_to(row, total_shape) + col = array_ops.broadcast_to(col, total_shape) + # We concatenate the column in reverse order to the row. + # This gives us 2*n + 1 elements. + elements = array_ops.concat( + [array_ops.reverse(col, axis=[-1]), row[..., 1:]], axis=-1) + # Given the above vector, the i-th row of the Toeplitz matrix + # is the last n elements of the above vector shifted i right + # (hence the first row is just the row vector provided, and + # the first element of each row will belong to the column vector). + # We construct these set of indices below. + indices = math_ops.mod( + # How much to shift right. This corresponds to `i`. 
+ math_ops.range(0, n) + + # Specifies the last `n` indices. + math_ops.range(n - 1, -1, -1)[..., array_ops.newaxis], + # Mod out by the total number of elements to ensure the index is + # non-negative (for tf.gather) and < 2 * n - 1. + 2 * n - 1) + return array_ops.gather(elements, indices, axis=-1) + + @property + def col(self): + return self._col + + @property + def row(self): + return self._row + + @property + def _composite_tensor_fields(self): + return ("col", "row") + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + return {"col": 1, "row": 1} + + +def _to_complex(x): + dtype = dtypes.complex64 + if x.dtype in [dtypes.float64, dtypes.complex128]: + dtype = dtypes.complex128 + return math_ops.cast(x, dtype) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_tridiag.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_tridiag.py new file mode 100644 index 0000000000000000000000000000000000000000..b5353cfc9ff9f8d1a07727a2339f1a302fc1cdb2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_tridiag.py @@ -0,0 +1,401 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""`LinearOperator` acting like a tridiagonal matrix.""" + +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import gen_array_ops +from tensorflow.python.ops import manip_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops.linalg import linalg_impl as linalg +from tensorflow.python.ops.linalg import linear_operator +from tensorflow.python.ops.linalg import linear_operator_util +from tensorflow.python.util.tf_export import tf_export + +__all__ = ['LinearOperatorTridiag',] + +_COMPACT = 'compact' +_MATRIX = 'matrix' +_SEQUENCE = 'sequence' +_DIAGONAL_FORMATS = frozenset({_COMPACT, _MATRIX, _SEQUENCE}) + + +@tf_export('linalg.LinearOperatorTridiag') +@linear_operator.make_composite_tensor +class LinearOperatorTridiag(linear_operator.LinearOperator): + """`LinearOperator` acting like a [batch] square tridiagonal matrix. + + This operator acts like a [batch] square tridiagonal matrix `A` with shape + `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is + an `N x M` matrix. This matrix `A` is not materialized, but for + purposes of broadcasting this shape will be relevant. + + Example usage: + + Create a 3 x 3 tridiagonal linear operator. + + >>> superdiag = [3., 4., 5.] + >>> diag = [1., -1., 2.] + >>> subdiag = [6., 7., 8] + >>> operator = tf.linalg.LinearOperatorTridiag( + ... [superdiag, diag, subdiag], + ... diagonals_format='sequence') + >>> operator.to_dense() + + >>> operator.shape + TensorShape([3, 3]) + + Scalar Tensor output. 
+ + >>> operator.log_abs_determinant() + + + Create a [2, 3] batch of 4 x 4 linear operators. + + >>> diagonals = tf.random.normal(shape=[2, 3, 3, 4]) + >>> operator = tf.linalg.LinearOperatorTridiag( + ... diagonals, + ... diagonals_format='compact') + + Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible + since the batch dimensions, [2, 1], are broadcast to + operator.batch_shape = [2, 3]. + + >>> y = tf.random.normal(shape=[2, 1, 4, 2]) + >>> x = operator.solve(y) + >>> x + + + #### Shape compatibility + + This operator acts on [batch] matrix with compatible shape. + `x` is a batch matrix with compatible shape for `matmul` and `solve` if + + ``` + operator.shape = [B1,...,Bb] + [N, N], with b >= 0 + x.shape = [C1,...,Cc] + [N, R], + and [C1,...,Cc] broadcasts with [B1,...,Bb]. + ``` + + #### Performance + + Suppose `operator` is a `LinearOperatorTridiag` of shape `[N, N]`, + and `x.shape = [N, R]`. Then + + * `operator.matmul(x)` will take O(N * R) time. + * `operator.solve(x)` will take O(N * R) time. + + If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and + `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. + + #### Matrix property hints + + This `LinearOperator` is initialized with boolean flags of the form `is_X`, + for `X = non_singular, self_adjoint, positive_definite, square`. + These have the following meaning: + + * If `is_X == True`, callers should expect the operator to have the + property `X`. This is a promise that should be fulfilled, but is *not* a + runtime assert. For example, finite floating point precision may result + in these promises being violated. + * If `is_X == False`, callers should expect the operator to not have `X`. + * If `is_X == None` (the default), callers should have no expectation either + way. 
+ """ + + def __init__(self, + diagonals, + diagonals_format=_COMPACT, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name='LinearOperatorTridiag'): + r"""Initialize a `LinearOperatorTridiag`. + + Args: + diagonals: `Tensor` or list of `Tensor`s depending on `diagonals_format`. + + If `diagonals_format=sequence`, this is a list of three `Tensor`'s each + with shape `[B1, ..., Bb, N]`, `b >= 0, N >= 0`, representing the + superdiagonal, diagonal and subdiagonal in that order. Note the + superdiagonal is padded with an element in the last position, and the + subdiagonal is padded with an element in the front. + + If `diagonals_format=matrix` this is a `[B1, ... Bb, N, N]` shaped + `Tensor` representing the full tridiagonal matrix. + + If `diagonals_format=compact` this is a `[B1, ... Bb, 3, N]` shaped + `Tensor` with the second to last dimension indexing the + superdiagonal, diagonal and subdiagonal in that order. Note the + superdiagonal is padded with an element in the last position, and the + subdiagonal is padded with an element in the front. + + In every case, these `Tensor`s are all floating dtype. + diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is + `compact`. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its hermitian + transpose. If `diag.dtype` is real, this is auto-set to `True`. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form `x^H A x` has positive real part for all + nonzero `x`. Note that we do not require the operator to be + self-adjoint to be positive-definite. See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. + + Raises: + TypeError: If `diag.dtype` is not an allowed type. 
+ ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`. + """ + parameters = dict( + diagonals=diagonals, + diagonals_format=diagonals_format, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name + ) + + with ops.name_scope(name, values=[diagonals]): + if diagonals_format not in _DIAGONAL_FORMATS: + raise ValueError( + f'Argument `diagonals_format` must be one of compact, matrix, or ' + f'sequence. Received : {diagonals_format}.') + if diagonals_format == _SEQUENCE: + self._diagonals = [linear_operator_util.convert_nonref_to_tensor( + d, name='diag_{}'.format(i)) for i, d in enumerate(diagonals)] + dtype = self._diagonals[0].dtype + else: + self._diagonals = linear_operator_util.convert_nonref_to_tensor( + diagonals, name='diagonals') + dtype = self._diagonals.dtype + self._diagonals_format = diagonals_format + + super(LinearOperatorTridiag, self).__init__( + dtype=dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + parameters=parameters, + name=name) + + def _shape(self): + if self.diagonals_format == _MATRIX: + return self.diagonals.shape + if self.diagonals_format == _COMPACT: + # Remove the second to last dimension that contains the value 3. 
+ d_shape = self.diagonals.shape[:-2].concatenate( + self.diagonals.shape[-1]) + else: + broadcast_shape = array_ops.broadcast_static_shape( + self.diagonals[0].shape[:-1], + self.diagonals[1].shape[:-1]) + broadcast_shape = array_ops.broadcast_static_shape( + broadcast_shape, + self.diagonals[2].shape[:-1]) + d_shape = broadcast_shape.concatenate(self.diagonals[1].shape[-1]) + return d_shape.concatenate(d_shape[-1]) + + def _shape_tensor(self, diagonals=None): + diagonals = diagonals if diagonals is not None else self.diagonals + if self.diagonals_format == _MATRIX: + return array_ops.shape(diagonals) + if self.diagonals_format == _COMPACT: + d_shape = array_ops.shape(diagonals[..., 0, :]) + else: + broadcast_shape = array_ops.broadcast_dynamic_shape( + array_ops.shape(self.diagonals[0])[:-1], + array_ops.shape(self.diagonals[1])[:-1]) + broadcast_shape = array_ops.broadcast_dynamic_shape( + broadcast_shape, + array_ops.shape(self.diagonals[2])[:-1]) + d_shape = array_ops.concat( + [broadcast_shape, [array_ops.shape(self.diagonals[1])[-1]]], axis=0) + return array_ops.concat([d_shape, [d_shape[-1]]], axis=-1) + + def _assert_self_adjoint(self): + # Check the diagonal has non-zero imaginary, and the super and subdiagonals + # are conjugate. + + asserts = [] + diag_message = ( + 'This tridiagonal operator contained non-zero ' + 'imaginary values on the diagonal.') + off_diag_message = ( + 'This tridiagonal operator has non-conjugate ' + 'subdiagonal and superdiagonal.') + + if self.diagonals_format == _MATRIX: + asserts += [check_ops.assert_equal( + self.diagonals, linalg.adjoint(self.diagonals), + message='Matrix was not equal to its adjoint.')] + elif self.diagonals_format == _COMPACT: + diagonals = tensor_conversion.convert_to_tensor_v2_with_dispatch( + self.diagonals + ) + asserts += [linear_operator_util.assert_zero_imag_part( + diagonals[..., 1, :], message=diag_message)] + # Roll the subdiagonal so the shifted argument is at the end. 
+ subdiag = manip_ops.roll(diagonals[..., 2, :], shift=-1, axis=-1) + asserts += [check_ops.assert_equal( + math_ops.conj(subdiag[..., :-1]), + diagonals[..., 0, :-1], + message=off_diag_message)] + else: + asserts += [linear_operator_util.assert_zero_imag_part( + self.diagonals[1], message=diag_message)] + subdiag = manip_ops.roll(self.diagonals[2], shift=-1, axis=-1) + asserts += [check_ops.assert_equal( + math_ops.conj(subdiag[..., :-1]), + self.diagonals[0][..., :-1], + message=off_diag_message)] + return control_flow_ops.group(asserts) + + def _construct_adjoint_diagonals(self, diagonals): + # Constructs adjoint tridiagonal matrix from diagonals. + if self.diagonals_format == _SEQUENCE: + diagonals = [math_ops.conj(d) for d in reversed(diagonals)] + # The subdiag and the superdiag swap places, so we need to shift the + # padding argument. + diagonals[0] = manip_ops.roll(diagonals[0], shift=-1, axis=-1) + diagonals[2] = manip_ops.roll(diagonals[2], shift=1, axis=-1) + return diagonals + elif self.diagonals_format == _MATRIX: + return linalg.adjoint(diagonals) + else: + diagonals = math_ops.conj(diagonals) + superdiag, diag, subdiag = array_ops_stack.unstack( + diagonals, num=3, axis=-2) + # The subdiag and the superdiag swap places, so we need + # to shift all arguments. 
+ new_superdiag = manip_ops.roll(subdiag, shift=-1, axis=-1) + new_subdiag = manip_ops.roll(superdiag, shift=1, axis=-1) + return array_ops_stack.stack([new_superdiag, diag, new_subdiag], axis=-2) + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + diagonals = self.diagonals + if adjoint: + diagonals = self._construct_adjoint_diagonals(diagonals) + x = linalg.adjoint(x) if adjoint_arg else x + return linalg.tridiagonal_matmul( + diagonals, x, + diagonals_format=self.diagonals_format) + + def _solve(self, rhs, adjoint=False, adjoint_arg=False): + diagonals = self.diagonals + if adjoint: + diagonals = self._construct_adjoint_diagonals(diagonals) + + # TODO(b/144860784): Remove the broadcasting code below once + # tridiagonal_solve broadcasts. + + rhs_shape = array_ops.shape(rhs) + k = self._shape_tensor(diagonals)[-1] + broadcast_shape = array_ops.broadcast_dynamic_shape( + self._shape_tensor(diagonals)[:-2], rhs_shape[:-2]) + rhs = array_ops.broadcast_to( + rhs, array_ops.concat( + [broadcast_shape, rhs_shape[-2:]], axis=-1)) + if self.diagonals_format == _MATRIX: + diagonals = array_ops.broadcast_to( + diagonals, array_ops.concat( + [broadcast_shape, [k, k]], axis=-1)) + elif self.diagonals_format == _COMPACT: + diagonals = array_ops.broadcast_to( + diagonals, array_ops.concat( + [broadcast_shape, [3, k]], axis=-1)) + else: + diagonals = [ + array_ops.broadcast_to(d, array_ops.concat( + [broadcast_shape, [k]], axis=-1)) for d in diagonals] + + y = linalg.tridiagonal_solve( + diagonals, rhs, + diagonals_format=self.diagonals_format, + transpose_rhs=adjoint_arg, + conjugate_rhs=adjoint_arg) + return y + + def _diag_part(self): + if self.diagonals_format == _MATRIX: + return array_ops.matrix_diag_part(self.diagonals) + elif self.diagonals_format == _SEQUENCE: + diagonal = self.diagonals[1] + return array_ops.broadcast_to( + diagonal, self.shape_tensor()[:-1]) + else: + return self.diagonals[..., 1, :] + + def _to_dense(self): + if self.diagonals_format == 
_MATRIX: + return self.diagonals + + if self.diagonals_format == _COMPACT: + return gen_array_ops.matrix_diag_v3( + self.diagonals, + k=(-1, 1), + num_rows=-1, + num_cols=-1, + align='LEFT_RIGHT', + padding_value=0.) + + diagonals = [ + tensor_conversion.convert_to_tensor_v2_with_dispatch(d) + for d in self.diagonals + ] + diagonals = array_ops_stack.stack(diagonals, axis=-2) + + return gen_array_ops.matrix_diag_v3( + diagonals, + k=(-1, 1), + num_rows=-1, + num_cols=-1, + align='LEFT_RIGHT', + padding_value=0.) + + @property + def diagonals(self): + return self._diagonals + + @property + def diagonals_format(self): + return self._diagonals_format + + @property + def _composite_tensor_fields(self): + return ('diagonals', 'diagonals_format') + + @property + def _experimental_parameter_ndims_to_matrix_ndims(self): + diagonal_event_ndims = 2 + if self.diagonals_format == _SEQUENCE: + # For the diagonal and the super/sub diagonals. + diagonal_event_ndims = [1, 1, 1] + return { + 'diagonals': diagonal_event_ndims, + } diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_util.py new file mode 100644 index 0000000000000000000000000000000000000000..ae601ff8781ce639c492a51da7a4a0e373616ce2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_util.py @@ -0,0 +1,627 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Internal utilities for `LinearOperator` classes.""" + +import numpy as np + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.module import module +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import variables as variables_module +from tensorflow.python.util import nest + + +################################################################################ +# To make more friendly for TF2. +################################################################################ + + +def convert_nonref_to_tensor(value, dtype=None, dtype_hint=None, name=None): + """Converts the given `value` to a `Tensor` if input is nonreference type. + + This function converts Python objects of various types to `Tensor` objects + except if the input has nonreference semantics. Reference semantics are + characterized by `is_ref` and is any object which is a + `tf.Variable` or instance of `tf.Module`. This function accepts any input + which `tf.convert_to_tensor` would also. + + Note: This function diverges from default Numpy behavior for `float` and + `string` types when `None` is present in a Python list or scalar. Rather + than silently converting `None` values, an error will be thrown. + + Args: + value: An object whose type has a registered `Tensor` conversion function. + dtype: Optional element type for the returned tensor. If missing, the + type is inferred from the type of `value`. 
+ dtype_hint: Optional element type for the returned tensor, + used when dtype is None. In some cases, a caller may not have a + dtype in mind when converting to a tensor, so dtype_hint + can be used as a soft preference. If the conversion to + `dtype_hint` is not possible, this argument has no effect. + name: Optional name to use if a new `Tensor` is created. + + Returns: + tensor: A `Tensor` based on `value`. + + Raises: + TypeError: If no conversion function is registered for `value` to `dtype`. + RuntimeError: If a registered conversion function returns an invalid value. + ValueError: If the `value` is a tensor not of given `dtype` in graph mode. + + + #### Examples: + + ```python + + x = tf.Variable(0.) + y = convert_nonref_to_tensor(x) + x is y + # ==> True + + x = tf.constant(0.) + y = convert_nonref_to_tensor(x) + x is y + # ==> True + + x = np.array(0.) + y = convert_nonref_to_tensor(x) + x is y + # ==> False + tf.is_tensor(y) + # ==> True + + x = tfp.util.DeferredTensor(13.37, lambda x: x) + y = convert_nonref_to_tensor(x) + x is y + # ==> True + tf.is_tensor(y) + # ==> False + tf.equal(y, 13.37) + # ==> True + ``` + + """ + # We explicitly do not use a tf.name_scope to avoid graph clutter. 
+ if value is None: + return None + if is_ref(value): + if dtype is None: + return value + dtype_base = base_dtype(dtype) + value_dtype_base = base_dtype(value.dtype) + if dtype_base != value_dtype_base: + raise TypeError( + f"Argument `value` must be of dtype `{dtype_name(dtype_base)}` " + f"Received: `{dtype_name(value_dtype_base)}`.") + return value + return tensor_conversion.convert_to_tensor_v2_with_dispatch( + value, dtype=dtype, dtype_hint=dtype_hint, name=name + ) + + +def base_dtype(dtype): + """Returns a non-reference `dtype` based on this `dtype`.""" + dtype = dtypes.as_dtype(dtype) + if hasattr(dtype, "base_dtype"): + return dtype.base_dtype + return dtype + + +def dtype_name(dtype): + """Returns the string name for this `dtype`.""" + dtype = dtypes.as_dtype(dtype) + if hasattr(dtype, "name"): + return dtype.name + if hasattr(dtype, "__name__"): + return dtype.__name__ + return str(dtype) + + +def check_dtype(arg, dtype): + """Check that arg.dtype == self.dtype.""" + if arg.dtype.base_dtype != dtype: + raise TypeError( + f"Expected argument to have dtype {dtype}. Found: {arg.dtype} in " + f"tensor {arg}.") + + +def is_ref(x): + """Evaluates if the object has reference semantics. + + An object is deemed "reference" if it is a `tf.Variable` instance or is + derived from a `tf.Module` with `dtype` and `shape` properties. + + Args: + x: Any object. + + Returns: + is_ref: Python `bool` indicating input is has nonreference semantics, i.e., + is a `tf.Variable` or a `tf.Module` with `dtype` and `shape` properties. + """ + return ( + # Note: we check that tf.Variable is a class because we might be using a + # different backend other than TF. + isinstance(x, variables_module.Variable) or + (isinstance(x, module.Module) and hasattr(x, "dtype") and + hasattr(x, "shape"))) + + +def assert_not_ref_type(x, arg_name): + if is_ref(x): + raise TypeError( + f"Argument {arg_name} cannot be reference type. 
Found: {type(x)}.") + + +################################################################################ +# Asserts. +################################################################################ + + +def assert_no_entries_with_modulus_zero( + x, message=None, name="assert_no_entries_with_modulus_zero"): + """Returns `Op` that asserts Tensor `x` has no entries with modulus zero. + + Args: + x: Numeric `Tensor`, real, integer, or complex. + message: A string message to prepend to failure message. + name: A name to give this `Op`. + + Returns: + An `Op` that asserts `x` has no entries with modulus zero. + """ + with ops.name_scope(name, values=[x]): + x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x") + dtype = x.dtype.base_dtype + should_be_nonzero = math_ops.abs(x) + zero = tensor_conversion.convert_to_tensor_v2_with_dispatch( + 0, dtype=dtype.real_dtype + ) + return check_ops.assert_less(zero, should_be_nonzero, message=message) + + +def assert_zero_imag_part(x, message=None, name="assert_zero_imag_part"): + """Returns `Op` that asserts Tensor `x` has no non-zero imaginary parts. + + Args: + x: Numeric `Tensor`, real, integer, or complex. + message: A string message to prepend to failure message. + name: A name to give this `Op`. + + Returns: + An `Op` that asserts `x` has no entries with modulus zero. + """ + with ops.name_scope(name, values=[x]): + x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name="x") + dtype = x.dtype.base_dtype + + if dtype.is_floating: + return control_flow_ops.no_op() + + zero = tensor_conversion.convert_to_tensor_v2_with_dispatch( + 0, dtype=dtype.real_dtype + ) + return check_ops.assert_equal(zero, math_ops.imag(x), message=message) + + +def assert_compatible_matrix_dimensions(operator, x): + """Assert that an argument to solve/matmul has proper domain dimension. + + If `operator.shape[-2:] = [M, N]`, and `x.shape[-2:] = [Q, R]`, then + `operator.matmul(x)` is defined only if `N = Q`. 
This `Op` returns an + `Assert` that "fires" if this is not the case. Static checks are already + done by the base class `LinearOperator`. + + Args: + operator: `LinearOperator`. + x: `Tensor`. + + Returns: + `Assert` `Op`. + """ + # Static checks are done in the base class. Only tensor asserts here. + assert_same_dd = check_ops.assert_equal( + array_ops.shape(x)[-2], + operator.domain_dimension_tensor(), + # This error message made to look similar to error raised by static check + # in the base class. + message=("Dimensions are not compatible. " + "shape[-2] of argument to be the same as this operator")) + + return assert_same_dd + + +def assert_is_batch_matrix(tensor): + """Static assert that `tensor` has rank `2` or higher.""" + sh = tensor.shape + if sh.ndims is not None and sh.ndims < 2: + raise ValueError( + f"Expected [batch] matrix to have at least two dimensions. Found: " + f"{tensor}.") + + +def shape_tensor(shape, name=None): + """Convert Tensor using default type, unless empty list or tuple.""" + # Works just like random_ops._ShapeTensor. + if isinstance(shape, (tuple, list)) and not shape: + dtype = dtypes.int32 + else: + dtype = None + return tensor_conversion.convert_to_tensor_v2_with_dispatch( + shape, dtype=dtype, name=name + ) + + +################################################################################ +# Broadcasting versions of common linear algebra functions. +# TODO(b/77519145) Do this more efficiently in some special cases. +################################################################################ + + +def broadcast_matrix_batch_dims(batch_matrices, name=None): + """Broadcast leading dimensions of zero or more [batch] matrices. + + Example broadcasting one batch dim of two simple matrices. 
+ + ```python + x = [[1, 2], + [3, 4]] # Shape [2, 2], no batch dims + + y = [[[1]]] # Shape [1, 1, 1], 1 batch dim of shape [1] + + x_bc, y_bc = broadcast_matrix_batch_dims([x, y]) + + x_bc + ==> [[[1, 2], + [3, 4]]] # Shape [1, 2, 2], 1 batch dim of shape [1]. + + y_bc + ==> same as y + ``` + + Example broadcasting many batch dims + + ```python + x = tf.random.normal(shape=(2, 3, 1, 4, 4)) + y = tf.random.normal(shape=(1, 3, 2, 5, 5)) + x_bc, y_bc = broadcast_matrix_batch_dims([x, y]) + + x_bc.shape + ==> (2, 3, 2, 4, 4) + + y_bc.shape + ==> (2, 3, 2, 5, 5) + ``` + + Args: + batch_matrices: Iterable of `Tensor`s, each having two or more dimensions. + name: A string name to prepend to created ops. + + Returns: + bcast_matrices: List of `Tensor`s, with `bcast_matrices[i]` containing + the values from `batch_matrices[i]`, with possibly broadcast batch dims. + + Raises: + ValueError: If any input `Tensor` is statically determined to have less + than two dimensions. + """ + with ops.name_scope( + name or "broadcast_matrix_batch_dims", values=batch_matrices): + check_ops.assert_proper_iterable(batch_matrices) + batch_matrices = list(batch_matrices) + + for i, mat in enumerate(batch_matrices): + batch_matrices[i] = tensor_conversion.convert_to_tensor_v2_with_dispatch( + mat + ) + assert_is_batch_matrix(batch_matrices[i]) + + if len(batch_matrices) < 2: + return batch_matrices + + # Try static broadcasting. + # bcast_batch_shape is the broadcast batch shape of ALL matrices. + # E.g. 
if batch_matrices = [x, y], with + # x.shape = [2, j, k] (batch shape = [2]) + # y.shape = [3, 1, l, m] (batch shape = [3, 1]) + # ==> bcast_batch_shape = [3, 2] + bcast_batch_shape = batch_matrices[0].shape[:-2] + for mat in batch_matrices[1:]: + bcast_batch_shape = array_ops.broadcast_static_shape( + bcast_batch_shape, + mat.shape[:-2]) + if bcast_batch_shape.is_fully_defined(): + for i, mat in enumerate(batch_matrices): + if mat.shape[:-2] != bcast_batch_shape: + bcast_shape = array_ops.concat( + [bcast_batch_shape.as_list(), array_ops.shape(mat)[-2:]], axis=0) + batch_matrices[i] = array_ops.broadcast_to(mat, bcast_shape) + return batch_matrices + + # Since static didn't work, do dynamic, which always copies data. + bcast_batch_shape = array_ops.shape(batch_matrices[0])[:-2] + for mat in batch_matrices[1:]: + bcast_batch_shape = array_ops.broadcast_dynamic_shape( + bcast_batch_shape, + array_ops.shape(mat)[:-2]) + for i, mat in enumerate(batch_matrices): + batch_matrices[i] = array_ops.broadcast_to( + mat, + array_ops.concat( + [bcast_batch_shape, array_ops.shape(mat)[-2:]], axis=0)) + + return batch_matrices + + +def matrix_solve_with_broadcast(matrix, rhs, adjoint=False, name=None): + """Solve systems of linear equations.""" + with ops.name_scope(name, "MatrixSolveWithBroadcast", [matrix, rhs]): + matrix = tensor_conversion.convert_to_tensor_v2_with_dispatch( + matrix, name="matrix" + ) + rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch( + rhs, name="rhs", dtype=matrix.dtype + ) + + # If either matrix/rhs has extra dims, we can reshape to get rid of them. + matrix, rhs, reshape_inv, still_need_to_transpose = _reshape_for_efficiency( + matrix, rhs, adjoint_a=adjoint) + + # This will broadcast by brute force if we still need to. 
+ matrix, rhs = broadcast_matrix_batch_dims([matrix, rhs]) + + solution = linalg_ops.matrix_solve( + matrix, rhs, adjoint=adjoint and still_need_to_transpose) + + return reshape_inv(solution) + + +def _reshape_for_efficiency(a, + b, + transpose_a=False, + transpose_b=False, + adjoint_a=False, + adjoint_b=False): + """Maybe reshape a, b, and return an inverse map. For matmul/solve.""" + def identity(x): + return x + + # At this point, we have not taken transpose/adjoint of a/b. + still_need_to_transpose = True + + if a.shape.ndims is None or b.shape.ndims is None: + return a, b, identity, still_need_to_transpose + + # This could be handled in the future, but seems less common. + if a.shape.ndims >= b.shape.ndims: + return a, b, identity, still_need_to_transpose + + # From now on, we might modify b, but will not modify a. + + # Suppose: + # a.shape = C + [m, n], b.shape = + # b.shape = S + C + [n, r] + b_extra_ndims = b.shape.ndims - a.shape.ndims + + # b_extra_sh = S, b_main_sh = C + [n, r] + b_extra_sh = array_ops.shape(b)[:b_extra_ndims] + b_main_sh = array_ops.shape(b)[b_extra_ndims:] + + # No reason to flip unless the extra dims of b are big enough. Why? + # Assume adjoint/transpose = False. Then... + # By not flipping, we have to replicate a to shape + # b_extra_sh + a.shape, + # which could use extra memory. But in all cases, the final output has shape + # b_extra_sh + a.shape[:-1] + [b.shape[-1]] + # So we only end up creating a larger object if the end dim of b is smaller + # than the end dim of a. This often happens, e.g. if b was a vector that was + # expanded to a matrix (by appending a singleton). + + # Since adjoint/transpose may not be False, we must make adjustments here. + # The dim of b that holds the multiple equations. 
+ a_domain_sz_ = a.shape[-2 if adjoint_a or transpose_a else -1] + b_eq_sz_ = b.shape[-2 if adjoint_b or transpose_b else -1] + b_extra_sz_ = ( + np.prod(b.shape[:b_extra_ndims].as_list()) + if b.shape[:b_extra_ndims].is_fully_defined() else None) + if (a_domain_sz_ is not None and b_eq_sz_ is not None and + b_extra_sz_ is not None): + if b_extra_sz_ < 2 or a_domain_sz_ <= b_eq_sz_: + return a, b, identity, still_need_to_transpose + + # At this point, we're flipping for sure! + # Any transposes/adjoints will happen here explicitly, rather than in calling + # code. Why? To avoid having to write separate complex code for each case. + if adjoint_a: + a = array_ops.matrix_transpose(a, conjugate=True) + elif transpose_a: + a = array_ops.matrix_transpose(a, conjugate=False) + if adjoint_b: + b = array_ops.matrix_transpose(b, conjugate=True) + elif transpose_a: + b = array_ops.matrix_transpose(b, conjugate=False) + still_need_to_transpose = False + + # Recompute shapes, since the transpose/adjoint may have changed them. + b_extra_sh = array_ops.shape(b)[:b_extra_ndims] + b_main_sh = array_ops.shape(b)[b_extra_ndims:] + + # Permutation to put the extra dims at the end. + perm = ( + np.concatenate( + (np.arange(b_extra_ndims, b.shape.ndims), + np.arange(0, b_extra_ndims)), 0)) + b_extra_on_end = array_ops.transpose(b, perm=perm) + + # Now squash this end into one long dim. + b_squashed_end = array_ops.reshape( + b_extra_on_end, array_ops.concat((b_main_sh[:-1], [-1]), 0)) + + def reshape_inv(y): + # Expand the extra dims hanging off the end, "b_extra_sh". + # Note we use y_sh[:-1] + [b_main_sh[-1]] rather than b_main_sh, because y + # Could have different batch dims than a and b, because of broadcasting. 
+ y_extra_shape = array_ops.concat( + (array_ops.shape(y)[:-1], [b_main_sh[-1]], b_extra_sh), 0) + y_extra_on_end = array_ops.reshape(y, y_extra_shape) + inverse_perm = np.argsort(perm) + return array_ops.transpose(y_extra_on_end, perm=inverse_perm) + + return a, b_squashed_end, reshape_inv, still_need_to_transpose + + +################################################################################ +# Helpers for hints. +################################################################################ + + +def is_adjoint_pair(x, y): + """True iff x and y are adjoints of each other (by id, not entries).""" + if x is y: # Note that if x is y then all of their hints are the same! + if x.is_self_adjoint is False: # pylint:disable=g-bool-id-comparison + return False + if x.is_self_adjoint: + return True + # Use the fact that if x = LinearOperatorAdjoint(y), then x.H is y. + return x.H is y or y.H is x + + +def is_aat_form(operators): + """Returns True if operators is of the form A @ A.H, possibly recursively.""" + operators = list(operators) + if not operators: + raise ValueError("AAT form is undefined for empty operators") + + if len(operators) % 2: + return False + + # Check for forms like (A1 @ A2) @ (A2.H @ A1.H) + return all( + is_adjoint_pair(operators[i], operators[-1 - i]) + for i in range(len(operators) // 2)) + + +def use_operator_or_provided_hint_unless_contradicting( + operator, hint_attr_name, provided_hint_value, message): + """Get combined hint in the case where operator.hint should equal hint. + + Args: + operator: LinearOperator that a meta-operator was initialized with. + hint_attr_name: String name for the attribute. + provided_hint_value: Bool or None. Value passed by user in initialization. + message: Error message to print if hints contradict. + + Returns: + True, False, or None. + + Raises: + ValueError: If hints contradict. 
+ """ + op_hint = getattr(operator, hint_attr_name) + # pylint: disable=g-bool-id-comparison + if op_hint is False and provided_hint_value: + raise ValueError(message) + if op_hint and provided_hint_value is False: + raise ValueError(message) + if op_hint or provided_hint_value: + return True + if op_hint is False or provided_hint_value is False: + return False + # pylint: enable=g-bool-id-comparison + return None + + +################################################################################ +# Utilities for blockwise operators. +################################################################################ + + +def arg_is_blockwise(block_dimensions, arg, arg_split_dim): + """Detect if input should be interpreted as a list of blocks.""" + # Tuples and lists of length equal to the number of operators may be + # blockwise. + if (isinstance(arg, (tuple, list)) and len(arg) == len(block_dimensions)): + # If the elements of the iterable are not nested, interpret the input as + # blockwise. + if not any(nest.is_nested(x) for x in arg): + return True + else: + arg_dims = [ + tensor_conversion.convert_to_tensor_v2_with_dispatch(x).shape[ + arg_split_dim + ] + for x in arg + ] + self_dims = [dim.value for dim in block_dimensions] + + # If none of the operator dimensions are known, interpret the input as + # blockwise if its matching dimensions are unequal. + if all(self_d is None for self_d in self_dims): + + # A nested tuple/list with a single outermost element is not blockwise + if len(arg_dims) == 1: + return False + elif any(dim != arg_dims[0] for dim in arg_dims): + return True + else: + raise ValueError( + "Parsing of the input structure is ambiguous. Please input " + "a blockwise iterable of `Tensor`s or a single `Tensor`.") + + # If input dimensions equal the respective (known) blockwise operator + # dimensions, then the input is blockwise. 
+ if all(self_d == arg_d or self_d is None + for self_d, arg_d in zip(self_dims, arg_dims)): + return True + + # If input dimensions equals are all equal, and are greater than or equal + # to the sum of the known operator dimensions, interpret the input as + # blockwise. + # input is not blockwise. + self_dim = sum(self_d for self_d in self_dims if self_d is not None) + if all(s == arg_dims[0] for s in arg_dims) and arg_dims[0] >= self_dim: + return False + + # If none of these conditions is met, the input shape is mismatched. + raise ValueError("Input dimension does not match operator dimension.") + else: + return False + + +def split_arg_into_blocks(block_dims, block_dims_fn, arg, axis=-1): + """Split `x` into blocks matching `operators`'s `domain_dimension`. + + Specifically, if we have a blockwise lower-triangular matrix, with block + sizes along the diagonal `[M_j, M_j] j = 0,1,2..J`, this method splits `arg` + on `axis` into `J` tensors, whose shape at `axis` is `M_j`. + + Args: + block_dims: Iterable of `TensorShapes`. + block_dims_fn: Callable returning an iterable of `Tensor`s. + arg: `Tensor`. `arg` is split into `J` tensors. + axis: Python `Integer` representing the axis to split `arg` on. + + Returns: + A list of `Tensor`s. + """ + block_sizes = [dim.value for dim in block_dims] + if any(d is None for d in block_sizes): + block_sizes = block_dims_fn() + return array_ops.split(arg, block_sizes, axis=axis) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_zeros.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_zeros.py new file mode 100644 index 0000000000000000000000000000000000000000..8adee6efe904b6ccac60d8edf9e72729c5061e65 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/linear_operator_zeros.py @@ -0,0 +1,500 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a zero matrix."""

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import array_ops_stack
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export

__all__ = [
    "LinearOperatorZeros",
]


@tf_export("linalg.LinearOperatorZeros")
@linear_operator.make_composite_tensor
class LinearOperatorZeros(linear_operator.LinearOperator):
  """`LinearOperator` acting like a [batch] zero matrix.

  This operator acts like a [batch] zero matrix `A` with shape
  `[B1,...,Bb, N, M]` for some `b >= 0`. The first `b` indices index a
  batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
  an `N x M` matrix. This matrix `A` is not materialized, but for
  purposes of broadcasting this shape will be relevant.

  `LinearOperatorZeros` is initialized with `num_rows`, and optionally
  `num_columns`, `batch_shape`, and `dtype` arguments. If `num_columns` is
  `None`, then this operator will be initialized as a square matrix. If
  `batch_shape` is `None`, this operator efficiently passes through all
  arguments. If `batch_shape` is provided, broadcasting may occur, which will
  require making copies.

  ```python
  # Create a 2 x 2 zero matrix.
  operator = LinearOperatorZeros(num_rows=2, dtype=tf.float32)

  operator.to_dense()
  ==> [[0., 0.]
       [0., 0.]]

  operator.shape
  ==> [2, 2]

  operator.determinant()
  ==> 0.

  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor, same as x.

  # Create a 2-batch of 2x2 zero matrices
  operator = LinearOperatorZeros(num_rows=2, batch_shape=[2])
  operator.to_dense()
  ==> [[[0., 0.]
        [0., 0.]],
       [[0., 0.]
        [0., 0.]]]

  # Here, even though the operator has a batch shape, the input is the same as
  # the output, so x can be passed through without a copy. The operator is able
  # to detect that no broadcast is necessary because both x and the operator
  # have statically defined shape.
  x = ... Shape [2, 2, 3]
  operator.matmul(x)
  ==> Shape [2, 2, 3] Tensor, same as tf.zeros_like(x)

  # Here the operator and x have different batch_shape, and are broadcast.
  # This requires a copy, since the output is different size than the input.
  x = ... Shape [1, 2, 3]
  operator.matmul(x)
  ==> Shape [2, 2, 3] Tensor, equal to tf.zeros_like([x, x])
  ```

  ### Shape compatibility

  This operator acts on [batch] matrix with compatible shape.
  `x` is a batch matrix with compatible shape for `matmul` and `solve` if

  ```
  operator.shape = [B1,...,Bb] + [N, M],  with b >= 0
  x.shape = [C1,...,Cc] + [M, R],
  and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
  ```

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:

  * If `is_X == True`, callers should expect the operator to have the
    property `X`. This is a promise that should be fulfilled, but is *not* a
    runtime assert. For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               num_rows,
               num_columns=None,
               batch_shape=None,
               dtype=None,
               is_non_singular=False,
               is_self_adjoint=True,
               is_positive_definite=False,
               is_square=True,
               assert_proper_shapes=False,
               name="LinearOperatorZeros"):
    r"""Initialize a `LinearOperatorZeros`.

    The `LinearOperatorZeros` is initialized with arguments defining `dtype`
    and shape.

    This operator is able to broadcast the leading (batch) dimensions, which
    sometimes requires copying data. If `batch_shape` is `None`, the operator
    can take arguments of any batch shape without copying. See examples.

    Args:
      num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
        corresponding zero matrix.
      num_columns: Scalar non-negative integer `Tensor`. Number of columns in
        the corresponding zero matrix. If `None`, defaults to the value of
        `num_rows`.
      batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading
        dimensions. If `None`, this operator has no leading dimensions.
      dtype: Data type of the matrix that this operator represents. Defaults
        to `float32` if `None`.
      is_non_singular: Expect that this operator is non-singular. Must be
        `False` (a zero operator is always singular).
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose. Must be `True` when `is_square` is `True`.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`. Must be `False` for a zero operator. See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square: Expect that this operator acts like square [batch] matrices.
      assert_proper_shapes: Python `bool`. If `False`, only perform static
        checks that initialization and method arguments have proper shape.
        If `True`, and static checks are inconclusive, add asserts to the
        graph.
      name: A name for this `LinearOperator`.

    Raises:
      ValueError: If `num_rows` is determined statically to be non-scalar, or
        negative.
      ValueError: If `num_columns` is determined statically to be non-scalar,
        or negative.
      ValueError: If `batch_shape` is determined statically to not be 1-D, or
        negative.
      ValueError: If any of the following is not `True`:
        `{is_self_adjoint, is_non_singular, is_positive_definite}`.
    """
    parameters = dict(
        num_rows=num_rows,
        num_columns=num_columns,
        batch_shape=batch_shape,
        dtype=dtype,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        assert_proper_shapes=assert_proper_shapes,
        name=name
    )

    dtype = dtype or dtypes.float32
    self._assert_proper_shapes = assert_proper_shapes

    with ops.name_scope(name):
      dtype = dtypes.as_dtype(dtype)
      # A zero matrix can only violate these hints, so reject contradictory
      # flags up front rather than silently producing a broken operator.
      if not is_self_adjoint and is_square:
        raise ValueError("A zero operator is always self adjoint.")
      if is_non_singular:
        raise ValueError("A zero operator is always singular.")
      if is_positive_definite:
        raise ValueError("A zero operator is always not positive-definite.")

      super(LinearOperatorZeros, self).__init__(
          dtype=dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          parameters=parameters,
          name=name)

      linear_operator_util.assert_not_ref_type(num_rows, "num_rows")
      linear_operator_util.assert_not_ref_type(num_columns, "num_columns")
      linear_operator_util.assert_not_ref_type(batch_shape, "batch_shape")

      self._num_rows = linear_operator_util.shape_tensor(
          num_rows, name="num_rows")
      self._num_rows_static = tensor_util.constant_value(self._num_rows)

      if num_columns is None:
        num_columns = num_rows

      self._num_columns = linear_operator_util.shape_tensor(
          num_columns, name="num_columns")
      self._num_columns_static = tensor_util.constant_value(self._num_columns)

      self._check_domain_range_possibly_add_asserts()

      if (self._num_rows_static is not None and
          self._num_columns_static is not None):
        if is_square and self._num_rows_static != self._num_columns_static:
          raise ValueError(
              "LinearOperatorZeros initialized as is_square=True, but got "
              "num_rows({}) != num_columns({})".format(
                  self._num_rows_static,
                  self._num_columns_static))

      if batch_shape is None:
        self._batch_shape_arg = None
        # Keep the static attribute defined on every code path so readers
        # (and debuggers) never hit an AttributeError.
        self._batch_shape_static = None
      else:
        self._batch_shape_arg = linear_operator_util.shape_tensor(
            batch_shape, name="batch_shape_arg")
        self._batch_shape_static = tensor_util.constant_value(
            self._batch_shape_arg)
        self._check_batch_shape_possibly_add_asserts()

  def _shape(self):
    # Static shape: [*batch_shape, num_rows, num_columns] when known.
    matrix_shape = tensor_shape.TensorShape((self._num_rows_static,
                                             self._num_columns_static))
    if self._batch_shape_arg is None:
      return matrix_shape

    batch_shape = tensor_shape.TensorShape(self._batch_shape_static)
    return batch_shape.concatenate(matrix_shape)

  def _shape_tensor(self):
    # Dynamic counterpart of `_shape`.
    matrix_shape = array_ops_stack.stack(
        (self._num_rows, self._num_columns), axis=0)
    if self._batch_shape_arg is None:
      return matrix_shape

    return array_ops.concat((self._batch_shape_arg, matrix_shape), 0)

  def _assert_non_singular(self):
    raise errors.InvalidArgumentError(
        node_def=None, op=None, message="Zero operators are always "
        "non-invertible.")

  def _assert_positive_definite(self):
    raise errors.InvalidArgumentError(
        node_def=None, op=None, message="Zero operators are always "
        "non-positive definite.")

  def _assert_self_adjoint(self):
    # A zero matrix equals its own adjoint, so this assertion is trivially
    # satisfied; return a no-op for graph-mode callers.
    return control_flow_ops.no_op("assert_self_adjoint")

  def _possibly_broadcast_batch_shape(self, x):
    """Return 'x', possibly after broadcasting the leading dimensions."""
    # If we have no batch shape, our batch shape broadcasts with everything!
    if self._batch_shape_arg is None:
      return x

    # Static attempt:
    #   If we determine that no broadcast is necessary, pass x through
    #   If we need a broadcast, add to an array of zeros.
    #
    # special_shape is the shape that, when broadcast with x's shape, will give
    # the correct broadcast_shape. Note that
    #   We have already verified the second to last dimension of self.shape
    #   matches x's shape in assert_compatible_matrix_dimensions.
    #   Also, the final dimension of 'x' can have any shape.
    # Therefore, the final two dimensions of special_shape are 1's.
    special_shape = self.batch_shape.concatenate([1, 1])
    bshape = array_ops.broadcast_static_shape(x.shape, special_shape)
    if special_shape.is_fully_defined():
      # bshape.is_fully_defined iff special_shape.is_fully_defined.
      if bshape == x.shape:
        return x
      # Use the built in broadcasting of addition.
      zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
      return x + zeros

    # Dynamic broadcast:
    #   Always add to an array of zeros, rather than using a "cond", since a
    #   cond would require copying data from GPU --> CPU.
    special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0)
    zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
    return x + zeros

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    if self._assert_proper_shapes:
      x = linalg.adjoint(x) if adjoint_arg else x
      aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
      x = control_flow_ops.with_dependencies([aps], x)
      # `x` has now been materialized in its (possibly) adjointed form, so
      # clear the flag: the shape logic below must not transpose a second
      # time.  (Previously this was skipped, yielding a wrongly-transposed
      # output shape when assert_proper_shapes and adjoint_arg were both
      # True.)
      adjoint_arg = False
    if self.is_square:
      # Note that adjoint has no effect since this matrix is self-adjoint.
      if adjoint_arg:
        output_shape = array_ops.concat([
            array_ops.shape(x)[:-2],
            [array_ops.shape(x)[-1], array_ops.shape(x)[-2]]], axis=0)
      else:
        output_shape = array_ops.shape(x)

      return self._possibly_broadcast_batch_shape(
          array_ops.zeros(shape=output_shape, dtype=x.dtype))

    x_shape = array_ops.shape(x)
    # Output has self's output dimension and x's "number of systems" column
    # count.
    n = self._num_columns if adjoint else self._num_rows
    m = x_shape[-2] if adjoint_arg else x_shape[-1]

    output_shape = array_ops.concat([x_shape[:-2], [n, m]], axis=0)

    zeros = array_ops.zeros(shape=output_shape, dtype=x.dtype)
    return self._possibly_broadcast_batch_shape(zeros)

  def _linop_matmul(
      self,
      left_operator: "LinearOperatorZeros",
      right_operator: linear_operator.LinearOperator
  ) -> linear_operator.LinearOperator:
    # Zero times anything is zero, so the composition is the zero operator
    # itself (square case only).
    if not left_operator.is_square or not right_operator.is_square:
      raise ValueError("Matmul with non-square `LinearOperator`s or non-square "
                       "`LinearOperatorZeros` not supported at this time.")
    return left_operator

  def _determinant(self):
    # det(0) == 0, for every batch member.
    if self.batch_shape.is_fully_defined():
      return array_ops.zeros(shape=self.batch_shape, dtype=self.dtype)
    else:
      return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)

  def _trace(self):
    # Get Tensor of all zeros of same shape as self.batch_shape.
    if self.batch_shape.is_fully_defined():
      return array_ops.zeros(shape=self.batch_shape, dtype=self.dtype)
    else:
      return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)

  def _diag_part(self):
    return self._zeros_diag()

  def add_to_tensor(self, mat, name="add_to_tensor"):
    """Add matrix represented by this operator to `mat`.

    Since this operator is the zero matrix, the result is `mat` itself
    (possibly broadcast to this operator's batch shape).

    Args:
      mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
      name: A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    return self._possibly_broadcast_batch_shape(mat)

  def _check_domain_range_possibly_add_asserts(self):
    """Static check of init arg `num_rows`, possibly add asserts."""
    # Possibly add asserts.
    if self._assert_proper_shapes:
      self._num_rows = control_flow_ops.with_dependencies([
          check_ops.assert_rank(
              self._num_rows,
              0,
              message="Argument num_rows must be a 0-D Tensor."),
          check_ops.assert_non_negative(
              self._num_rows,
              message="Argument num_rows must be non-negative."),
      ], self._num_rows)
      self._num_columns = control_flow_ops.with_dependencies([
          check_ops.assert_rank(
              self._num_columns,
              0,
              message="Argument num_columns must be a 0-D Tensor."),
          check_ops.assert_non_negative(
              self._num_columns,
              message="Argument num_columns must be non-negative."),
      ], self._num_columns)

    # Static checks.
    if not self._num_rows.dtype.is_integer:
      raise TypeError("Argument num_rows must be integer type. Found:"
                      " %s" % self._num_rows)

    if not self._num_columns.dtype.is_integer:
      raise TypeError("Argument num_columns must be integer type. Found:"
                      " %s" % self._num_columns)

    num_rows_static = self._num_rows_static
    num_columns_static = self._num_columns_static

    if num_rows_static is not None:
      if num_rows_static.ndim != 0:
        raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
                         " %s" % num_rows_static)

      if num_rows_static < 0:
        raise ValueError("Argument num_rows must be non-negative. Found:"
                         " %s" % num_rows_static)
    if num_columns_static is not None:
      if num_columns_static.ndim != 0:
        raise ValueError("Argument num_columns must be a 0-D Tensor. Found:"
                         " %s" % num_columns_static)

      if num_columns_static < 0:
        raise ValueError("Argument num_columns must be non-negative. Found:"
                         " %s" % num_columns_static)

  def _check_batch_shape_possibly_add_asserts(self):
    """Static check of init arg `batch_shape`, possibly add asserts."""
    if self._batch_shape_arg is None:
      return

    # Possibly add asserts
    if self._assert_proper_shapes:
      self._batch_shape_arg = control_flow_ops.with_dependencies([
          check_ops.assert_rank(
              self._batch_shape_arg,
              1,
              message="Argument batch_shape must be a 1-D Tensor."),
          check_ops.assert_non_negative(
              self._batch_shape_arg,
              message="Argument batch_shape must be non-negative."),
      ], self._batch_shape_arg)

    # Static checks
    if not self._batch_shape_arg.dtype.is_integer:
      raise TypeError("Argument batch_shape must be integer type. Found:"
                      " %s" % self._batch_shape_arg)

    if self._batch_shape_static is None:
      return  # Cannot do any other static checks.

    if self._batch_shape_static.ndim != 1:
      raise ValueError("Argument batch_shape must be a 1-D Tensor. Found:"
                       " %s" % self._batch_shape_static)

    if np.any(self._batch_shape_static < 0):
      raise ValueError("Argument batch_shape must be non-negative. Found:"
                       "%s" % self._batch_shape_static)

  def _min_matrix_dim(self):
    """Minimum of domain/range dimension, if statically available, else None."""
    domain_dim = self.domain_dimension.value
    range_dim = self.range_dimension.value
    if domain_dim is None or range_dim is None:
      return None
    return min(domain_dim, range_dim)

  def _min_matrix_dim_tensor(self):
    """Minimum of domain/range dimension, as a tensor."""
    return math_ops.reduce_min(self.shape_tensor()[-2:])

  def _zeros_diag(self):
    """Returns the diagonal of this operator as all zeros."""
    if self.shape.is_fully_defined():
      d_shape = self.batch_shape.concatenate([self._min_matrix_dim()])
    else:
      d_shape = array_ops.concat(
          [self.batch_shape_tensor(),
           [self._min_matrix_dim_tensor()]], axis=0)

    return array_ops.zeros(shape=d_shape, dtype=self.dtype)

  def _eigvals(self):
    # Every eigenvalue of the zero matrix is zero.
    return self._zeros_diag()

  @property
  def _composite_tensor_prefer_static_fields(self):
    return ("num_rows", "num_columns", "batch_shape")

  @property
  def _composite_tensor_fields(self):
    return ("num_rows", "num_columns", "batch_shape", "dtype",
            "assert_proper_shapes")

  def __getitem__(self, slices):
    # Slice the batch shape and return a new LinearOperatorZeros.
    # Use a proxy shape and slice it. Use this as the new batch shape
    # NOTE(review): assumes `batch_shape` was given at construction;
    # `ones(None)` would fail otherwise -- confirm callers.
    new_batch_shape = array_ops.shape(
        array_ops.ones(self._batch_shape_arg)[slices])
    parameters = dict(self.parameters, batch_shape=new_batch_shape)
    return LinearOperatorZeros(**parameters)

# --- vendored-diff residue (next file:
# tensorflow/python/ops/linalg/property_hint_util.py) ---
# Copyright 2019 The TensorFlow Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities for LinearOperator property hints."""


# Note: only use this method in the commuting case.
def combined_commuting_self_adjoint_hint(operator_a, operator_b):
  """Gets a combined hint for self-adjoint-ness of a commuting composition.

  Args:
    operator_a: An object exposing a ternary `is_self_adjoint` hint
      (`True`/`False`/`None`), e.g. a `LinearOperator`.
    operator_b: Same as `operator_a`.

  Returns:
    `True` if both operators are hinted self-adjoint, `False` if exactly one
    is hinted self-adjoint and the other hinted not, `None` otherwise.
  """
  # The property is preserved under composition when the operators commute.
  if operator_a.is_self_adjoint and operator_b.is_self_adjoint:
    return True

  # The property is not preserved when an operator with the property is
  # composed with an operator without the property.
  # pylint:disable=g-bool-id-comparison
  if ((operator_a.is_self_adjoint is True and
       operator_b.is_self_adjoint is False) or
      (operator_a.is_self_adjoint is False and
       operator_b.is_self_adjoint is True)):
    return False
  # pylint:enable=g-bool-id-comparison

  # The property is not known when operators are not known to have the property
  # or both operators don't have the property (the property for the complement
  # class is not closed under composition).
  return None


def is_square(operator_a, operator_b):
  """Returns a hint to whether the composition `operator_a @ operator_b` is square."""
  if operator_a.is_square and operator_b.is_square:
    return True
  if operator_a.is_square is False and operator_b.is_square is False:  # pylint:disable=g-bool-id-comparison
    # Let A have shape [B, M, N], B have shape [B, N, L]; the product has
    # shape [B, M, L] and is square iff M == L.
    m = operator_a.range_dimension
    l = operator_b.domain_dimension
    if m is not None and l is not None:
      return m == l

  # Exactly one operator is known non-square: the product cannot be square.
  if (operator_a.is_square != operator_b.is_square) and (
      operator_a.is_square is not None and operator_b.is_square is not None):
    return False

  return None


# Note: Positive definiteness is only guaranteed to be preserved
# when the operators commute and are symmetric. Only use this method in
# commuting cases.
def combined_commuting_positive_definite_hint(operator_a, operator_b):
  """Gets a combined positive-definiteness hint for commuting compositions."""
  # pylint:disable=g-bool-id-comparison
  if (operator_a.is_positive_definite is True and
      operator_a.is_self_adjoint is True and
      operator_b.is_positive_definite is True and
      operator_b.is_self_adjoint is True):
    return True
  # pylint:enable=g-bool-id-comparison

  # Anything weaker than "both self-adjoint and PD" leaves the hint unknown.
  return None


def combined_non_singular_hint(operator_a, operator_b):
  """Gets a combined non-singularity hint for compositions."""
  # If either operator is not-invertible the composition isn't.
  # pylint:disable=g-bool-id-comparison
  if (operator_a.is_non_singular is False or
      operator_b.is_non_singular is False):
    return False
  # pylint:enable=g-bool-id-comparison

  # True only when both are hinted non-singular; None propagates otherwise.
  return operator_a.is_non_singular and operator_b.is_non_singular

# --- vendored-diff residue (next file:
# tensorflow/python/ops/linalg/slicing.py) ---
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for slicing into a `LinearOperator`."""

import collections
import functools
import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest


__all__ = ['batch_slice']


def _prefer_static_where(condition, x, y):
  """`where` that evaluates statically when all arguments are constants."""
  args = [condition, x, y]
  constant_args = [tensor_util.constant_value(a) for a in args]
  # Do this statically.
  if all(arg is not None for arg in constant_args):
    condition_, x_, y_ = constant_args
    return np.where(condition_, x_, y_)
  return array_ops.where(condition, x, y)


def _broadcast_parameter_with_batch_shape(
    param, param_ndims_to_matrix_ndims, batch_shape):
  """Broadcasts `param` with the given batch shape, recursively."""
  if hasattr(param, 'batch_shape_tensor'):
    # Recursively broadcast every parameter inside the operator.
    override_dict = {}
    for name, ndims in param._experimental_parameter_ndims_to_matrix_ndims.items():  # pylint:disable=protected-access,line-too-long
      sub_param = getattr(param, name)
      override_dict[name] = nest.map_structure_up_to(
          sub_param, functools.partial(
              _broadcast_parameter_with_batch_shape,
              batch_shape=batch_shape), sub_param, ndims)
    parameters = dict(param.parameters, **override_dict)
    return type(param)(**parameters)

  # Plain Tensor parameter: pad with size-1 dims for the matrix dims, then
  # broadcast the batch dims against `batch_shape`.
  base_shape = array_ops.concat(
      [batch_shape, array_ops.ones(
          [param_ndims_to_matrix_ndims], dtype=dtypes.int32)], axis=0)
  return array_ops.broadcast_to(
      param,
      array_ops.broadcast_dynamic_shape(base_shape, array_ops.shape(param)))


def _sanitize_slices(slices, intended_shape, deficient_shape):
  """Restricts slices to avoid overflowing size-1 (broadcast) dimensions.

  Args:
    slices: iterable of slices received by `__getitem__`.
    intended_shape: int `Tensor` shape for which the slices were intended.
    deficient_shape: int `Tensor` shape to which the slices will be applied.
      Must have the same rank as `intended_shape`.

  Returns:
    sanitized_slices: Python `list` of slice objects.
  """
  sanitized_slices = []
  idx = 0
  for slc in slices:
    if slc is Ellipsis:  # Switch over to negative indexing.
      if idx < 0:
        raise ValueError('Found multiple `...` in slices {}'.format(slices))
      num_remaining_non_newaxis_slices = sum(
          s is not array_ops.newaxis for s in slices[
              slices.index(Ellipsis) + 1:])
      idx = -num_remaining_non_newaxis_slices
    elif slc is array_ops.newaxis:
      pass
    else:
      is_broadcast = intended_shape[idx] > deficient_shape[idx]
      if isinstance(slc, slice):
        # Slices are denoted by start:stop:step.  On a broadcast (size-1)
        # dimension, clamp to 0:1:1 so the slice cannot overflow it.
        start, stop, step = slc.start, slc.stop, slc.step
        if start is not None:
          start = _prefer_static_where(is_broadcast, 0, start)
        if stop is not None:
          stop = _prefer_static_where(is_broadcast, 1, stop)
        if step is not None:
          step = _prefer_static_where(is_broadcast, 1, step)
        slc = slice(start, stop, step)
      else:  # int, or int Tensor, e.g. d[d.batch_shape_tensor()[0] // 2]
        slc = _prefer_static_where(is_broadcast, 0, slc)
      idx += 1
    sanitized_slices.append(slc)
  return sanitized_slices


def _slice_single_param(
    param, param_ndims_to_matrix_ndims, slices, batch_shape):
  """Slices into the batch shape of a single parameter.

  Args:
    param: The original parameter to slice; either a `Tensor` or an object
      with batch shape (LinearOperator).
    param_ndims_to_matrix_ndims: `int` number of right-most dimensions used for
      inferring matrix shape of the `LinearOperator`. For non-Tensor
      parameters, this is the number of this param's batch dimensions used by
      the matrix shape of the parent object.
    slices: iterable of slices received by `__getitem__`.
    batch_shape: The parameterized object's batch shape `Tensor`.

  Returns:
    new_param: Instance of the same type as `param`, batch-sliced according to
      `slices`.
  """
  # Broadcast the parameter to have full batch rank.
  param = _broadcast_parameter_with_batch_shape(
      param, param_ndims_to_matrix_ndims, array_ops.ones_like(batch_shape))

  if hasattr(param, 'batch_shape_tensor'):
    param_batch_shape = param.batch_shape_tensor()
  else:
    param_batch_shape = array_ops.shape(param)
  # Truncate by param_ndims_to_matrix_ndims
  param_batch_rank = array_ops.size(param_batch_shape)
  param_batch_shape = param_batch_shape[
      :(param_batch_rank - param_ndims_to_matrix_ndims)]

  # At this point the param should have full batch rank, *unless* it's an
  # atomic object like `tfb.Identity()` incapable of having any batch rank.
  if (tensor_util.constant_value(array_ops.size(batch_shape)) != 0 and
      tensor_util.constant_value(array_ops.size(param_batch_shape)) == 0):
    return param
  param_slices = _sanitize_slices(
      slices, intended_shape=batch_shape, deficient_shape=param_batch_shape)

  # Extend `param_slices` (which represents slicing into the
  # parameter's batch shape) with the parameter's event ndims. For example, if
  # `params_ndims == 1`, then `[i, ..., j]` would become `[i, ..., j, :]`.
  if param_ndims_to_matrix_ndims > 0:
    if Ellipsis not in [
        slc for slc in slices if not tensor_util.is_tensor(slc)]:
      param_slices.append(Ellipsis)
    param_slices += [slice(None)] * param_ndims_to_matrix_ndims
  return param.__getitem__(tuple(param_slices))


def batch_slice(linop, params_overrides, slices):
  """Slices `linop` along its batch dimensions.

  Args:
    linop: A `LinearOperator` instance.
    params_overrides: A `dict` of parameter overrides.
    slices: A `slice` or `int` or `int` `Tensor` or `tf.newaxis` or `tuple`
      thereof. (e.g. the argument of a `__getitem__` method).

  Returns:
    new_linop: A batch-sliced `LinearOperator`.
  """
  if not isinstance(slices, collections.abc.Sequence):
    slices = (slices,)
  if len(slices) == 1 and slices[0] is Ellipsis:
    # `[...]` is a no-op slice: rebuild with overrides only.
    override_dict = {}
  else:
    batch_shape = linop.batch_shape_tensor()
    override_dict = {}
    for param_name, param_ndims_to_matrix_ndims in linop._experimental_parameter_ndims_to_matrix_ndims.items():  # pylint:disable=protected-access,line-too-long
      param = getattr(linop, param_name)
      # These represent optional `Tensor` parameters.
      if param is not None:
        override_dict[param_name] = nest.map_structure_up_to(
            param, functools.partial(
                _slice_single_param, slices=slices, batch_shape=batch_shape),
            param, param_ndims_to_matrix_ndims)
  override_dict.update(params_overrides)
  parameters = dict(linop.parameters, **override_dict)
  return type(linop)(**parameters)

# --- vendored-diff residue (next files:
# tensorflow/python/ops/linalg/sparse/__init__.py [empty file] and
# tensorflow/python/ops/linalg/sparse/conjugate_gradient.py) ---
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preconditioned Conjugate Gradient."""

import collections

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import while_loop
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export


@tf_export('linalg.experimental.conjugate_gradient')
@dispatch.add_dispatch_support
def conjugate_gradient(operator,
                       rhs,
                       preconditioner=None,
                       x=None,
                       tol=1e-5,
                       max_iter=20,
                       name='conjugate_gradient'):
  r"""Conjugate gradient solver.

  Solves a linear system of equations `A*x = rhs` for self-adjoint, positive
  definite matrix `A` and right-hand side vector `rhs`, using an iterative,
  matrix-free algorithm where the action of the matrix A is represented by
  `operator`. The iteration terminates when either the number of iterations
  exceeds `max_iter` or when the residual norm has been reduced to `tol`
  times its initial value, i.e. \\(||rhs - A x_k|| <= tol ||rhs||\\).

  Args:
    operator: A `LinearOperator` that is self-adjoint and positive definite.
    rhs: A possibly batched vector of shape `[..., N]` containing the
      right-hand side vector.
    preconditioner: A `LinearOperator` that approximates the inverse of `A`.
      An efficient preconditioner could dramatically improve the rate of
      convergence. If `preconditioner` represents matrix `M`(`M` approximates
      `A^{-1}`), the algorithm uses `preconditioner.apply(x)` to estimate
      `A^{-1}x`. For this to be useful, the cost of applying `M` should be
      much lower than computing `A^{-1}` directly.
    x: A possibly batched vector of shape `[..., N]` containing the initial
      guess for the solution.
    tol: A float scalar convergence tolerance.
    max_iter: An integer giving the maximum number of iterations.
    name: A name scope for the operation.

  Returns:
    output: A namedtuple representing the final state with fields:
      - i: A scalar `int32` `Tensor`. Number of iterations executed.
      - x: A rank-1 `Tensor` of shape `[..., N]` containing the computed
          solution.
      - r: A rank-1 `Tensor` of shape `[..., N]` containing the residual
          vector.
      - p: A rank-1 `Tensor` of shape `[..., N]`. `A`-conjugate basis vector.
      - gamma: \\(r \dot M \dot r\\), equivalent to \\(||r||_2^2\\) when
          `preconditioner=None`.

  Raises:
    ValueError: If `operator` is not hinted self-adjoint and positive
      definite.
  """
  if not (operator.is_self_adjoint and operator.is_positive_definite):
    raise ValueError('Expected a self-adjoint, positive definite operator.')

  cg_state = collections.namedtuple('CGState', ['i', 'x', 'r', 'p', 'gamma'])

  def stopping_criterion(i, state):
    # Continue while under the iteration budget and any batch member's
    # residual is above its (scaled) tolerance.
    return math_ops.logical_and(
        i < max_iter,
        math_ops.reduce_any(linalg.norm(state.r, axis=-1) > tol))

  def dot(x, y):
    # Batched inner product <x, y> along the last axis.
    return array_ops.squeeze(
        math_ops.matvec(
            x[..., array_ops.newaxis],
            y, adjoint_a=True), axis=-1)

  def cg_step(i, state):  # pylint: disable=missing-docstring
    z = math_ops.matvec(operator, state.p)
    alpha = state.gamma / dot(state.p, z)
    x = state.x + alpha[..., array_ops.newaxis] * state.p
    r = state.r - alpha[..., array_ops.newaxis] * z
    if preconditioner is None:
      q = r
    else:
      q = preconditioner.matvec(r)
    gamma = dot(r, q)
    beta = gamma / state.gamma
    p = q + beta[..., array_ops.newaxis] * state.p
    return i + 1, cg_state(i + 1, x, r, p, gamma)

  # We now broadcast initial shapes so that we have fixed shapes per iteration.

  with ops.name_scope(name):
    broadcast_shape = array_ops.broadcast_dynamic_shape(
        array_ops.shape(rhs)[:-1],
        operator.batch_shape_tensor())
    if preconditioner is not None:
      broadcast_shape = array_ops.broadcast_dynamic_shape(
          broadcast_shape,
          preconditioner.batch_shape_tensor()
      )
    broadcast_rhs_shape = array_ops.concat([
        broadcast_shape, [array_ops.shape(rhs)[-1]]], axis=-1)
    r0 = array_ops.broadcast_to(rhs, broadcast_rhs_shape)
    # Scale the tolerance by ||rhs|| so the stopping rule is relative.
    tol *= linalg.norm(r0, axis=-1)

    if x is None:
      x = array_ops.zeros(
          broadcast_rhs_shape, dtype=rhs.dtype.base_dtype)
    else:
      # NOTE(review): with a user-supplied `x`, `tol` was already computed
      # from `rhs` (not this residual); confirm that is the intended scaling.
      r0 = rhs - math_ops.matvec(operator, x)
    if preconditioner is None:
      p0 = r0
    else:
      p0 = math_ops.matvec(preconditioner, r0)
    gamma0 = dot(r0, p0)
    i = constant_op.constant(0, dtype=dtypes.int32)
    state = cg_state(i=i, x=x, r=r0, p=p0, gamma=gamma0)
    _, state = while_loop.while_loop(stopping_criterion, cg_step, [i, state])
    return cg_state(
        state.i,
        x=state.x,
        r=state.r,
        p=state.p,
        gamma=state.gamma)

# --- vendored-diff residue (next file:
# tensorflow/python/ops/linalg/sparse/gen_sparse_csr_matrix_ops.py, a
# MACHINE GENERATED wrapper module; not edited here) ---
# """Python wrappers around TensorFlow ops.
#
# This file is MACHINE GENERATED! Do not edit.
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated +_CSRSparseMatrixComponentsOutput = collections.namedtuple( + "CSRSparseMatrixComponents", + ["row_ptrs", "col_inds", "values"]) + + +TV_CSRSparseMatrixComponents_type = TypeVar("TV_CSRSparseMatrixComponents_type", _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64) + +def csr_sparse_matrix_components(csr_sparse_matrix: Annotated[Any, _atypes.Variant], index: Annotated[Any, _atypes.Int32], type: TV_CSRSparseMatrixComponents_type, name=None): + r"""Reads out the CSR components at batch `index`. + + This op is meant only for debugging / testing, and its interface is not expected + to be stable. + + Args: + csr_sparse_matrix: A `Tensor` of type `variant`. + A batched CSRSparseMatrix. + index: A `Tensor` of type `int32`. + The index in `csr_sparse_matrix`'s batch. + type: A `tf.DType` from: `tf.float32, tf.float64, tf.complex64, tf.complex128`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (row_ptrs, col_inds, values). + + row_ptrs: A `Tensor` of type `int32`. + col_inds: A `Tensor` of type `int32`. + values: A `Tensor` of type `type`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CSRSparseMatrixComponents", name, csr_sparse_matrix, index, + "type", type) + _result = _CSRSparseMatrixComponentsOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return csr_sparse_matrix_components_eager_fallback( + csr_sparse_matrix, index, type=type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + type = _execute.make_type(type, "type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CSRSparseMatrixComponents", csr_sparse_matrix=csr_sparse_matrix, + index=index, type=type, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("type", _op._get_attr_type("type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CSRSparseMatrixComponents", _inputs_flat, _attrs, _result) + _result = _CSRSparseMatrixComponentsOutput._make(_result) + return _result + +CSRSparseMatrixComponents = tf_export("raw_ops.CSRSparseMatrixComponents")(_ops.to_raw_op(csr_sparse_matrix_components)) + + +def csr_sparse_matrix_components_eager_fallback(csr_sparse_matrix: Annotated[Any, _atypes.Variant], index: Annotated[Any, _atypes.Int32], type: TV_CSRSparseMatrixComponents_type, name, ctx): + type = _execute.make_type(type, "type") + csr_sparse_matrix = _ops.convert_to_tensor(csr_sparse_matrix, _dtypes.variant) + index = _ops.convert_to_tensor(index, _dtypes.int32) + _inputs_flat = [csr_sparse_matrix, index] + _attrs = ("type", type) + _result = _execute.execute(b"CSRSparseMatrixComponents", 3, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CSRSparseMatrixComponents", _inputs_flat, 
_attrs, _result) + _result = _CSRSparseMatrixComponentsOutput._make(_result) + return _result + + +TV_CSRSparseMatrixToDense_type = TypeVar("TV_CSRSparseMatrixToDense_type", _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64) + +def csr_sparse_matrix_to_dense(sparse_input: Annotated[Any, _atypes.Variant], type: TV_CSRSparseMatrixToDense_type, name=None) -> Annotated[Any, TV_CSRSparseMatrixToDense_type]: + r"""Convert a (possibly batched) CSRSparseMatrix to dense. + + Args: + sparse_input: A `Tensor` of type `variant`. A batched CSRSparseMatrix. + type: A `tf.DType` from: `tf.float32, tf.float64, tf.complex64, tf.complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `type`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CSRSparseMatrixToDense", name, sparse_input, "type", type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return csr_sparse_matrix_to_dense_eager_fallback( + sparse_input, type=type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ type = _execute.make_type(type, "type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CSRSparseMatrixToDense", sparse_input=sparse_input, type=type, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("type", _op._get_attr_type("type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CSRSparseMatrixToDense", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +CSRSparseMatrixToDense = tf_export("raw_ops.CSRSparseMatrixToDense")(_ops.to_raw_op(csr_sparse_matrix_to_dense)) + + +def csr_sparse_matrix_to_dense_eager_fallback(sparse_input: Annotated[Any, _atypes.Variant], type: TV_CSRSparseMatrixToDense_type, name, ctx) -> Annotated[Any, TV_CSRSparseMatrixToDense_type]: + type = _execute.make_type(type, "type") + sparse_input = _ops.convert_to_tensor(sparse_input, _dtypes.variant) + _inputs_flat = [sparse_input] + _attrs = ("type", type) + _result = _execute.execute(b"CSRSparseMatrixToDense", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "CSRSparseMatrixToDense", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_CSRSparseMatrixToSparseTensorOutput = collections.namedtuple( + "CSRSparseMatrixToSparseTensor", + ["indices", "values", "dense_shape"]) + + +TV_CSRSparseMatrixToSparseTensor_type = TypeVar("TV_CSRSparseMatrixToSparseTensor_type", _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64) + +def csr_sparse_matrix_to_sparse_tensor(sparse_matrix: Annotated[Any, _atypes.Variant], type: TV_CSRSparseMatrixToSparseTensor_type, name=None): + r"""Converts a (possibly batched) CSRSparesMatrix to a SparseTensor. + + Args: + sparse_matrix: A `Tensor` of type `variant`. + A (possibly batched) CSRSparseMatrix. + type: A `tf.DType` from: `tf.float32, tf.float64, tf.complex64, tf.complex128`. + name: A name for the operation (optional). 
+ + Returns: + A tuple of `Tensor` objects (indices, values, dense_shape). + + indices: A `Tensor` of type `int64`. + values: A `Tensor` of type `type`. + dense_shape: A `Tensor` of type `int64`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "CSRSparseMatrixToSparseTensor", name, sparse_matrix, "type", + type) + _result = _CSRSparseMatrixToSparseTensorOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return csr_sparse_matrix_to_sparse_tensor_eager_fallback( + sparse_matrix, type=type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + type = _execute.make_type(type, "type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "CSRSparseMatrixToSparseTensor", sparse_matrix=sparse_matrix, + type=type, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("type", _op._get_attr_type("type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "CSRSparseMatrixToSparseTensor", _inputs_flat, _attrs, _result) + _result = _CSRSparseMatrixToSparseTensorOutput._make(_result) + return _result + +CSRSparseMatrixToSparseTensor = tf_export("raw_ops.CSRSparseMatrixToSparseTensor")(_ops.to_raw_op(csr_sparse_matrix_to_sparse_tensor)) + + +def csr_sparse_matrix_to_sparse_tensor_eager_fallback(sparse_matrix: Annotated[Any, _atypes.Variant], type: TV_CSRSparseMatrixToSparseTensor_type, name, ctx): + type = _execute.make_type(type, "type") + sparse_matrix = _ops.convert_to_tensor(sparse_matrix, _dtypes.variant) + _inputs_flat = [sparse_matrix] + _attrs = ("type", type) + _result = _execute.execute(b"CSRSparseMatrixToSparseTensor", 3, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if 
_execute.must_record_gradient(): + _execute.record_gradient( + "CSRSparseMatrixToSparseTensor", _inputs_flat, _attrs, _result) + _result = _CSRSparseMatrixToSparseTensorOutput._make(_result) + return _result + + +TV_DenseToCSRSparseMatrix_T = TypeVar("TV_DenseToCSRSparseMatrix_T", _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64) + +def dense_to_csr_sparse_matrix(dense_input: Annotated[Any, TV_DenseToCSRSparseMatrix_T], indices: Annotated[Any, _atypes.Int64], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Converts a dense tensor to a (possibly batched) CSRSparseMatrix. + + Args: + dense_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `complex64`, `complex128`. + A Dense tensor. + indices: A `Tensor` of type `int64`. Indices of nonzero elements. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DenseToCSRSparseMatrix", name, dense_input, indices) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return dense_to_csr_sparse_matrix_eager_fallback( + dense_input, indices, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DenseToCSRSparseMatrix", dense_input=dense_input, indices=indices, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DenseToCSRSparseMatrix", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +DenseToCSRSparseMatrix = tf_export("raw_ops.DenseToCSRSparseMatrix")(_ops.to_raw_op(dense_to_csr_sparse_matrix)) + + +def dense_to_csr_sparse_matrix_eager_fallback(dense_input: Annotated[Any, TV_DenseToCSRSparseMatrix_T], indices: Annotated[Any, _atypes.Int64], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_T, (dense_input,) = _execute.args_to_matching_eager([dense_input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + indices = _ops.convert_to_tensor(indices, _dtypes.int64) + _inputs_flat = [dense_input, indices] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"DenseToCSRSparseMatrix", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DenseToCSRSparseMatrix", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseMatrixAdd_T = TypeVar("TV_SparseMatrixAdd_T", _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64) + +def sparse_matrix_add(a: Annotated[Any, _atypes.Variant], b: Annotated[Any, _atypes.Variant], alpha: Annotated[Any, TV_SparseMatrixAdd_T], beta: Annotated[Any, TV_SparseMatrixAdd_T], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Sparse addition of two CSR matrices, C = alpha * A + beta * B. + + The gradients of SparseMatrixAdd outputs with respect to alpha and beta are not + currently defined (TensorFlow will return zeros for these entries). + + Args: + a: A `Tensor` of type `variant`. A CSRSparseMatrix. + b: A `Tensor` of type `variant`. A CSRSparseMatrix. 
+ alpha: A `Tensor`. Must be one of the following types: `float32`, `float64`, `complex64`, `complex128`. + A constant scalar. + beta: A `Tensor`. Must have the same type as `alpha`. A constant scalar. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatrixAdd", name, a, b, alpha, beta) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_matrix_add_eager_fallback( + a, b, alpha, beta, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatrixAdd", a=a, b=b, alpha=alpha, beta=beta, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatrixAdd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatrixAdd = tf_export("raw_ops.SparseMatrixAdd")(_ops.to_raw_op(sparse_matrix_add)) + + +def sparse_matrix_add_eager_fallback(a: Annotated[Any, _atypes.Variant], b: Annotated[Any, _atypes.Variant], alpha: Annotated[Any, TV_SparseMatrixAdd_T], beta: Annotated[Any, TV_SparseMatrixAdd_T], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([alpha, beta], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + (alpha, beta) = _inputs_T + a = _ops.convert_to_tensor(a, _dtypes.variant) + b = _ops.convert_to_tensor(b, _dtypes.variant) + _inputs_flat = [a, b, alpha, beta] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"SparseMatrixAdd", 1, inputs=_inputs_flat, + 
attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseMatrixAdd", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseMatrixMatMul_T = TypeVar("TV_SparseMatrixMatMul_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def sparse_matrix_mat_mul(a: Annotated[Any, _atypes.Variant], b: Annotated[Any, TV_SparseMatrixMatMul_T], transpose_a:bool=False, transpose_b:bool=False, adjoint_a:bool=False, adjoint_b:bool=False, transpose_output:bool=False, conjugate_output:bool=False, name=None) -> Annotated[Any, TV_SparseMatrixMatMul_T]: + r"""Matrix-multiplies a sparse matrix with a dense matrix. + + Returns a dense matrix. + For inputs A and B, where A is CSR and B is dense; this op returns a dense C; + + If transpose_output is false, returns: + ``` + C = A . B + ``` + + If transpose_output is `true`, returns: + ``` + C = transpose(A . B) = transpose(B) . transpose(A) + ``` + where the transposition is performed along the two innermost (matrix) + dimensions. + + If conjugate_output is `true`, returns: + ``` + C = conjugate(A . B) = conjugate(A) . conjugate(B) + ``` + + If both conjugate_output and transpose_output are `true`, returns: + ``` + C = conjugate(transpose(A . B)) = conjugate(transpose(B)) . + conjugate(transpose(A)) + ``` + + Args: + a: A `Tensor` of type `variant`. A CSRSparseMatrix. + b: A `Tensor`. A dense tensor. + transpose_a: An optional `bool`. Defaults to `False`. + Indicates whether `a` should be transposed. + transpose_b: An optional `bool`. Defaults to `False`. 
+ Indicates whether `b` should be transposed. + adjoint_a: An optional `bool`. Defaults to `False`. + Indicates whether `a` should be conjugate-transposed. + adjoint_b: An optional `bool`. Defaults to `False`. + Indicates whether `b` should be conjugate-transposed. + transpose_output: An optional `bool`. Defaults to `False`. + Transposes the product of `a` and `b`. + conjugate_output: An optional `bool`. Defaults to `False`. + Conjugates the product of `a` and `b`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `b`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatrixMatMul", name, a, b, "transpose_a", transpose_a, + "transpose_b", transpose_b, "adjoint_a", adjoint_a, "adjoint_b", + adjoint_b, "transpose_output", transpose_output, "conjugate_output", + conjugate_output) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_matrix_mat_mul_eager_fallback( + a, b, transpose_a=transpose_a, transpose_b=transpose_b, + adjoint_a=adjoint_a, adjoint_b=adjoint_b, + transpose_output=transpose_output, + conjugate_output=conjugate_output, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if transpose_a is None: + transpose_a = False + transpose_a = _execute.make_bool(transpose_a, "transpose_a") + if transpose_b is None: + transpose_b = False + transpose_b = _execute.make_bool(transpose_b, "transpose_b") + if adjoint_a is None: + adjoint_a = False + adjoint_a = _execute.make_bool(adjoint_a, "adjoint_a") + if adjoint_b is None: + adjoint_b = False + adjoint_b = _execute.make_bool(adjoint_b, "adjoint_b") + if transpose_output is None: + transpose_output = False + transpose_output = _execute.make_bool(transpose_output, "transpose_output") + if conjugate_output is None: + conjugate_output = False + conjugate_output = _execute.make_bool(conjugate_output, "conjugate_output") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatrixMatMul", a=a, b=b, transpose_a=transpose_a, + transpose_b=transpose_b, adjoint_a=adjoint_a, + adjoint_b=adjoint_b, + transpose_output=transpose_output, + conjugate_output=conjugate_output, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "transpose_a", + _op._get_attr_bool("transpose_a"), "transpose_b", + _op._get_attr_bool("transpose_b"), "adjoint_a", + _op._get_attr_bool("adjoint_a"), "adjoint_b", + _op._get_attr_bool("adjoint_b"), "transpose_output", + _op._get_attr_bool("transpose_output"), "conjugate_output", + _op._get_attr_bool("conjugate_output")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatrixMatMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatrixMatMul = tf_export("raw_ops.SparseMatrixMatMul")(_ops.to_raw_op(sparse_matrix_mat_mul)) + + +def sparse_matrix_mat_mul_eager_fallback(a: Annotated[Any, _atypes.Variant], b: Annotated[Any, TV_SparseMatrixMatMul_T], transpose_a: bool, transpose_b: bool, adjoint_a: bool, adjoint_b: bool, transpose_output: bool, conjugate_output: bool, name, ctx) -> Annotated[Any, TV_SparseMatrixMatMul_T]: + if transpose_a is None: + transpose_a = False + 
transpose_a = _execute.make_bool(transpose_a, "transpose_a") + if transpose_b is None: + transpose_b = False + transpose_b = _execute.make_bool(transpose_b, "transpose_b") + if adjoint_a is None: + adjoint_a = False + adjoint_a = _execute.make_bool(adjoint_a, "adjoint_a") + if adjoint_b is None: + adjoint_b = False + adjoint_b = _execute.make_bool(adjoint_b, "adjoint_b") + if transpose_output is None: + transpose_output = False + transpose_output = _execute.make_bool(transpose_output, "transpose_output") + if conjugate_output is None: + conjugate_output = False + conjugate_output = _execute.make_bool(conjugate_output, "conjugate_output") + _attr_T, (b,) = _execute.args_to_matching_eager([b], ctx, []) + a = _ops.convert_to_tensor(a, _dtypes.variant) + _inputs_flat = [a, b] + _attrs = ("T", _attr_T, "transpose_a", transpose_a, "transpose_b", + transpose_b, "adjoint_a", adjoint_a, "adjoint_b", adjoint_b, + "transpose_output", transpose_output, "conjugate_output", conjugate_output) + _result = _execute.execute(b"SparseMatrixMatMul", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseMatrixMatMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseMatrixMul_T = TypeVar("TV_SparseMatrixMul_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +def sparse_matrix_mul(a: Annotated[Any, _atypes.Variant], b: Annotated[Any, TV_SparseMatrixMul_T], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Element-wise multiplication of a sparse matrix with a dense 
tensor. + + Returns a sparse matrix. + + The dense tensor `b` may be either a scalar; otherwise `a` must be a rank-3 + `SparseMatrix`; in this case `b` must be shaped `[batch_size, 1, 1]` and the + multiply operation broadcasts. + + **NOTE** even if `b` is zero, the sparsity structure of the output does not + change. + + Args: + a: A `Tensor` of type `variant`. A CSRSparseMatrix. + b: A `Tensor`. A dense tensor. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatrixMul", name, a, b) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_matrix_mul_eager_fallback( + a, b, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatrixMul", a=a, b=b, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatrixMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatrixMul = tf_export("raw_ops.SparseMatrixMul")(_ops.to_raw_op(sparse_matrix_mul)) + + +def sparse_matrix_mul_eager_fallback(a: Annotated[Any, _atypes.Variant], b: Annotated[Any, TV_SparseMatrixMul_T], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_T, (b,) = _execute.args_to_matching_eager([b], ctx, []) + a = _ops.convert_to_tensor(a, _dtypes.variant) + _inputs_flat = [a, b] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"SparseMatrixMul", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseMatrixMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def sparse_matrix_nnz(sparse_matrix: Annotated[Any, _atypes.Variant], name=None) -> Annotated[Any, _atypes.Int32]: + r"""Returns the number of nonzeroes of `sparse_matrix`. + + Args: + sparse_matrix: A `Tensor` of type `variant`. A CSRSparseMatrix. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatrixNNZ", name, sparse_matrix) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_matrix_nnz_eager_fallback( + sparse_matrix, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatrixNNZ", sparse_matrix=sparse_matrix, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatrixNNZ", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatrixNNZ = tf_export("raw_ops.SparseMatrixNNZ")(_ops.to_raw_op(sparse_matrix_nnz)) + + +def sparse_matrix_nnz_eager_fallback(sparse_matrix: Annotated[Any, _atypes.Variant], name, ctx) -> Annotated[Any, _atypes.Int32]: + sparse_matrix = _ops.convert_to_tensor(sparse_matrix, _dtypes.variant) + _inputs_flat = [sparse_matrix] + _attrs = None + _result = _execute.execute(b"SparseMatrixNNZ", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseMatrixNNZ", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def sparse_matrix_ordering_amd(input: Annotated[Any, _atypes.Variant], name=None) -> Annotated[Any, _atypes.Int32]: + r"""Computes the Approximate Minimum Degree (AMD) ordering of `input`. + + Computes the Approximate Minimum Degree (AMD) ordering for a sparse matrix. + + The returned permutation may be used to permute the rows and columns of the + given sparse matrix. This typically results in permuted sparse matrix's sparse + Cholesky (or other decompositions) in having fewer zero fill-in compared to + decomposition of the original matrix. + + The input sparse matrix may have rank 2 or rank 3. The output Tensor, + representing would then have rank 1 or 2 respectively, with the same batch + shape as the input. + + Each component of the input sparse matrix must represent a square symmetric + matrix; only the lower triangular part of the matrix is read. The values of the + sparse matrix does not affect the returned permutation, only the sparsity + pattern of the sparse matrix is used. 
Hence, a single AMD ordering may be + reused for the Cholesky decompositions of sparse matrices with the same sparsity + pattern but with possibly different values. + + Each batch component of the output permutation represents a permutation of `N` + elements, where the input sparse matrix components each have `N` rows. That is, + the component contains each of the integers `{0, .. N-1}` exactly once. The + `i`th element represents the row index that the `i`th row maps to. + + Usage example: + + ```python + from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops + + a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]]) + a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32) + a_dense_shape = [4, 4] + + with tf.Session() as sess: + # Define (COO format) SparseTensor over Numpy array. + a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape) + + # Convert SparseTensors to CSR SparseMatrix. + a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix( + a_st.indices, a_st.values, a_st.dense_shape) + + # Obtain the AMD Ordering for the CSR SparseMatrix. + ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(sparse_matrix) + + ordering_amd_value = sess.run(ordering_amd) + ``` + + `ordering_amd_value` stores the AMD ordering: `[1 2 3 0]`. + + input: A `CSRSparseMatrix`. + + Args: + input: A `Tensor` of type `variant`. A `CSRSparseMatrix`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int32`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatrixOrderingAMD", name, input) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_matrix_ordering_amd_eager_fallback( + input, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatrixOrderingAMD", input=input, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatrixOrderingAMD", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatrixOrderingAMD = tf_export("raw_ops.SparseMatrixOrderingAMD")(_ops.to_raw_op(sparse_matrix_ordering_amd)) + + +def sparse_matrix_ordering_amd_eager_fallback(input: Annotated[Any, _atypes.Variant], name, ctx) -> Annotated[Any, _atypes.Int32]: + input = _ops.convert_to_tensor(input, _dtypes.variant) + _inputs_flat = [input] + _attrs = None + _result = _execute.execute(b"SparseMatrixOrderingAMD", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseMatrixOrderingAMD", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseMatrixSoftmax_type = TypeVar("TV_SparseMatrixSoftmax_type", _atypes.Float32, _atypes.Float64) + +def sparse_matrix_softmax(logits: Annotated[Any, _atypes.Variant], type: TV_SparseMatrixSoftmax_type, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Calculates the softmax of a CSRSparseMatrix. + + Calculate the softmax of the innermost dimensions of a SparseMatrix. 
+ + Missing values are treated as `-inf` (i.e., logits of zero probability); and + the output has the same sparsity structure as the input (though missing values + in the output may now be treated as having probability zero). + + Args: + logits: A `Tensor` of type `variant`. A CSRSparseMatrix. + type: A `tf.DType` from: `tf.float32, tf.float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatrixSoftmax", name, logits, "type", type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_matrix_softmax_eager_fallback( + logits, type=type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ type = _execute.make_type(type, "type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatrixSoftmax", logits=logits, type=type, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("type", _op._get_attr_type("type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatrixSoftmax", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatrixSoftmax = tf_export("raw_ops.SparseMatrixSoftmax")(_ops.to_raw_op(sparse_matrix_softmax)) + + +def sparse_matrix_softmax_eager_fallback(logits: Annotated[Any, _atypes.Variant], type: TV_SparseMatrixSoftmax_type, name, ctx) -> Annotated[Any, _atypes.Variant]: + type = _execute.make_type(type, "type") + logits = _ops.convert_to_tensor(logits, _dtypes.variant) + _inputs_flat = [logits] + _attrs = ("type", type) + _result = _execute.execute(b"SparseMatrixSoftmax", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseMatrixSoftmax", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseMatrixSoftmaxGrad_type = TypeVar("TV_SparseMatrixSoftmaxGrad_type", _atypes.Float32, _atypes.Float64) + +def sparse_matrix_softmax_grad(softmax: Annotated[Any, _atypes.Variant], grad_softmax: Annotated[Any, _atypes.Variant], type: TV_SparseMatrixSoftmaxGrad_type, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Calculates the gradient of the SparseMatrixSoftmax op. + + Args: + softmax: A `Tensor` of type `variant`. A CSRSparseMatrix. + grad_softmax: A `Tensor` of type `variant`. The gradient of `softmax`. + type: A `tf.DType` from: `tf.float32, tf.float64`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatrixSoftmaxGrad", name, softmax, grad_softmax, "type", + type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_matrix_softmax_grad_eager_fallback( + softmax, grad_softmax, type=type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + type = _execute.make_type(type, "type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatrixSoftmaxGrad", softmax=softmax, grad_softmax=grad_softmax, + type=type, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("type", _op._get_attr_type("type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatrixSoftmaxGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatrixSoftmaxGrad = tf_export("raw_ops.SparseMatrixSoftmaxGrad")(_ops.to_raw_op(sparse_matrix_softmax_grad)) + + +def sparse_matrix_softmax_grad_eager_fallback(softmax: Annotated[Any, _atypes.Variant], grad_softmax: Annotated[Any, _atypes.Variant], type: TV_SparseMatrixSoftmaxGrad_type, name, ctx) -> Annotated[Any, _atypes.Variant]: + type = _execute.make_type(type, "type") + softmax = _ops.convert_to_tensor(softmax, _dtypes.variant) + grad_softmax = _ops.convert_to_tensor(grad_softmax, _dtypes.variant) + _inputs_flat = [softmax, grad_softmax] + _attrs = ("type", type) + _result = _execute.execute(b"SparseMatrixSoftmaxGrad", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseMatrixSoftmaxGrad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseMatrixSparseCholesky_type = 
TypeVar("TV_SparseMatrixSparseCholesky_type", _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64) + +def sparse_matrix_sparse_cholesky(input: Annotated[Any, _atypes.Variant], permutation: Annotated[Any, _atypes.Int32], type: TV_SparseMatrixSparseCholesky_type, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Computes the sparse Cholesky decomposition of `input`. + + Computes the Sparse Cholesky decomposition of a sparse matrix, with the given + fill-in reducing permutation. + + The input sparse matrix and the fill-in reducing permutation `permutation` must + have compatible shapes. If the sparse matrix has rank 3; with the batch + dimension `B`, then the `permutation` must be of rank 2; with the same batch + dimension `B`. There is no support for broadcasting. + + Furthermore, each component vector of `permutation` must be of length `N`, + containing each of the integers {0, 1, ..., N - 1} exactly once, where `N` is + the number of rows of each component of the sparse matrix. + + Each component of the input sparse matrix must represent a symmetric positive + definite (SPD) matrix; although only the lower triangular part of the matrix is + read. If any individual component is not SPD, then an InvalidArgument error is + thrown. + + The returned sparse matrix has the same dense shape as the input sparse matrix. + For each component `A` of the input sparse matrix, the corresponding output + sparse matrix represents `L`, the lower triangular Cholesky factor satisfying + the following identity: + + ``` + A = L * Lt + ``` + + where Lt denotes the transpose of L (or its conjugate transpose, if `type` is + `complex64` or `complex128`). + + The `type` parameter denotes the type of the matrix elements. The supported + types are: `float32`, `float64`, `complex64` and `complex128`. 
+ + Usage example: + + ```python + from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops + + a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]]) + a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32) + a_dense_shape = [4, 4] + + with tf.Session() as sess: + # Define (COO format) SparseTensor over Numpy array. + a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape) + + # Convert SparseTensors to CSR SparseMatrix. + a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix( + a_st.indices, a_st.values, a_st.dense_shape) + + # Obtain the Sparse Cholesky factor using AMD Ordering for reducing zero + # fill-in (number of structural non-zeros in the sparse Cholesky factor). + ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(sparse_matrix) + cholesky_sparse_matrices = ( + sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky( + sparse_matrix, ordering_amd, type=tf.float32)) + + # Convert the CSRSparseMatrix Cholesky factor to a dense Tensor + dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense( + cholesky_sparse_matrices, tf.float32) + + # Evaluate the dense Tensor value. + dense_cholesky_value = sess.run(dense_cholesky) + ``` + + `dense_cholesky_value` stores the dense Cholesky factor: + + ``` + [[ 1. 0. 0. 0.] + [ 0. 1.41 0. 0.] + [ 0. 0.70 1.58 0.] + [ 0. 0. 0. 2.]] + ``` + + + input: A `CSRSparseMatrix`. + permutation: A `Tensor`. + type: The type of `input`. + + Args: + input: A `Tensor` of type `variant`. A `CSRSparseMatrix`. + permutation: A `Tensor` of type `int32`. + A fill-in reducing permutation matrix. + type: A `tf.DType` from: `tf.float32, tf.float64, tf.complex64, tf.complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatrixSparseCholesky", name, input, permutation, "type", + type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_matrix_sparse_cholesky_eager_fallback( + input, permutation, type=type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + type = _execute.make_type(type, "type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatrixSparseCholesky", input=input, permutation=permutation, + type=type, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("type", _op._get_attr_type("type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatrixSparseCholesky", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatrixSparseCholesky = tf_export("raw_ops.SparseMatrixSparseCholesky")(_ops.to_raw_op(sparse_matrix_sparse_cholesky)) + + +def sparse_matrix_sparse_cholesky_eager_fallback(input: Annotated[Any, _atypes.Variant], permutation: Annotated[Any, _atypes.Int32], type: TV_SparseMatrixSparseCholesky_type, name, ctx) -> Annotated[Any, _atypes.Variant]: + type = _execute.make_type(type, "type") + input = _ops.convert_to_tensor(input, _dtypes.variant) + permutation = _ops.convert_to_tensor(permutation, _dtypes.int32) + _inputs_flat = [input, permutation] + _attrs = ("type", type) + _result = _execute.execute(b"SparseMatrixSparseCholesky", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseMatrixSparseCholesky", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseMatrixSparseMatMul_type = 
TypeVar("TV_SparseMatrixSparseMatMul_type", _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64) + +def sparse_matrix_sparse_mat_mul(a: Annotated[Any, _atypes.Variant], b: Annotated[Any, _atypes.Variant], type: TV_SparseMatrixSparseMatMul_type, transpose_a:bool=False, transpose_b:bool=False, adjoint_a:bool=False, adjoint_b:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Sparse-matrix-multiplies two CSR matrices `a` and `b`. + + Performs a matrix multiplication of a sparse matrix `a` with a sparse matrix + `b`; returns a sparse matrix `a * b`, unless either `a` or `b` is transposed or + adjointed. + + Each matrix may be transposed or adjointed (conjugated and transposed) + according to the Boolean parameters `transpose_a`, `adjoint_a`, `transpose_b` + and `adjoint_b`. At most one of `transpose_a` or `adjoint_a` may be True. + Similarly, at most one of `transpose_b` or `adjoint_b` may be True. + + The inputs must have compatible shapes. That is, the inner dimension of `a` + must be equal to the outer dimension of `b`. This requirement is adjusted + according to whether either `a` or `b` is transposed or adjointed. + + The `type` parameter denotes the type of the matrix elements. Both `a` and `b` + must have the same type. The supported types are: `float32`, `float64`, + `complex64` and `complex128`. + + Both `a` and `b` must have the same rank. Broadcasting is not supported. If they + have rank 3, each batch of 2D CSRSparseMatrices within `a` and `b` must have the + same dense shape. + + The sparse matrix product may have numeric (non-structural) zeros. + TODO(anudhyan): Consider adding a boolean attribute to control whether to prune + zeros. 
+ + Usage example: + + ```python + from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops + + a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]]) + a_values = np.array([1.0, 5.0, -1.0, -2.0], np.float32) + a_dense_shape = [4, 5] + + b_indices = np.array([[0, 0], [3, 0], [3, 1]]) + b_values = np.array([2.0, 7.0, 8.0], np.float32) + b_dense_shape = [5, 3] + + with tf.Session() as sess: + # Define (COO format) Sparse Tensors over Numpy arrays + a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape) + b_st = tf.sparse.SparseTensor(b_indices, b_values, b_dense_shape) + + # Convert SparseTensors to CSR SparseMatrix + a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix( + a_st.indices, a_st.values, a_st.dense_shape) + b_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix( + b_st.indices, b_st.values, b_st.dense_shape) + + # Compute the CSR SparseMatrix matrix multiplication + c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul( + a=a_sm, b=b_sm, type=tf.float32) + + # Convert the CSR SparseMatrix product to a dense Tensor + c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense( + c_sm, tf.float32) + # Evaluate the dense Tensor value + c_sm_dense_value = sess.run(c_sm_dense) + ``` + + `c_sm_dense_value` stores the dense matrix product: + + ``` + [[ 2. 0. 0.] + [ 0. 0. 0.] + [ 35. 40. 0.] + [ -4. 0. 0.]] + ``` + + a: A `CSRSparseMatrix`. + b: A `CSRSparseMatrix` with the same type and rank as `a`. + type: The type of both `a` and `b`. + transpose_a: If True, `a` transposed before multiplication. + transpose_b: If True, `b` transposed before multiplication. + adjoint_a: If True, `a` adjointed before multiplication. + adjoint_b: If True, `b` adjointed before multiplication. + + Args: + a: A `Tensor` of type `variant`. A CSRSparseMatrix. + b: A `Tensor` of type `variant`. A CSRSparseMatrix. + type: A `tf.DType` from: `tf.float32, tf.float64, tf.complex64, tf.complex128`. + transpose_a: An optional `bool`. 
Defaults to `False`. + Indicates whether `a` should be transposed. + transpose_b: An optional `bool`. Defaults to `False`. + Indicates whether `b` should be transposed. + adjoint_a: An optional `bool`. Defaults to `False`. + Indicates whether `a` should be conjugate-transposed. + adjoint_b: An optional `bool`. Defaults to `False`. + Indicates whether `b` should be conjugate-transposed. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatrixSparseMatMul", name, a, b, "type", type, + "transpose_a", transpose_a, "transpose_b", transpose_b, "adjoint_a", + adjoint_a, "adjoint_b", adjoint_b) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_matrix_sparse_mat_mul_eager_fallback( + a, b, type=type, transpose_a=transpose_a, transpose_b=transpose_b, + adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ type = _execute.make_type(type, "type") + if transpose_a is None: + transpose_a = False + transpose_a = _execute.make_bool(transpose_a, "transpose_a") + if transpose_b is None: + transpose_b = False + transpose_b = _execute.make_bool(transpose_b, "transpose_b") + if adjoint_a is None: + adjoint_a = False + adjoint_a = _execute.make_bool(adjoint_a, "adjoint_a") + if adjoint_b is None: + adjoint_b = False + adjoint_b = _execute.make_bool(adjoint_b, "adjoint_b") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatrixSparseMatMul", a=a, b=b, type=type, + transpose_a=transpose_a, + transpose_b=transpose_b, + adjoint_a=adjoint_a, adjoint_b=adjoint_b, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("type", _op._get_attr_type("type"), "transpose_a", + _op._get_attr_bool("transpose_a"), "transpose_b", + _op._get_attr_bool("transpose_b"), "adjoint_a", + _op._get_attr_bool("adjoint_a"), "adjoint_b", + _op._get_attr_bool("adjoint_b")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatrixSparseMatMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatrixSparseMatMul = tf_export("raw_ops.SparseMatrixSparseMatMul")(_ops.to_raw_op(sparse_matrix_sparse_mat_mul)) + + +def sparse_matrix_sparse_mat_mul_eager_fallback(a: Annotated[Any, _atypes.Variant], b: Annotated[Any, _atypes.Variant], type: TV_SparseMatrixSparseMatMul_type, transpose_a: bool, transpose_b: bool, adjoint_a: bool, adjoint_b: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + type = _execute.make_type(type, "type") + if transpose_a is None: + transpose_a = False + transpose_a = _execute.make_bool(transpose_a, "transpose_a") + if transpose_b is None: + transpose_b = False + transpose_b = _execute.make_bool(transpose_b, "transpose_b") + if adjoint_a is None: + adjoint_a = False + adjoint_a = _execute.make_bool(adjoint_a, "adjoint_a") + if adjoint_b is None: + adjoint_b = False + adjoint_b = 
_execute.make_bool(adjoint_b, "adjoint_b") + a = _ops.convert_to_tensor(a, _dtypes.variant) + b = _ops.convert_to_tensor(b, _dtypes.variant) + _inputs_flat = [a, b] + _attrs = ("type", type, "transpose_a", transpose_a, "transpose_b", + transpose_b, "adjoint_a", adjoint_a, "adjoint_b", adjoint_b) + _result = _execute.execute(b"SparseMatrixSparseMatMul", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseMatrixSparseMatMul", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseMatrixTranspose_type = TypeVar("TV_SparseMatrixTranspose_type", _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64) + +def sparse_matrix_transpose(input: Annotated[Any, _atypes.Variant], type: TV_SparseMatrixTranspose_type, conjugate:bool=False, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Transposes the inner (matrix) dimensions of a CSRSparseMatrix. + + Transposes the inner (matrix) dimensions of a SparseMatrix and optionally + conjugates its values. + + Args: + input: A `Tensor` of type `variant`. A CSRSparseMatrix. + type: A `tf.DType` from: `tf.float32, tf.float64, tf.complex64, tf.complex128`. + conjugate: An optional `bool`. Defaults to `False`. + Indicates whether `input` should be conjugated. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatrixTranspose", name, input, "conjugate", conjugate, + "type", type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_matrix_transpose_eager_fallback( + input, conjugate=conjugate, type=type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + type = _execute.make_type(type, "type") + if conjugate is None: + conjugate = False + conjugate = _execute.make_bool(conjugate, "conjugate") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatrixTranspose", input=input, type=type, conjugate=conjugate, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("conjugate", _op._get_attr_bool("conjugate"), "type", + _op._get_attr_type("type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatrixTranspose", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatrixTranspose = tf_export("raw_ops.SparseMatrixTranspose")(_ops.to_raw_op(sparse_matrix_transpose)) + + +def sparse_matrix_transpose_eager_fallback(input: Annotated[Any, _atypes.Variant], type: TV_SparseMatrixTranspose_type, conjugate: bool, name, ctx) -> Annotated[Any, _atypes.Variant]: + type = _execute.make_type(type, "type") + if conjugate is None: + conjugate = False + conjugate = _execute.make_bool(conjugate, "conjugate") + input = _ops.convert_to_tensor(input, _dtypes.variant) + _inputs_flat = [input] + _attrs = ("conjugate", conjugate, "type", type) + _result = _execute.execute(b"SparseMatrixTranspose", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + 
"SparseMatrixTranspose", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseMatrixZeros_type = TypeVar("TV_SparseMatrixZeros_type", _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64) + +def sparse_matrix_zeros(dense_shape: Annotated[Any, _atypes.Int64], type: TV_SparseMatrixZeros_type, name=None) -> Annotated[Any, _atypes.Variant]: + r"""Creates an all-zeros CSRSparseMatrix with shape `dense_shape`. + + Args: + dense_shape: A `Tensor` of type `int64`. The desired matrix shape. + type: A `tf.DType` from: `tf.float32, tf.float64, tf.complex64, tf.complex128`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseMatrixZeros", name, dense_shape, "type", type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_matrix_zeros_eager_fallback( + dense_shape, type=type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ type = _execute.make_type(type, "type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseMatrixZeros", dense_shape=dense_shape, type=type, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("type", _op._get_attr_type("type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseMatrixZeros", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseMatrixZeros = tf_export("raw_ops.SparseMatrixZeros")(_ops.to_raw_op(sparse_matrix_zeros)) + + +def sparse_matrix_zeros_eager_fallback(dense_shape: Annotated[Any, _atypes.Int64], type: TV_SparseMatrixZeros_type, name, ctx) -> Annotated[Any, _atypes.Variant]: + type = _execute.make_type(type, "type") + dense_shape = _ops.convert_to_tensor(dense_shape, _dtypes.int64) + _inputs_flat = [dense_shape] + _attrs = ("type", type) + _result = _execute.execute(b"SparseMatrixZeros", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "SparseMatrixZeros", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_SparseTensorToCSRSparseMatrix_T = TypeVar("TV_SparseTensorToCSRSparseMatrix_T", _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64) + +def sparse_tensor_to_csr_sparse_matrix(indices: Annotated[Any, _atypes.Int64], values: Annotated[Any, TV_SparseTensorToCSRSparseMatrix_T], dense_shape: Annotated[Any, _atypes.Int64], name=None) -> Annotated[Any, _atypes.Variant]: + r"""Converts a SparseTensor to a (possibly batched) CSRSparseMatrix. + + Args: + indices: A `Tensor` of type `int64`. SparseTensor indices. + values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `complex64`, `complex128`. + SparseTensor values. + dense_shape: A `Tensor` of type `int64`. SparseTensor dense shape. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "SparseTensorToCSRSparseMatrix", name, indices, values, + dense_shape) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return sparse_tensor_to_csr_sparse_matrix_eager_fallback( + indices, values, dense_shape, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "SparseTensorToCSRSparseMatrix", indices=indices, values=values, + dense_shape=dense_shape, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "SparseTensorToCSRSparseMatrix", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +SparseTensorToCSRSparseMatrix = tf_export("raw_ops.SparseTensorToCSRSparseMatrix")(_ops.to_raw_op(sparse_tensor_to_csr_sparse_matrix)) + + +def sparse_tensor_to_csr_sparse_matrix_eager_fallback(indices: Annotated[Any, _atypes.Int64], values: Annotated[Any, TV_SparseTensorToCSRSparseMatrix_T], dense_shape: Annotated[Any, _atypes.Int64], name, ctx) -> Annotated[Any, _atypes.Variant]: + _attr_T, (values,) = _execute.args_to_matching_eager([values], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]) + indices = _ops.convert_to_tensor(indices, _dtypes.int64) + dense_shape = _ops.convert_to_tensor(dense_shape, _dtypes.int64) + _inputs_flat = [indices, values, dense_shape] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"SparseTensorToCSRSparseMatrix", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + 
"SparseTensorToCSRSparseMatrix", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/sparse/sparse.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/sparse/sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..1162ab04e359623126f8f2dd38004275a255c01f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/sparse/sparse.py @@ -0,0 +1,26 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Public API for tf.linalg.sparse namespace.""" + +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.python.ops.linalg.sparse.conjugate_gradient import conjugate_gradient +from tensorflow.python.ops.linalg.sparse.sparse_csr_matrix_grad import * +from tensorflow.python.ops.linalg.sparse.sparse_csr_matrix_ops import * +# pylint: enable=wildcard-import + +__all__ = [ + 'conjugate_gradient' +] diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_grad.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..dc931e5d6f756c51035e02bdf187cf7c3c879b18 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_grad.py @@ -0,0 +1,364 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""CSR Sparse Matrix Gradients.""" + +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import sparse_ops +from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops + + +@ops.RegisterGradient("DenseToCSRSparseMatrix") +def _DenseToCSRSparseMatrixGrad(op: ops.Operation, grad): + """Gradient for dense_to_csr_sparse_matrix op.""" + grad_values = ( + sparse_csr_matrix_ops.csr_sparse_matrix_to_dense( + grad, type=op.get_attr("T"))) + # inputs to fw op were: params, indices. + return (grad_values, None) + + +@ops.RegisterGradient("CSRSparseMatrixToDense") +def _CSRSparseMatrixToDenseGrad(op: ops.Operation, grad): + """Gradient for csr_sparse_matrix_to_dense op.""" + coo_sparse_tensor = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor( + op.inputs[0], type=grad.dtype) + return sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix( + indices=coo_sparse_tensor.indices, + values=array_ops.gather_nd(grad, coo_sparse_tensor.indices), + dense_shape=grad.shape) + + +@ops.RegisterGradient("SparseTensorToCSRSparseMatrix") +def _SparseTensorToCSRSparseMatrixGrad(op: ops.Operation, grad): + """Gradient for sparse_tensor_to_csr_sparse_matrix op.""" + grad_values = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor( + grad, type=op.get_attr("T")).values + return (None, grad_values, None) + + +@ops.RegisterGradient("CSRSparseMatrixToSparseTensor") +def _CSRSparseMatrixToSparseTensorGrad(op: ops.Operation, *grads): + """Gradient for csr_sparse_matrix_to_sparse_tensor op.""" + return sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix( + indices=op.outputs[0], values=grads[1], dense_shape=op.outputs[2]) + + +ops.NotDifferentiable("SparseMatrixNNZ") + 
+ops.NotDifferentiable("SparseMatrixZeros") + + +def _PruneSparseTensor(unpruned, pruned_pattern): + """Helper function to prune COO sparse tensor. + + Given two sparse tensors 'unpruned' and 'pruned_pattern', generates another + sparse tensor with indices and values fron 'unpruned' only if its indices also + occur in pruned_pattern. + + Args: + unpruned: COO matrix with unpruned indices + pruned_pattern: COO matrix with pruned pattern. + + TODO(tabakg): This is far from optimal. Consider a C++ implementation. + + Returns: + Indices, values, and dense_shape of the pruned matrix. + """ + pruned_indices = sparse_ops.sparse_reshape( + pruned_pattern, shape=(-1,)).indices[..., 0] + unpruned_indices = sparse_ops.sparse_reshape( + unpruned, shape=(-1,)).indices[..., 0] + best_match = array_ops.searchsorted(unpruned_indices, pruned_indices) + keep_indices = array_ops.gather( + best_match, + array_ops.where( + math_ops.equal( + array_ops.gather(unpruned_indices, best_match), pruned_indices))) + return (array_ops.gather_nd(unpruned.indices, keep_indices), + array_ops.gather_nd(unpruned.values, + keep_indices), pruned_pattern.dense_shape) + + +def _PruneCSRMatrix(unpruned, pruned_pattern): + """TODO(tabakg): Consider re-writing in C++.""" + _, dtype = sparse_csr_matrix_ops.dense_shape_and_type(pruned_pattern) + coo_unpruned = sparse_tensor.SparseTensor( + *sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor( + unpruned, type=dtype)) + coo_pruned_pattern = sparse_tensor.SparseTensor( + *sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor( + pruned_pattern, type=dtype)) + return sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix( + *_PruneSparseTensor(coo_unpruned, coo_pruned_pattern)) + + +@ops.RegisterGradient("SparseMatrixAdd") +def _SparseMatrixAddGrad(op: ops.Operation, grad): + """Gradient for sparse_matrix_add op.""" + # input to sparse_matrix_add is (a, b, alpha, beta) + # with a, b CSR and alpha beta scalars. 
+ # output is: alpha * a + beta * b + + # d(a*A + b*B)/dA . grad = a * grad + + # May have gotten the transposes wrong below. + # d(a*A + b*B)/da . grad = tr(A' . grad) + + # For now, only implement gradients w.r.t. A and B. + # TODO(ebrevdo): Implement reduce_sum for SparseMatrix so that we + # can implement gradients w.r.t. a and b. + (a_csr, b_csr, alpha, beta) = op.inputs + return (sparse_csr_matrix_ops.sparse_matrix_mul( + _PruneCSRMatrix(grad, a_csr), alpha), + sparse_csr_matrix_ops.sparse_matrix_mul( + _PruneCSRMatrix(grad, b_csr), beta), None, None) + + +def _PrunedDenseMatrixMultiplication(a, + b, + indices, + transpose_a=False, + adjoint_a=False, + transpose_b=False, + adjoint_b=False): + """Multiplies two dense matrices at selected indices. + + The two inputs `a` and `b` must have matching rank (2 or 3). If using rank 3, + the first rank is used for the batch number. The last two dimensions should + also be compatible for matrix multiplication. + + TODO(tabakg): Consider C++ implementation. There is also a more efficient way + to handle transposes here. + + Args: + a: The left dense matrix (or batched matrices). + b: The right dense matrix (or batched matrices). + indices: The selected output indices where values should be produced. Other + indices will be pruned (not computed in the first place). Indices are + specified as a tensor of shape (length, rank), where length is the number + of entries and rank is the rank of the dense inputs (2 or 3). + transpose_a: Whether to transpose a. + adjoint_a: Whether to take the conjugate transpose of a. + transpose_b: Whether to transpose b. + adjoint_b: Whether to take the conjugate transpose of b. + + Returns: + A CSR matrix. 
+ """ + transpose_a = transpose_a or adjoint_a + transpose_b = transpose_b or adjoint_b + + a = math_ops.conj(a) if adjoint_a else a + b = math_ops.conj(b) if adjoint_b else b + + rank = len(a.shape) + dense_shape = (a.shape[-1] if transpose_a else a.shape[-2], + b.shape[-2] if transpose_b else b.shape[-1]) + if rank == 2: + rows = indices[:, 0] + cols = indices[:, 1] + transpose = array_ops.transpose + gather_op = array_ops.gather + elif rank == 3: + dense_shape = (a.shape[0],) + dense_shape + rows = indices[:, :2] + cols = array_ops_stack.stack([indices[:, 0], indices[:, 2]], axis=1) + transpose = lambda x: array_ops.transpose(x, perm=[0, 2, 1]) + gather_op = array_ops.gather_nd + + a_rows = gather_op(transpose(a) if transpose_a else a, indices=rows) + b_cols = gather_op(b if transpose_b else transpose(b), indices=cols) + values = math_ops.reduce_sum(a_rows * b_cols, axis=1) + + return sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix( + indices=indices, values=values, dense_shape=dense_shape) + + +@ops.RegisterGradient("SparseMatrixTranspose") +def _SparseMatrixTransposeGrad(op: ops.Operation, grad): + """Gradient for sparse_matrix_transpose op.""" + return sparse_csr_matrix_ops.sparse_matrix_transpose( + grad, type=op.get_attr("type"), conjugate=op.get_attr("conjugate")) + + +@ops.RegisterGradient("SparseMatrixSoftmax") +def _SparseMatrixSoftmaxGrad(op: ops.Operation, grad_softmax): + """Gradient for sparse_matrix_softmax op.""" + softmax = op.outputs[0] + return sparse_csr_matrix_ops.sparse_matrix_softmax_grad( + softmax, grad_softmax, type=op.get_attr("type")) + + +@ops.RegisterGradient("SparseMatrixMatMul") +def _SparseMatrixMatMulGrad(op: ops.Operation, grad): + """Gradient for sparse_matrix_mat_mul op.""" + # input to sparse_matrix_mat_mul is (A, B) with CSR A and dense B. + # Output is dense: + # C = opA(A) . opB(B) if transpose_output = false + # C = (opA(A) . opB(B))' = opB(B)' . opA(A)' if transpose_output = true. 
+ # where opA = transpose if transpose_a = True else identity + # and opB = transpose if transpose_b = True else identity + + t_a = op.get_attr("transpose_a") + t_b = op.get_attr("transpose_b") + adj_a = op.get_attr("adjoint_a") + adj_b = op.get_attr("adjoint_b") + transpose_output = op.get_attr("transpose_output") + conjugate_output = op.get_attr("conjugate_output") + a = op.inputs[0] # sparse matrix + b = op.inputs[1] # dense matrix + conj = math_ops.conj + sparse_matmul = sparse_csr_matrix_ops.sparse_matrix_mat_mul + + def matmul(x, y, **kwargs): # pylint: disable=invalid-name + return _PrunedDenseMatrixMultiplication( + x, + y, + indices=sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor( + a, type=x.dtype).indices, + **kwargs) + + if conjugate_output: + grad = conj(grad) + if not transpose_output: + # C = opA(A) . opB(B) + if not adj_a and not adj_b: + a = conj(a) + b = conj(b) + if not t_a: + grad_a = matmul(grad, b, transpose_b=not t_b) + else: + grad_a = matmul(b, grad, transpose_a=t_b, transpose_b=True) + grad_b = sparse_matmul(a, grad, transpose_a=not t_a, transpose_output=t_b) + elif not t_a and not t_b: + if not adj_a: + grad_a = matmul(grad, b, adjoint_b=not adj_b) + else: + grad_a = matmul(b, grad, adjoint_a=adj_b, adjoint_b=True) + grad_b = sparse_matmul( + a, + grad, + adjoint_a=not adj_a, + transpose_output=adj_b, + conjugate_output=adj_b) + elif adj_a and t_b: + grad_a = matmul(b, grad, transpose_a=True, adjoint_b=True) + grad_b = sparse_matmul(a, grad, transpose_output=True) + elif t_a and adj_b: + grad_a = matmul(b, grad, transpose_a=True, transpose_b=True) + grad_b = sparse_matmul( + conj(a), grad, transpose_output=True, conjugate_output=True) + else: + # C = (opA(A) . opB(B))' = opB(B)' . 
opA(A)' + if not adj_a and not adj_b: + a = conj(a) + b = conj(b) + if not t_a: + grad_a = matmul(grad, b, transpose_a=True, transpose_b=not t_b) + else: + grad_a = matmul(b, grad, transpose_a=t_b) + grad_b = sparse_matmul( + a, grad, transpose_a=not t_a, transpose_b=True, transpose_output=t_b) + elif not t_a and not t_b: + if not adj_a: + grad_a = matmul(grad, b, transpose_a=True, adjoint_b=not adj_b) + else: + grad_a = matmul(b, conj(grad), adjoint_a=adj_b) + grad_b = sparse_matmul( + a, + grad, + adjoint_a=not adj_a, + transpose_b=True, + transpose_output=adj_b, + conjugate_output=adj_b) + elif adj_a and t_b: + grad_a = matmul(b, conj(grad), transpose_a=True) + grad_b = sparse_matmul(a, grad, transpose_b=True, transpose_output=True) + elif t_a and adj_b: + grad_a = matmul(b, grad, transpose_a=True) + grad_b = sparse_matmul(a, grad, adjoint_b=True, transpose_output=True) + + return (grad_a, grad_b) + + +@ops.RegisterGradient("SparseMatrixSparseMatMul") +def _SparseMatrixSparseMatMulGrad(op: ops.Operation, grad): + """Gradient for sparse_matrix_sparse_mat_mul op.""" + t_a = op.get_attr("transpose_a") + t_b = op.get_attr("transpose_b") + adj_a = op.get_attr("adjoint_a") + adj_b = op.get_attr("adjoint_b") + dtype = op.get_attr("type") + + # input to sparse_matrix_sparse_mat_mul is (A, B) with CSR A and B. + # Output is CSR: + # C = opA(A) . 
opB(B) + # where opA = transpose if transpose_a = True else identity + # and opB = transpose if transpose_b = True else identity + a = op.inputs[0] + b = op.inputs[1] + conj = math_ops.conj + matmul = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul + if not t_a and not t_b: + if not adj_a: + if not adj_b: + grad_a = matmul(grad, b, adjoint_b=True, type=dtype) + grad_b = matmul(a, grad, adjoint_a=True, type=dtype) + else: + grad_a = matmul(grad, b, type=dtype) + grad_b = matmul(grad, a, adjoint_a=True, type=dtype) + else: + if not adj_b: + grad_a = matmul(b, grad, adjoint_b=True, type=dtype) + grad_b = matmul(a, grad, type=dtype) + else: + grad_a = matmul(b, grad, adjoint_a=True, adjoint_b=True, type=dtype) + grad_b = matmul(grad, a, adjoint_a=True, adjoint_b=True, type=dtype) + elif not adj_a and not adj_b: + if not t_a and t_b: + grad_a = matmul(grad, conj(b), type=dtype) + grad_b = matmul(grad, conj(a), transpose_a=True, type=dtype) + elif t_a and not t_b: + grad_a = matmul(conj(b), grad, transpose_b=True, type=dtype) + grad_b = matmul(conj(a), grad, type=dtype) + else: + grad_a = matmul(b, grad, adjoint_a=True, transpose_b=True, type=dtype) + grad_b = matmul(grad, a, transpose_a=True, adjoint_b=True, type=dtype) + elif adj_a and t_b: + grad_a = matmul(b, grad, transpose_a=True, adjoint_b=True, type=dtype) + grad_b = matmul(grad, a, transpose_a=True, transpose_b=True, type=dtype) + elif t_a and adj_b: + grad_a = matmul(b, grad, transpose_a=True, transpose_b=True, type=dtype) + grad_b = matmul(grad, a, adjoint_a=True, transpose_b=True, type=dtype) + + # TODO(tabakg): There should be a C++ function for sparse-sparse + # multiplication with pre-determined indices, instead of pruning after the + # multiplication. 
+ return (_PruneCSRMatrix(grad_a, a), _PruneCSRMatrix(grad_b, b)) + + +@ops.RegisterGradient("SparseMatrixMul") +def _SparseMatrixMulGrad(op: ops.Operation, grad): + """Gradient for sparse_matrix_mul op.""" + # input to sparse_matrix_mul is (A, B) with CSR A and dense B. + # Output is CSR: + # C = A .* B + del op + del grad + raise NotImplementedError diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..9e5ab10faaabd41b7a85aa39313bdb9db6c0fd40 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py @@ -0,0 +1,376 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""CSR Sparse Matrix Operations.""" + +import abc +import collections + +# pylint: disable=g-direct-tensorflow-import, wildcard-import +from tensorflow.python.eager import context +from tensorflow.python.framework import cpp_shape_inference_pb2 +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.ops.linalg.sparse import gen_sparse_csr_matrix_ops as sm_ops +from tensorflow.python.ops.linalg.sparse.gen_sparse_csr_matrix_ops import * + + +__all__ = [ + "SparseMatrix", + "CSRSparseMatrix", + "matmul", + "dense_shape_and_type", +] +# pylint: disable=invalid-name +__all__ += [_x for _x in dir(sm_ops) if not _x.startswith("_")] + + +class DenseShapeAndType( + collections.namedtuple("DenseShapeAndType", ("shape", "dtype"))): + pass + + +def _get_handle_data(tensor): + return resource_variable_ops.get_eager_safe_handle_data(tensor) + + +def _create_handle_data_proto(shape_proto, dtype_enum): + """Create handle data based on shape and dtype protos.""" + variant_shape_and_type_data = \ + cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData() + variant_shape_and_type_data.is_set = True + # NOTE(ebrevdo): shape_and_type lacks append() in some versions of protobuf. 
+ variant_shape_and_type_data.shape_and_type.extend([ + cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType( + shape=shape_proto, dtype=dtype_enum) + ]) + return variant_shape_and_type_data + + +def _make_handle_data(tensor): + """Create handle data based on tensor shape and dtype.""" + return _create_handle_data_proto(tensor.shape.as_proto(), + tensor.dtype.as_datatype_enum) + + +def get_shape_and_type(matrix): + """Return matrix's shape and type if available.""" + handle_data = getattr(matrix, "_handle_data", None) + if handle_data is None: + return None + if len(handle_data.shape_and_type) != 1: + raise ValueError( + "shape_and_type array in _handle_data must have length one, but saw: %d" + % len(handle_data.shape_and_type)) + return handle_data.shape_and_type[0] + + +def dense_shape_and_type(matrix): + """Get dense shape and dtype of the tf.Tensor containing the matrix. + + Args: + matrix: A `tf.Tensor` of type `tf.variant` storing a sparse matrix. + + Returns: + An instance of `ShapeAndType` with properties `shape` (a `tf.TensorShape`) + and `dtype` (a `tf.DType`). + + Raises: + TypeError: if `matrix` is not a tensor or its dtype is not variant. + ValueError: if `matrix` lacks static handle data containing the dense + shape and dtype. 
+ """ + if not isinstance(matrix, tensor_lib.Tensor): + raise TypeError("matrix should be a tensor, but saw: %s" % (matrix,)) + if matrix.dtype != dtypes.variant: + raise TypeError( + "expected matrix to be type tf.variant, but saw: %s" % (matrix.dtype,)) + handle_data = _get_handle_data(matrix) + if not handle_data or not handle_data.is_set: + raise ValueError("matrix has missing handle data: %s" % (matrix,)) + if len(handle_data.shape_and_type) != 1: + raise ValueError("len(matrix.handle_data.shape_and_type) != 1: '%s'" % + (handle_data.shape_and_type,)) + return DenseShapeAndType( + tensor_shape.TensorShape(handle_data.shape_and_type[0].shape), + dtypes.DType(handle_data.shape_and_type[0].dtype)) + + +def matmul_shape_inference(a, b, c, transpose_a, transpose_b, adjoint_a, + adjoint_b): + """Helper function for matmul to set the result matrix's handle data.""" + c_handle = getattr(c, "_handle_data", None) + a_shape_and_type = get_shape_and_type(a) + b_shape_and_type = get_shape_and_type(b) + if (c_handle is None and a_shape_and_type is not None and + b_shape_and_type is not None): + + transpose_a = transpose_a or adjoint_a + transpose_b = transpose_b or adjoint_b + + a_shape = a_shape_and_type.shape + b_shape = b_shape_and_type.shape + rank = len(a_shape.dim) + + # Creates the output shape. + c_rows = a_shape.dim[rank - (1 if transpose_a else 2)].size + c_cols = b_shape.dim[rank - (2 if transpose_b else 1)].size + c_shape = tensor_shape.TensorShape(a_shape) + c_shape = tensor_shape.TensorShape(c_shape[:rank - 2] + [c_rows, c_cols]) + c_handle = _create_handle_data_proto(c_shape.as_proto(), + a_shape_and_type.dtype) + return c_handle + + +def matmul(a, + b, + transpose_a=False, + transpose_b=False, + adjoint_a=False, + adjoint_b=False, + name=None): + """Perform a sparse matrix matmul between `a` and `b`. + + Performs a contraction between `a` and `b` along the two innermost dimensions. 
+ If both `a` and `b` are instances of `SparseMatrix`, returns a new instance + of `SparseMatrix` (same type as `a`). If one is not an instance of + `SparseMatrix`, returns a dense `Tensor`: + + ``` + c = opA(a) . opB(b) + ``` + where `opA` (resp. `opB`) is the transpose or hermitian transpose depending + on the values of `transpose_a` (resp. `transpose_b`) and `adjoint_a` + (resp. `adjoint_b`). + + Args: + a: `Tensor` or `SparseMatrix`, having rank `2` or `3`. + b: `Tensor` or `SparseMatrix`, having rank `2` or `3`. + transpose_a: Python `bool`. + transpose_b: Python `bool`. + adjoint_a: Python `bool`. + adjoint_b: Python `bool`. + name: Optional name to use when creating ops. + + Returns: + A `SparseMatrix` if both `a` and `b` are instances of `SparseMatrix`, + otherwise a dense `Tensor`. + """ + if not isinstance(a, SparseMatrix) and not isinstance(b, SparseMatrix): + return math_ops.matmul( + a, + b, + transpose_a=transpose_a, + transpose_b=transpose_b, + adjoint_a=adjoint_a, + adjoint_b=adjoint_b, + name=name) + + # pylint: disable=protected-access + a_matrix = a._matrix if isinstance(a, SparseMatrix) else a + b_matrix = b._matrix if isinstance(b, SparseMatrix) else b + with ops.name_scope(name, "SparseMatrixMatMul", [a_matrix, b_matrix]): + if isinstance(a, SparseMatrix) and isinstance(b, SparseMatrix): + if not (isinstance(a, type(b)) or isinstance(b, type(a))): + raise TypeError("SparseMatrix types don't inherit from each other: " + "%s and %s" % (type(a), type(b))) + c = sm_ops.sparse_matrix_sparse_mat_mul( + a_matrix, + b_matrix, + transpose_a=transpose_a, + transpose_b=transpose_b, + adjoint_a=adjoint_a, + adjoint_b=adjoint_b, + type=a.dtype) + + # In eager mode, shape inference functions are not called, and the output + # shape is not set. We have to infer the output shape here. + # TODO(penporn): Set this from the C++ kernel instead. 
+ c_handle = matmul_shape_inference(a_matrix, b_matrix, c, transpose_a, + transpose_b, adjoint_a, adjoint_b) + return a._from_matrix(c, handle_data=c_handle) + + elif isinstance(a, SparseMatrix): + return sm_ops.sparse_matrix_mat_mul( + a_matrix, + b, + transpose_a=transpose_a, + transpose_b=transpose_b, + adjoint_a=adjoint_a, + adjoint_b=adjoint_b) + else: + # opA(A) . opB(B) = t(nopB(B) . nopA(A)) + if not adjoint_a and not adjoint_b: + return sm_ops.sparse_matrix_mat_mul( + b_matrix, + a, + transpose_a=not transpose_b, + transpose_b=not transpose_a, + transpose_output=True) + elif not transpose_a and not transpose_b: + return sm_ops.sparse_matrix_mat_mul( + b_matrix, + a, + adjoint_a=not adjoint_b, + adjoint_b=not adjoint_a, + transpose_output=True, + conjugate_output=True) + else: + return sm_ops.sparse_matrix_mat_mul( + b_matrix, + math_ops.conj(a), + transpose_output=True, + conjugate_output=adjoint_b) + + +class SparseMatrix(metaclass=abc.ABCMeta): + """Abstract class for sparse matrix types.""" + + @abc.abstractmethod + def __init__(self): + self._eager_mode = context.executing_eagerly() + + @abc.abstractproperty + def _matrix(self): + pass + + @abc.abstractmethod + def _from_matrix(self, matrix, handle_data=None): + pass + + @abc.abstractmethod + def to_dense(self): + pass + + @abc.abstractmethod + def to_sparse_tensor(self): + pass + + @property + def graph(self): + return self._matrix.graph + + @property + def shape(self): + return dense_shape_and_type(self._matrix).shape + + @property + def dtype(self): + return dense_shape_and_type(self._matrix).dtype + + @property + def eager_handle_data(self): + """Return the matrix's handle data iff in eager mode.""" + return _get_handle_data(self._matrix) if self._eager_mode else None + + def conj(self): + return self._from_matrix( + math_ops.conj(self._matrix), self.eager_handle_data) + + def hermitian_transpose(self): + """Return the hermitian transpose of the matrix.""" + return self._from_matrix( + 
sm_ops.sparse_matrix_transpose( + self._matrix, conjugate=True, type=self.dtype), + self.eager_handle_data) + + def nnz(self): + """Number of stored values, including explicit zeros.""" + return sm_ops.sparse_matrix_nnz(self._matrix) + + nonzero = nnz + + def sorted_indices(self): + # TODO(ebrevdo): A more efficient implementation? + return self.to_sparse_tensor().indices + + def transpose(self): + return self._from_matrix( + sm_ops.sparse_matrix_transpose(self._matrix, type=self.dtype), + self.eager_handle_data) + + +class CSRSparseMatrix(SparseMatrix): + """(Optionally batched) CSR Sparse Matrix.""" + + def __init__(self, value, indices=None, name=None): + """Construct a CSRSparseMatrix from a dense matrix or SparseTensor. + + Args: + value: A dense `2D` or `3D` Tensor or `SparseTensor`. + indices: The nonzero indices of `value` + (if `value` is not a `SparseTensor`). + name: Optional op name. + + Raises: + ValueError: if `value` is a `SparseTensor` and `indices` is not `None`. + """ + del name # Unused. + super(CSRSparseMatrix, self).__init__() + if isinstance(value, sparse_tensor.SparseTensor): + if indices is not None: + raise ValueError("indices must be None if value is a SparseTensor.") + self._dtype = value.dtype + self._csr_matrix = sm_ops.sparse_tensor_to_csr_sparse_matrix( + indices=value.indices, + values=value.values, + dense_shape=value.dense_shape) + else: + value = ops.convert_to_tensor(value) + self._dtype = value.dtype + if indices is not None: + indices = ops.convert_to_tensor(indices, dtype=dtypes.int64) + else: + indices = array_ops.stop_gradient(array_ops.where(value)) + self._csr_matrix = sm_ops.dense_to_csr_sparse_matrix(value, indices) + + # Eager mode doesn't call shape inference functions, so we have to set the + # shape and dtype handle data directly. 
+ if self._eager_mode: + # pylint: disable=protected-access + self._csr_matrix._handle_data = _make_handle_data(value) + # pylint: enable=protected-access + + @property + def _matrix(self): + return self._csr_matrix + + def _from_matrix(self, matrix, handle_data=None): + assert ( + isinstance(matrix, tensor_lib.Tensor) and matrix.dtype == dtypes.variant + ) + ret = type(self).__new__(type(self)) + # pylint: disable=protected-access + ret._dtype = self._dtype + if self._eager_mode: + if matrix._handle_data is None: + matrix._handle_data = handle_data + assert matrix._handle_data is not None + ret._csr_matrix = matrix + # pylint: enable=protected-access + return ret + + def to_dense(self): + return sm_ops.csr_sparse_matrix_to_dense(self._matrix, type=self.dtype) + + def to_sparse_tensor(self): + r = sm_ops.csr_sparse_matrix_to_sparse_tensor(self._matrix, type=self.dtype) + return sparse_tensor.SparseTensor( + indices=r.indices, values=r.values, dense_shape=r.dense_shape) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff2391f8131d574975faf416cc34fe746e7e71b5 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mel_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mel_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f42978742d318fed1bb0d0ed806921e1fd0a1ab Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mel_ops.cpython-310.pyc differ diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mfcc_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mfcc_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89ea88898b95e745cb4f33893e9eaea823785353 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/mfcc_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/reconstruction_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/reconstruction_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84aa7c65075f7351956ec635bf2c11fde1f104cf Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/reconstruction_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/shape_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/shape_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9739a1502931a6285ed10f985563872250a2c636 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/shape_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/signal.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/signal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..928aebfe422db5bc46af7bec4b4ae363906f46f9 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/signal.cpython-310.pyc differ diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/util_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/util_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb28c21bc6094d4c3361f5e55a83cc7ea5cbe19d Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/util_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/window_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/window_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6676a9a2e5d4f5729b15370431d611f13937199e Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/signal/__pycache__/window_ops.cpython-310.pyc differ